1 /*
2  * drivers/misc/spear13xx_pcie_gadget.c
3  *
4  * Copyright (C) 2010 ST Microelectronics
5  * Pratyush Anand<pratyush.anand@gmail.com>
6  *
7  * This file is licensed under the terms of the GNU General Public
8  * License version 2. This program is licensed "as is" without any
9  * warranty of any kind, whether express or implied.
10  */
11 
12 #include <linux/device.h>
13 #include <linux/clk.h>
14 #include <linux/slab.h>
15 #include <linux/delay.h>
16 #include <linux/io.h>
17 #include <linux/interrupt.h>
18 #include <linux/irq.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/pci_regs.h>
23 #include <linux/configfs.h>
24 #include <mach/pcie.h>
25 #include <mach/misc_regs.h>
26 
27 #define IN0_MEM_SIZE	(200 * 1024 * 1024 - 1)
/*
 * In the current implementation, address translation is done using IN0 only,
 * so the IN1 start address and the IN0 end address have been kept the same.
 */
31 #define IN1_MEM_SIZE	(0 * 1024 * 1024 - 1)
32 #define IN_IO_SIZE	(20 * 1024 * 1024 - 1)
33 #define IN_CFG0_SIZE	(12 * 1024 * 1024 - 1)
34 #define IN_CFG1_SIZE	(12 * 1024 * 1024 - 1)
35 #define IN_MSG_SIZE	(12 * 1024 * 1024 - 1)
/* Keep default BAR size as 4K */
/* AORAM would be mapped by default */
38 #define INBOUND_ADDR_MASK	(SPEAR13XX_SYSRAM1_SIZE - 1)
39 
40 #define INT_TYPE_NO_INT	0
41 #define INT_TYPE_INTX	1
42 #define INT_TYPE_MSI	2
/* Per-controller runtime state for one PCIe endpoint (gadget). */
struct spear_pcie_gadget_config {
	void __iomem *base;		/* bus address of the DBI resource, stored as a
					 * cookie (see probe) — used for outbound setup */
	void __iomem *va_app_base;	/* mapped application (glue) registers */
	void __iomem *va_dbi_base;	/* mapped DBI (own config space) registers */
	char int_type[10];		/* "INTA" or "MSI", set via configfs */
	ulong requested_msi;		/* MSI vector count requested by the user */
	ulong configured_msi;		/* MSI vector count the host actually enabled */
	ulong bar0_size;		/* current BAR0 aperture size in bytes */
	ulong bar0_rw_offset;		/* word offset used by bar0_data show/store */
	void __iomem *va_bar0_address;	/* mapping of the memory behind BAR0 */
};
54 
/* configfs subsystem wrapper: one gadget target per controller instance. */
struct pcie_gadget_target {
	struct configfs_subsystem subsys;	/* registered in probe */
	struct spear_pcie_gadget_config config;	/* embedded controller state */
};
59 
/*
 * Attribute wrapper pairing a configfs attribute with its show/store
 * callbacks.
 * NOTE(review): the visible code uses the CONFIGFS_ATTR() macros instead,
 * which generate their own attribute glue — this struct appears unused
 * within this file; confirm before removing.
 */
struct pcie_gadget_target_attr {
	struct configfs_attribute	attr;
	ssize_t		(*show)(struct spear_pcie_gadget_config *config,
						char *buf);
	ssize_t		(*store)(struct spear_pcie_gadget_config *config,
						 const char *buf,
						 size_t count);
};
68 
/* Turn on DBI access on both the AXI read- and write-misc registers. */
static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
{
	u32 mask = 1 << AXI_OP_DBI_ACCESS_ID;

	writel(readl(&app_reg->slv_armisc) | mask, &app_reg->slv_armisc);
	writel(readl(&app_reg->slv_awmisc) | mask, &app_reg->slv_awmisc);
}
78 
/* Turn off DBI access on both the AXI read- and write-misc registers. */
static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
{
	u32 mask = 1 << AXI_OP_DBI_ACCESS_ID;

	writel(readl(&app_reg->slv_armisc) & ~mask, &app_reg->slv_armisc);
	writel(readl(&app_reg->slv_awmisc) & ~mask, &app_reg->slv_awmisc);
}
88 
/*
 * Read a 1-, 2- or 4-byte value from the controller's own configuration
 * space at offset @where through the DBI interface.  The access is always
 * a word read; sub-word sizes are extracted from the aligned word.
 */
static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
		int where, int size, u32 *val)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong addr = (ulong)config->va_dbi_base + (where & ~0x3);

	enable_dbi_access(app_reg);

	*val = readl(addr);
	switch (size) {
	case 1:
		*val = (*val >> (8 * (where & 3))) & 0xff;
		break;
	case 2:
		*val = (*val >> (8 * (where & 3))) & 0xffff;
		break;
	}

	disable_dbi_access(app_reg);
}
110 
/*
 * Write a 1-, 2- or 4-byte value to the controller's own configuration
 * space at offset @where through the DBI interface.  Sub-word writes go
 * to the correct byte lane within the aligned word.
 */
static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
		int where, int size, u32 val)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
	ulong addr = (ulong)config->va_dbi_base + (where & ~0x3);

	enable_dbi_access(app_reg);

	switch (size) {
	case 4:
		writel(val, addr);
		break;
	case 2:
		writew(val, addr + (where & 2));
		break;
	case 1:
		writeb(val, addr + (where & 3));
		break;
	}

	disable_dbi_access(app_reg);
}
132 
133 #define PCI_FIND_CAP_TTL	48
134 
/*
 * Walk the capability list of our own config space starting at the
 * pointer stored at @pos, looking for capability id @cap.  @ttl bounds
 * the walk so a corrupted (cyclic) list cannot loop forever.  Returns
 * the capability's offset, or 0 if not found.
 */
static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
		u32 pos, int cap, int *ttl)
{
	u32 cap_id;

	for (;;) {
		if (!(*ttl)--)
			break;
		/* follow the "next capability" pointer */
		spear_dbi_read_reg(config, pos, 1, &pos);
		if (pos < 0x40)
			break;		/* pointers below 0x40 are invalid */
		pos &= ~3;
		spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &cap_id);
		if (cap_id == 0xff)
			break;		/* end of list / device error */
		if (cap_id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}
154 
/* Capability-list search with the default traversal budget. */
static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
			u32 pos, int cap)
{
	int budget = PCI_FIND_CAP_TTL;

	return pci_find_own_next_cap_ttl(config, pos, cap, &budget);
}
162 
/*
 * Return the config-space offset of the first capability pointer for the
 * given header type, or 0 when the device advertises no capability list.
 *
 * Fix: dropped the unreachable "return 0;" that followed the exhaustive
 * switch (every case, including default, already returns).
 */
static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
				u8 hdr_type)
{
	u32 status;

	spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}
}
184 
185 /*
186  * Tell if a device supports a given PCI capability.
187  * Returns the address of the requested capability structure within the
188  * device's PCI configuration space or 0 in case the device does not
189  * support it. Possible values for @cap:
190  *
191  * %PCI_CAP_ID_PM	Power Management
192  * %PCI_CAP_ID_AGP	Accelerated Graphics Port
193  * %PCI_CAP_ID_VPD	Vital Product Data
194  * %PCI_CAP_ID_SLOTID	Slot Identification
195  * %PCI_CAP_ID_MSI	Message Signalled Interrupts
196  * %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
197  * %PCI_CAP_ID_PCIX	PCI-X
198  * %PCI_CAP_ID_EXP	PCI Express
199  */
pci_find_own_capability(struct spear_pcie_gadget_config * config,int cap)200 static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
201 		int cap)
202 {
203 	u32 pos;
204 	u32 hdr_type;
205 
206 	spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
207 
208 	pos = pci_find_own_cap_start(config, hdr_type);
209 	if (pos)
210 		pos = pci_find_own_next_cap(config, pos, cap);
211 
212 	return pos;
213 }
214 
spear_pcie_gadget_irq(int irq,void * dev_id)215 static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
216 {
217 	return 0;
218 }
219 
220 /*
221  * configfs interfaces show/store functions
222  */
223 
to_target(struct config_item * item)224 static struct pcie_gadget_target *to_target(struct config_item *item)
225 {
226 	return item ?
227 		container_of(to_configfs_subsystem(to_config_group(item)),
228 				struct pcie_gadget_target, subsys) : NULL;
229 }
230 
pcie_gadget_link_show(struct config_item * item,char * buf)231 static ssize_t pcie_gadget_link_show(struct config_item *item, char *buf)
232 {
233 	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
234 
235 	if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
236 		return sprintf(buf, "UP");
237 	else
238 		return sprintf(buf, "DOWN");
239 }
240 
pcie_gadget_link_store(struct config_item * item,const char * buf,size_t count)241 static ssize_t pcie_gadget_link_store(struct config_item *item,
242 		const char *buf, size_t count)
243 {
244 	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
245 
246 	if (sysfs_streq(buf, "UP"))
247 		writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
248 			&app_reg->app_ctrl_0);
249 	else if (sysfs_streq(buf, "DOWN"))
250 		writel(readl(&app_reg->app_ctrl_0)
251 				& ~(1 << APP_LTSSM_ENABLE_ID),
252 				&app_reg->app_ctrl_0);
253 	else
254 		return -EINVAL;
255 	return count;
256 }
257 
pcie_gadget_int_type_show(struct config_item * item,char * buf)258 static ssize_t pcie_gadget_int_type_show(struct config_item *item, char *buf)
259 {
260 	return sprintf(buf, "%s", to_target(item)->int_type);
261 }
262 
pcie_gadget_int_type_store(struct config_item * item,const char * buf,size_t count)263 static ssize_t pcie_gadget_int_type_store(struct config_item *item,
264 		const char *buf, size_t count)
265 {
266 	struct spear_pcie_gadget_config *config = to_target(item)
267 	u32 cap, vec, flags;
268 	ulong vector;
269 
270 	if (sysfs_streq(buf, "INTA"))
271 		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
272 
273 	else if (sysfs_streq(buf, "MSI")) {
274 		vector = config->requested_msi;
275 		vec = 0;
276 		while (vector > 1) {
277 			vector /= 2;
278 			vec++;
279 		}
280 		spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
281 		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
282 		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
283 		flags &= ~PCI_MSI_FLAGS_QMASK;
284 		flags |= vec << 1;
285 		spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
286 	} else
287 		return -EINVAL;
288 
289 	strcpy(config->int_type, buf);
290 
291 	return count;
292 }
293 
pcie_gadget_no_of_msi_show(struct config_item * item,char * buf)294 static ssize_t pcie_gadget_no_of_msi_show(struct config_item *item, char *buf)
295 {
296 	struct spear_pcie_gadget_config *config = to_target(item)
297 	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
298 	u32 cap, vec, flags;
299 	ulong vector;
300 
301 	if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
302 			!= (1 << CFG_MSI_EN_ID))
303 		vector = 0;
304 	else {
305 		cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
306 		spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
307 		flags &= ~PCI_MSI_FLAGS_QSIZE;
308 		vec = flags >> 4;
309 		vector = 1;
310 		while (vec--)
311 			vector *= 2;
312 	}
313 	config->configured_msi = vector;
314 
315 	return sprintf(buf, "%lu", vector);
316 }
317 
pcie_gadget_no_of_msi_store(struct config_item * item,const char * buf,size_t count)318 static ssize_t pcie_gadget_no_of_msi_store(struct config_item *item,
319 		const char *buf, size_t count)
320 {
321 	int ret;
322 
323 	ret = kstrtoul(buf, 0, &to_target(item)->requested_msi);
324 	if (ret)
325 		return ret;
326 
327 	if (config->requested_msi > 32)
328 		config->requested_msi = 32;
329 
330 	return count;
331 }
332 
pcie_gadget_inta_store(struct config_item * item,const char * buf,size_t count)333 static ssize_t pcie_gadget_inta_store(struct config_item *item,
334 		const char *buf, size_t count)
335 {
336 	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
337 	ulong en;
338 	int ret;
339 
340 	ret = kstrtoul(buf, 0, &en);
341 	if (ret)
342 		return ret;
343 
344 	if (en)
345 		writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
346 				&app_reg->app_ctrl_0);
347 	else
348 		writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
349 				&app_reg->app_ctrl_0);
350 
351 	return count;
352 }
353 
pcie_gadget_send_msi_store(struct config_item * item,const char * buf,size_t count)354 static ssize_t pcie_gadget_send_msi_store(struct config_item *item,
355 		const char *buf, size_t count)
356 {
357 	struct spear_pcie_gadget_config *config = to_target(item)
358 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
359 	ulong vector;
360 	u32 ven_msi;
361 	int ret;
362 
363 	ret = kstrtoul(buf, 0, &vector);
364 	if (ret)
365 		return ret;
366 
367 	if (!config->configured_msi)
368 		return -EINVAL;
369 
370 	if (vector >= config->configured_msi)
371 		return -EINVAL;
372 
373 	ven_msi = readl(&app_reg->ven_msi_1);
374 	ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
375 	ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
376 	ven_msi &= ~VEN_MSI_TC_MASK;
377 	ven_msi |= 0 << VEN_MSI_TC_ID;
378 	ven_msi &= ~VEN_MSI_VECTOR_MASK;
379 	ven_msi |= vector << VEN_MSI_VECTOR_ID;
380 
381 	/* generating interrupt for msi vector */
382 	ven_msi |= VEN_MSI_REQ_EN;
383 	writel(ven_msi, &app_reg->ven_msi_1);
384 	udelay(1);
385 	ven_msi &= ~VEN_MSI_REQ_EN;
386 	writel(ven_msi, &app_reg->ven_msi_1);
387 
388 	return count;
389 }
390 
pcie_gadget_vendor_id_show(struct config_item * item,char * buf)391 static ssize_t pcie_gadget_vendor_id_show(struct config_item *item, char *buf)
392 {
393 	u32 id;
394 
395 	spear_dbi_read_reg(to_target(item), PCI_VENDOR_ID, 2, &id);
396 
397 	return sprintf(buf, "%x", id);
398 }
399 
pcie_gadget_vendor_id_store(struct config_item * item,const char * buf,size_t count)400 static ssize_t pcie_gadget_vendor_id_store(struct config_item *item,
401 		const char *buf, size_t count)
402 {
403 	ulong id;
404 	int ret;
405 
406 	ret = kstrtoul(buf, 0, &id);
407 	if (ret)
408 		return ret;
409 
410 	spear_dbi_write_reg(to_target(item), PCI_VENDOR_ID, 2, id);
411 
412 	return count;
413 }
414 
pcie_gadget_device_id_show(struct config_item * item,char * buf)415 static ssize_t pcie_gadget_device_id_show(struct config_item *item, char *buf)
416 {
417 	u32 id;
418 
419 	spear_dbi_read_reg(to_target(item), PCI_DEVICE_ID, 2, &id);
420 
421 	return sprintf(buf, "%x", id);
422 }
423 
pcie_gadget_device_id_store(struct config_item * item,const char * buf,size_t count)424 static ssize_t pcie_gadget_device_id_store(struct config_item *item,
425 		const char *buf, size_t count)
426 {
427 	ulong id;
428 	int ret;
429 
430 	ret = kstrtoul(buf, 0, &id);
431 	if (ret)
432 		return ret;
433 
434 	spear_dbi_write_reg(to_target(item), PCI_DEVICE_ID, 2, id);
435 
436 	return count;
437 }
438 
pcie_gadget_bar0_size_show(struct config_item * item,char * buf)439 static ssize_t pcie_gadget_bar0_size_show(struct config_item *item, char *buf)
440 {
441 	return sprintf(buf, "%lx", to_target(item)->bar0_size);
442 }
443 
pcie_gadget_bar0_size_store(struct config_item * item,const char * buf,size_t count)444 static ssize_t pcie_gadget_bar0_size_store(struct config_item *item,
445 		const char *buf, size_t count)
446 {
447 	struct spear_pcie_gadget_config *config = to_target(item)
448 	ulong size;
449 	u32 pos, pos1;
450 	u32 no_of_bit = 0;
451 	int ret;
452 
453 	ret = kstrtoul(buf, 0, &size);
454 	if (ret)
455 		return ret;
456 
457 	/* min bar size is 256 */
458 	if (size <= 0x100)
459 		size = 0x100;
460 	/* max bar size is 1MB*/
461 	else if (size >= 0x100000)
462 		size = 0x100000;
463 	else {
464 		pos = 0;
465 		pos1 = 0;
466 		while (pos < 21) {
467 			pos = find_next_bit((ulong *)&size, 21, pos);
468 			if (pos != 21)
469 				pos1 = pos + 1;
470 			pos++;
471 			no_of_bit++;
472 		}
473 		if (no_of_bit == 2)
474 			pos1--;
475 
476 		size = 1 << pos1;
477 	}
478 	config->bar0_size = size;
479 	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);
480 
481 	return count;
482 }
483 
pcie_gadget_bar0_address_show(struct config_item * item,char * buf)484 static ssize_t pcie_gadget_bar0_address_show(struct config_item *item,
485 		char *buf)
486 {
487 	struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base;
488 
489 	u32 address = readl(&app_reg->pim0_mem_addr_start);
490 
491 	return sprintf(buf, "%x", address);
492 }
493 
pcie_gadget_bar0_address_store(struct config_item * item,const char * buf,size_t count)494 static ssize_t pcie_gadget_bar0_address_store(struct config_item *item,
495 		const char *buf, size_t count)
496 {
497 	struct spear_pcie_gadget_config *config = to_target(item)
498 	struct pcie_app_reg __iomem *app_reg = config->va_app_base;
499 	ulong address;
500 	int ret;
501 
502 	ret = kstrtoul(buf, 0, &address);
503 	if (ret)
504 		return ret;
505 
506 	address &= ~(config->bar0_size - 1);
507 	if (config->va_bar0_address)
508 		iounmap(config->va_bar0_address);
509 	config->va_bar0_address = ioremap(address, config->bar0_size);
510 	if (!config->va_bar0_address)
511 		return -ENOMEM;
512 
513 	writel(address, &app_reg->pim0_mem_addr_start);
514 
515 	return count;
516 }
517 
pcie_gadget_bar0_rw_offset_show(struct config_item * item,char * buf)518 static ssize_t pcie_gadget_bar0_rw_offset_show(struct config_item *item,
519 		char *buf)
520 {
521 	return sprintf(buf, "%lx", to_target(item)->bar0_rw_offset);
522 }
523 
pcie_gadget_bar0_rw_offset_store(struct config_item * item,const char * buf,size_t count)524 static ssize_t pcie_gadget_bar0_rw_offset_store(struct config_item *item,
525 		const char *buf, size_t count)
526 {
527 	ulong offset;
528 	int ret;
529 
530 	ret = kstrtoul(buf, 0, &offset);
531 	if (ret)
532 		return ret;
533 
534 	if (offset % 4)
535 		return -EINVAL;
536 
537 	to_target(item)->bar0_rw_offset = offset;
538 
539 	return count;
540 }
541 
pcie_gadget_bar0_data_show(struct config_item * item,char * buf)542 static ssize_t pcie_gadget_bar0_data_show(struct config_item *item, char *buf)
543 {
544 	struct spear_pcie_gadget_config *config = to_target(item)
545 	ulong data;
546 
547 	if (!config->va_bar0_address)
548 		return -ENOMEM;
549 
550 	data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);
551 
552 	return sprintf(buf, "%lx", data);
553 }
554 
pcie_gadget_bar0_data_store(struct config_item * item,const char * buf,size_t count)555 static ssize_t pcie_gadget_bar0_data_store(struct config_item *item,
556 		const char *buf, size_t count)
557 {
558 	struct spear_pcie_gadget_config *config = to_target(item)
559 	ulong data;
560 	int ret;
561 
562 	ret = kstrtoul(buf, 0, &data);
563 	if (ret)
564 		return ret;
565 
566 	if (!config->va_bar0_address)
567 		return -ENOMEM;
568 
569 	writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);
570 
571 	return count;
572 }
573 
/* Wire the show/store handlers above into configfs attributes. */
CONFIGFS_ATTR(pcie_gadget_, link);
CONFIGFS_ATTR(pcie_gadget_, int_type);
CONFIGFS_ATTR(pcie_gadget_, no_of_msi);
CONFIGFS_ATTR_WO(pcie_gadget_, inta);
CONFIGFS_ATTR_WO(pcie_gadget_, send_msi);
CONFIGFS_ATTR(pcie_gadget_, vendor_id);
CONFIGFS_ATTR(pcie_gadget_, device_id);
CONFIGFS_ATTR(pcie_gadget_, bar0_size);
CONFIGFS_ATTR(pcie_gadget_, bar0_address);
CONFIGFS_ATTR(pcie_gadget_, bar0_rw_offset);
CONFIGFS_ATTR(pcie_gadget_, bar0_data);

/* NULL-terminated attribute list handed to configfs via the type below. */
static struct configfs_attribute *pcie_gadget_target_attrs[] = {
	&pcie_gadget_attr_link,
	&pcie_gadget_attr_int_type,
	&pcie_gadget_attr_no_of_msi,
	&pcie_gadget_attr_inta,
	&pcie_gadget_attr_send_msi,
	&pcie_gadget_attr_vendor_id,
	&pcie_gadget_attr_device_id,
	&pcie_gadget_attr_bar0_size,
	&pcie_gadget_attr_bar0_address,
	&pcie_gadget_attr_bar0_rw_offset,
	&pcie_gadget_attr_bar0_data,
	NULL,
};

/* Item type for the gadget's configfs subsystem group (see probe). */
static struct config_item_type pcie_gadget_target_type = {
	.ct_attrs		= pcie_gadget_target_attrs,
	.ct_owner		= THIS_MODULE,
};
605 
/*
 * Program the controller's default endpoint configuration: outbound
 * address-translation windows laid out back to back, a default BAR0
 * backed by SYSRAM1 (AORAM), endpoint device type, and INTA as the
 * default interrupt type.
 *
 * NOTE(review): the "app_reg-><win>_addr_start + SIZE" expressions below
 * read __iomem struct members directly rather than via readl(), and
 * writel(config->base, ...) passes a pointer where a u32 register value
 * is expected — preserved as-is; confirm against the hardware headers.
 */
static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
{
	struct pcie_app_reg __iomem *app_reg = config->va_app_base;

	/* Set up registers for outbound translation: IN0/IN1 memory, I/O,
	 * CFG0/CFG1 and MSG windows, each starting where the previous ends. */

	writel(config->base, &app_reg->in0_mem_addr_start);
	writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
			&app_reg->in0_mem_addr_limit);
	writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
	writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
			&app_reg->in1_mem_addr_limit);
	writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
	writel(app_reg->in_io_addr_start + IN_IO_SIZE,
			&app_reg->in_io_addr_limit);
	writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
	writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
			&app_reg->in_cfg0_addr_limit);
	writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
	writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
			&app_reg->in_cfg1_addr_limit);
	writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
	writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
			&app_reg->in_msg_addr_limit);

	/* Outbound PCIe-side windows mirror the AXI-side starts above. */
	writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
	writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
	writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);

	/* Set up registers for inbound translation. */

	/* Keep AORAM mapped at BAR0 as default */
	config->bar0_size = INBOUND_ADDR_MASK + 1;
	spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
	/* 0xC: prefetchable, 64-bit memory BAR encoding — TODO confirm */
	spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
	config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
			config->bar0_size);

	writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
	writel(0, &app_reg->pim1_mem_addr_start);
	writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);

	/* NOTE(review): pim_io_addr_start is written twice; the second
	 * write may have been intended for a different register — confirm. */
	writel(0x0, &app_reg->pim_io_addr_start);
	writel(0x0, &app_reg->pim_io_addr_start);
	writel(0x0, &app_reg->pim_rom_addr_start);

	/* Endpoint mode, misc control enabled, register translation on. */
	writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
			| ((u32)1 << REG_TRANSLATION_ENABLE),
			&app_reg->app_ctrl_0);
	/* disable all rx interrupts */
	writel(0, &app_reg->int_mask);

	/* Select INTA as default */
	spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
}
661 
spear_pcie_gadget_probe(struct platform_device * pdev)662 static int spear_pcie_gadget_probe(struct platform_device *pdev)
663 {
664 	struct resource *res0, *res1;
665 	unsigned int status = 0;
666 	int irq;
667 	struct clk *clk;
668 	static struct pcie_gadget_target *target;
669 	struct spear_pcie_gadget_config *config;
670 	struct config_item		*cg_item;
671 	struct configfs_subsystem *subsys;
672 
673 	target = devm_kzalloc(&pdev->dev, sizeof(*target), GFP_KERNEL);
674 	if (!target) {
675 		dev_err(&pdev->dev, "out of memory\n");
676 		return -ENOMEM;
677 	}
678 
679 	cg_item = &target->subsys.su_group.cg_item;
680 	sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
681 	cg_item->ci_type	= &pcie_gadget_target_type;
682 	config = &target->config;
683 
684 	/* get resource for application registers*/
685 	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
686 	config->va_app_base = devm_ioremap_resource(&pdev->dev, res0);
687 	if (IS_ERR(config->va_app_base)) {
688 		dev_err(&pdev->dev, "ioremap fail\n");
689 		return PTR_ERR(config->va_app_base);
690 	}
691 
692 	/* get resource for dbi registers*/
693 	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
694 	config->base = (void __iomem *)res1->start;
695 
696 	config->va_dbi_base = devm_ioremap_resource(&pdev->dev, res1);
697 	if (IS_ERR(config->va_dbi_base)) {
698 		dev_err(&pdev->dev, "ioremap fail\n");
699 		return PTR_ERR(config->va_dbi_base);
700 	}
701 
702 	platform_set_drvdata(pdev, target);
703 
704 	irq = platform_get_irq(pdev, 0);
705 	if (irq < 0) {
706 		dev_err(&pdev->dev, "no update irq?\n");
707 		return irq;
708 	}
709 
710 	status = devm_request_irq(&pdev->dev, irq, spear_pcie_gadget_irq,
711 				  0, pdev->name, NULL);
712 	if (status) {
713 		dev_err(&pdev->dev,
714 			"pcie gadget interrupt IRQ%d already claimed\n", irq);
715 		return status;
716 	}
717 
718 	/* Register configfs hooks */
719 	subsys = &target->subsys;
720 	config_group_init(&subsys->su_group);
721 	mutex_init(&subsys->su_mutex);
722 	status = configfs_register_subsystem(subsys);
723 	if (status)
724 		return status;
725 
726 	/*
727 	 * init basic pcie application registers
728 	 * do not enable clock if it is PCIE0.Ideally , all controller should
729 	 * have been independent from others with respect to clock. But PCIE1
730 	 * and 2 depends on PCIE0.So PCIE0 clk is provided during board init.
731 	 */
732 	if (pdev->id == 1) {
733 		/*
734 		 * Ideally CFG Clock should have been also enabled here. But
735 		 * it is done currently during board init routne
736 		 */
737 		clk = clk_get_sys("pcie1", NULL);
738 		if (IS_ERR(clk)) {
739 			pr_err("%s:couldn't get clk for pcie1\n", __func__);
740 			return PTR_ERR(clk);
741 		}
742 		status = clk_enable(clk);
743 		if (status) {
744 			pr_err("%s:couldn't enable clk for pcie1\n", __func__);
745 			return status;
746 		}
747 	} else if (pdev->id == 2) {
748 		/*
749 		 * Ideally CFG Clock should have been also enabled here. But
750 		 * it is done currently during board init routne
751 		 */
752 		clk = clk_get_sys("pcie2", NULL);
753 		if (IS_ERR(clk)) {
754 			pr_err("%s:couldn't get clk for pcie2\n", __func__);
755 			return PTR_ERR(clk);
756 		}
757 		status = clk_enable(clk);
758 		if (status) {
759 			pr_err("%s:couldn't enable clk for pcie2\n", __func__);
760 			return status;
761 		}
762 	}
763 	spear13xx_pcie_device_init(config);
764 
765 	return 0;
766 }
767 
spear_pcie_gadget_remove(struct platform_device * pdev)768 static int spear_pcie_gadget_remove(struct platform_device *pdev)
769 {
770 	static struct pcie_gadget_target *target;
771 
772 	target = platform_get_drvdata(pdev);
773 
774 	configfs_unregister_subsystem(&target->subsys);
775 
776 	return 0;
777 }
778 
/* Nothing to quiesce on shutdown; empty hook kept for the driver core. */
static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
{
}
782 
/* Platform driver glue; matches devices named "pcie-gadget-spear". */
static struct platform_driver spear_pcie_gadget_driver = {
	.probe = spear_pcie_gadget_probe,
	.remove = spear_pcie_gadget_remove,
	.shutdown = spear_pcie_gadget_shutdown,
	.driver = {
		.name = "pcie-gadget-spear",
		/* NOTE(review): platform_driver_register() normally sets
		 * .bus itself; setting it here looks redundant — confirm. */
		.bus = &platform_bus_type
	},
};
792 
/* Standard module registration boilerplate. */
module_platform_driver(spear_pcie_gadget_driver);

MODULE_ALIAS("platform:pcie-gadget-spear");
MODULE_AUTHOR("Pratyush Anand");
MODULE_LICENSE("GPL");
798