1 /*
2 * PCIe driver for Marvell Armada 370 and Armada XP SoCs
3 *
4 * This file is licensed under the terms of the GNU General Public
5 * License version 2. This program is licensed "as is" without any
6 * warranty of any kind, whether express or implied.
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/pci.h>
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/gpio.h>
14 #include <linux/module.h>
15 #include <linux/mbus.h>
16 #include <linux/msi.h>
17 #include <linux/slab.h>
18 #include <linux/platform_device.h>
19 #include <linux/of_address.h>
20 #include <linux/of_irq.h>
21 #include <linux/of_gpio.h>
22 #include <linux/of_pci.h>
23 #include <linux/of_platform.h>
24
25 /*
26 * PCIe unit register offsets.
27 */
28 #define PCIE_DEV_ID_OFF 0x0000
29 #define PCIE_CMD_OFF 0x0004
30 #define PCIE_DEV_REV_OFF 0x0008
31 #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
32 #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
33 #define PCIE_CAP_PCIEXP 0x0060
34 #define PCIE_HEADER_LOG_4_OFF 0x0128
35 #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
36 #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
37 #define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
38 #define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4))
39 #define PCIE_WIN5_CTRL_OFF 0x1880
40 #define PCIE_WIN5_BASE_OFF 0x1884
41 #define PCIE_WIN5_REMAP_OFF 0x188c
42 #define PCIE_CONF_ADDR_OFF 0x18f8
43 #define PCIE_CONF_ADDR_EN 0x80000000
44 #define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
45 #define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
46 #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
47 #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
48 #define PCIE_CONF_ADDR(bus, devfn, where) \
49 (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
50 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
51 PCIE_CONF_ADDR_EN)
52 #define PCIE_CONF_DATA_OFF 0x18fc
53 #define PCIE_MASK_OFF 0x1910
54 #define PCIE_MASK_ENABLE_INTS 0x0f000000
55 #define PCIE_CTRL_OFF 0x1a00
56 #define PCIE_CTRL_X1_MODE 0x0001
57 #define PCIE_STAT_OFF 0x1a04
58 #define PCIE_STAT_BUS 0xff00
59 #define PCIE_STAT_DEV 0x1f0000
60 #define PCIE_STAT_LINK_DOWN BIT(0)
61 #define PCIE_RC_RTSTA 0x1a14
62 #define PCIE_DEBUG_CTRL 0x1a60
63 #define PCIE_DEBUG_SOFT_RESET BIT(20)
64
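/*
 * The emulated PCI-to-PCI bridge exposes a PCI Express capability placed
 * right after the standard type 1 header, at config offset
 * PCI_BRIDGE_CONTROL + 2.  The values below are the config space offsets
 * of the PCIe capability registers within that emulated bridge.
 */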
65 enum {
66 PCISWCAP = PCI_BRIDGE_CONTROL + 2,
67 PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID,
68 PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP,
69 PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL,
70 PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP,
71 PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL,
72 PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP,
73 PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL,
74 PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL,
75 PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA,
76 PCISWCAP_EXP_DEVCAP2 = PCISWCAP + PCI_EXP_DEVCAP2,
77 PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2,
78 PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2,
79 PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2,
80 PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2,
81 PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2,
82 };
83
84 /* PCI configuration space of a PCI-to-PCI bridge */
85 struct mvebu_sw_pci_bridge {
86 u16 vendor;
87 u16 device;
88 u16 command;
89 u16 status;
90 u16 class;
91 u8 interface;
92 u8 revision;
93 u8 bist;
94 u8 header_type;
95 u8 latency_timer;
96 u8 cache_line_size;
97 u32 bar[2];
98 u8 primary_bus;
99 u8 secondary_bus;
100 u8 subordinate_bus;
101 u8 secondary_latency_timer;
102 u8 iobase;
103 u8 iolimit;
104 u16 secondary_status;
105 u16 membase;
106 u16 memlimit;
107 u16 iobaseupper;
108 u16 iolimitupper;
109 u32 romaddr;
110 u8 intline;
111 u8 intpin;
112 u16 bridgectrl;
113
114 /* PCI express capability */
115 u32 pcie_sltcap;
116 u16 pcie_devctl;
117 u16 pcie_rtctl;
118 };
119
120 struct mvebu_pcie_port;
121
122 /* Structure representing all PCIe interfaces */
123 struct mvebu_pcie {
124 struct platform_device *pdev;
125 struct mvebu_pcie_port *ports;
126 struct msi_controller *msi;
127 struct resource io;
128 struct resource realio;
129 struct resource mem;
130 struct resource busn;
131 int nports;
132 };
133
134 /* Structure representing one PCIe interface */
135 struct mvebu_pcie_port {
136 char *name;
137 void __iomem *base;
138 u32 port;
139 u32 lane;
140 int devfn;
141 unsigned int mem_target;
142 unsigned int mem_attr;
143 unsigned int io_target;
144 unsigned int io_attr;
145 struct clk *clk;
146 struct gpio_desc *reset_gpio;
147 char *reset_name;
148 struct mvebu_sw_pci_bridge bridge;
149 struct device_node *dn;
150 struct mvebu_pcie *pcie;
151 phys_addr_t memwin_base;
152 size_t memwin_size;
153 phys_addr_t iowin_base;
154 size_t iowin_size;
155 u32 saved_pcie_stat;
156 };
157
158 static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
159 {
160 writel(val, port->base + reg);
161 }
162
163 static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
164 {
165 return readl(port->base + reg);
166 }
167
168 static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
169 {
170 return port->io_target != -1 && port->io_attr != -1;
171 }
172
173 static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
174 {
175 return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
176 }
177
178 static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
179 {
180 u32 stat;
181
182 stat = mvebu_readl(port, PCIE_STAT_OFF);
183 stat &= ~PCIE_STAT_BUS;
184 stat |= nr << 8;
185 mvebu_writel(port, stat, PCIE_STAT_OFF);
186 }
187
188 static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
189 {
190 u32 stat;
191
192 stat = mvebu_readl(port, PCIE_STAT_OFF);
193 stat &= ~PCIE_STAT_DEV;
194 stat |= nr << 16;
195 mvebu_writel(port, stat, PCIE_STAT_OFF);
196 }
197
198 /*
199  * Set up the PCIe BARs and address decode windows:
200 * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
201 * WIN[0-3] -> DRAM bank[0-3]
202 */
203 static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
204 {
205 const struct mbus_dram_target_info *dram;
206 u32 size;
207 int i;
208
209 dram = mv_mbus_dram_info();
210
211 /* First, disable and clear BARs and windows. */
212 for (i = 1; i < 3; i++) {
213 mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
214 mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
215 mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
216 }
217
218 for (i = 0; i < 5; i++) {
219 mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
220 mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
221 mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
222 }
223
224 mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
225 mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
226 mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
227
228 /* Setup windows for DDR banks. Count total DDR size on the fly. */
229 size = 0;
230 for (i = 0; i < dram->num_cs; i++) {
231 const struct mbus_dram_window *cs = dram->cs + i;
232
233 mvebu_writel(port, cs->base & 0xffff0000,
234 PCIE_WIN04_BASE_OFF(i));
235 mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
236 mvebu_writel(port,
237 ((cs->size - 1) & 0xffff0000) |
238 (cs->mbus_attr << 8) |
239 (dram->mbus_dram_target_id << 4) | 1,
240 PCIE_WIN04_CTRL_OFF(i));
241
242 size += cs->size;
243 }
244
245 /* Round up 'size' to the nearest power of two. */
246 if ((size & (size - 1)) != 0)
247 size = 1 << fls(size);
248
249 /* Setup BAR[1] to all DRAM banks. */
250 mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
251 mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
252 mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
253 PCIE_BAR_CTRL_OFF(1));
254 }
255
256 static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
257 {
258 u32 cmd, mask;
259
260 /* Point PCIe unit MBUS decode windows to DRAM space. */
261 mvebu_pcie_setup_wins(port);
262
263 /* Master + slave enable. */
264 cmd = mvebu_readl(port, PCIE_CMD_OFF);
265 cmd |= PCI_COMMAND_IO;
266 cmd |= PCI_COMMAND_MEMORY;
267 cmd |= PCI_COMMAND_MASTER;
268 mvebu_writel(port, cmd, PCIE_CMD_OFF);
269
270 /* Enable interrupt lines A-D. */
271 mask = mvebu_readl(port, PCIE_MASK_OFF);
272 mask |= PCIE_MASK_ENABLE_INTS;
273 mvebu_writel(port, mask, PCIE_MASK_OFF);
274 }
275
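/*
 * Indirect configuration space access: the bus/device/function/register
 * address is first programmed into PCIE_CONF_ADDR_OFF (with the enable
 * bit set), then the data is read from or written to PCIE_CONF_DATA_OFF,
 * using the byte lanes selected by the low bits of 'where' for sub-word
 * accesses.
 */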
276 static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
277 struct pci_bus *bus,
278 u32 devfn, int where, int size, u32 *val)
279 {
280 void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
281
282 mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
283 PCIE_CONF_ADDR_OFF);
284
285 switch (size) {
286 case 1:
287 *val = readb_relaxed(conf_data + (where & 3));
288 break;
289 case 2:
290 *val = readw_relaxed(conf_data + (where & 2));
291 break;
292 case 4:
293 *val = readl_relaxed(conf_data);
294 break;
295 }
296
297 return PCIBIOS_SUCCESSFUL;
298 }
299
300 static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
301 struct pci_bus *bus,
302 u32 devfn, int where, int size, u32 val)
303 {
304 void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;
305
306 mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
307 PCIE_CONF_ADDR_OFF);
308
309 switch (size) {
310 case 1:
311 writeb(val, conf_data + (where & 3));
312 break;
313 case 2:
314 writew(val, conf_data + (where & 2));
315 break;
316 case 4:
317 writel(val, conf_data);
318 break;
319 default:
320 return PCIBIOS_BAD_REGISTER_NUMBER;
321 }
322
323 return PCIBIOS_SUCCESSFUL;
324 }
325
326 /*
327 * Remove windows, starting from the largest ones to the smallest
328 * ones.
329 */
330 static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
331 phys_addr_t base, size_t size)
332 {
333 while (size) {
334 size_t sz = 1 << (fls(size) - 1);
335
336 mvebu_mbus_del_window(base, sz);
337 base += sz;
338 size -= sz;
339 }
340 }
341
342 /*
343 * MBus windows can only have a power of two size, but PCI BARs do not
344 * have this constraint. Therefore, we have to split the PCI BAR into
345 * areas each having a power of two size. We start from the largest
346  * one (i.e., the highest order bit set in the size).
347 */
348 static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
349 unsigned int target, unsigned int attribute,
350 phys_addr_t base, size_t size,
351 phys_addr_t remap)
352 {
353 size_t size_mapped = 0;
354
355 while (size) {
356 size_t sz = 1 << (fls(size) - 1);
357 int ret;
358
359 ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
360 sz, remap);
361 if (ret) {
362 phys_addr_t end = base + sz - 1;
363
364 dev_err(&port->pcie->pdev->dev,
365 "Could not create MBus window at [mem %pa-%pa]: %d\n",
366 &base, &end, ret);
367 mvebu_pcie_del_windows(port, base - size_mapped,
368 size_mapped);
369 return;
370 }
371
372 size -= sz;
373 size_mapped += sz;
374 base += sz;
375 if (remap != MVEBU_MBUS_NO_REMAP)
376 remap += sz;
377 }
378 }
379
380 static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
381 {
382 phys_addr_t iobase;
383
384 /* Are the new iobase/iolimit values invalid? */
385 if (port->bridge.iolimit < port->bridge.iobase ||
386 port->bridge.iolimitupper < port->bridge.iobaseupper ||
387 !(port->bridge.command & PCI_COMMAND_IO)) {
388
389 /* If a window was configured, remove it */
390 if (port->iowin_base) {
391 mvebu_pcie_del_windows(port, port->iowin_base,
392 port->iowin_size);
393 port->iowin_base = 0;
394 port->iowin_size = 0;
395 }
396
397 return;
398 }
399
400 if (!mvebu_has_ioport(port)) {
401 dev_WARN(&port->pcie->pdev->dev,
402 "Attempt to set IO when IO is disabled\n");
403 return;
404 }
405
406 /*
407 * We read the PCI-to-PCI bridge emulated registers, and
408 * calculate the base address and size of the address decoding
409  * window to set up, according to the PCI-to-PCI bridge
410 * specifications. iobase is the bus address, port->iowin_base
411 * is the CPU address.
412 */
413 iobase = ((port->bridge.iobase & 0xF0) << 8) |
414 (port->bridge.iobaseupper << 16);
415 port->iowin_base = port->pcie->io.start + iobase;
416 port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
417 (port->bridge.iolimitupper << 16)) -
418 iobase) + 1;
419
420 mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
421 port->iowin_base, port->iowin_size,
422 iobase);
423 }
424
425 static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
426 {
427 /* Are the new membase/memlimit values invalid? */
428 if (port->bridge.memlimit < port->bridge.membase ||
429 !(port->bridge.command & PCI_COMMAND_MEMORY)) {
430
431 /* If a window was configured, remove it */
432 if (port->memwin_base) {
433 mvebu_pcie_del_windows(port, port->memwin_base,
434 port->memwin_size);
435 port->memwin_base = 0;
436 port->memwin_size = 0;
437 }
438
439 return;
440 }
441
442 /*
443 * We read the PCI-to-PCI bridge emulated registers, and
444 * calculate the base address and size of the address decoding
445  * window to set up, according to the PCI-to-PCI bridge
446 * specifications.
447 */
448 port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
449 port->memwin_size =
450 (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
451 port->memwin_base + 1;
452
453 mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
454 port->memwin_base, port->memwin_size,
455 MVEBU_MBUS_NO_REMAP);
456 }
457
458 /*
459 * Initialize the configuration space of the PCI-to-PCI bridge
460 * associated with the given PCIe interface.
461 */
462 static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
463 {
464 struct mvebu_sw_pci_bridge *bridge = &port->bridge;
465
466 memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge));
467
468 bridge->class = PCI_CLASS_BRIDGE_PCI;
469 bridge->vendor = PCI_VENDOR_ID_MARVELL;
470 bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
471 bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
472 bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
473 bridge->cache_line_size = 0x10;
474
475 	/* We support 32-bit I/O addressing */
476 bridge->iobase = PCI_IO_RANGE_TYPE_32;
477 bridge->iolimit = PCI_IO_RANGE_TYPE_32;
478
479 /* Add capabilities */
480 bridge->status = PCI_STATUS_CAP_LIST;
481 }
482
483 /*
484  * Read the configuration space of the PCI-to-PCI bridge associated with
485 * the given PCIe interface.
486 */
487 static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
488 unsigned int where, int size, u32 *value)
489 {
490 struct mvebu_sw_pci_bridge *bridge = &port->bridge;
491
492 switch (where & ~3) {
493 case PCI_VENDOR_ID:
494 *value = bridge->device << 16 | bridge->vendor;
495 break;
496
497 case PCI_COMMAND:
498 *value = bridge->command | bridge->status << 16;
499 break;
500
501 case PCI_CLASS_REVISION:
502 *value = bridge->class << 16 | bridge->interface << 8 |
503 bridge->revision;
504 break;
505
506 case PCI_CACHE_LINE_SIZE:
507 *value = bridge->bist << 24 | bridge->header_type << 16 |
508 bridge->latency_timer << 8 | bridge->cache_line_size;
509 break;
510
511 case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
512 *value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4];
513 break;
514
515 case PCI_PRIMARY_BUS:
516 *value = (bridge->secondary_latency_timer << 24 |
517 bridge->subordinate_bus << 16 |
518 bridge->secondary_bus << 8 |
519 bridge->primary_bus);
520 break;
521
522 case PCI_IO_BASE:
523 if (!mvebu_has_ioport(port))
524 *value = bridge->secondary_status << 16;
525 else
526 *value = (bridge->secondary_status << 16 |
527 bridge->iolimit << 8 |
528 bridge->iobase);
529 break;
530
531 case PCI_MEMORY_BASE:
532 *value = (bridge->memlimit << 16 | bridge->membase);
533 break;
534
535 case PCI_PREF_MEMORY_BASE:
536 *value = 0;
537 break;
538
539 case PCI_IO_BASE_UPPER16:
540 *value = (bridge->iolimitupper << 16 | bridge->iobaseupper);
541 break;
542
543 case PCI_CAPABILITY_LIST:
544 *value = PCISWCAP;
545 break;
546
547 case PCI_ROM_ADDRESS1:
548 *value = 0;
549 break;
550
551 case PCI_INTERRUPT_LINE:
552 /* LINE PIN MIN_GNT MAX_LAT */
553 *value = 0;
554 break;
555
556 case PCISWCAP_EXP_LIST_ID:
557 /* Set PCIe v2, root port, slot support */
558 *value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
559 PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP;
560 break;
561
562 case PCISWCAP_EXP_DEVCAP:
563 *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
564 break;
565
566 case PCISWCAP_EXP_DEVCTL:
567 *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
568 ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
569 PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
570 *value |= bridge->pcie_devctl;
571 break;
572
573 case PCISWCAP_EXP_LNKCAP:
574 /*
575 * PCIe requires the clock power management capability to be
576 * hard-wired to zero for downstream ports
577 */
578 *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
579 ~PCI_EXP_LNKCAP_CLKPM;
580 break;
581
582 case PCISWCAP_EXP_LNKCTL:
583 *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
584 break;
585
586 case PCISWCAP_EXP_SLTCAP:
587 *value = bridge->pcie_sltcap;
588 break;
589
590 case PCISWCAP_EXP_SLTCTL:
591 *value = PCI_EXP_SLTSTA_PDS << 16;
592 break;
593
594 case PCISWCAP_EXP_RTCTL:
595 *value = bridge->pcie_rtctl;
596 break;
597
598 case PCISWCAP_EXP_RTSTA:
599 *value = mvebu_readl(port, PCIE_RC_RTSTA);
600 break;
601
602 /* PCIe requires the v2 fields to be hard-wired to zero */
603 case PCISWCAP_EXP_DEVCAP2:
604 case PCISWCAP_EXP_DEVCTL2:
605 case PCISWCAP_EXP_LNKCAP2:
606 case PCISWCAP_EXP_LNKCTL2:
607 case PCISWCAP_EXP_SLTCAP2:
608 case PCISWCAP_EXP_SLTCTL2:
609 default:
610 /*
611 * PCI defines configuration read accesses to reserved or
612 * unimplemented registers to read as zero and complete
613 * normally.
614 */
615 *value = 0;
616 return PCIBIOS_SUCCESSFUL;
617 }
618
619 if (size == 2)
620 *value = (*value >> (8 * (where & 3))) & 0xffff;
621 else if (size == 1)
622 *value = (*value >> (8 * (where & 3))) & 0xff;
623
624 return PCIBIOS_SUCCESSFUL;
625 }
626
627 /* Write to the PCI-to-PCI bridge configuration space */
628 static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
629 unsigned int where, int size, u32 value)
630 {
631 struct mvebu_sw_pci_bridge *bridge = &port->bridge;
632 u32 mask, reg;
633 int err;
634
635 if (size == 4)
636 mask = 0x0;
637 else if (size == 2)
638 mask = ~(0xffff << ((where & 3) * 8));
639 else if (size == 1)
640 mask = ~(0xff << ((where & 3) * 8));
641 else
642 return PCIBIOS_BAD_REGISTER_NUMBER;
643
644 err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, &reg);
645 if (err)
646 return err;
647
648 value = (reg & mask) | value << ((where & 3) * 8);
649
650 switch (where & ~3) {
651 case PCI_COMMAND:
652 {
653 u32 old = bridge->command;
654
655 if (!mvebu_has_ioport(port))
656 value &= ~PCI_COMMAND_IO;
657
658 bridge->command = value & 0xffff;
659 if ((old ^ bridge->command) & PCI_COMMAND_IO)
660 mvebu_pcie_handle_iobase_change(port);
661 if ((old ^ bridge->command) & PCI_COMMAND_MEMORY)
662 mvebu_pcie_handle_membase_change(port);
663 break;
664 }
665
666 case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
667 bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
668 break;
669
670 case PCI_IO_BASE:
671 /*
672 		 * We also keep bit 1 set: it is a read-only bit
673 		 * indicating that we support 32-bit addressing
674 		 * for I/O.
675 */
676 bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
677 bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
678 mvebu_pcie_handle_iobase_change(port);
679 break;
680
681 case PCI_MEMORY_BASE:
682 bridge->membase = value & 0xffff;
683 bridge->memlimit = value >> 16;
684 mvebu_pcie_handle_membase_change(port);
685 break;
686
687 case PCI_IO_BASE_UPPER16:
688 bridge->iobaseupper = value & 0xffff;
689 bridge->iolimitupper = value >> 16;
690 mvebu_pcie_handle_iobase_change(port);
691 break;
692
693 case PCI_PRIMARY_BUS:
694 bridge->primary_bus = value & 0xff;
695 bridge->secondary_bus = (value >> 8) & 0xff;
696 bridge->subordinate_bus = (value >> 16) & 0xff;
697 bridge->secondary_latency_timer = (value >> 24) & 0xff;
698 mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus);
699 break;
700
701 case PCISWCAP_EXP_DEVCTL:
702 /*
703 		 * The Armada 370 documentation says these bits must
704 		 * always be zero when in root complex mode.
705 */
706 value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
707 PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
708
709 /*
710 * If the mask is 0xffff0000, then we only want to write
711 * the device control register, rather than clearing the
712 * RW1C bits in the device status register. Mask out the
713 * status register bits.
714 */
715 if (mask == 0xffff0000)
716 value &= 0xffff;
717
718 mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
719 break;
720
721 case PCISWCAP_EXP_LNKCTL:
722 /*
723 * If we don't support CLKREQ, we must ensure that the
724 		 * CLKREQ enable bit always reads zero. Since we have never
725 		 * supported this capability, and it depends on board wiring,
726 		 * disable it for the time being.
727 */
728 value &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
729
730 /*
731 * If the mask is 0xffff0000, then we only want to write
732 * the link control register, rather than clearing the
733 * RW1C bits in the link status register. Mask out the
734 * status register bits.
735 */
736 if (mask == 0xffff0000)
737 value &= 0xffff;
738
739 mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
740 break;
741
742 case PCISWCAP_EXP_RTSTA:
743 mvebu_writel(port, value, PCIE_RC_RTSTA);
744 break;
745
746 default:
747 break;
748 }
749
750 return PCIBIOS_SUCCESSFUL;
751 }
752
753 static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
754 {
755 return sys->private_data;
756 }
757
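/*
 * Find the port that handles a given config access: accesses to bus 0
 * target the emulated PCI-to-PCI bridges and are matched by devfn, while
 * accesses to other buses are matched against each port's
 * secondary..subordinate bus range.
 */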
758 static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
759 struct pci_bus *bus,
760 int devfn)
761 {
762 int i;
763
764 for (i = 0; i < pcie->nports; i++) {
765 struct mvebu_pcie_port *port = &pcie->ports[i];
766
767 if (bus->number == 0 && port->devfn == devfn)
768 return port;
769 if (bus->number != 0 &&
770 bus->number >= port->bridge.secondary_bus &&
771 bus->number <= port->bridge.subordinate_bus)
772 return port;
773 }
774
775 return NULL;
776 }
777
778 /* PCI configuration space write function */
779 static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
780 int where, int size, u32 val)
781 {
782 struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
783 struct mvebu_pcie_port *port;
784 int ret;
785
786 port = mvebu_pcie_find_port(pcie, bus, devfn);
787 if (!port)
788 return PCIBIOS_DEVICE_NOT_FOUND;
789
790 /* Access the emulated PCI-to-PCI bridge */
791 if (bus->number == 0)
792 return mvebu_sw_pci_bridge_write(port, where, size, val);
793
794 if (!mvebu_pcie_link_up(port))
795 return PCIBIOS_DEVICE_NOT_FOUND;
796
797 /* Access the real PCIe interface */
798 ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
799 where, size, val);
800
801 return ret;
802 }
803
804 /* PCI configuration space read function */
805 static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
806 int size, u32 *val)
807 {
808 struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
809 struct mvebu_pcie_port *port;
810 int ret;
811
812 port = mvebu_pcie_find_port(pcie, bus, devfn);
813 if (!port) {
814 *val = 0xffffffff;
815 return PCIBIOS_DEVICE_NOT_FOUND;
816 }
817
818 /* Access the emulated PCI-to-PCI bridge */
819 if (bus->number == 0)
820 return mvebu_sw_pci_bridge_read(port, where, size, val);
821
822 if (!mvebu_pcie_link_up(port)) {
823 *val = 0xffffffff;
824 return PCIBIOS_DEVICE_NOT_FOUND;
825 }
826
827 /* Access the real PCIe interface */
828 ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
829 where, size, val);
830
831 return ret;
832 }
833
834 static struct pci_ops mvebu_pcie_ops = {
835 .read = mvebu_pcie_rd_conf,
836 .write = mvebu_pcie_wr_conf,
837 };
838
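/*
 * hw_pci .setup callback: claim the memory and I/O apertures, register
 * them (plus the bus range) with the PCI core, and program the hardware
 * of every successfully probed port.
 */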
839 static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
840 {
841 struct mvebu_pcie *pcie = sys_to_pcie(sys);
842 int i;
843
844 pcie->mem.name = "PCI MEM";
845 pcie->realio.name = "PCI I/O";
846
847 if (request_resource(&iomem_resource, &pcie->mem))
848 return 0;
849
850 if (resource_size(&pcie->realio) != 0) {
851 if (request_resource(&ioport_resource, &pcie->realio)) {
852 release_resource(&pcie->mem);
853 return 0;
854 }
855 pci_add_resource_offset(&sys->resources, &pcie->realio,
856 sys->io_offset);
857 }
858 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
859 pci_add_resource(&sys->resources, &pcie->busn);
860
861 for (i = 0; i < pcie->nports; i++) {
862 struct mvebu_pcie_port *port = &pcie->ports[i];
863
864 if (!port->base)
865 continue;
866 mvebu_pcie_setup_hw(port);
867 }
868
869 return 1;
870 }
871
872 static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
873 const struct resource *res,
874 resource_size_t start,
875 resource_size_t size,
876 resource_size_t align)
877 {
878 if (dev->bus->number != 0)
879 return start;
880
881 /*
882 * On the PCI-to-PCI bridge side, the I/O windows must have at
883 * least a 64 KB size and the memory windows must have at
884 * least a 1 MB size. Moreover, MBus windows need to have a
885 * base address aligned on their size, and their size must be
886 * a power of two. This means that if the BAR doesn't have a
887 * power of two size, several MBus windows will actually be
888 * created. We need to ensure that the biggest MBus window
889 * (which will be the first one) is aligned on its size, which
890 * explains the rounddown_pow_of_two() being done here.
891 */
892 if (res->flags & IORESOURCE_IO)
893 return round_up(start, max_t(resource_size_t, SZ_64K,
894 rounddown_pow_of_two(size)));
895 else if (res->flags & IORESOURCE_MEM)
896 return round_up(start, max_t(resource_size_t, SZ_1M,
897 rounddown_pow_of_two(size)));
898 else
899 return start;
900 }
901
902 static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
903 {
904 struct hw_pci hw;
905
906 memset(&hw, 0, sizeof(hw));
907
908 #ifdef CONFIG_PCI_MSI
909 hw.msi_ctrl = pcie->msi;
910 #endif
911
912 hw.nr_controllers = 1;
913 hw.private_data = (void **)&pcie;
914 hw.setup = mvebu_pcie_setup;
915 hw.map_irq = of_irq_parse_and_map_pci;
916 hw.ops = &mvebu_pcie_ops;
917 hw.align_resource = mvebu_pcie_align_resource;
918
919 pci_common_init_dev(&pcie->pdev->dev, &hw);
920 }
921
922 /*
923  * Map the first register area described by the reg = <...> property of
924  * the given port's device tree node, which holds the PCIe unit
925  * registers for that port.
926 */
927 static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
928 struct device_node *np,
929 struct mvebu_pcie_port *port)
930 {
931 struct resource regs;
932 int ret = 0;
933
934 ret = of_address_to_resource(np, 0, &regs);
935 if (ret)
936 return ERR_PTR(ret);
937
938 return devm_ioremap_resource(&pdev->dev, &regs);
939 }
940
941 #define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
942 #define DT_TYPE_IO 0x1
943 #define DT_TYPE_MEM32 0x2
944 #define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
945 #define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
946
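/*
 * Walk the controller node's 'ranges' property to find the MBus target ID
 * and attribute to use for the memory or I/O window serving the given
 * device.  The CPU address of each range entry carries the target in
 * bits [63:56] and the attribute in bits [55:48].
 */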
947 static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
948 unsigned long type,
949 unsigned int *tgt,
950 unsigned int *attr)
951 {
952 const int na = 3, ns = 2;
953 const __be32 *range;
954 int rlen, nranges, rangesz, pna, i;
955
956 *tgt = -1;
957 *attr = -1;
958
959 range = of_get_property(np, "ranges", &rlen);
960 if (!range)
961 return -EINVAL;
962
963 pna = of_n_addr_cells(np);
964 rangesz = pna + na + ns;
965 nranges = rlen / sizeof(__be32) / rangesz;
966
967 for (i = 0; i < nranges; i++, range += rangesz) {
968 u32 flags = of_read_number(range, 1);
969 u32 slot = of_read_number(range + 1, 1);
970 u64 cpuaddr = of_read_number(range + na, pna);
971 unsigned long rtype;
972
973 if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
974 rtype = IORESOURCE_IO;
975 else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
976 rtype = IORESOURCE_MEM;
977 else
978 continue;
979
980 if (slot == PCI_SLOT(devfn) && type == rtype) {
981 *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
982 *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
983 return 0;
984 }
985 }
986
987 return -ENOENT;
988 }
989
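/*
 * Look up the 'msi-parent' phandle and, if an MSI controller has been
 * registered for that node, attach it so it can be handed over to the
 * PCI core in mvebu_pcie_enable().
 */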
990 static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
991 {
992 struct device_node *msi_node;
993
994 msi_node = of_parse_phandle(pcie->pdev->dev.of_node,
995 "msi-parent", 0);
996 if (!msi_node)
997 return;
998
999 pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
1000 of_node_put(msi_node);
1001
1002 if (pcie->msi)
1003 pcie->msi->dev = &pcie->pdev->dev;
1004 }
1005
1006 #ifdef CONFIG_PM_SLEEP
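/*
 * Only the PCIE_STAT register (local bus and device number) is saved
 * across suspend; the MBus windows, command register and interrupt mask
 * are reprogrammed by mvebu_pcie_setup_hw() on resume.
 */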
1007 static int mvebu_pcie_suspend(struct device *dev)
1008 {
1009 struct mvebu_pcie *pcie;
1010 int i;
1011
1012 pcie = dev_get_drvdata(dev);
1013 for (i = 0; i < pcie->nports; i++) {
1014 struct mvebu_pcie_port *port = pcie->ports + i;
1015 port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
1016 }
1017
1018 return 0;
1019 }
1020
1021 static int mvebu_pcie_resume(struct device *dev)
1022 {
1023 struct mvebu_pcie *pcie;
1024 int i;
1025
1026 pcie = dev_get_drvdata(dev);
1027 for (i = 0; i < pcie->nports; i++) {
1028 struct mvebu_pcie_port *port = pcie->ports + i;
1029 mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
1030 mvebu_pcie_setup_hw(port);
1031 }
1032
1033 return 0;
1034 }
1035 #endif
1036
1037 static void mvebu_pcie_port_clk_put(void *data)
1038 {
1039 struct mvebu_pcie_port *port = data;
1040
1041 clk_put(port->clk);
1042 }
1043
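/*
 * Parse one port child node: port/lane numbers, devfn, MBus target and
 * attribute for the memory (and optionally I/O) windows, an optional
 * reset GPIO and the port clock.  Returns 1 if the port is usable, 0 if
 * it should be skipped, or a negative error code.
 */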
1044 static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
1045 struct mvebu_pcie_port *port, struct device_node *child)
1046 {
1047 struct device *dev = &pcie->pdev->dev;
1048 enum of_gpio_flags flags;
1049 int reset_gpio, ret;
1050
1051 port->pcie = pcie;
1052
1053 if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
1054 dev_warn(dev, "ignoring %s, missing pcie-port property\n",
1055 of_node_full_name(child));
1056 goto skip;
1057 }
1058
1059 if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
1060 port->lane = 0;
1061
1062 port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
1063 port->lane);
1064 if (!port->name) {
1065 ret = -ENOMEM;
1066 goto err;
1067 }
1068
1069 port->devfn = of_pci_get_devfn(child);
1070 if (port->devfn < 0)
1071 goto skip;
1072
1073 ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
1074 &port->mem_target, &port->mem_attr);
1075 if (ret < 0) {
1076 dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
1077 port->name);
1078 goto skip;
1079 }
1080
1081 if (resource_size(&pcie->io) != 0) {
1082 mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
1083 &port->io_target, &port->io_attr);
1084 } else {
1085 port->io_target = -1;
1086 port->io_attr = -1;
1087 }
1088
1089 reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
1090 if (reset_gpio == -EPROBE_DEFER) {
1091 ret = reset_gpio;
1092 goto err;
1093 }
1094
1095 if (gpio_is_valid(reset_gpio)) {
1096 unsigned long gpio_flags;
1097
1098 port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
1099 port->name);
1100 if (!port->reset_name) {
1101 ret = -ENOMEM;
1102 goto err;
1103 }
1104
1105 if (flags & OF_GPIO_ACTIVE_LOW) {
1106 dev_info(dev, "%s: reset gpio is active low\n",
1107 of_node_full_name(child));
1108 gpio_flags = GPIOF_ACTIVE_LOW |
1109 GPIOF_OUT_INIT_LOW;
1110 } else {
1111 gpio_flags = GPIOF_OUT_INIT_HIGH;
1112 }
1113
1114 ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
1115 port->reset_name);
1116 if (ret) {
1117 if (ret == -EPROBE_DEFER)
1118 goto err;
1119 goto skip;
1120 }
1121
1122 port->reset_gpio = gpio_to_desc(reset_gpio);
1123 }
1124
1125 port->clk = of_clk_get_by_name(child, NULL);
1126 if (IS_ERR(port->clk)) {
1127 dev_err(dev, "%s: cannot get clock\n", port->name);
1128 goto skip;
1129 }
1130
1131 ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
1132 if (ret < 0) {
1133 clk_put(port->clk);
1134 goto err;
1135 }
1136
1137 return 1;
1138
1139 skip:
1140 ret = 0;
1141
1142 /* In the case of skipping, we need to free these */
1143 devm_kfree(dev, port->reset_name);
1144 port->reset_name = NULL;
1145 devm_kfree(dev, port->name);
1146 port->name = NULL;
1147
1148 err:
1149 return ret;
1150 }
1151
1152 /*
1153 * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
1154 * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
1155 * of the PCI Express Card Electromechanical Specification, 1.1.
1156 */
1157 static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
1158 {
1159 int ret;
1160
1161 ret = clk_prepare_enable(port->clk);
1162 if (ret < 0)
1163 return ret;
1164
1165 if (port->reset_gpio) {
1166 u32 reset_udelay = 20000;
1167
1168 of_property_read_u32(port->dn, "reset-delay-us",
1169 &reset_udelay);
1170
1171 udelay(100);
1172
1173 gpiod_set_value_cansleep(port->reset_gpio, 0);
1174 msleep(reset_udelay / 1000);
1175 }
1176
1177 return 0;
1178 }
1179
1180 /*
1181 * Power down a PCIe port. Strictly, PCIe requires us to place the card
1182 * in D3hot state before asserting PERST#.
1183 */
1184 static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
1185 {
1186 if (port->reset_gpio)
1187 gpiod_set_value_cansleep(port->reset_gpio, 1);
1188
1189 clk_disable_unprepare(port->clk);
1190 }
1191
1192 static int mvebu_pcie_probe(struct platform_device *pdev)
1193 {
1194 struct mvebu_pcie *pcie;
1195 struct device_node *np = pdev->dev.of_node;
1196 struct device_node *child;
1197 int num, i, ret;
1198
1199 pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
1200 GFP_KERNEL);
1201 if (!pcie)
1202 return -ENOMEM;
1203
1204 pcie->pdev = pdev;
1205 platform_set_drvdata(pdev, pcie);
1206
1207 /* Get the PCIe memory and I/O aperture */
1208 mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
1209 if (resource_size(&pcie->mem) == 0) {
1210 dev_err(&pdev->dev, "invalid memory aperture size\n");
1211 return -EINVAL;
1212 }
1213
1214 mvebu_mbus_get_pcie_io_aperture(&pcie->io);
1215
1216 if (resource_size(&pcie->io) != 0) {
1217 pcie->realio.flags = pcie->io.flags;
1218 pcie->realio.start = PCIBIOS_MIN_IO;
1219 pcie->realio.end = min_t(resource_size_t,
1220 IO_SPACE_LIMIT,
1221 resource_size(&pcie->io));
1222 } else
1223 pcie->realio = pcie->io;
1224
1225 /* Get the bus range */
1226 ret = of_pci_parse_bus_range(np, &pcie->busn);
1227 if (ret) {
1228 dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
1229 ret);
1230 return ret;
1231 }
1232
1233 num = of_get_available_child_count(pdev->dev.of_node);
1234
1235 pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports),
1236 GFP_KERNEL);
1237 if (!pcie->ports)
1238 return -ENOMEM;
1239
1240 i = 0;
1241 for_each_available_child_of_node(pdev->dev.of_node, child) {
1242 struct mvebu_pcie_port *port = &pcie->ports[i];
1243
1244 ret = mvebu_pcie_parse_port(pcie, port, child);
1245 if (ret < 0) {
1246 of_node_put(child);
1247 return ret;
1248 } else if (ret == 0) {
1249 continue;
1250 }
1251
1252 port->dn = child;
1253 i++;
1254 }
1255 pcie->nports = i;
1256
1257 for (i = 0; i < pcie->nports; i++) {
1258 struct mvebu_pcie_port *port = &pcie->ports[i];
1259
1260 child = port->dn;
1261 if (!child)
1262 continue;
1263
1264 ret = mvebu_pcie_powerup(port);
1265 if (ret < 0)
1266 continue;
1267
1268 port->base = mvebu_pcie_map_registers(pdev, child, port);
1269 if (IS_ERR(port->base)) {
1270 dev_err(&pdev->dev, "%s: cannot map registers\n",
1271 port->name);
1272 port->base = NULL;
1273 mvebu_pcie_powerdown(port);
1274 continue;
1275 }
1276
1277 mvebu_pcie_set_local_dev_nr(port, 1);
1278 mvebu_sw_pci_bridge_init(port);
1279 }
1280
1281 pcie->nports = i;
1282
1283 for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
1284 pci_ioremap_io(i, pcie->io.start + i);
1285
1286 mvebu_pcie_msi_enable(pcie);
1287 mvebu_pcie_enable(pcie);
1288
1289 platform_set_drvdata(pdev, pcie);
1290
1291 return 0;
1292 }
1293
1294 static const struct of_device_id mvebu_pcie_of_match_table[] = {
1295 { .compatible = "marvell,armada-xp-pcie", },
1296 { .compatible = "marvell,armada-370-pcie", },
1297 { .compatible = "marvell,dove-pcie", },
1298 { .compatible = "marvell,kirkwood-pcie", },
1299 {},
1300 };
1301 MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
1302
1303 static const struct dev_pm_ops mvebu_pcie_pm_ops = {
1304 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
1305 };
1306
1307 static struct platform_driver mvebu_pcie_driver = {
1308 .driver = {
1309 .name = "mvebu-pcie",
1310 .of_match_table = mvebu_pcie_of_match_table,
1311 /* driver unloading/unbinding currently not supported */
1312 .suppress_bind_attrs = true,
1313 .pm = &mvebu_pcie_pm_ops,
1314 },
1315 .probe = mvebu_pcie_probe,
1316 };
1317 module_platform_driver(mvebu_pcie_driver);
1318
1319 MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
1320 MODULE_DESCRIPTION("Marvell EBU PCIe driver");
1321 MODULE_LICENSE("GPL v2");