PCI: imx6: Use new clock names
drivers/pci/host/pci-imx6.c
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)

struct imx6_pcie {
        int reset_gpio;
        int power_on_gpio;
        int wake_up_gpio;
        int disable_gpio;
        struct clk *pcie_bus;
        struct clk *pcie_phy;
        struct clk *pcie;
        struct pcie_port pp;
        struct regmap *iomuxc_gpr;
        void __iomem *mem_base;
};

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR 0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)

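/*
 * Poll the "ack" bit in PCIE_PHY_STAT until it reaches exp_val, giving up
 * after a handful of 1us delays.
 */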
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
        u32 val;
        u32 max_iterations = 10;
        u32 wait_counter = 0;

        do {
                val = readl(dbi_base + PCIE_PHY_STAT);
                val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
                wait_counter++;

                if (val == exp_val)
                        return 0;

                udelay(1);
        } while (wait_counter < max_iterations);

        return -ETIMEDOUT;
}

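/*
 * Drive a PHY register address onto the control bus, pulse the
 * "capture address" bit and wait for the ack handshake to complete.
 */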
static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
        u32 val;
        int ret;

        val = addr << PCIE_PHY_CTRL_DATA_LOC;
        writel(val, dbi_base + PCIE_PHY_CTRL);

        val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
        writel(val, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        val = addr << PCIE_PHY_CTRL_DATA_LOC;
        writel(val, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        return 0;
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
        u32 val, phy_ctl;
        int ret;

        ret = pcie_phy_wait_ack(dbi_base, addr);
        if (ret)
                return ret;

        /* assert Read signal */
        phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
        writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        val = readl(dbi_base + PCIE_PHY_STAT);
        *data = val & 0xffff;

        /* deassert Read signal */
        writel(0x00, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        return 0;
}

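/* Write to the 16-bit PCIe PHY control registers (not memory-mapped) */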
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
        u32 var;
        int ret;

        /* write addr */
        /* cap addr */
        ret = pcie_phy_wait_ack(dbi_base, addr);
        if (ret)
                return ret;

        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* capture data */
        var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
        writel(var, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        /* deassert cap data */
        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack de-assertion */
        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        /* assert wr signal */
        var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack */
        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        /* deassert wr signal */
        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack de-assertion */
        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        writel(0x0, dbi_base + PCIE_PHY_CTRL);

        return 0;
}

/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
                unsigned int fsr, struct pt_regs *regs)
{
        return 0;
}

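/* Assert reset: power down the PHY (TEST_PD) and gate the PCIe reference clock */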
static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);

        return 0;
}

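/*
 * Release the core from reset: power the slot (if a power-on GPIO exists),
 * power up the PHY, ungate the reference clock, enable the PCIe clocks and
 * toggle the optional reset GPIO towards the endpoint.
 */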
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
        int ret;

        if (gpio_is_valid(imx6_pcie->power_on_gpio))
                gpio_set_value(imx6_pcie->power_on_gpio, 1);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);

        ret = clk_prepare_enable(imx6_pcie->pcie_phy);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie_phy clock\n");
                goto err_pcie_phy;
        }

        ret = clk_prepare_enable(imx6_pcie->pcie_bus);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie_bus clock\n");
                goto err_pcie_bus;
        }

        ret = clk_prepare_enable(imx6_pcie->pcie);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie clock\n");
                goto err_pcie;
        }

        /* allow the clocks to stabilize */
        usleep_range(200, 500);

        /* Some boards don't have PCIe reset GPIO. */
        if (gpio_is_valid(imx6_pcie->reset_gpio)) {
                gpio_set_value(imx6_pcie->reset_gpio, 0);
                msleep(100);
                gpio_set_value(imx6_pcie->reset_gpio, 1);
        }
        return 0;

err_pcie:
        clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
        clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
        return ret;
}

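/*
 * Program the IOMUXC GPR registers: keep the LTSSM disabled for now, select
 * Root Complex mode, and set the LOS level and TX de-emphasis/swing values.
 */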
static void imx6_pcie_init_phy(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

        /* configure constant input signal to the pcie ctrl and phy */
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
}

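/* Poll the DesignWare core for link-up, giving up after 200 attempts */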
static int imx6_pcie_wait_for_link(struct pcie_port *pp)
{
        int count = 200;

        while (!dw_pcie_link_up(pp)) {
                usleep_range(100, 1000);
                if (--count)
                        continue;

                dev_err(pp->dev, "phy link never came up\n");
                dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
                        readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
                        readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
                return -EINVAL;
        }

        return 0;
}

static int imx6_pcie_start_link(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
        uint32_t tmp;
        int ret, count;

        /*
         * Force Gen1 operation when starting the link. In case the link is
         * started in Gen2 mode, there is a possibility the devices on the
         * bus will not be detected at all. This happens with PCIe switches.
         */
        tmp = readl(pp->dbi_base + PCIE_RC_LCR);
        tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
        tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
        writel(tmp, pp->dbi_base + PCIE_RC_LCR);

        /* Start LTSSM. */
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

        ret = imx6_pcie_wait_for_link(pp);
        if (ret)
                return ret;

        /* Allow Gen2 mode after the link is up. */
        tmp = readl(pp->dbi_base + PCIE_RC_LCR);
        tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
        tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
        writel(tmp, pp->dbi_base + PCIE_RC_LCR);

        /*
         * Start Directed Speed Change so the best possible speed both link
         * partners support can be negotiated.
         */
        tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
        tmp |= PORT_LOGIC_SPEED_CHANGE;
        writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);

        count = 200;
        while (count--) {
                tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
                /* Test if the speed change finished. */
                if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
                        break;
                usleep_range(100, 1000);
        }

        /* Make sure link training is finished as well! */
        if (count)
                ret = imx6_pcie_wait_for_link(pp);
        else
                ret = -EINVAL;

        if (ret) {
                dev_err(pp->dev, "Failed to bring link up!\n");
        } else {
                tmp = readl(pp->dbi_base + 0x80);
                dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
        }

        return ret;
}

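/*
 * dw_pcie host_init callback: reset the core, configure the PHY, release the
 * reset, set up the Root Complex and bring up the link.
 */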
static void imx6_pcie_host_init(struct pcie_port *pp)
{
        imx6_pcie_assert_core_reset(pp);

        imx6_pcie_init_phy(pp);

        imx6_pcie_deassert_core_reset(pp);

        dw_pcie_setup_rc(pp);

        imx6_pcie_start_link(pp);
}

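/*
 * Reset the PHY receiver by briefly forcing the RX_DATA_EN and RX_PLL_EN
 * overrides on and then releasing them again.
 */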
static void imx6_pcie_reset_phy(struct pcie_port *pp)
{
        uint32_t temp;

        pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
        temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
                 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
        pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);

        usleep_range(2000, 3000);

        pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
        temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
                  PHY_RX_OVRD_IN_LO_RX_PLL_EN);
        pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
}

static int imx6_pcie_link_up(struct pcie_port *pp)
{
        u32 rc, debug_r0, rx_valid;
        int count = 5;

        /*
         * Test if the PHY reports that the link is up and also that the LTSSM
         * training finished. There are three possible states of the link when
         * this code is called:
         * 1) The link is DOWN (unlikely)
         *     The link didn't come up yet for some reason. This usually means
         *     we have a real problem somewhere. Reset the PHY and exit. This
         *     state calls for inspection of the DEBUG registers.
         * 2) The link is UP, but still in LTSSM training
         *     Wait for the training to finish, which should take a very short
         *     time. If the training does not finish, we have a problem and we
         *     need to inspect the DEBUG registers. If the training does finish,
         *     the link is up and operating correctly.
         * 3) The link is UP and no longer in LTSSM training
         *     The link is up and operating correctly.
         */
        while (1) {
                rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
                if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
                        break;
                if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
                        return 1;
                if (!count--)
                        break;
                dev_dbg(pp->dev, "Link is up, but still in training\n");
                /*
                 * Wait a little bit, then re-check if the link finished
                 * the training.
                 */
                usleep_range(1000, 2000);
        }
        /*
         * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
         * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
         * If (MAC/LTSSM.state == Recovery.RcvrLock)
         * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
         * to gen2 is stuck
         */
        pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
        debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);

        if (rx_valid & 0x01)
                return 0;

        if ((debug_r0 & 0x3f) != 0x0d)
                return 0;

        dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
        dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);

        imx6_pcie_reset_phy(pp);

        return 0;
}

static struct pcie_host_ops imx6_pcie_host_ops = {
        .link_up = imx6_pcie_link_up,
        .host_init = imx6_pcie_host_init,
};

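/* Fetch the IRQ and register the root port with the DesignWare host core */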
static int __init imx6_add_pcie_port(struct pcie_port *pp,
                struct platform_device *pdev)
{
        int ret;

        pp->irq = platform_get_irq(pdev, 0);
        if (pp->irq <= 0) {
                dev_err(&pdev->dev, "failed to get irq\n");
                return -ENODEV;
        }

        pp->root_bus_nr = -1;
        pp->ops = &imx6_pcie_host_ops;

        spin_lock_init(&pp->conf_lock);
        ret = dw_pcie_host_init(pp);
        if (ret) {
                dev_err(&pdev->dev, "failed to initialize host\n");
                return ret;
        }

        return 0;
}

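/*
 * Probe: map the DBI registers, claim the optional GPIOs, look up the
 * pcie_phy/pcie_bus/pcie clocks by their new names and the IOMUXC GPR
 * syscon, then add the root port.
 */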
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
        struct imx6_pcie *imx6_pcie;
        struct pcie_port *pp;
        struct device_node *np = pdev->dev.of_node;
        struct resource *dbi_base;
        int ret;

        imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
        if (!imx6_pcie)
                return -ENOMEM;

        pp = &imx6_pcie->pp;
        pp->dev = &pdev->dev;

        /* Added for PCI abort handling */
        hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
                "imprecise external abort");

        dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
        if (IS_ERR(pp->dbi_base))
                return PTR_ERR(pp->dbi_base);

        /* Fetch GPIOs */
        imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
        if (gpio_is_valid(imx6_pcie->reset_gpio)) {
                ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
                                            GPIOF_OUT_INIT_LOW, "PCIe reset");
                if (ret) {
                        dev_err(&pdev->dev, "unable to get reset gpio\n");
                        return ret;
                }
        }

        imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
        if (gpio_is_valid(imx6_pcie->power_on_gpio)) {
                ret = devm_gpio_request_one(&pdev->dev,
                                            imx6_pcie->power_on_gpio,
                                            GPIOF_OUT_INIT_LOW,
                                            "PCIe power enable");
                if (ret) {
                        dev_err(&pdev->dev, "unable to get power-on gpio\n");
                        return ret;
                }
        }

        imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0);
        if (gpio_is_valid(imx6_pcie->wake_up_gpio)) {
                ret = devm_gpio_request_one(&pdev->dev,
                                            imx6_pcie->wake_up_gpio,
                                            GPIOF_IN,
                                            "PCIe wake up");
                if (ret) {
                        dev_err(&pdev->dev, "unable to get wake-up gpio\n");
                        return ret;
                }
        }

        imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0);
        if (gpio_is_valid(imx6_pcie->disable_gpio)) {
                ret = devm_gpio_request_one(&pdev->dev,
                                            imx6_pcie->disable_gpio,
                                            GPIOF_OUT_INIT_HIGH,
                                            "PCIe disable endpoint");
                if (ret) {
                        dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
                        return ret;
                }
        }

        /* Fetch clocks */
        imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
        if (IS_ERR(imx6_pcie->pcie_phy)) {
                dev_err(&pdev->dev,
                        "pcie_phy clock source missing or invalid\n");
                return PTR_ERR(imx6_pcie->pcie_phy);
        }

        imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
        if (IS_ERR(imx6_pcie->pcie_bus)) {
                dev_err(&pdev->dev,
                        "pcie_bus clock source missing or invalid\n");
                return PTR_ERR(imx6_pcie->pcie_bus);
        }

        imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
        if (IS_ERR(imx6_pcie->pcie)) {
                dev_err(&pdev->dev,
                        "pcie clock source missing or invalid\n");
                return PTR_ERR(imx6_pcie->pcie);
        }

        /* Grab GPR config register range */
        imx6_pcie->iomuxc_gpr =
                 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
        if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
                dev_err(&pdev->dev, "unable to find iomuxc registers\n");
                return PTR_ERR(imx6_pcie->iomuxc_gpr);
        }

        ret = imx6_add_pcie_port(pp, pdev);
        if (ret < 0)
                return ret;

        platform_set_drvdata(pdev, imx6_pcie);
        return 0;
}

static const struct of_device_id imx6_pcie_of_match[] = {
        { .compatible = "fsl,imx6q-pcie", },
        {},
};
MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);

static struct platform_driver imx6_pcie_driver = {
        .driver = {
                .name = "imx6q-pcie",
                .owner = THIS_MODULE,
                .of_match_table = imx6_pcie_of_match,
        },
};

/* Freescale PCIe driver does not allow module unload */

static int __init imx6_pcie_init(void)
{
        return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
fs_initcall(imx6_pcie_init);

MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
MODULE_LICENSE("GPL v2");