PCI: imx6: Use enum instead of bool for variant indicator
drivers/pci/host/pci-imx6.c
/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)

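/* SoC variants handled by this driver, selected through the OF match data */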
enum imx6_pcie_variants {
        IMX6Q,
        IMX6SX
};

struct imx6_pcie {
        int reset_gpio;
        bool gpio_active_high;
        struct clk *pcie_bus;
        struct clk *pcie_phy;
        struct clk *pcie_inbound_axi;
        struct clk *pcie;
        struct pcie_port pp;
        struct regmap *iomuxc_gpr;
        enum imx6_pcie_variants variant;
        void __iomem *mem_base;
        u32 tx_deemph_gen1;
        u32 tx_deemph_gen2_3p5db;
        u32 tx_deemph_gen2_6db;
        u32 tx_swing_full;
        u32 tx_swing_low;
        int link_gen;
};

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR 0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf

#define PCIE_RC_LCSR 0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
        u32 val;
        u32 max_iterations = 10;
        u32 wait_counter = 0;

        do {
                val = readl(dbi_base + PCIE_PHY_STAT);
                val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
                wait_counter++;

                if (val == exp_val)
                        return 0;

                udelay(1);
        } while (wait_counter < max_iterations);

        return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
        u32 val;
        int ret;

        val = addr << PCIE_PHY_CTRL_DATA_LOC;
        writel(val, dbi_base + PCIE_PHY_CTRL);

        val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
        writel(val, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        val = addr << PCIE_PHY_CTRL_DATA_LOC;
        writel(val, dbi_base + PCIE_PHY_CTRL);

        return pcie_phy_poll_ack(dbi_base, 0);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
        u32 val, phy_ctl;
        int ret;

        ret = pcie_phy_wait_ack(dbi_base, addr);
        if (ret)
                return ret;

        /* assert Read signal */
        phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
        writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        val = readl(dbi_base + PCIE_PHY_STAT);
        *data = val & 0xffff;

        /* deassert Read signal */
        writel(0x00, dbi_base + PCIE_PHY_CTRL);

        return pcie_phy_poll_ack(dbi_base, 0);
}

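/* Write to the 16-bit PCIe PHY control registers (not memory-mapped) */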
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
        u32 var;
        int ret;

        /* write addr */
        /* cap addr */
        ret = pcie_phy_wait_ack(dbi_base, addr);
        if (ret)
                return ret;

        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* capture data */
        var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
        writel(var, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        /* deassert cap data */
        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack de-assertion */
        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        /* assert wr signal */
        var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack */
        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        /* deassert wr signal */
        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack de-assertion */
        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        writel(0x0, dbi_base + PCIE_PHY_CTRL);

        return 0;
}

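/*
 * Pulse the RX_DATA_EN and RX_PLL_EN overrides to put the PHY receiver
 * through a reset cycle.
 */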
static void imx6_pcie_reset_phy(struct pcie_port *pp)
{
        u32 tmp;

        pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
        tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
                PHY_RX_OVRD_IN_LO_RX_PLL_EN);
        pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);

        usleep_range(2000, 3000);

        pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
        tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
                 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
        pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
}

/*
 * PCI abort handler: on i.MX6 a config access to a device that does not
 * respond raises an imprecise external abort.  Returning 0 tells the ARM
 * fault code the abort was handled, so such accesses simply fail instead
 * of crashing the kernel.
 */
static int imx6q_pcie_abort_handler(unsigned long addr,
                unsigned int fsr, struct pt_regs *regs)
{
        return 0;
}

static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
        u32 val, gpr1, gpr12;

        switch (imx6_pcie->variant) {
        case IMX6SX:
                regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                                   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
                                   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
                /* Force PCIe PHY reset */
                regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
                                   IMX6SX_GPR5_PCIE_BTNRST_RESET,
                                   IMX6SX_GPR5_PCIE_BTNRST_RESET);
                break;
        case IMX6Q:
                /*
                 * If the bootloader already enabled the link we need some
                 * special handling to get the core back into a state where
                 * it is safe to touch it for configuration. As there is
                 * no dedicated reset signal wired up for MX6QDL, we need
                 * to manually force LTSSM into "detect" state before
                 * completely disabling LTSSM, which is a prerequisite for
                 * core configuration.
                 *
                 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we
                 * have a strong indication that the bootloader activated
                 * the link.
                 */
                regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
                regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);

                if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
                    (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
                        val = readl(pp->dbi_base + PCIE_PL_PFLR);
                        val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
                        val |= PCIE_PL_PFLR_FORCE_LINK;
                        writel(val, pp->dbi_base + PCIE_PL_PFLR);

                        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                                           IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
                }

                regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                                   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
                regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                                   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
                break;
        }

        return 0;
}

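/* Enable the PCIe PHY reference clock; the sequence differs per SoC variant */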
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
        struct pcie_port *pp = &imx6_pcie->pp;
        int ret = 0;

        switch (imx6_pcie->variant) {
        case IMX6SX:
                ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
                if (ret) {
                        dev_err(pp->dev, "unable to enable pcie_axi clock\n");
                        break;
                }

                regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                                   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
                break;
        case IMX6Q:
                /* power up core phy and enable ref clock */
                regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                                   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
                /*
                 * The async reset input needs the ref clock to sync
                 * internally.  If the ref clock comes after reset, the
                 * internal synced reset time is too short to meet the
                 * requirement, so add a ~10us delay here.
                 */
                udelay(10);
                regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                                   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
                break;
        }

        return ret;
}

static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
        int ret;

        ret = clk_prepare_enable(imx6_pcie->pcie_phy);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie_phy clock\n");
                goto err_pcie_phy;
        }

        ret = clk_prepare_enable(imx6_pcie->pcie_bus);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie_bus clock\n");
                goto err_pcie_bus;
        }

        ret = clk_prepare_enable(imx6_pcie->pcie);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie clock\n");
                goto err_pcie;
        }

        ret = imx6_pcie_enable_ref_clk(imx6_pcie);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie ref clock\n");
                goto err_ref_clk;
        }

        /* allow the clocks to stabilize */
        usleep_range(200, 500);

        /* Some boards don't have PCIe reset GPIO. */
        if (gpio_is_valid(imx6_pcie->reset_gpio)) {
                gpio_set_value_cansleep(imx6_pcie->reset_gpio,
                                        imx6_pcie->gpio_active_high);
                msleep(100);
                gpio_set_value_cansleep(imx6_pcie->reset_gpio,
                                        !imx6_pcie->gpio_active_high);
        }

        if (imx6_pcie->variant == IMX6SX)
                regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
                                   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);

        return 0;

err_ref_clk:
        clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
        clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
        clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
        return ret;
}

static void imx6_pcie_init_phy(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

        if (imx6_pcie->variant == IMX6SX)
                regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                                   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
                                   IMX6SX_GPR12_PCIE_RX_EQ_2);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                           IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

        /* configure constant input signal to the pcie ctrl and phy */
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                           IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                           IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                           IMX6Q_GPR8_TX_DEEMPH_GEN1,
                           imx6_pcie->tx_deemph_gen1 << 0);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                           IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
                           imx6_pcie->tx_deemph_gen2_3p5db << 6);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                           IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
                           imx6_pcie->tx_deemph_gen2_6db << 12);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                           IMX6Q_GPR8_TX_SWING_FULL,
                           imx6_pcie->tx_swing_full << 18);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                           IMX6Q_GPR8_TX_SWING_LOW,
                           imx6_pcie->tx_swing_low << 25);
}

static int imx6_pcie_wait_for_link(struct pcie_port *pp)
{
        /* check if the link is up or not */
        if (!dw_pcie_wait_for_link(pp))
                return 0;

        dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
                readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
                readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
        return -ETIMEDOUT;
}

static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
{
        u32 tmp;
        unsigned int retries;

        for (retries = 0; retries < 200; retries++) {
                tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
                /* Test if the speed change finished. */
                if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
                        return 0;
                usleep_range(100, 1000);
        }

        dev_err(pp->dev, "Speed change timeout\n");
        return -EINVAL;
}

static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
        struct pcie_port *pp = arg;

        return dw_handle_msi_irq(pp);
}

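/*
 * Bring up the PCIe link: train at Gen1 first, then request a directed
 * speed change once the link is up (Gen2 only if fsl,max-link-speed allows).
 */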
static int imx6_pcie_establish_link(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
        u32 tmp;
        int ret;

        /*
         * Force Gen1 operation when starting the link. In case the link is
         * started in Gen2 mode, there is a possibility the devices on the
         * bus will not be detected at all. This happens with PCIe switches.
         */
        tmp = readl(pp->dbi_base + PCIE_RC_LCR);
        tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
        tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
        writel(tmp, pp->dbi_base + PCIE_RC_LCR);

        /* Start LTSSM. */
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                           IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

        ret = imx6_pcie_wait_for_link(pp);
        if (ret) {
                dev_info(pp->dev, "Link never came up\n");
                goto err_reset_phy;
        }

        if (imx6_pcie->link_gen == 2) {
                /* Allow Gen2 mode after the link is up. */
                tmp = readl(pp->dbi_base + PCIE_RC_LCR);
                tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
                tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
                writel(tmp, pp->dbi_base + PCIE_RC_LCR);
        } else {
                dev_info(pp->dev, "Link: Gen2 disabled\n");
        }

        /*
         * Start Directed Speed Change so the best possible speed both link
         * partners support can be negotiated.
         */
        tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
        tmp |= PORT_LOGIC_SPEED_CHANGE;
        writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);

        ret = imx6_pcie_wait_for_speed_change(pp);
        if (ret) {
                dev_err(pp->dev, "Failed to bring link up!\n");
                goto err_reset_phy;
        }

        /* Make sure link training is finished as well! */
        ret = imx6_pcie_wait_for_link(pp);
        if (ret) {
                dev_err(pp->dev, "Failed to bring link up!\n");
                goto err_reset_phy;
        }

        tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
        dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
        return 0;

err_reset_phy:
        dev_dbg(pp->dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
                readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
                readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
        imx6_pcie_reset_phy(pp);

        return ret;
}

static void imx6_pcie_host_init(struct pcie_port *pp)
{
        imx6_pcie_assert_core_reset(pp);

        imx6_pcie_init_phy(pp);

        imx6_pcie_deassert_core_reset(pp);

        dw_pcie_setup_rc(pp);

        imx6_pcie_establish_link(pp);

        if (IS_ENABLED(CONFIG_PCI_MSI))
                dw_pcie_msi_init(pp);
}

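/* .link_up callback for the DesignWare core: test xmlh_link_up in DEBUG_R1 */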
static int imx6_pcie_link_up(struct pcie_port *pp)
{
        return readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) &
                        PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}

static struct pcie_host_ops imx6_pcie_host_ops = {
        .link_up = imx6_pcie_link_up,
        .host_init = imx6_pcie_host_init,
};

static int __init imx6_add_pcie_port(struct pcie_port *pp,
                        struct platform_device *pdev)
{
        int ret;

        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                pp->msi_irq = platform_get_irq_byname(pdev, "msi");
                if (pp->msi_irq <= 0) {
                        dev_err(&pdev->dev, "failed to get MSI irq\n");
                        return -ENODEV;
                }

                ret = devm_request_irq(&pdev->dev, pp->msi_irq,
                                       imx6_pcie_msi_handler,
                                       IRQF_SHARED | IRQF_NO_THREAD,
                                       "mx6-pcie-msi", pp);
                if (ret) {
                        dev_err(&pdev->dev, "failed to request MSI irq\n");
                        return ret;
                }
        }

        pp->root_bus_nr = -1;
        pp->ops = &imx6_pcie_host_ops;

        ret = dw_pcie_host_init(pp);
        if (ret) {
                dev_err(&pdev->dev, "failed to initialize host\n");
                return ret;
        }

        return 0;
}

static int __init imx6_pcie_probe(struct platform_device *pdev)
{
        struct imx6_pcie *imx6_pcie;
        struct pcie_port *pp;
        struct device_node *np = pdev->dev.of_node;
        struct resource *dbi_base;
        struct device_node *node = pdev->dev.of_node;
        int ret;

        imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
        if (!imx6_pcie)
                return -ENOMEM;

        pp = &imx6_pcie->pp;
        pp->dev = &pdev->dev;

        imx6_pcie->variant =
                (enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev);

        /* Added for PCI abort handling */
        hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
                        "imprecise external abort");

        dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
        if (IS_ERR(pp->dbi_base))
                return PTR_ERR(pp->dbi_base);

        /* Fetch GPIOs */
        imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
        imx6_pcie->gpio_active_high = of_property_read_bool(np,
                                                "reset-gpio-active-high");
        if (gpio_is_valid(imx6_pcie->reset_gpio)) {
                ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
                                imx6_pcie->gpio_active_high ?
                                        GPIOF_OUT_INIT_HIGH :
                                        GPIOF_OUT_INIT_LOW,
                                "PCIe reset");
                if (ret) {
                        dev_err(&pdev->dev, "unable to get reset gpio\n");
                        return ret;
                }
        }

        /* Fetch clocks */
        imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
        if (IS_ERR(imx6_pcie->pcie_phy)) {
                dev_err(&pdev->dev,
                        "pcie_phy clock source missing or invalid\n");
                return PTR_ERR(imx6_pcie->pcie_phy);
        }

        imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
        if (IS_ERR(imx6_pcie->pcie_bus)) {
                dev_err(&pdev->dev,
                        "pcie_bus clock source missing or invalid\n");
                return PTR_ERR(imx6_pcie->pcie_bus);
        }

        imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
        if (IS_ERR(imx6_pcie->pcie)) {
                dev_err(&pdev->dev,
                        "pcie clock source missing or invalid\n");
                return PTR_ERR(imx6_pcie->pcie);
        }

        if (imx6_pcie->variant == IMX6SX) {
                imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
                                                           "pcie_inbound_axi");
                if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
                        dev_err(&pdev->dev,
                                "pcie_inbound_axi clock missing or invalid\n");
                        return PTR_ERR(imx6_pcie->pcie_inbound_axi);
                }
        }

        /* Grab GPR config register range */
        imx6_pcie->iomuxc_gpr =
                 syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
        if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
                dev_err(&pdev->dev, "unable to find iomuxc registers\n");
                return PTR_ERR(imx6_pcie->iomuxc_gpr);
        }

        /* Grab PCIe PHY Tx Settings */
        if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
                                 &imx6_pcie->tx_deemph_gen1))
                imx6_pcie->tx_deemph_gen1 = 0;

        if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
                                 &imx6_pcie->tx_deemph_gen2_3p5db))
                imx6_pcie->tx_deemph_gen2_3p5db = 0;

        if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
                                 &imx6_pcie->tx_deemph_gen2_6db))
                imx6_pcie->tx_deemph_gen2_6db = 20;

        if (of_property_read_u32(node, "fsl,tx-swing-full",
                                 &imx6_pcie->tx_swing_full))
                imx6_pcie->tx_swing_full = 127;

        if (of_property_read_u32(node, "fsl,tx-swing-low",
                                 &imx6_pcie->tx_swing_low))
                imx6_pcie->tx_swing_low = 127;

        /* Limit link speed */
        ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed",
                                   &imx6_pcie->link_gen);
        if (ret)
                imx6_pcie->link_gen = 1;

        ret = imx6_add_pcie_port(pp, pdev);
        if (ret < 0)
                return ret;

        platform_set_drvdata(pdev, imx6_pcie);
        return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
        struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

        /* bring down link, so bootloader gets clean state in case of reboot */
        imx6_pcie_assert_core_reset(&imx6_pcie->pp);
}

static const struct of_device_id imx6_pcie_of_match[] = {
        { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, },
        { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
        {},
};
MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);

static struct platform_driver imx6_pcie_driver = {
        .driver = {
                .name = "imx6q-pcie",
                .of_match_table = imx6_pcie_of_match,
        },
        .shutdown = imx6_pcie_shutdown,
};

/* Freescale PCIe driver does not allow module unload */

static int __init imx6_pcie_init(void)
{
        return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
module_init(imx6_pcie_init);

MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
MODULE_LICENSE("GPL v2");