/*
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/gpio.h>
18 #include <linux/interrupt.h>
20 #include <linux/iopoll.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/of_device.h>
24 #include <linux/of_gpio.h>
25 #include <linux/pci.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy/phy.h>
28 #include <linux/regulator/consumer.h>
29 #include <linux/reset.h>
30 #include <linux/slab.h>
31 #include <linux/types.h>
33 #include "pcie-designware.h"
/* PARF (PHY auxiliary/control) register offsets */
#define PCIE20_PARF_PHY_CTRL			0x40
#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16c
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178

/* ELBI (external local bus interface) register offsets */
#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

/* offset of the PCIe capability structure in config space */
#define PCIE20_CAP				0x70

/* delay applied around PERST# (endpoint reset) toggles, in microseconds */
#define PERST_DELAY_US				1000
/*
 * Clocks, resets and regulators used by the v0 (IPQ8064/APQ8064)
 * flavour of the controller.  core_clk and phy_clk were missing from
 * the declaration although qcom_pcie_get_resources_v0(),
 * qcom_pcie_init_v0() and qcom_pcie_deinit_v0() all dereference them.
 */
struct qcom_pcie_resources_v0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator *vdda;
	struct regulator *vdda_phy;
	struct regulator *vdda_refclk;
};
/*
 * Clocks, reset and regulator used by the v1 (APQ8084) flavour of the
 * controller.  iface and aux were missing from the declaration although
 * qcom_pcie_get_resources_v1(), qcom_pcie_init_v1() and
 * qcom_pcie_deinit_v1() all dereference them.
 */
struct qcom_pcie_resources_v1 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};
71 union qcom_pcie_resources
{
72 struct qcom_pcie_resources_v0 v0
;
73 struct qcom_pcie_resources_v1 v1
;
78 struct qcom_pcie_ops
{
79 int (*get_resources
)(struct qcom_pcie
*pcie
);
80 int (*init
)(struct qcom_pcie
*pcie
);
81 void (*deinit
)(struct qcom_pcie
*pcie
);
87 union qcom_pcie_resources res
;
92 struct gpio_desc
*reset
;
93 struct qcom_pcie_ops
*ops
;
96 #define to_qcom_pcie(x) container_of(x, struct qcom_pcie, pp)
98 static void qcom_ep_reset_assert(struct qcom_pcie
*pcie
)
100 gpiod_set_value(pcie
->reset
, 1);
101 usleep_range(PERST_DELAY_US
, PERST_DELAY_US
+ 500);
104 static void qcom_ep_reset_deassert(struct qcom_pcie
*pcie
)
106 gpiod_set_value(pcie
->reset
, 0);
107 usleep_range(PERST_DELAY_US
, PERST_DELAY_US
+ 500);
110 static irqreturn_t
qcom_pcie_msi_irq_handler(int irq
, void *arg
)
112 struct pcie_port
*pp
= arg
;
114 return dw_handle_msi_irq(pp
);
117 static int qcom_pcie_establish_link(struct qcom_pcie
*pcie
)
119 struct device
*dev
= pcie
->dev
;
120 unsigned int retries
= 0;
123 if (dw_pcie_link_up(&pcie
->pp
))
126 /* enable link training */
127 val
= readl(pcie
->elbi
+ PCIE20_ELBI_SYS_CTRL
);
128 val
|= PCIE20_ELBI_SYS_CTRL_LT_ENABLE
;
129 writel(val
, pcie
->elbi
+ PCIE20_ELBI_SYS_CTRL
);
132 if (dw_pcie_link_up(&pcie
->pp
))
134 usleep_range(250, 1000);
135 } while (retries
< 200);
137 dev_warn(dev
, "phy link never came up\n");
142 static int qcom_pcie_get_resources_v0(struct qcom_pcie
*pcie
)
144 struct qcom_pcie_resources_v0
*res
= &pcie
->res
.v0
;
145 struct device
*dev
= pcie
->dev
;
147 res
->vdda
= devm_regulator_get(dev
, "vdda");
148 if (IS_ERR(res
->vdda
))
149 return PTR_ERR(res
->vdda
);
151 res
->vdda_phy
= devm_regulator_get(dev
, "vdda_phy");
152 if (IS_ERR(res
->vdda_phy
))
153 return PTR_ERR(res
->vdda_phy
);
155 res
->vdda_refclk
= devm_regulator_get(dev
, "vdda_refclk");
156 if (IS_ERR(res
->vdda_refclk
))
157 return PTR_ERR(res
->vdda_refclk
);
159 res
->iface_clk
= devm_clk_get(dev
, "iface");
160 if (IS_ERR(res
->iface_clk
))
161 return PTR_ERR(res
->iface_clk
);
163 res
->core_clk
= devm_clk_get(dev
, "core");
164 if (IS_ERR(res
->core_clk
))
165 return PTR_ERR(res
->core_clk
);
167 res
->phy_clk
= devm_clk_get(dev
, "phy");
168 if (IS_ERR(res
->phy_clk
))
169 return PTR_ERR(res
->phy_clk
);
171 res
->pci_reset
= devm_reset_control_get(dev
, "pci");
172 if (IS_ERR(res
->pci_reset
))
173 return PTR_ERR(res
->pci_reset
);
175 res
->axi_reset
= devm_reset_control_get(dev
, "axi");
176 if (IS_ERR(res
->axi_reset
))
177 return PTR_ERR(res
->axi_reset
);
179 res
->ahb_reset
= devm_reset_control_get(dev
, "ahb");
180 if (IS_ERR(res
->ahb_reset
))
181 return PTR_ERR(res
->ahb_reset
);
183 res
->por_reset
= devm_reset_control_get(dev
, "por");
184 if (IS_ERR(res
->por_reset
))
185 return PTR_ERR(res
->por_reset
);
187 res
->phy_reset
= devm_reset_control_get(dev
, "phy");
188 if (IS_ERR(res
->phy_reset
))
189 return PTR_ERR(res
->phy_reset
);
194 static int qcom_pcie_get_resources_v1(struct qcom_pcie
*pcie
)
196 struct qcom_pcie_resources_v1
*res
= &pcie
->res
.v1
;
197 struct device
*dev
= pcie
->dev
;
199 res
->vdda
= devm_regulator_get(dev
, "vdda");
200 if (IS_ERR(res
->vdda
))
201 return PTR_ERR(res
->vdda
);
203 res
->iface
= devm_clk_get(dev
, "iface");
204 if (IS_ERR(res
->iface
))
205 return PTR_ERR(res
->iface
);
207 res
->aux
= devm_clk_get(dev
, "aux");
208 if (IS_ERR(res
->aux
))
209 return PTR_ERR(res
->aux
);
211 res
->master_bus
= devm_clk_get(dev
, "master_bus");
212 if (IS_ERR(res
->master_bus
))
213 return PTR_ERR(res
->master_bus
);
215 res
->slave_bus
= devm_clk_get(dev
, "slave_bus");
216 if (IS_ERR(res
->slave_bus
))
217 return PTR_ERR(res
->slave_bus
);
219 res
->core
= devm_reset_control_get(dev
, "core");
220 if (IS_ERR(res
->core
))
221 return PTR_ERR(res
->core
);
226 static void qcom_pcie_deinit_v0(struct qcom_pcie
*pcie
)
228 struct qcom_pcie_resources_v0
*res
= &pcie
->res
.v0
;
230 reset_control_assert(res
->pci_reset
);
231 reset_control_assert(res
->axi_reset
);
232 reset_control_assert(res
->ahb_reset
);
233 reset_control_assert(res
->por_reset
);
234 reset_control_assert(res
->pci_reset
);
235 clk_disable_unprepare(res
->iface_clk
);
236 clk_disable_unprepare(res
->core_clk
);
237 clk_disable_unprepare(res
->phy_clk
);
238 regulator_disable(res
->vdda
);
239 regulator_disable(res
->vdda_phy
);
240 regulator_disable(res
->vdda_refclk
);
243 static int qcom_pcie_init_v0(struct qcom_pcie
*pcie
)
245 struct qcom_pcie_resources_v0
*res
= &pcie
->res
.v0
;
246 struct device
*dev
= pcie
->dev
;
250 ret
= regulator_enable(res
->vdda
);
252 dev_err(dev
, "cannot enable vdda regulator\n");
256 ret
= regulator_enable(res
->vdda_refclk
);
258 dev_err(dev
, "cannot enable vdda_refclk regulator\n");
262 ret
= regulator_enable(res
->vdda_phy
);
264 dev_err(dev
, "cannot enable vdda_phy regulator\n");
268 ret
= reset_control_assert(res
->ahb_reset
);
270 dev_err(dev
, "cannot assert ahb reset\n");
274 ret
= clk_prepare_enable(res
->iface_clk
);
276 dev_err(dev
, "cannot prepare/enable iface clock\n");
280 ret
= clk_prepare_enable(res
->phy_clk
);
282 dev_err(dev
, "cannot prepare/enable phy clock\n");
286 ret
= clk_prepare_enable(res
->core_clk
);
288 dev_err(dev
, "cannot prepare/enable core clock\n");
292 ret
= reset_control_deassert(res
->ahb_reset
);
294 dev_err(dev
, "cannot deassert ahb reset\n");
295 goto err_deassert_ahb
;
298 /* enable PCIe clocks and resets */
299 val
= readl(pcie
->parf
+ PCIE20_PARF_PHY_CTRL
);
301 writel(val
, pcie
->parf
+ PCIE20_PARF_PHY_CTRL
);
303 /* enable external reference clock */
304 val
= readl(pcie
->parf
+ PCIE20_PARF_PHY_REFCLK
);
306 writel(val
, pcie
->parf
+ PCIE20_PARF_PHY_REFCLK
);
308 ret
= reset_control_deassert(res
->phy_reset
);
310 dev_err(dev
, "cannot deassert phy reset\n");
314 ret
= reset_control_deassert(res
->pci_reset
);
316 dev_err(dev
, "cannot deassert pci reset\n");
320 ret
= reset_control_deassert(res
->por_reset
);
322 dev_err(dev
, "cannot deassert por reset\n");
326 ret
= reset_control_deassert(res
->axi_reset
);
328 dev_err(dev
, "cannot deassert axi reset\n");
332 /* wait for clock acquisition */
333 usleep_range(1000, 1500);
338 clk_disable_unprepare(res
->core_clk
);
340 clk_disable_unprepare(res
->phy_clk
);
342 clk_disable_unprepare(res
->iface_clk
);
344 regulator_disable(res
->vdda_phy
);
346 regulator_disable(res
->vdda_refclk
);
348 regulator_disable(res
->vdda
);
353 static void qcom_pcie_deinit_v1(struct qcom_pcie
*pcie
)
355 struct qcom_pcie_resources_v1
*res
= &pcie
->res
.v1
;
357 reset_control_assert(res
->core
);
358 clk_disable_unprepare(res
->slave_bus
);
359 clk_disable_unprepare(res
->master_bus
);
360 clk_disable_unprepare(res
->iface
);
361 clk_disable_unprepare(res
->aux
);
362 regulator_disable(res
->vdda
);
365 static int qcom_pcie_init_v1(struct qcom_pcie
*pcie
)
367 struct qcom_pcie_resources_v1
*res
= &pcie
->res
.v1
;
368 struct device
*dev
= pcie
->dev
;
371 ret
= reset_control_deassert(res
->core
);
373 dev_err(dev
, "cannot deassert core reset\n");
377 ret
= clk_prepare_enable(res
->aux
);
379 dev_err(dev
, "cannot prepare/enable aux clock\n");
383 ret
= clk_prepare_enable(res
->iface
);
385 dev_err(dev
, "cannot prepare/enable iface clock\n");
389 ret
= clk_prepare_enable(res
->master_bus
);
391 dev_err(dev
, "cannot prepare/enable master_bus clock\n");
395 ret
= clk_prepare_enable(res
->slave_bus
);
397 dev_err(dev
, "cannot prepare/enable slave_bus clock\n");
401 ret
= regulator_enable(res
->vdda
);
403 dev_err(dev
, "cannot enable vdda regulator\n");
407 /* change DBI base address */
408 writel(0, pcie
->parf
+ PCIE20_PARF_DBI_BASE_ADDR
);
410 if (IS_ENABLED(CONFIG_PCI_MSI
)) {
411 u32 val
= readl(pcie
->parf
+ PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT
);
414 writel(val
, pcie
->parf
+ PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT
);
419 clk_disable_unprepare(res
->slave_bus
);
421 clk_disable_unprepare(res
->master_bus
);
423 clk_disable_unprepare(res
->iface
);
425 clk_disable_unprepare(res
->aux
);
427 reset_control_assert(res
->core
);
432 static int qcom_pcie_link_up(struct pcie_port
*pp
)
434 struct qcom_pcie
*pcie
= to_qcom_pcie(pp
);
435 u16 val
= readw(pcie
->dbi
+ PCIE20_CAP
+ PCI_EXP_LNKSTA
);
437 return !!(val
& PCI_EXP_LNKSTA_DLLLA
);
440 static void qcom_pcie_host_init(struct pcie_port
*pp
)
442 struct qcom_pcie
*pcie
= to_qcom_pcie(pp
);
445 qcom_ep_reset_assert(pcie
);
447 ret
= pcie
->ops
->init(pcie
);
451 ret
= phy_power_on(pcie
->phy
);
455 dw_pcie_setup_rc(pp
);
457 if (IS_ENABLED(CONFIG_PCI_MSI
))
458 dw_pcie_msi_init(pp
);
460 qcom_ep_reset_deassert(pcie
);
462 ret
= qcom_pcie_establish_link(pcie
);
468 qcom_ep_reset_assert(pcie
);
469 phy_power_off(pcie
->phy
);
471 pcie
->ops
->deinit(pcie
);
474 static int qcom_pcie_rd_own_conf(struct pcie_port
*pp
, int where
, int size
,
477 /* the device class is not reported correctly from the register */
478 if (where
== PCI_CLASS_REVISION
&& size
== 4) {
479 *val
= readl(pp
->dbi_base
+ PCI_CLASS_REVISION
);
480 *val
&= 0xff; /* keep revision id */
481 *val
|= PCI_CLASS_BRIDGE_PCI
<< 16;
482 return PCIBIOS_SUCCESSFUL
;
485 return dw_pcie_cfg_read(pp
->dbi_base
+ where
, size
, val
);
488 static struct pcie_host_ops qcom_pcie_dw_ops
= {
489 .link_up
= qcom_pcie_link_up
,
490 .host_init
= qcom_pcie_host_init
,
491 .rd_own_conf
= qcom_pcie_rd_own_conf
,
494 static const struct qcom_pcie_ops ops_v0
= {
495 .get_resources
= qcom_pcie_get_resources_v0
,
496 .init
= qcom_pcie_init_v0
,
497 .deinit
= qcom_pcie_deinit_v0
,
500 static const struct qcom_pcie_ops ops_v1
= {
501 .get_resources
= qcom_pcie_get_resources_v1
,
502 .init
= qcom_pcie_init_v1
,
503 .deinit
= qcom_pcie_deinit_v1
,
506 static int qcom_pcie_probe(struct platform_device
*pdev
)
508 struct device
*dev
= &pdev
->dev
;
509 struct resource
*res
;
510 struct qcom_pcie
*pcie
;
511 struct pcie_port
*pp
;
514 pcie
= devm_kzalloc(dev
, sizeof(*pcie
), GFP_KERNEL
);
518 pcie
->ops
= (struct qcom_pcie_ops
*)of_device_get_match_data(dev
);
521 pcie
->reset
= devm_gpiod_get_optional(dev
, "perst", GPIOD_OUT_LOW
);
522 if (IS_ERR(pcie
->reset
))
523 return PTR_ERR(pcie
->reset
);
525 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "parf");
526 pcie
->parf
= devm_ioremap_resource(dev
, res
);
527 if (IS_ERR(pcie
->parf
))
528 return PTR_ERR(pcie
->parf
);
530 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "dbi");
531 pcie
->dbi
= devm_ioremap_resource(dev
, res
);
532 if (IS_ERR(pcie
->dbi
))
533 return PTR_ERR(pcie
->dbi
);
535 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "elbi");
536 pcie
->elbi
= devm_ioremap_resource(dev
, res
);
537 if (IS_ERR(pcie
->elbi
))
538 return PTR_ERR(pcie
->elbi
);
540 pcie
->phy
= devm_phy_optional_get(dev
, "pciephy");
541 if (IS_ERR(pcie
->phy
))
542 return PTR_ERR(pcie
->phy
);
544 ret
= pcie
->ops
->get_resources(pcie
);
550 pp
->dbi_base
= pcie
->dbi
;
551 pp
->root_bus_nr
= -1;
552 pp
->ops
= &qcom_pcie_dw_ops
;
554 if (IS_ENABLED(CONFIG_PCI_MSI
)) {
555 pp
->msi_irq
= platform_get_irq_byname(pdev
, "msi");
559 ret
= devm_request_irq(dev
, pp
->msi_irq
,
560 qcom_pcie_msi_irq_handler
,
561 IRQF_SHARED
, "qcom-pcie-msi", pp
);
563 dev_err(dev
, "cannot request msi irq\n");
568 ret
= phy_init(pcie
->phy
);
572 ret
= dw_pcie_host_init(pp
);
574 dev_err(dev
, "cannot initialize host\n");
578 platform_set_drvdata(pdev
, pcie
);
583 static int qcom_pcie_remove(struct platform_device
*pdev
)
585 struct qcom_pcie
*pcie
= platform_get_drvdata(pdev
);
587 qcom_ep_reset_assert(pcie
);
588 phy_power_off(pcie
->phy
);
590 pcie
->ops
->deinit(pcie
);
595 static const struct of_device_id qcom_pcie_match
[] = {
596 { .compatible
= "qcom,pcie-ipq8064", .data
= &ops_v0
},
597 { .compatible
= "qcom,pcie-apq8064", .data
= &ops_v0
},
598 { .compatible
= "qcom,pcie-apq8084", .data
= &ops_v1
},
601 MODULE_DEVICE_TABLE(of
, qcom_pcie_match
);
603 static struct platform_driver qcom_pcie_driver
= {
604 .probe
= qcom_pcie_probe
,
605 .remove
= qcom_pcie_remove
,
608 .of_match_table
= qcom_pcie_match
,
612 module_platform_driver(qcom_pcie_driver
);
614 MODULE_AUTHOR("Stanimir Varbanov <svarbanov@mm-sol.com>");
615 MODULE_DESCRIPTION("Qualcomm PCIe root complex driver");
616 MODULE_LICENSE("GPL v2");