Commit | Line | Data |
---|---|---|
0c4ffcfe MK |
1 | /* |
2 | * PCIe host controller driver for Texas Instruments Keystone SoCs | |
3 | * | |
4 | * Copyright (C) 2013-2014 Texas Instruments., Ltd. | |
5 | * http://www.ti.com | |
6 | * | |
7 | * Author: Murali Karicheri <m-karicheri2@ti.com> | |
8 | * Implementation based on pci-exynos.c and pcie-designware.c | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or modify | |
11 | * it under the terms of the GNU General Public License version 2 as | |
12 | * published by the Free Software Foundation. | |
13 | */ | |
14 | ||
15 | #include <linux/irqchip/chained_irq.h> | |
16 | #include <linux/clk.h> | |
17 | #include <linux/delay.h> | |
025dd3da | 18 | #include <linux/interrupt.h> |
0c4ffcfe MK |
19 | #include <linux/irqdomain.h> |
20 | #include <linux/module.h> | |
21 | #include <linux/msi.h> | |
22 | #include <linux/of_irq.h> | |
23 | #include <linux/of.h> | |
24 | #include <linux/of_pci.h> | |
25 | #include <linux/platform_device.h> | |
26 | #include <linux/phy/phy.h> | |
27 | #include <linux/resource.h> | |
28 | #include <linux/signal.h> | |
29 | ||
30 | #include "pcie-designware.h" | |
31 | #include "pci-keystone.h" | |
32 | ||
33 | #define DRIVER_NAME "keystone-pcie" | |
34 | ||
35 | /* driver specific constants */ | |
36 | #define MAX_MSI_HOST_IRQS 8 | |
37 | #define MAX_LEGACY_HOST_IRQS 4 | |
38 | ||
0c4ffcfe MK |
39 | /* DEV_STAT_CTRL */ |
40 | #define PCIE_CAP_BASE 0x70 | |
41 | ||
c15982df MK |
42 | /* PCIE controller device IDs */ |
43 | #define PCIE_RC_K2HK 0xb008 | |
44 | #define PCIE_RC_K2E 0xb009 | |
45 | #define PCIE_RC_K2L 0xb00a | |
46 | ||
0c4ffcfe MK |
47 | #define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp) |
48 | ||
c15982df MK |
49 | static void quirk_limit_mrrs(struct pci_dev *dev) |
50 | { | |
51 | struct pci_bus *bus = dev->bus; | |
52 | struct pci_dev *bridge = bus->self; | |
53 | static const struct pci_device_id rc_pci_devids[] = { | |
54 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK), | |
55 | .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, | |
56 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E), | |
57 | .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, | |
58 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L), | |
59 | .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, | |
60 | { 0, }, | |
61 | }; | |
62 | ||
63 | if (pci_is_root_bus(bus)) | |
64 | return; | |
65 | ||
66 | /* look for the host bridge */ | |
67 | while (!pci_is_root_bus(bus)) { | |
68 | bridge = bus->self; | |
69 | bus = bus->parent; | |
70 | } | |
71 | ||
72 | if (bridge) { | |
73 | /* | |
74 | * Keystone PCI controller has a h/w limitation of | |
75 | * 256 bytes maximum read request size. It can't handle | |
76 | * anything higher than this. So force this limit on | |
77 | * all downstream devices. | |
78 | */ | |
79 | if (pci_match_id(rc_pci_devids, bridge)) { | |
80 | if (pcie_get_readrq(dev) > 256) { | |
81 | dev_info(&dev->dev, "limiting MRRS to 256\n"); | |
82 | pcie_set_readrq(dev, 256); | |
83 | } | |
84 | } | |
85 | } | |
86 | } | |
87 | DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs); | |
88 | ||
0c4ffcfe MK |
89 | static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) |
90 | { | |
91 | struct pcie_port *pp = &ks_pcie->pp; | |
6cbb247e | 92 | unsigned int retries; |
0c4ffcfe MK |
93 | |
94 | dw_pcie_setup_rc(pp); | |
95 | ||
96 | if (dw_pcie_link_up(pp)) { | |
97 | dev_err(pp->dev, "Link already up\n"); | |
98 | return 0; | |
99 | } | |
100 | ||
0c4ffcfe | 101 | /* check if the link is up or not */ |
886bc5ce | 102 | for (retries = 0; retries < 5; retries++) { |
6cbb247e | 103 | ks_dw_pcie_initiate_link_train(ks_pcie); |
886bc5ce JP |
104 | if (!dw_pcie_wait_for_link(pp)) |
105 | return 0; | |
0c4ffcfe MK |
106 | } |
107 | ||
6cbb247e | 108 | dev_err(pp->dev, "phy link never came up\n"); |
886bc5ce | 109 | return -ETIMEDOUT; |
0c4ffcfe MK |
110 | } |
111 | ||
bd0b9ac4 | 112 | static void ks_pcie_msi_irq_handler(struct irq_desc *desc) |
0c4ffcfe | 113 | { |
97a85964 | 114 | unsigned int irq = irq_desc_get_irq(desc); |
0c4ffcfe MK |
115 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); |
116 | u32 offset = irq - ks_pcie->msi_host_irqs[0]; | |
117 | struct pcie_port *pp = &ks_pcie->pp; | |
118 | struct irq_chip *chip = irq_desc_get_chip(desc); | |
119 | ||
4808c35e | 120 | dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq); |
0c4ffcfe MK |
121 | |
122 | /* | |
123 | * The chained irq handler installation would have replaced normal | |
124 | * interrupt driver handler so we need to take care of mask/unmask and | |
125 | * ack operation. | |
126 | */ | |
127 | chained_irq_enter(chip, desc); | |
128 | ks_dw_pcie_handle_msi_irq(ks_pcie, offset); | |
129 | chained_irq_exit(chip, desc); | |
130 | } | |
131 | ||
132 | /** | |
133 | * ks_pcie_legacy_irq_handler() - Handle legacy interrupt | |
134 | * @irq: IRQ line for legacy interrupts | |
135 | * @desc: Pointer to irq descriptor | |
136 | * | |
137 | * Traverse through pending legacy interrupts and invoke handler for each. Also | |
138 | * takes care of interrupt controller level mask/ack operation. | |
139 | */ | |
bd0b9ac4 | 140 | static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) |
0c4ffcfe | 141 | { |
97a85964 | 142 | unsigned int irq = irq_desc_get_irq(desc); |
0c4ffcfe MK |
143 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); |
144 | struct pcie_port *pp = &ks_pcie->pp; | |
145 | u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; | |
146 | struct irq_chip *chip = irq_desc_get_chip(desc); | |
147 | ||
148 | dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq); | |
149 | ||
150 | /* | |
151 | * The chained irq handler installation would have replaced normal | |
152 | * interrupt driver handler so we need to take care of mask/unmask and | |
153 | * ack operation. | |
154 | */ | |
155 | chained_irq_enter(chip, desc); | |
156 | ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset); | |
157 | chained_irq_exit(chip, desc); | |
158 | } | |
159 | ||
/*
 * ks_pcie_get_irq_controller_info() - map host IRQs of a child intc node
 * @ks_pcie:    Keystone PCIe port descriptor
 * @controller: DT node name, either "legacy-interrupt-controller" or
 *              "msi-interrupt-controller"
 * @num_irqs:   out-parameter; number of host IRQs successfully mapped
 *
 * Looks up the named interrupt-controller child node of the PCIe DT node,
 * maps up to MAX_LEGACY_HOST_IRQS (4) or MAX_MSI_HOST_IRQS (8) of its
 * interrupts, and stores the virq numbers and the node pointer in @ks_pcie.
 *
 * Return: 0 when at least one IRQ was mapped, -EINVAL otherwise.
 */
static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
					   char *controller, int *num_irqs)
{
	int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
	struct device *dev = ks_pcie->pp.dev;
	struct device_node *np_pcie = dev->of_node, **np_temp;

	/* Anything other than the MSI controller name is treated as legacy */
	if (!strcmp(controller, "msi-interrupt-controller"))
		legacy = 0;

	/* Select which ks_pcie fields and limits apply to this controller */
	if (legacy) {
		np_temp = &ks_pcie->legacy_intc_np;
		max_host_irqs = MAX_LEGACY_HOST_IRQS;
		host_irqs = &ks_pcie->legacy_host_irqs[0];
	} else {
		np_temp = &ks_pcie->msi_intc_np;
		max_host_irqs = MAX_MSI_HOST_IRQS;
		host_irqs = &ks_pcie->msi_host_irqs[0];
	}

	/*
	 * interrupt controller is in a child node
	 *
	 * NOTE(review): of_find_node_by_name() searches from @np_pcie and
	 * drops a reference on it; of_get_child_by_name() may be the safer
	 * API here - confirm refcounting against the caller's expectations.
	 */
	*np_temp = of_find_node_by_name(np_pcie, controller);
	if (!(*np_temp)) {
		dev_err(dev, "Node for %s is absent\n", controller);
		goto out;
	}
	/* Number of interrupts listed in the child node */
	temp = of_irq_count(*np_temp);
	if (!temp)
		goto out;
	if (temp > max_host_irqs)
		dev_warn(dev, "Too many %s interrupts defined %u\n",
			 (legacy ? "legacy" : "MSI"), temp);

	/*
	 * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to
	 * 7 (MSI)
	 */
	for (temp = 0; temp < max_host_irqs; temp++) {
		host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
		/* Stop at the first interrupt that fails to map */
		if (!host_irqs[temp])
			break;
	}
	/* Success requires at least one mapped IRQ */
	if (temp) {
		*num_irqs = temp;
		ret = 0;
	}
out:
	return ret;
}
209 | ||
210 | static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) | |
211 | { | |
212 | int i; | |
213 | ||
214 | /* Legacy IRQ */ | |
215 | for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) { | |
5168a73c TG |
216 | irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i], |
217 | ks_pcie_legacy_irq_handler, | |
218 | ks_pcie); | |
0c4ffcfe MK |
219 | } |
220 | ks_dw_pcie_enable_legacy_irqs(ks_pcie); | |
221 | ||
222 | /* MSI IRQ */ | |
223 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | |
224 | for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) { | |
2cf5a03c TG |
225 | irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i], |
226 | ks_pcie_msi_irq_handler, | |
227 | ks_pcie); | |
0c4ffcfe MK |
228 | } |
229 | } | |
025dd3da MK |
230 | |
231 | if (ks_pcie->error_irq > 0) | |
232 | ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base); | |
0c4ffcfe MK |
233 | } |
234 | ||
/*
 * When a PCI device does not exist during config cycles, keystone host gets a
 * bus error instead of returning 0xffffffff. This handler always returns 0
 * for this kind of faults.
 */
static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
				struct pt_regs *regs)
{
	/* Fetch the 32-bit ARM instruction that triggered the abort */
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

	/* Opcode mask selects ARM single-data-load encodings (LDR family) */
	if ((instr & 0x0e100090) == 0x00100090) {
		/* Bits [15:12] of the encoding name the destination register */
		int reg = (instr >> 12) & 15;

		/* Fake the all-ones result a PCI master abort should yield */
		regs->uregs[reg] = -1;
		/*
		 * Skip the faulting instruction (4 bytes).
		 * NOTE(review): assumes 32-bit ARM encoding; a Thumb load
		 * would need a 2-byte advance - confirm this path can only
		 * be reached from ARM-state kernel code.
		 */
		regs->ARM_pc += 4;
	}

	/* Always report the abort as handled */
	return 0;
}
254 | ||
/*
 * ks_pcie_host_init() - DesignWare host_init callback for Keystone
 * @pp: DesignWare pcie_port embedded in the Keystone descriptor
 *
 * Brings the link up, programs the Keystone application registers,
 * installs the host interrupt handlers and patches up a few config-space
 * registers the v3.65 hardware does not set correctly on its own.
 */
static void __init ks_pcie_host_init(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
	u32 val;

	ks_pcie_establish_link(ks_pcie);
	ks_dw_pcie_setup_rc_app_regs(ks_pcie);
	ks_pcie_setup_interrupts(ks_pcie);
	/* Advertise 32-bit I/O addressing in both I/O base and limit fields */
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
			pp->dbi_base + PCI_IO_BASE);

	/* publish the device ID read from the app register space at probe */
	writew(ks_pcie->device_id, pp->dbi_base + PCI_DEVICE_ID);

	/* update the DEV_STAT_CTRL to publish right mrrs */
	val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
	val &= ~PCI_EXP_DEVCTL_READRQ;
	/* set the mrrs to 256 bytes (encoded value 1 in the MRRS field) */
	val |= BIT(12);
	writel(val, pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);

	/*
	 * PCIe access errors that result into OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");
}
283 | ||
/*
 * DesignWare host callbacks. Keystone's v3.65 hardware needs its own
 * config accessors for non-root buses, MSI set/clear hooks and a
 * scan_bus fixup in addition to the common host_init.
 */
static struct pcie_host_ops keystone_pcie_host_ops = {
	.rd_other_conf = ks_dw_pcie_rd_other_conf,
	.wr_other_conf = ks_dw_pcie_wr_other_conf,
	.link_up = ks_dw_pcie_link_up,
	.host_init = ks_pcie_host_init,
	.msi_set_irq = ks_dw_pcie_msi_set_irq,
	.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
	.get_msi_addr = ks_dw_pcie_get_msi_addr,
	.msi_host_init = ks_dw_pcie_msi_host_init,
	.scan_bus = ks_dw_pcie_v3_65_scan_bus,
};
295 | ||
025dd3da MK |
296 | static irqreturn_t pcie_err_irq_handler(int irq, void *priv) |
297 | { | |
298 | struct keystone_pcie *ks_pcie = priv; | |
299 | ||
300 | return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev, | |
301 | ks_pcie->va_app_base); | |
302 | } | |
303 | ||
0c4ffcfe MK |
304 | static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, |
305 | struct platform_device *pdev) | |
306 | { | |
307 | struct pcie_port *pp = &ks_pcie->pp; | |
308 | int ret; | |
309 | ||
310 | ret = ks_pcie_get_irq_controller_info(ks_pcie, | |
311 | "legacy-interrupt-controller", | |
312 | &ks_pcie->num_legacy_host_irqs); | |
313 | if (ret) | |
314 | return ret; | |
315 | ||
316 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | |
317 | ret = ks_pcie_get_irq_controller_info(ks_pcie, | |
318 | "msi-interrupt-controller", | |
319 | &ks_pcie->num_msi_host_irqs); | |
320 | if (ret) | |
321 | return ret; | |
322 | } | |
323 | ||
025dd3da MK |
324 | /* |
325 | * Index 0 is the platform interrupt for error interrupt | |
326 | * from RC. This is optional. | |
327 | */ | |
328 | ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); | |
329 | if (ks_pcie->error_irq <= 0) | |
330 | dev_info(&pdev->dev, "no error IRQ defined\n"); | |
331 | else { | |
332 | if (request_irq(ks_pcie->error_irq, pcie_err_irq_handler, | |
333 | IRQF_SHARED, "pcie-error-irq", ks_pcie) < 0) { | |
334 | dev_err(&pdev->dev, "failed to request error IRQ %d\n", | |
335 | ks_pcie->error_irq); | |
336 | return ret; | |
337 | } | |
338 | } | |
339 | ||
0c4ffcfe MK |
340 | pp->root_bus_nr = -1; |
341 | pp->ops = &keystone_pcie_host_ops; | |
342 | ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); | |
343 | if (ret) { | |
344 | dev_err(&pdev->dev, "failed to initialize host\n"); | |
345 | return ret; | |
346 | } | |
347 | ||
348 | return ret; | |
349 | } | |
350 | ||
/* DT match table: binds this driver to "ti,keystone-pcie" pci nodes */
static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.compatible = "ti,keystone-pcie",
	},
	{ },
};
MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
359 | ||
360 | static int __exit ks_pcie_remove(struct platform_device *pdev) | |
361 | { | |
362 | struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); | |
363 | ||
364 | clk_disable_unprepare(ks_pcie->clk); | |
365 | ||
366 | return 0; | |
367 | } | |
368 | ||
369 | static int __init ks_pcie_probe(struct platform_device *pdev) | |
370 | { | |
371 | struct device *dev = &pdev->dev; | |
372 | struct keystone_pcie *ks_pcie; | |
373 | struct pcie_port *pp; | |
374 | struct resource *res; | |
375 | void __iomem *reg_p; | |
376 | struct phy *phy; | |
377 | int ret = 0; | |
0c4ffcfe MK |
378 | |
379 | ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie), | |
380 | GFP_KERNEL); | |
66700707 | 381 | if (!ks_pcie) |
0c4ffcfe | 382 | return -ENOMEM; |
66700707 | 383 | |
0c4ffcfe MK |
384 | pp = &ks_pcie->pp; |
385 | ||
0c4ffcfe MK |
386 | /* initialize SerDes Phy if present */ |
387 | phy = devm_phy_get(dev, "pcie-phy"); | |
25de15c9 SL |
388 | if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER) |
389 | return PTR_ERR(phy); | |
390 | ||
0c4ffcfe MK |
391 | if (!IS_ERR_OR_NULL(phy)) { |
392 | ret = phy_init(phy); | |
393 | if (ret < 0) | |
394 | return ret; | |
395 | } | |
396 | ||
4455efc9 MK |
397 | /* index 2 is to read PCI DEVICE_ID */ |
398 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | |
0c4ffcfe MK |
399 | reg_p = devm_ioremap_resource(dev, res); |
400 | if (IS_ERR(reg_p)) | |
401 | return PTR_ERR(reg_p); | |
8665a482 MK |
402 | ks_pcie->device_id = readl(reg_p) >> 16; |
403 | devm_iounmap(dev, reg_p); | |
404 | devm_release_mem_region(dev, res->start, resource_size(res)); | |
0c4ffcfe MK |
405 | |
406 | pp->dev = dev; | |
025dd3da | 407 | ks_pcie->np = dev->of_node; |
0c4ffcfe MK |
408 | platform_set_drvdata(pdev, ks_pcie); |
409 | ks_pcie->clk = devm_clk_get(dev, "pcie"); | |
410 | if (IS_ERR(ks_pcie->clk)) { | |
411 | dev_err(dev, "Failed to get pcie rc clock\n"); | |
412 | return PTR_ERR(ks_pcie->clk); | |
413 | } | |
414 | ret = clk_prepare_enable(ks_pcie->clk); | |
415 | if (ret) | |
416 | return ret; | |
417 | ||
418 | ret = ks_add_pcie_port(ks_pcie, pdev); | |
419 | if (ret < 0) | |
420 | goto fail_clk; | |
421 | ||
422 | return 0; | |
423 | fail_clk: | |
424 | clk_disable_unprepare(ks_pcie->clk); | |
425 | ||
426 | return ret; | |
427 | } | |
428 | ||
/*
 * __refdata: probe is __init and remove is __exit; the marker tells
 * modpost these references into init/exit sections are intentional.
 */
static struct platform_driver ks_pcie_driver __refdata = {
	.probe = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name = "keystone-pcie",
		.of_match_table = of_match_ptr(ks_pcie_of_match),
	},
};

module_platform_driver(ks_pcie_driver);

MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
MODULE_DESCRIPTION("Keystone PCIe host controller driver");
MODULE_LICENSE("GPL v2");