Commit | Line | Data |
---|---|---|
8961def5 ST |
1 | /* |
2 | * PCIe host controller driver for Xilinx AXI PCIe Bridge | |
3 | * | |
4 | * Copyright (c) 2012 - 2014 Xilinx, Inc. | |
5 | * | |
6 | * Based on the Tegra PCIe driver | |
7 | * | |
8 | * Bits taken from Synopsys Designware Host controller driver and | |
9 | * ARM PCI Host generic driver. | |
10 | * | |
11 | * This program is free software: you can redistribute it and/or modify | |
12 | * it under the terms of the GNU General Public License as published by | |
13 | * the Free Software Foundation, either version 2 of the License, or | |
14 | * (at your option) any later version. | |
15 | */ | |
16 | ||
17 | #include <linux/interrupt.h> | |
18 | #include <linux/irq.h> | |
19 | #include <linux/irqdomain.h> | |
20 | #include <linux/kernel.h> | |
21 | #include <linux/module.h> | |
22 | #include <linux/msi.h> | |
23 | #include <linux/of_address.h> | |
24 | #include <linux/of_pci.h> | |
25 | #include <linux/of_platform.h> | |
26 | #include <linux/of_irq.h> | |
27 | #include <linux/pci.h> | |
28 | #include <linux/platform_device.h> | |
29 | ||
/* Register definitions (offsets into the bridge's register space) */
#define XILINX_PCIE_REG_BIR		0x00000130
#define XILINX_PCIE_REG_IDR		0x00000138
#define XILINX_PCIE_REG_IMR		0x0000013c
#define XILINX_PCIE_REG_PSCR		0x00000144
#define XILINX_PCIE_REG_RPSC		0x00000148
#define XILINX_PCIE_REG_MSIBASE1	0x0000014c
#define XILINX_PCIE_REG_MSIBASE2	0x00000150
#define XILINX_PCIE_REG_RPEFR		0x00000154
#define XILINX_PCIE_REG_RPIFR1		0x00000158
#define XILINX_PCIE_REG_RPIFR2		0x0000015c

/* Interrupt registers definitions */
#define XILINX_PCIE_INTR_LINK_DOWN	BIT(0)
#define XILINX_PCIE_INTR_ECRC_ERR	BIT(1)
#define XILINX_PCIE_INTR_STR_ERR	BIT(2)
#define XILINX_PCIE_INTR_HOT_RESET	BIT(3)
#define XILINX_PCIE_INTR_CFG_TIMEOUT	BIT(8)
#define XILINX_PCIE_INTR_CORRECTABLE	BIT(9)
#define XILINX_PCIE_INTR_NONFATAL	BIT(10)
#define XILINX_PCIE_INTR_FATAL		BIT(11)
#define XILINX_PCIE_INTR_INTX		BIT(16)
#define XILINX_PCIE_INTR_MSI		BIT(17)
#define XILINX_PCIE_INTR_SLV_UNSUPP	BIT(20)
#define XILINX_PCIE_INTR_SLV_UNEXP	BIT(21)
#define XILINX_PCIE_INTR_SLV_COMPL	BIT(22)
#define XILINX_PCIE_INTR_SLV_ERRP	BIT(23)
#define XILINX_PCIE_INTR_SLV_CMPABT	BIT(24)
#define XILINX_PCIE_INTR_SLV_ILLBUR	BIT(25)
#define XILINX_PCIE_INTR_MST_DECERR	BIT(26)
#define XILINX_PCIE_INTR_MST_SLVERR	BIT(27)
#define XILINX_PCIE_INTR_MST_ERRP	BIT(28)
/* OR of every interrupt bit the handler services (used as the IMR value) */
#define XILINX_PCIE_IMR_ALL_MASK	0x1FF30FED
#define XILINX_PCIE_IDR_ALL_MASK	0xFFFFFFFF

/* Root Port Error FIFO Read Register definitions */
#define XILINX_PCIE_RPEFR_ERR_VALID	BIT(18)
#define XILINX_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF

/* Root Port Interrupt FIFO Read Register 1 definitions */
#define XILINX_PCIE_RPIFR1_INTR_VALID	BIT(31)
#define XILINX_PCIE_RPIFR1_MSI_INTR	BIT(30)
#define XILINX_PCIE_RPIFR1_INTR_MASK	GENMASK(28, 27)
#define XILINX_PCIE_RPIFR1_ALL_MASK	0xFFFFFFFF
#define XILINX_PCIE_RPIFR1_INTR_SHIFT	27

/* Bridge Info Register definitions */
#define XILINX_PCIE_BIR_ECAM_SZ_MASK	GENMASK(18, 16)
#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT	16

/* Root Port Interrupt FIFO Read Register 2 definitions */
#define XILINX_PCIE_RPIFR2_MSG_DATA	GENMASK(15, 0)

/* Root Port Status/control Register definitions */
#define XILINX_PCIE_REG_RPSC_BEN	BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_PCIE_REG_PSCR_LNKUP	BIT(11)

/* ECAM definitions: bus/device fields inside a config-space offset */
#define ECAM_BUS_NUM_SHIFT		20
#define ECAM_DEV_NUM_SHIFT		12

/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS		128

/* Number of Memory Resources */
#define XILINX_MAX_NUM_RESOURCES	3
99 | ||
/**
 * struct xilinx_pcie_port - PCIe port information
 * @reg_base: IO Mapped Register Base (also the base of the ECAM window,
 *            see xilinx_pcie_config_base())
 * @irq: Interrupt number
 * @msi_pages: MSI pages (one page used as the MSI target address)
 * @root_busno: Root Bus number
 * @dev: Device pointer
 * @irq_domain: IRQ domain pointer (INTx, replaced by the MSI domain when
 *              CONFIG_PCI_MSI is enabled)
 * @bus_range: Bus range
 * @resources: Bus Resources
 */
struct xilinx_pcie_port {
	void __iomem *reg_base;
	u32 irq;
	unsigned long msi_pages;
	u8 root_busno;
	struct device *dev;
	struct irq_domain *irq_domain;
	struct resource bus_range;
	struct list_head resources;
};
121 | ||
122 | static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS); | |
123 | ||
124 | static inline struct xilinx_pcie_port *sys_to_pcie(struct pci_sys_data *sys) | |
125 | { | |
126 | return sys->private_data; | |
127 | } | |
128 | ||
/* Read a 32-bit bridge register at offset @reg */
static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
{
	return readl(port->reg_base + reg);
}
133 | ||
/* Write the 32-bit value @val to the bridge register at offset @reg */
static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->reg_base + reg);
}
138 | ||
139 | static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port) | |
140 | { | |
141 | return (pcie_read(port, XILINX_PCIE_REG_PSCR) & | |
142 | XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0; | |
143 | } | |
144 | ||
145 | /** | |
146 | * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts | |
147 | * @port: PCIe port information | |
148 | */ | |
149 | static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port) | |
150 | { | |
151 | u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR); | |
152 | ||
153 | if (val & XILINX_PCIE_RPEFR_ERR_VALID) { | |
154 | dev_dbg(port->dev, "Requester ID %d\n", | |
155 | val & XILINX_PCIE_RPEFR_REQ_ID); | |
156 | pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK, | |
157 | XILINX_PCIE_REG_RPEFR); | |
158 | } | |
159 | } | |
160 | ||
161 | /** | |
162 | * xilinx_pcie_valid_device - Check if a valid device is present on bus | |
163 | * @bus: PCI Bus structure | |
164 | * @devfn: device/function | |
165 | * | |
166 | * Return: 'true' on success and 'false' if invalid device is found | |
167 | */ | |
168 | static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) | |
169 | { | |
170 | struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); | |
171 | ||
172 | /* Check if link is up when trying to access downstream ports */ | |
173 | if (bus->number != port->root_busno) | |
174 | if (!xilinx_pcie_link_is_up(port)) | |
175 | return false; | |
176 | ||
177 | /* Only one device down on each root port */ | |
178 | if (bus->number == port->root_busno && devfn > 0) | |
179 | return false; | |
180 | ||
181 | /* | |
182 | * Do not read more than one device on the bus directly attached | |
183 | * to RC. | |
184 | */ | |
185 | if (bus->primary == port->root_busno && devfn > 0) | |
186 | return false; | |
187 | ||
188 | return true; | |
189 | } | |
190 | ||
191 | /** | |
192 | * xilinx_pcie_config_base - Get configuration base | |
193 | * @bus: PCI Bus structure | |
194 | * @devfn: Device/function | |
195 | * @where: Offset from base | |
196 | * | |
197 | * Return: Base address of the configuration space needed to be | |
198 | * accessed. | |
199 | */ | |
200 | static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus, | |
201 | unsigned int devfn, int where) | |
202 | { | |
203 | struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); | |
204 | int relbus; | |
205 | ||
206 | relbus = (bus->number << ECAM_BUS_NUM_SHIFT) | | |
207 | (devfn << ECAM_DEV_NUM_SHIFT); | |
208 | ||
209 | return port->reg_base + relbus + where; | |
210 | } | |
211 | ||
212 | /** | |
213 | * xilinx_pcie_read_config - Read configuration space | |
214 | * @bus: PCI Bus structure | |
215 | * @devfn: Device/function | |
216 | * @where: Offset from base | |
217 | * @size: Byte/word/dword | |
218 | * @val: Value to be read | |
219 | * | |
220 | * Return: PCIBIOS_SUCCESSFUL on success | |
221 | * PCIBIOS_DEVICE_NOT_FOUND on failure | |
222 | */ | |
223 | static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn, | |
224 | int where, int size, u32 *val) | |
225 | { | |
226 | void __iomem *addr; | |
227 | ||
228 | if (!xilinx_pcie_valid_device(bus, devfn)) { | |
229 | *val = 0xFFFFFFFF; | |
230 | return PCIBIOS_DEVICE_NOT_FOUND; | |
231 | } | |
232 | ||
233 | addr = xilinx_pcie_config_base(bus, devfn, where); | |
234 | ||
235 | switch (size) { | |
236 | case 1: | |
237 | *val = readb(addr); | |
238 | break; | |
239 | case 2: | |
240 | *val = readw(addr); | |
241 | break; | |
242 | default: | |
243 | *val = readl(addr); | |
244 | break; | |
245 | } | |
246 | ||
247 | return PCIBIOS_SUCCESSFUL; | |
248 | } | |
249 | ||
250 | /** | |
251 | * xilinx_pcie_write_config - Write configuration space | |
252 | * @bus: PCI Bus structure | |
253 | * @devfn: Device/function | |
254 | * @where: Offset from base | |
255 | * @size: Byte/word/dword | |
256 | * @val: Value to be written to device | |
257 | * | |
258 | * Return: PCIBIOS_SUCCESSFUL on success | |
259 | * PCIBIOS_DEVICE_NOT_FOUND on failure | |
260 | */ | |
261 | static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, | |
262 | int where, int size, u32 val) | |
263 | { | |
264 | void __iomem *addr; | |
265 | ||
266 | if (!xilinx_pcie_valid_device(bus, devfn)) | |
267 | return PCIBIOS_DEVICE_NOT_FOUND; | |
268 | ||
269 | addr = xilinx_pcie_config_base(bus, devfn, where); | |
270 | ||
271 | switch (size) { | |
272 | case 1: | |
273 | writeb(val, addr); | |
274 | break; | |
275 | case 2: | |
276 | writew(val, addr); | |
277 | break; | |
278 | default: | |
279 | writel(val, addr); | |
280 | break; | |
281 | } | |
282 | ||
283 | return PCIBIOS_SUCCESSFUL; | |
284 | } | |
285 | ||
/* PCIe operations: config-space accessors handed to the PCI core */
static struct pci_ops xilinx_pcie_ops = {
	.read  = xilinx_pcie_read_config,
	.write = xilinx_pcie_write_config,
};
291 | ||
292 | /* MSI functions */ | |
293 | ||
294 | /** | |
295 | * xilinx_pcie_destroy_msi - Free MSI number | |
296 | * @irq: IRQ to be freed | |
297 | */ | |
298 | static void xilinx_pcie_destroy_msi(unsigned int irq) | |
299 | { | |
300 | struct irq_desc *desc; | |
301 | struct msi_desc *msi; | |
302 | struct xilinx_pcie_port *port; | |
303 | ||
304 | desc = irq_to_desc(irq); | |
305 | msi = irq_desc_get_msi_desc(desc); | |
306 | port = sys_to_pcie(msi->dev->bus->sysdata); | |
307 | ||
308 | if (!test_bit(irq, msi_irq_in_use)) | |
309 | dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); | |
310 | else | |
311 | clear_bit(irq, msi_irq_in_use); | |
312 | } | |
313 | ||
314 | /** | |
315 | * xilinx_pcie_assign_msi - Allocate MSI number | |
316 | * @port: PCIe port structure | |
317 | * | |
318 | * Return: A valid IRQ on success and error value on failure. | |
319 | */ | |
320 | static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port) | |
321 | { | |
322 | int pos; | |
323 | ||
324 | pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS); | |
325 | if (pos < XILINX_NUM_MSI_IRQS) | |
326 | set_bit(pos, msi_irq_in_use); | |
327 | else | |
328 | return -ENOSPC; | |
329 | ||
330 | return pos; | |
331 | } | |
332 | ||
/**
 * xilinx_msi_teardown_irq - Destroy the MSI
 * @chip: MSI Chip descriptor (unused; the bitmap is global)
 * @irq: MSI IRQ to destroy
 */
static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
	xilinx_pcie_destroy_msi(irq);
}
342 | ||
343 | /** | |
344 | * xilinx_pcie_msi_setup_irq - Setup MSI request | |
345 | * @chip: MSI chip pointer | |
346 | * @pdev: PCIe device pointer | |
347 | * @desc: MSI descriptor pointer | |
348 | * | |
349 | * Return: '0' on success and error value on failure | |
350 | */ | |
351 | static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip, | |
352 | struct pci_dev *pdev, | |
353 | struct msi_desc *desc) | |
354 | { | |
355 | struct xilinx_pcie_port *port = sys_to_pcie(pdev->bus->sysdata); | |
356 | unsigned int irq; | |
357 | int hwirq; | |
358 | struct msi_msg msg; | |
359 | phys_addr_t msg_addr; | |
360 | ||
361 | hwirq = xilinx_pcie_assign_msi(port); | |
362 | if (irq < 0) | |
363 | return irq; | |
364 | ||
365 | irq = irq_create_mapping(port->irq_domain, hwirq); | |
366 | if (!irq) | |
367 | return -EINVAL; | |
368 | ||
369 | irq_set_msi_desc(irq, desc); | |
370 | ||
371 | msg_addr = virt_to_phys((void *)port->msi_pages); | |
372 | ||
373 | msg.address_hi = 0; | |
374 | msg.address_lo = msg_addr; | |
375 | msg.data = irq; | |
376 | ||
377 | write_msi_msg(irq, &msg); | |
378 | ||
379 | return 0; | |
380 | } | |
381 | ||
/* MSI Chip Descriptor: hooked onto each bus in xilinx_pcie_add_bus() */
static struct msi_chip xilinx_pcie_msi_chip = {
	.setup_irq = xilinx_pcie_msi_setup_irq,
	.teardown_irq = xilinx_msi_teardown_irq,
};
387 | ||
/* HW Interrupt Chip Descriptor: masking is done at the MSI capability */
static struct irq_chip xilinx_msi_irq_chip = {
	.name = "Xilinx PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
396 | ||
/**
 * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			       irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	/* Mark the descriptor usable (ARM-specific flag handling) */
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}
414 | ||
/* IRQ Domain operations for the MSI domain */
static const struct irq_domain_ops msi_domain_ops = {
	.map = xilinx_pcie_msi_map,
};
419 | ||
420 | /** | |
421 | * xilinx_pcie_enable_msi - Enable MSI support | |
422 | * @port: PCIe port information | |
423 | */ | |
424 | static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) | |
425 | { | |
426 | phys_addr_t msg_addr; | |
427 | ||
428 | port->msi_pages = __get_free_pages(GFP_KERNEL, 0); | |
429 | msg_addr = virt_to_phys((void *)port->msi_pages); | |
430 | pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); | |
431 | pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); | |
432 | } | |
433 | ||
434 | /** | |
435 | * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus | |
436 | * @bus: PCIe bus | |
437 | */ | |
438 | static void xilinx_pcie_add_bus(struct pci_bus *bus) | |
439 | { | |
440 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | |
441 | struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); | |
442 | ||
443 | xilinx_pcie_msi_chip.dev = port->dev; | |
444 | bus->msi = &xilinx_pcie_msi_chip; | |
445 | } | |
446 | } | |
447 | ||
448 | /* INTx Functions */ | |
449 | ||
/**
 * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	/* dummy_irq_chip: INTx is demultiplexed in the main ISR */
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}
467 | ||
/* INTx IRQ Domain operations (4 legacy interrupts, INTA-INTD) */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_pcie_intx_map,
};
472 | ||
473 | /* PCIe HW Functions */ | |
474 | ||
/**
 * xilinx_pcie_intr_handler - Interrupt Service Handler
 * @irq: IRQ number
 * @data: PCIe port information
 *
 * Demultiplexes every bridge interrupt cause: error conditions are
 * logged (and the error FIFO drained), INTx and MSI causes are decoded
 * from the Root Port Interrupt FIFO and forwarded via
 * generic_handle_irq().  All serviced causes are acknowledged in IDR
 * at the end.
 *
 * Return: IRQ_HANDLED on success and IRQ_NONE on failure
 */
static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
	struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
	u32 val, mask, status, msi_data;

	/* Read interrupt decode and mask registers */
	val = pcie_read(port, XILINX_PCIE_REG_IDR);
	mask = pcie_read(port, XILINX_PCIE_REG_IMR);

	/* Only service causes that are both pending and enabled */
	status = val & mask;
	if (!status)
		return IRQ_NONE;	/* not ours: the line is shared */

	if (status & XILINX_PCIE_INTR_LINK_DOWN)
		dev_warn(port->dev, "Link Down\n");

	if (status & XILINX_PCIE_INTR_ECRC_ERR)
		dev_warn(port->dev, "ECRC failed\n");

	if (status & XILINX_PCIE_INTR_STR_ERR)
		dev_warn(port->dev, "Streaming error\n");

	if (status & XILINX_PCIE_INTR_HOT_RESET)
		dev_info(port->dev, "Hot reset\n");

	if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
		dev_warn(port->dev, "ECAM access timeout\n");

	if (status & XILINX_PCIE_INTR_CORRECTABLE) {
		dev_warn(port->dev, "Correctable error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_NONFATAL) {
		dev_warn(port->dev, "Non fatal error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_FATAL) {
		dev_warn(port->dev, "Fatal error message\n");
		xilinx_pcie_clear_err_interrupts(port);
	}

	if (status & XILINX_PCIE_INTR_INTX) {
		/* INTx interrupt received; 'val' is reused for RPIFR1 */
		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);

		/* Check whether interrupt valid */
		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
			return IRQ_HANDLED;
		}

		/* Clear interrupt FIFO register 1 */
		pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
			   XILINX_PCIE_REG_RPIFR1);

		/* Handle INTx Interrupt: FIFO field 0-3 maps to hwirq 1-4 */
		val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
			XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
		generic_handle_irq(irq_find_mapping(port->irq_domain, val));
	}

	if (status & XILINX_PCIE_INTR_MSI) {
		/* MSI Interrupt */
		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);

		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
			return IRQ_HANDLED;
		}

		if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
			/* MSI payload carries the virtual IRQ number */
			msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
				   XILINX_PCIE_RPIFR2_MSG_DATA;

			/* Clear interrupt FIFO register 1 */
			pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
				   XILINX_PCIE_REG_RPIFR1);

			if (IS_ENABLED(CONFIG_PCI_MSI)) {
				/* Handle MSI Interrupt */
				generic_handle_irq(msi_data);
			}
		}
	}

	if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
		dev_warn(port->dev, "Slave unsupported request\n");

	if (status & XILINX_PCIE_INTR_SLV_UNEXP)
		dev_warn(port->dev, "Slave unexpected completion\n");

	if (status & XILINX_PCIE_INTR_SLV_COMPL)
		dev_warn(port->dev, "Slave completion timeout\n");

	if (status & XILINX_PCIE_INTR_SLV_ERRP)
		dev_warn(port->dev, "Slave Error Poison\n");

	if (status & XILINX_PCIE_INTR_SLV_CMPABT)
		dev_warn(port->dev, "Slave Completer Abort\n");

	if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
		dev_warn(port->dev, "Slave Illegal Burst\n");

	if (status & XILINX_PCIE_INTR_MST_DECERR)
		dev_warn(port->dev, "Master decode error\n");

	if (status & XILINX_PCIE_INTR_MST_SLVERR)
		dev_warn(port->dev, "Master slave error\n");

	if (status & XILINX_PCIE_INTR_MST_ERRP)
		dev_warn(port->dev, "Master error poison\n");

	/* Clear the Interrupt Decode register (write-to-clear) */
	pcie_write(port, status, XILINX_PCIE_REG_IDR);

	return IRQ_HANDLED;
}
601 | ||
602 | /** | |
603 | * xilinx_pcie_free_irq_domain - Free IRQ domain | |
604 | * @port: PCIe port information | |
605 | */ | |
606 | static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port) | |
607 | { | |
608 | int i; | |
609 | u32 irq, num_irqs; | |
610 | ||
611 | /* Free IRQ Domain */ | |
612 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | |
613 | ||
614 | free_pages(port->msi_pages, 0); | |
615 | ||
616 | num_irqs = XILINX_NUM_MSI_IRQS; | |
617 | } else { | |
618 | /* INTx */ | |
619 | num_irqs = 4; | |
620 | } | |
621 | ||
622 | for (i = 0; i < num_irqs; i++) { | |
623 | irq = irq_find_mapping(port->irq_domain, i); | |
624 | if (irq > 0) | |
625 | irq_dispose_mapping(irq); | |
626 | } | |
627 | ||
628 | irq_domain_remove(port->irq_domain); | |
629 | } | |
630 | ||
631 | /** | |
632 | * xilinx_pcie_init_irq_domain - Initialize IRQ domain | |
633 | * @port: PCIe port information | |
634 | * | |
635 | * Return: '0' on success and error value on failure | |
636 | */ | |
637 | static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) | |
638 | { | |
639 | struct device *dev = port->dev; | |
640 | struct device_node *node = dev->of_node; | |
641 | struct device_node *pcie_intc_node; | |
642 | ||
643 | /* Setup INTx */ | |
644 | pcie_intc_node = of_get_next_child(node, NULL); | |
645 | if (!pcie_intc_node) { | |
646 | dev_err(dev, "No PCIe Intc node found\n"); | |
647 | return PTR_ERR(pcie_intc_node); | |
648 | } | |
649 | ||
650 | port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4, | |
651 | &intx_domain_ops, | |
652 | port); | |
653 | if (!port->irq_domain) { | |
654 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); | |
655 | return PTR_ERR(port->irq_domain); | |
656 | } | |
657 | ||
658 | /* Setup MSI */ | |
659 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | |
660 | port->irq_domain = irq_domain_add_linear(node, | |
661 | XILINX_NUM_MSI_IRQS, | |
662 | &msi_domain_ops, | |
663 | &xilinx_pcie_msi_chip); | |
664 | if (!port->irq_domain) { | |
665 | dev_err(dev, "Failed to get a MSI IRQ domain\n"); | |
666 | return PTR_ERR(port->irq_domain); | |
667 | } | |
668 | ||
669 | xilinx_pcie_enable_msi(port); | |
670 | } | |
671 | ||
672 | return 0; | |
673 | } | |
674 | ||
/**
 * xilinx_pcie_init_port - Initialize hardware
 * @port: PCIe port information
 *
 * Reports link state, then brings up interrupt delivery: mask all,
 * acknowledge anything pending, unmask the supported set, and finally
 * enable bridge translation.
 */
static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
{
	if (xilinx_pcie_link_is_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
		   XILINX_PCIE_REG_IMR);

	/* Clear pending interrupts (IDR is write-to-clear) */
	pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
			 XILINX_PCIE_IMR_ALL_MASK,
		   XILINX_PCIE_REG_IDR);

	/* Enable all interrupts */
	pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR);

	/* Enable the Bridge enable bit */
	pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
			 XILINX_PCIE_REG_RPSC_BEN,
		   XILINX_PCIE_REG_RPSC);
}
703 | ||
/**
 * xilinx_pcie_setup - Setup memory resources
 * @nr: Bus number
 * @sys: Per controller structure
 *
 * Hands the resources collected in xilinx_pcie_parse_and_add_res() over
 * to the ARM PCI core.
 *
 * Return: '1' on success and error value on failure
 */
static int xilinx_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct xilinx_pcie_port *port = sys_to_pcie(sys);

	list_splice_init(&port->resources, &sys->resources);

	return 1;
}
719 | ||
720 | /** | |
721 | * xilinx_pcie_scan_bus - Scan PCIe bus for devices | |
722 | * @nr: Bus number | |
723 | * @sys: Per controller structure | |
724 | * | |
725 | * Return: Valid Bus pointer on success and NULL on failure | |
726 | */ | |
727 | static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys) | |
728 | { | |
729 | struct xilinx_pcie_port *port = sys_to_pcie(sys); | |
730 | struct pci_bus *bus; | |
731 | ||
732 | port->root_busno = sys->busnr; | |
733 | bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops, | |
734 | sys, &sys->resources); | |
735 | ||
736 | return bus; | |
737 | } | |
738 | ||
/**
 * xilinx_pcie_parse_and_add_res - Add resources by parsing ranges
 * @port: PCIe port information
 *
 * Walks the DT "ranges" property, claims each memory window from
 * iomem_resource and appends it to @port->resources, then appends the
 * bus-range resource ("bus-range" property, or derived from the ECAM
 * size in the Bridge Info register when absent).
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct resource *mem;
	resource_size_t offset;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct pci_host_bridge_window *win;
	int err = 0, mem_resno = 0;

	/* Get the ranges */
	if (of_pci_range_parser_init(&parser, node)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	/* Parse the ranges and add the resources found to the list */
	for_each_of_pci_range(&parser, &range) {

		if (mem_resno >= XILINX_MAX_NUM_RESOURCES) {
			dev_err(dev, "Maximum memory resources exceeded\n");
			return -EINVAL;
		}

		mem = devm_kmalloc(dev, sizeof(*mem), GFP_KERNEL);
		if (!mem) {
			err = -ENOMEM;
			goto free_resources;
		}

		of_pci_range_to_resource(&range, node, mem);

		/* Only memory windows are supported; anything else is skipped */
		switch (mem->flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_MEM:
			offset = range.cpu_addr - range.pci_addr;
			mem_resno++;
			break;
		default:
			err = -EINVAL;
			break;
		}

		if (err < 0) {
			/* Non-memory range: warn and move on (not fatal) */
			dev_warn(dev, "Invalid resource found %pR\n", mem);
			continue;
		}

		err = request_resource(&iomem_resource, mem);
		if (err)
			goto free_resources;

		pci_add_resource_offset(&port->resources, mem, offset);
	}

	/* Get the bus range */
	if (of_pci_parse_bus_range(node, &port->bus_range)) {
		/* No "bus-range" in DT: derive the span from the ECAM size */
		u32 val = pcie_read(port, XILINX_PCIE_REG_BIR);
		u8 last;

		last = (val & XILINX_PCIE_BIR_ECAM_SZ_MASK) >>
			XILINX_PCIE_BIR_ECAM_SZ_SHIFT;

		port->bus_range = (struct resource) {
			.name	= node->name,
			.start	= 0,
			.end	= last,
			.flags	= IORESOURCE_BUS,
		};
	}

	/* Register bus resource */
	pci_add_resource(&port->resources, &port->bus_range);

	return 0;

free_resources:
	/*
	 * NOTE(review): this releases *every* child of iomem_resource,
	 * which assumes this driver owns all of them — verify on systems
	 * with other claimed iomem children.
	 */
	release_child_resources(&iomem_resource);
	list_for_each_entry(win, &port->resources, list)
		devm_kfree(dev, win->res);
	pci_free_resource_list(&port->resources);

	return err;
}
829 | ||
830 | /** | |
831 | * xilinx_pcie_parse_dt - Parse Device tree | |
832 | * @port: PCIe port information | |
833 | * | |
834 | * Return: '0' on success and error value on failure | |
835 | */ | |
836 | static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) | |
837 | { | |
838 | struct device *dev = port->dev; | |
839 | struct device_node *node = dev->of_node; | |
840 | struct resource regs; | |
841 | const char *type; | |
842 | int err; | |
843 | ||
844 | type = of_get_property(node, "device_type", NULL); | |
845 | if (!type || strcmp(type, "pci")) { | |
846 | dev_err(dev, "invalid \"device_type\" %s\n", type); | |
847 | return -EINVAL; | |
848 | } | |
849 | ||
850 | err = of_address_to_resource(node, 0, ®s); | |
851 | if (err) { | |
852 | dev_err(dev, "missing \"reg\" property\n"); | |
853 | return err; | |
854 | } | |
855 | ||
856 | port->reg_base = devm_ioremap_resource(dev, ®s); | |
857 | if (IS_ERR(port->reg_base)) | |
858 | return PTR_ERR(port->reg_base); | |
859 | ||
860 | port->irq = irq_of_parse_and_map(node, 0); | |
861 | err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler, | |
862 | IRQF_SHARED, "xilinx-pcie", port); | |
863 | if (err) { | |
864 | dev_err(dev, "unable to request irq %d\n", port->irq); | |
865 | return err; | |
866 | } | |
867 | ||
868 | return 0; | |
869 | } | |
870 | ||
871 | /** | |
872 | * xilinx_pcie_probe - Probe function | |
873 | * @pdev: Platform device pointer | |
874 | * | |
875 | * Return: '0' on success and error value on failure | |
876 | */ | |
877 | static int xilinx_pcie_probe(struct platform_device *pdev) | |
878 | { | |
879 | struct xilinx_pcie_port *port; | |
880 | struct hw_pci hw; | |
881 | struct device *dev = &pdev->dev; | |
882 | int err; | |
883 | ||
884 | if (!dev->of_node) | |
885 | return -ENODEV; | |
886 | ||
887 | port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); | |
888 | if (!port) | |
889 | return -ENOMEM; | |
890 | ||
891 | port->dev = dev; | |
892 | ||
893 | err = xilinx_pcie_parse_dt(port); | |
894 | if (err) { | |
895 | dev_err(dev, "Parsing DT failed\n"); | |
896 | return err; | |
897 | } | |
898 | ||
899 | xilinx_pcie_init_port(port); | |
900 | ||
901 | err = xilinx_pcie_init_irq_domain(port); | |
902 | if (err) { | |
903 | dev_err(dev, "Failed creating IRQ Domain\n"); | |
904 | return err; | |
905 | } | |
906 | ||
907 | /* | |
908 | * Parse PCI ranges, configuration bus range and | |
909 | * request their resources | |
910 | */ | |
911 | INIT_LIST_HEAD(&port->resources); | |
912 | err = xilinx_pcie_parse_and_add_res(port); | |
913 | if (err) { | |
914 | dev_err(dev, "Failed adding resources\n"); | |
915 | return err; | |
916 | } | |
917 | ||
918 | platform_set_drvdata(pdev, port); | |
919 | ||
920 | /* Register the device */ | |
921 | memset(&hw, 0, sizeof(hw)); | |
922 | hw = (struct hw_pci) { | |
923 | .nr_controllers = 1, | |
924 | .private_data = (void **)&port, | |
925 | .setup = xilinx_pcie_setup, | |
926 | .map_irq = of_irq_parse_and_map_pci, | |
927 | .add_bus = xilinx_pcie_add_bus, | |
928 | .scan = xilinx_pcie_scan_bus, | |
929 | .ops = &xilinx_pcie_ops, | |
930 | }; | |
931 | pci_common_init_dev(dev, &hw); | |
932 | ||
933 | return 0; | |
934 | } | |
935 | ||
/**
 * xilinx_pcie_remove - Remove function
 * @pdev: Platform device pointer
 *
 * Tears down the IRQ domain (and MSI page); devm-managed resources are
 * released by the driver core.
 *
 * Return: '0' always
 */
static int xilinx_pcie_remove(struct platform_device *pdev)
{
	struct xilinx_pcie_port *port = platform_get_drvdata(pdev);

	xilinx_pcie_free_irq_domain(port);

	return 0;
}
950 | ||
951 | static struct of_device_id xilinx_pcie_of_match[] = { | |
952 | { .compatible = "xlnx,axi-pcie-host-1.00.a", }, | |
953 | {} | |
954 | }; | |
955 | ||
/* Platform driver registration; bind/unbind via sysfs is suppressed */
static struct platform_driver xilinx_pcie_driver = {
	.driver = {
		.name = "xilinx-pcie",
		.owner = THIS_MODULE,
		.of_match_table = xilinx_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_pcie_probe,
	.remove = xilinx_pcie_remove,
};
module_platform_driver(xilinx_pcie_driver);

MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
MODULE_LICENSE("GPL v2");