Commit | Line | Data |
---|---|---|
c24a8a7a J |
1 | /* |
2 | * Copyright (c) 2003-2012 Broadcom Corporation | |
3 | * All Rights Reserved | |
4 | * | |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the Broadcom | |
9 | * license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or without | |
12 | * modification, are permitted provided that the following conditions | |
13 | * are met: | |
14 | * | |
15 | * 1. Redistributions of source code must retain the above copyright | |
16 | * notice, this list of conditions and the following disclaimer. | |
17 | * 2. Redistributions in binary form must reproduce the above copyright | |
18 | * notice, this list of conditions and the following disclaimer in | |
19 | * the documentation and/or other materials provided with the | |
20 | * distribution. | |
21 | * | |
22 | * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR | |
23 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | |
24 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
25 | * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE | |
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | |
29 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | |
30 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE | |
31 | * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN | |
32 | * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
33 | */ | |
34 | ||
35 | #include <linux/types.h> | |
36 | #include <linux/pci.h> | |
37 | #include <linux/kernel.h> | |
38 | #include <linux/init.h> | |
39 | #include <linux/msi.h> | |
40 | #include <linux/mm.h> | |
41 | #include <linux/irq.h> | |
42 | #include <linux/irqdesc.h> | |
43 | #include <linux/console.h> | |
44 | ||
45 | #include <asm/io.h> | |
46 | ||
47 | #include <asm/netlogic/interrupt.h> | |
48 | #include <asm/netlogic/haldefs.h> | |
49 | #include <asm/netlogic/common.h> | |
50 | #include <asm/netlogic/mips-extns.h> | |
51 | ||
52 | #include <asm/netlogic/xlp-hal/iomap.h> | |
53 | #include <asm/netlogic/xlp-hal/xlp.h> | |
54 | #include <asm/netlogic/xlp-hal/pic.h> | |
55 | #include <asm/netlogic/xlp-hal/pcibus.h> | |
56 | #include <asm/netlogic/xlp-hal/bridge.h> | |
57 | ||
58 | #define XLP_MSIVEC_PER_LINK 32 | |
d66f3f0e GR |
59 | #define XLP_MSIXVEC_TOTAL (cpu_is_xlp9xx() ? 128 : 32) |
60 | #define XLP_MSIXVEC_PER_LINK (cpu_is_xlp9xx() ? 32 : 8) | |
c24a8a7a J |
61 | |
62 | /* 128 MSI irqs per node, mapped starting at NLM_MSI_VEC_BASE */ | |
63 | static inline int nlm_link_msiirq(int link, int msivec) | |
64 | { | |
65 | return NLM_MSI_VEC_BASE + link * XLP_MSIVEC_PER_LINK + msivec; | |
66 | } | |
67 | ||
d66f3f0e | 68 | /* get the link MSI vector from irq number */ |
c24a8a7a J |
69 | static inline int nlm_irq_msivec(int irq) |
70 | { | |
d66f3f0e | 71 | return (irq - NLM_MSI_VEC_BASE) % XLP_MSIVEC_PER_LINK; |
c24a8a7a J |
72 | } |
73 | ||
d66f3f0e | 74 | /* get the link from the irq number */ |
c24a8a7a J |
75 | static inline int nlm_irq_msilink(int irq) |
76 | { | |
d66f3f0e GR |
77 | int total_msivec = XLP_MSIVEC_PER_LINK * PCIE_NLINKS; |
78 | ||
79 | return ((irq - NLM_MSI_VEC_BASE) % total_msivec) / | |
80 | XLP_MSIVEC_PER_LINK; | |
c24a8a7a J |
81 | } |
82 | ||
83 | /* | |
d66f3f0e GR |
84 | * For XLP 8xx/4xx/3xx/2xx, only 32 MSI-X vectors are possible because |
85 | * there are only 32 PIC interrupts for MSI. We split them statically | |
86 | * and use 8 MSI-X vectors per link - this keeps the allocation and | |
87 | * lookup simple. | |
88 | * On XLP 9xx, there are 32 vectors per link, and the interrupts are | |
89 | * not routed thru PIC, so we can use all 128 MSI-X vectors. | |
c24a8a7a J |
90 | */ |
91 | static inline int nlm_link_msixirq(int link, int bit) | |
92 | { | |
93 | return NLM_MSIX_VEC_BASE + link * XLP_MSIXVEC_PER_LINK + bit; | |
94 | } | |
95 | ||
d66f3f0e | 96 | /* get the link MSI vector from irq number */ |
c24a8a7a J |
97 | static inline int nlm_irq_msixvec(int irq) |
98 | { | |
d66f3f0e | 99 | return (irq - NLM_MSIX_VEC_BASE) % XLP_MSIXVEC_TOTAL; |
c24a8a7a J |
100 | } |
101 | ||
d66f3f0e GR |
/* get the link number from a MSI-X vector (vector in total irq space) */
static inline int nlm_irq_msixlink(int msixvec)
{
	return msixvec / XLP_MSIXVEC_PER_LINK;
}
107 | ||
/*
 * Per link MSI and MSI-X information, set as IRQ handler data for
 * MSI and MSI-X interrupts.
 */
struct xlp_msi_data {
	struct nlm_soc_info *node;	/* node that owns this PCIe link */
	uint64_t lnkbase;		/* register base of the link bridge */
	uint32_t msi_enabled_mask;	/* MSI vectors currently enabled */
	uint32_t msi_alloc_mask;	/* MSI vectors handed out so far */
	uint32_t msix_alloc_mask;	/* MSI-X vectors handed out so far */
	spinlock_t msi_lock;		/* protects the three masks above */
};
120 | ||
121 | /* | |
122 | * MSI Chip definitions | |
123 | * | |
124 | * On XLP, there is a PIC interrupt associated with each PCIe link on the | |
125 | * chip (which appears as a PCI bridge to us). This gives us 32 MSI irqs | |
126 | * per link and 128 overall. | |
127 | * | |
128 | * When a device connected to the link raises a MSI interrupt, we get a | |
129 | * link interrupt and we then have to look at PCIE_MSI_STATUS register at | |
130 | * the bridge to map it to the IRQ | |
131 | */ | |
/* irq_chip enable/unmask: set the vector's bit in the link's MSI enable reg */
static void xlp_msi_enable(struct irq_data *d)
{
	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
	unsigned long flags;
	int vec;

	vec = nlm_irq_msivec(d->irq);
	spin_lock_irqsave(&md->msi_lock, flags);
	md->msi_enabled_mask |= 1u << vec;
	/* XLP 9xx has a different register layout for the MSI enable reg */
	if (cpu_is_xlp9xx())
		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
				md->msi_enabled_mask);
	else
		nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
	spin_unlock_irqrestore(&md->msi_lock, flags);
}
148 | ||
/* irq_chip disable: clear the vector's bit in the link's MSI enable reg */
static void xlp_msi_disable(struct irq_data *d)
{
	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
	unsigned long flags;
	int vec;

	vec = nlm_irq_msivec(d->irq);
	spin_lock_irqsave(&md->msi_lock, flags);
	md->msi_enabled_mask &= ~(1u << vec);
	/* XLP 9xx has a different register layout for the MSI enable reg */
	if (cpu_is_xlp9xx())
		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
				md->msi_enabled_mask);
	else
		nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
	spin_unlock_irqrestore(&md->msi_lock, flags);
}
165 | ||
/*
 * Mask-and-ack for a link MSI: disable the vector, then ack it at every
 * level it is latched - the bridge status register, the CPU eirr, and
 * the PIC interrupt routing table entry for the link.
 */
static void xlp_msi_mask_ack(struct irq_data *d)
{
	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
	int link, vec;

	link = nlm_irq_msilink(d->irq);
	vec = nlm_irq_msivec(d->irq);
	xlp_msi_disable(d);

	/* Ack MSI on bridge (write-1-to-clear the vector's status bit) */
	if (cpu_is_xlp9xx())
		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec);
	else
		nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);

	/* Ack at eirr and PIC */
	ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link));
	if (cpu_is_xlp9xx())
		nlm_pic_ack(md->node->picbase,
				PIC_9XX_IRT_PCIE_LINK_INDEX(link));
	else
		nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link));
}
189 | ||
/* irq_chip used for all per-link MSI irqs in the extended irq space */
static struct irq_chip xlp_msi_chip = {
	.name = "XLP-MSI",
	.irq_enable = xlp_msi_enable,
	.irq_disable = xlp_msi_disable,
	.irq_mask_ack = xlp_msi_mask_ack,
	.irq_unmask = xlp_msi_enable,
};
197 | ||
198 | /* | |
d66f3f0e GR |
199 | * XLP8XX/4XX/3XX/2XX: |
200 | * The MSI-X interrupt handling is different from MSI, there are 32 MSI-X | |
201 | * interrupts generated by the PIC and each of these correspond to a MSI-X | |
202 | * vector (0-31) that can be assigned. | |
c24a8a7a | 203 | * |
d66f3f0e GR |
204 | * We divide the MSI-X vectors to 8 per link and do a per-link allocation |
205 | * | |
206 | * XLP9XX: | |
207 | * 32 MSI-X vectors are available per link, and the interrupts are not routed | |
208 | * thru the PIC. PIC ack not needed. | |
c24a8a7a J |
209 | * |
210 | * Enable and disable done using standard MSI functions. | |
211 | */ | |
/*
 * Mask-and-ack for a MSI-X vector: mask it at the PCI MSI level, then
 * clear its status bit on the bridge and ack at eirr (and at the PIC,
 * except on 9xx where MSI-X is not routed thru the PIC).
 */
static void xlp_msix_mask_ack(struct irq_data *d)
{
	struct xlp_msi_data *md;
	int link, msixvec;
	uint32_t status_reg, bit;

	msixvec = nlm_irq_msixvec(d->irq);
	link = nlm_irq_msixlink(msixvec);
	pci_msi_mask_irq(d);
	md = irq_data_get_irq_handler_data(d);

	/* Ack MSI on bridge */
	if (cpu_is_xlp9xx()) {
		/* 9xx has a per-link status reg; bit is link-relative */
		status_reg = PCIE_9XX_MSIX_STATUSX(link);
		bit = msixvec % XLP_MSIXVEC_PER_LINK;
	} else {
		/* single shared status reg; bit is the global vector */
		status_reg = PCIE_MSIX_STATUS;
		bit = msixvec;
	}
	nlm_write_reg(md->lnkbase, status_reg, 1u << bit);

	/* Ack at eirr and PIC */
	ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link));
	if (!cpu_is_xlp9xx())
		nlm_pic_ack(md->node->picbase,
				PIC_IRT_PCIE_MSIX_INDEX(msixvec));
}
239 | ||
/* irq_chip for MSI-X irqs; enable/disable use the generic PCI MSI helpers */
static struct irq_chip xlp_msix_chip = {
	.name = "XLP-MSIX",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask_ack = xlp_msix_mask_ack,
	.irq_unmask = pci_msi_unmask_irq,
};
247 | ||
c24a8a7a J |
/*
 * Nothing to tear down here: the MSI/MSI-X irqs live in a statically
 * initialized extended irq space (see xlp_init_node_msi_irqs).
 * NOTE(review): the per-link msi_alloc_mask/msix_alloc_mask bits are
 * never returned to the pool, so vectors are not reusable after free.
 */
void arch_teardown_msi_irq(unsigned int irq)
{
}
251 | ||
252 | /* | |
253 | * Setup a PCIe link for MSI. By default, the links are in | |
254 | * legacy interrupt mode. We will switch them to MSI mode | |
255 | * at the first MSI request. | |
256 | */ | |
static void xlp_config_link_msi(uint64_t lnkbase, int lirq, uint64_t msiaddr)
{
	u32 val;

	/* Enable MSI interrupt generation at the bridge (bit 9 of INT_EN0) */
	if (cpu_is_xlp9xx()) {
		val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200; /* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
		}
	} else {
		val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;
			nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
		}
	}

	/* reg 0x1 is the config-space command/status word; 0x0400 is
	 * presumably PCI_COMMAND_INTX_DISABLE (bit 10) - TODO confirm */
	val = nlm_read_reg(lnkbase, 0x1); /* CMD */
	if ((val & 0x0400) == 0) {
		val |= 0x0400;
		nlm_write_reg(lnkbase, 0x1, val);
	}

	/* Update IRQ in the PCI irq reg */
	val = nlm_read_pci_reg(lnkbase, 0xf);
	val &= ~0x1fu;
	val |= (1 << 8) | lirq;
	nlm_write_pci_reg(lnkbase, 0xf, val);

	/* MSI addr - where devices on this link write their MSI messages */
	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRH, msiaddr >> 32);
	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRL, msiaddr & 0xffffffff);

	/* MSI cap for bridge */
	val = nlm_read_reg(lnkbase, PCIE_BRIDGE_MSI_CAP);
	if ((val & (1 << 16)) == 0) {
		val |= 0xb << 16;	/* mmc32, msi enable */
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_CAP, val);
	}
}
298 | ||
299 | /* | |
300 | * Allocate a MSI vector on a link | |
301 | */ | |
static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
	struct msi_desc *desc)
{
	struct xlp_msi_data *md;
	struct msi_msg msg;
	unsigned long flags;
	int msivec, irt, lirq, xirq, ret;
	uint64_t msiaddr;

	/* Get MSI data for the link */
	lirq = PIC_PCIE_LINK_MSI_IRQ(link);
	xirq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
	md = irq_get_handler_data(xirq);
	msiaddr = MSI_LINK_ADDR(node, link);

	spin_lock_irqsave(&md->msi_lock, flags);
	/* first vector on this link: switch it from legacy to MSI mode */
	if (md->msi_alloc_mask == 0) {
		xlp_config_link_msi(lnkbase, lirq, msiaddr);
		/* switch the link IRQ to MSI range */
		if (cpu_is_xlp9xx())
			irt = PIC_9XX_IRT_PCIE_LINK_INDEX(link);
		else
			irt = PIC_IRT_PCIE_LINK_INDEX(link);
		nlm_setup_pic_irq(node, lirq, lirq, irt);
		nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq,
			node * nlm_threads_per_node(), 1 /*en */);
	}

	/* allocate a MSI vec (lowest unused bit), and tell the bridge */
	msivec = fls(md->msi_alloc_mask);
	if (msivec == XLP_MSIVEC_PER_LINK) {
		/* all vectors on this link are in use */
		spin_unlock_irqrestore(&md->msi_lock, flags);
		return -ENOMEM;
	}
	md->msi_alloc_mask |= (1u << msivec);
	spin_unlock_irqrestore(&md->msi_lock, flags);

	/* message the device will write to msiaddr to raise this vector */
	msg.address_hi = msiaddr >> 32;
	msg.address_lo = msiaddr & 0xffffffff;
	msg.data = 0xc00 | msivec;

	xirq = xirq + msivec; /* msi mapped to global irq space */
	ret = irq_set_msi_desc(xirq, desc);
	if (ret < 0)
		return ret;

	pci_write_msi_msg(xirq, &msg);
	return 0;
}
351 | ||
352 | /* | |
353 | * Switch a link to MSI-X mode | |
354 | */ | |
static void xlp_config_link_msix(uint64_t lnkbase, int lirq, uint64_t msixaddr)
{
	u32 val;

	/* set MSI-X mode bit in reg 0x2C - NOTE(review): register name
	 * not visible here; presumably the bridge MSI-X control */
	val = nlm_read_reg(lnkbase, 0x2C);
	if ((val & 0x80000000U) == 0) {
		val |= 0x80000000U;
		nlm_write_reg(lnkbase, 0x2C, val);
	}

	/* Enable MSI interrupt generation at the bridge (bit 9 of INT_EN0) */
	if (cpu_is_xlp9xx()) {
		val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200; /* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
		}
	} else {
		val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200; /* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
		}
	}

	/* reg 0x1 is the config-space command/status word; 0x0400 is
	 * presumably PCI_COMMAND_INTX_DISABLE (bit 10) - TODO confirm */
	val = nlm_read_reg(lnkbase, 0x1); /* CMD */
	if ((val & 0x0400) == 0) {
		val |= 0x0400;
		nlm_write_reg(lnkbase, 0x1, val);
	}

	/* Update IRQ in the PCI irq reg */
	val = nlm_read_pci_reg(lnkbase, 0xf);
	val &= ~0x1fu;
	val |= (1 << 8) | lirq;
	nlm_write_pci_reg(lnkbase, 0xf, val);

	/* window of addresses the bridge decodes as MSI-X writes */
	if (cpu_is_xlp9xx()) {
		/* MSI-X addresses */
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_BASE,
				msixaddr >> 8);
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT,
				(msixaddr + MSI_ADDR_SZ) >> 8);
	} else {
		/* MSI-X addresses */
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE,
				msixaddr >> 8);
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT,
				(msixaddr + MSI_ADDR_SZ) >> 8);
	}
}
405 | ||
406 | /* | |
407 | * Allocate a MSI-X vector | |
408 | */ | |
409 | static int xlp_setup_msix(uint64_t lnkbase, int node, int link, | |
410 | struct msi_desc *desc) | |
411 | { | |
412 | struct xlp_msi_data *md; | |
413 | struct msi_msg msg; | |
414 | unsigned long flags; | |
415 | int t, msixvec, lirq, xirq, ret; | |
416 | uint64_t msixaddr; | |
417 | ||
418 | /* Get MSI data for the link */ | |
419 | lirq = PIC_PCIE_MSIX_IRQ(link); | |
420 | xirq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0)); | |
421 | md = irq_get_handler_data(xirq); | |
422 | msixaddr = MSIX_LINK_ADDR(node, link); | |
423 | ||
424 | spin_lock_irqsave(&md->msi_lock, flags); | |
425 | /* switch the PCIe link to MSI-X mode at the first alloc */ | |
426 | if (md->msix_alloc_mask == 0) | |
427 | xlp_config_link_msix(lnkbase, lirq, msixaddr); | |
428 | ||
429 | /* allocate a MSI-X vec, and tell the bridge about it */ | |
430 | t = fls(md->msix_alloc_mask); | |
431 | if (t == XLP_MSIXVEC_PER_LINK) { | |
432 | spin_unlock_irqrestore(&md->msi_lock, flags); | |
433 | return -ENOMEM; | |
434 | } | |
435 | md->msix_alloc_mask |= (1u << t); | |
436 | spin_unlock_irqrestore(&md->msi_lock, flags); | |
437 | ||
438 | xirq += t; | |
439 | msixvec = nlm_irq_msixvec(xirq); | |
d66f3f0e | 440 | |
c24a8a7a J |
441 | msg.address_hi = msixaddr >> 32; |
442 | msg.address_lo = msixaddr & 0xffffffff; | |
443 | msg.data = 0xc00 | msixvec; | |
444 | ||
445 | ret = irq_set_msi_desc(xirq, desc); | |
446 | if (ret < 0) { | |
447 | destroy_irq(xirq); | |
448 | return ret; | |
449 | } | |
450 | ||
83a18912 | 451 | pci_write_msi_msg(xirq, &msg); |
c24a8a7a J |
452 | return 0; |
453 | } | |
454 | ||
/*
 * Arch hook called by the PCI MSI core to set up one MSI or MSI-X
 * interrupt for a device. Finds the device's PCIe link bridge, derives
 * node/link from the bridge devfn and delegates to the MSI or MSI-X
 * setup routine. Returns nonzero on failure.
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct pci_dev *lnkdev;
	uint64_t lnkbase;
	int node, link, slot;

	lnkdev = xlp_get_pcie_link(dev);
	if (lnkdev == NULL) {
		dev_err(&dev->dev, "Could not find bridge\n");
		return 1;
	}
	/* node/link are encoded in the bridge devfn: 8 slots per node */
	slot = PCI_SLOT(lnkdev->devfn);
	link = PCI_FUNC(lnkdev->devfn);
	node = slot / 8;
	lnkbase = nlm_get_pcie_base(node, link);

	if (desc->msi_attrib.is_msix)
		return xlp_setup_msix(lnkbase, node, link, desc);
	else
		return xlp_setup_msi(lnkbase, node, link, desc);
}
476 | ||
477 | void __init xlp_init_node_msi_irqs(int node, int link) | |
478 | { | |
479 | struct nlm_soc_info *nodep; | |
480 | struct xlp_msi_data *md; | |
d66f3f0e | 481 | int irq, i, irt, msixvec, val; |
c24a8a7a J |
482 | |
483 | pr_info("[%d %d] Init node PCI IRT\n", node, link); | |
484 | nodep = nlm_get_node(node); | |
485 | ||
486 | /* Alloc an MSI block for the link */ | |
487 | md = kzalloc(sizeof(*md), GFP_KERNEL); | |
488 | spin_lock_init(&md->msi_lock); | |
489 | md->msi_enabled_mask = 0; | |
490 | md->msi_alloc_mask = 0; | |
491 | md->msix_alloc_mask = 0; | |
492 | md->node = nodep; | |
493 | md->lnkbase = nlm_get_pcie_base(node, link); | |
494 | ||
495 | /* extended space for MSI interrupts */ | |
496 | irq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0)); | |
497 | for (i = irq; i < irq + XLP_MSIVEC_PER_LINK; i++) { | |
498 | irq_set_chip_and_handler(i, &xlp_msi_chip, handle_level_irq); | |
499 | irq_set_handler_data(i, md); | |
500 | } | |
501 | ||
d66f3f0e GR |
502 | for (i = 0; i < XLP_MSIXVEC_PER_LINK ; i++) { |
503 | if (cpu_is_xlp9xx()) { | |
504 | val = ((node * nlm_threads_per_node()) << 7 | | |
505 | PIC_PCIE_MSIX_IRQ(link) << 1 | 0 << 0); | |
506 | nlm_write_pcie_reg(md->lnkbase, PCIE_9XX_MSIX_VECX(i + | |
507 | (link * XLP_MSIXVEC_PER_LINK)), val); | |
508 | } else { | |
509 | /* Initialize MSI-X irts to generate one interrupt | |
510 | * per link | |
511 | */ | |
512 | msixvec = link * XLP_MSIXVEC_PER_LINK + i; | |
513 | irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec); | |
514 | nlm_pic_init_irt(nodep->picbase, irt, | |
515 | PIC_PCIE_MSIX_IRQ(link), | |
516 | node * nlm_threads_per_node(), 1); | |
517 | } | |
c24a8a7a J |
518 | |
519 | /* Initialize MSI-X extended irq space for the link */ | |
520 | irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i)); | |
521 | irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq); | |
522 | irq_set_handler_data(irq, md); | |
523 | } | |
c24a8a7a J |
524 | } |
525 | ||
/*
 * Dispatcher for a link MSI interrupt: read the pending-and-enabled
 * vector bits from the bridge status register and do_IRQ() each one
 * in the link's extended irq range.
 */
void nlm_dispatch_msi(int node, int lirq)
{
	struct xlp_msi_data *md;
	int link, i, irqbase;
	u32 status;

	link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE;
	irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
	md = irq_get_handler_data(irqbase);
	if (cpu_is_xlp9xx())
		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) &
				md->msi_enabled_mask;
	else
		status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) &
				md->msi_enabled_mask;
	while (status) {
		i = __ffs(status);
		do_IRQ(irqbase + i);
		status &= status - 1;	/* clear lowest set bit */
	}
}
547 | ||
/*
 * Dispatcher for a link MSI-X interrupt: read pending vector bits from
 * the bridge and do_IRQ() each one. On pre-9xx chips the status register
 * is shared by all links, so it is first narrowed to this link's slice.
 */
void nlm_dispatch_msix(int node, int lirq)
{
	struct xlp_msi_data *md;
	int link, i, irqbase;
	u32 status;

	link = lirq - PIC_PCIE_MSIX_IRQ_BASE;
	irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
	md = irq_get_handler_data(irqbase);
	if (cpu_is_xlp9xx())
		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link));
	else
		status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS);

	/* narrow it down to the MSI-x vectors for our link */
	if (!cpu_is_xlp9xx())
		status = (status >> (link * XLP_MSIXVEC_PER_LINK)) &
				((1 << XLP_MSIXVEC_PER_LINK) - 1);

	while (status) {
		i = __ffs(status);
		do_IRQ(irqbase + i);
		status &= status - 1;	/* clear lowest set bit */
	}
}