arch/powerpc/sysdev/fsl_msi.c
/*
 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
 *
 * Author: Tony Li <tony.li@freescale.com>
 *	   Jason Jin <Jason.jin@freescale.com>
 *
 * The hwirq alloc and free code is reused from sysdev/mpic_msi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2 of the
 * License.
 *
 */
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <sysdev/fsl_soc.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>

#include "fsl_msi.h"
#include "fsl_pci.h"

#define MSIIR_OFFSET_MASK	0xfffff
#define MSIIR_IBS_SHIFT		0
#define MSIIR_SRS_SHIFT		5
#define MSIIR1_IBS_SHIFT	4
#define MSIIR1_SRS_SHIFT	0
#define MSI_SRS_MASK		0xf
#define MSI_IBS_MASK		0x1f

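/*
 * Encode an (MSIR bank, bit index) pair into a single MSI hwirq number,
 * using the per-controller SRS/IBS shifts (MSIIR vs. MSIIR1 layout).
 */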
#define msi_hwirq(msi, msir_index, intr_index) \
		((msir_index) << (msi)->srs_shift | \
		 ((intr_index) << (msi)->ibs_shift))

static LIST_HEAD(msi_head);

struct fsl_msi_feature {
	u32 fsl_pic_ip;
	u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
};

struct fsl_msi_cascade_data {
	struct fsl_msi *msi_data;
	int index;
	int virq;
};

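/* MSIR registers are big-endian; "reg" is a byte offset from the bank base. */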
static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

/*
 * Nothing to do here: the MSIR register has already been read in the
 * cascade handler, which acknowledged this MSI interrupt.
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}

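/*
 * Report which cascade interrupt services this MSI, e.g. in the chip
 * name column of /proc/interrupts.
 */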
static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
{
	struct fsl_msi *msi_data = irqd->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
	int cascade_virq, srs;

	srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
	cascade_virq = msi_data->cascade_array[srs]->virq;

	seq_printf(p, " fsl-msi-%d", cascade_virq);
}

static struct irq_chip fsl_msi_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_ack	= fsl_msi_end_irq,
	.irq_print_chip = fsl_msi_print_chip,
};

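/*
 * irq_domain .map callback: wire each MSI virq to fsl_msi_chip, handle
 * it as an edge interrupt, and stash the owning fsl_msi as chip data.
 */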
static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
				irq_hw_number_t hw)
{
	struct fsl_msi *msi_data = h->host_data;
	struct irq_chip *chip = &fsl_msi_chip;

	irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);

	irq_set_chip_data(virq, msi_data);
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops fsl_msi_host_ops = {
	.map = fsl_msi_host_map,
};

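/*
 * Set up the hwirq bitmap for this controller. All hwirqs start out
 * reserved; fsl_msi_setup_hwirq() releases the ones that are backed by
 * an actual MSIR register.
 */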
static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
	int rc, hwirq;

	rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
			      msi_data->irqhost->of_node);
	if (rc)
		return rc;

	/*
	 * Reserve all the hwirqs
	 * The available hwirqs will be released in fsl_msi_setup_hwirq()
	 */
	for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
		msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);

	return 0;
}

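/*
 * Undo fsl_setup_msi_irqs(): for every MSI descriptor of the device,
 * detach the descriptor, return the hwirq to the bitmap and dispose of
 * the virq mapping.
 */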
static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct fsl_msi *msi_data;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		msi_data = irq_get_chip_data(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		msi_bitmap_free_hwirqs(&msi_data->bitmap,
				       virq_to_hw(entry->irq), 1);
		irq_dispose_mapping(entry->irq);
	}

	return;
}

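/*
 * Build the MSI message for a hwirq: the address targets the MSIIR
 * register (taken from the "msi-address-64" property when present,
 * otherwise IMMR base plus the controller's MSIIR offset), and the data
 * is the hwirq itself, which encodes the SRS/IBS fields.
 */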
static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
				struct msi_msg *msg,
				struct fsl_msi *fsl_msi_data)
{
	struct fsl_msi *msi_data = fsl_msi_data;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	u64 address; /* Physical address of the MSIIR */
	int len;
	const __be64 *reg;

	/* If the msi-address-64 property exists, then use it */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64)))
		address = be64_to_cpup(reg);
	else
		address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;

	msg->address_lo = lower_32_bits(address);
	msg->address_hi = upper_32_bits(address);

	msg->data = hwirq;

	pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
		 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
		 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
}

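/*
 * ppc_md.setup_msi_irqs hook: for each MSI descriptor of the device,
 * pick an MSI controller (honouring an "fsl,msi" phandle if the PCI
 * node has one), allocate a hwirq from its bitmap, map it to a virq
 * and program the resulting message into the device.
 */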
static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct device_node *np;
	phandle phandle = 0;
	int rc, hwirq = -ENOMEM;
	unsigned int virq;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct fsl_msi *msi_data;

	if (type == PCI_CAP_ID_MSIX)
		pr_debug("fslmsi: MSI-X untested, trying anyway.\n");

	/*
	 * If the PCI node has an fsl,msi property, then we need to use it
	 * to find the specific MSI.
	 */
	np = of_parse_phandle(hose->dn, "fsl,msi", 0);
	if (np) {
		if (of_device_is_compatible(np, "fsl,mpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi") ||
		    of_device_is_compatible(np, "fsl,vmpic-msi-v4.3"))
			phandle = np->phandle;
		else {
			dev_err(&pdev->dev,
				"node %s has an invalid fsl,msi phandle %u\n",
				hose->dn->full_name, np->phandle);
			return -EINVAL;
		}
	}

	list_for_each_entry(entry, &pdev->msi_list, list) {
		/*
		 * Loop over all the MSI devices until we find one that has an
		 * available interrupt.
		 */
		list_for_each_entry(msi_data, &msi_head, list) {
			/*
			 * If the PCI node has an fsl,msi property, then we
			 * restrict our search to the corresponding MSI node.
			 * The simplest way is to skip over MSI nodes with the
			 * wrong phandle. Under the Freescale hypervisor, this
			 * has the additional benefit of skipping over MSI
			 * nodes that are not mapped in the PAMU.
			 */
			if (phandle && (phandle != msi_data->phandle))
				continue;

			hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
			if (hwirq >= 0)
				break;
		}

		if (hwirq < 0) {
			rc = hwirq;
			dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
			goto out_free;
		}

		virq = irq_create_mapping(msi_data->irqhost, hwirq);

		if (virq == NO_IRQ) {
			dev_err(&pdev->dev, "failed to map hwirq %i\n", hwirq);
			msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
			rc = -ENOSPC;
			goto out_free;
		}
		/* chip_data is msi_data via host->hostdata in host->map() */
		irq_set_msi_desc(virq, entry);

		fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
		write_msi_msg(virq, &msg);
	}
	return 0;

out_free:
	/* freed by the caller of this function */
	return rc;
}

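/*
 * Cascade handler for one MSIR register: read the register (or fetch it
 * via hypercall on VMPIC), then dispatch a virtual interrupt for every
 * bit that is set.
 */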
static irqreturn_t fsl_msi_cascade(int irq, void *data)
{
	unsigned int cascade_irq;
	struct fsl_msi *msi_data;
	int msir_index = -1;
	u32 msir_value = 0;
	u32 intr_index;
	u32 have_shift = 0;
	struct fsl_msi_cascade_data *cascade_data = data;
	irqreturn_t ret = IRQ_NONE;

	msi_data = cascade_data->msi_data;

	msir_index = cascade_data->index;

	if (msir_index >= NR_MSI_REG_MAX)
		cascade_irq = NO_IRQ;

	switch (msi_data->feature & FSL_PIC_IP_MASK) {
	case FSL_PIC_IP_MPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs,
			msir_index * 0x10);
		break;
	case FSL_PIC_IP_IPIC:
		msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
		break;
#ifdef CONFIG_EPAPR_PARAVIRT
	case FSL_PIC_IP_VMPIC: {
		unsigned int ret;
		ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
		if (ret) {
			pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
			       "irq %u (ret=%u)\n", irq, ret);
			msir_value = 0;
		}
		break;
	}
#endif
	}

	while (msir_value) {
		intr_index = ffs(msir_value) - 1;

		cascade_irq = irq_linear_revmap(msi_data->irqhost,
				msi_hwirq(msi_data, msir_index,
					  intr_index + have_shift));
		if (cascade_irq != NO_IRQ) {
			generic_handle_irq(cascade_irq);
			ret = IRQ_HANDLED;
		}
		have_shift += intr_index + 1;
		msir_value = msir_value >> (intr_index + 1);
	}

	return ret;
}

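/*
 * Tear down one MSI controller: free the cascade IRQs and their data,
 * release the hwirq bitmap and unmap the register bank (except under
 * the hypervisor, where the registers were never mapped).
 */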
static int fsl_of_msi_remove(struct platform_device *ofdev)
{
	struct fsl_msi *msi = platform_get_drvdata(ofdev);
	int virq, i;

	if (msi->list.prev != NULL)
		list_del(&msi->list);
	for (i = 0; i < NR_MSI_REG_MAX; i++) {
		if (msi->cascade_array[i]) {
			virq = msi->cascade_array[i]->virq;

			BUG_ON(virq == NO_IRQ);

			free_irq(virq, msi->cascade_array[i]);
			kfree(msi->cascade_array[i]);
			irq_dispose_mapping(virq);
		}
	}
	if (msi->bitmap.bitmap)
		msi_bitmap_free(&msi->bitmap);
	if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
		iounmap(msi->msi_regs);
	kfree(msi);

	return 0;
}

static struct lock_class_key fsl_msi_irq_class;

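/*
 * Bind one MSIR register to its cascade interrupt: map and request the
 * parent IRQ, record the cascade data, and release the corresponding
 * hwirqs so they become available for allocation.
 */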
static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
			       int offset, int irq_index)
{
	struct fsl_msi_cascade_data *cascade_data = NULL;
	int virt_msir, i, ret;

	virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
	if (virt_msir == NO_IRQ) {
		dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
			__func__, irq_index);
		return 0;
	}

	cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
	if (!cascade_data) {
		dev_err(&dev->dev, "No memory for MSI cascade data\n");
		return -ENOMEM;
	}
	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
	cascade_data->index = offset;
	cascade_data->msi_data = msi;
	cascade_data->virq = virt_msir;
	msi->cascade_array[irq_index] = cascade_data;

	ret = request_irq(virt_msir, fsl_msi_cascade, 0,
			  "fsl-msi-cascade", cascade_data);
	if (ret) {
		dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
			virt_msir, ret);
		return ret;
	}

	/* Release the hwirqs corresponding to this MSI register */
	for (i = 0; i < IRQS_PER_MSI_REG; i++)
		msi_bitmap_free_hwirqs(&msi->bitmap,
				       msi_hwirq(msi, offset, i), 1);

	return 0;
}

static const struct of_device_id fsl_of_msi_ids[];
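/*
 * Probe one MSI controller node: map its registers (or not, under the
 * hypervisor), create the irq domain and hwirq bitmap, then wire up the
 * MSIR cascade interrupts described by the device tree.
 */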
static int fsl_of_msi_probe(struct platform_device *dev)
{
	const struct of_device_id *match;
	struct fsl_msi *msi;
	struct resource res, msiir;
	int err, i, j, irq_index, count;
	const u32 *p;
	const struct fsl_msi_feature *features;
	int len;
	u32 offset;

	match = of_match_device(fsl_of_msi_ids, &dev->dev);
	if (!match)
		return -EINVAL;
	features = match->data;

	printk(KERN_DEBUG "Setting up Freescale MSI support\n");

	msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
	if (!msi) {
		dev_err(&dev->dev, "No memory for MSI structure\n");
		return -ENOMEM;
	}
	platform_set_drvdata(dev, msi);

	msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
				      NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);

	if (msi->irqhost == NULL) {
		dev_err(&dev->dev, "No memory for MSI irqhost\n");
		err = -ENOMEM;
		goto error_out;
	}

	/*
	 * Under the Freescale hypervisor, the msi nodes don't have a 'reg'
	 * property. Instead, we use hypercalls to access the MSI.
	 */
	if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
		err = of_address_to_resource(dev->dev.of_node, 0, &res);
		if (err) {
			dev_err(&dev->dev, "invalid resource for node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}

		msi->msi_regs = ioremap(res.start, resource_size(&res));
		if (!msi->msi_regs) {
			err = -ENOMEM;
			dev_err(&dev->dev, "could not map node %s\n",
				dev->dev.of_node->full_name);
			goto error_out;
		}
		msi->msiir_offset =
			features->msiir_offset + (res.start & 0xfffff);

		/*
		 * First try to read the MSIIR/MSIIR1 offset from the device
		 * tree; on failure, fall back to the hardcoded MSIIR offset.
		 */
		if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
			msi->msiir_offset = features->msiir_offset +
					    (res.start & MSIIR_OFFSET_MASK);
		else
			msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
	}

	msi->feature = features->fsl_pic_ip;

	/*
	 * Remember the phandle, so that we can match with any PCI nodes
	 * that have an "fsl,msi" property.
	 */
	msi->phandle = dev->dev.of_node->phandle;

	err = fsl_msi_init_allocator(msi);
	if (err) {
		dev_err(&dev->dev, "Error allocating MSI bitmap\n");
		goto error_out;
	}

	p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);

	if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
	    of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
		msi->srs_shift = MSIIR1_SRS_SHIFT;
		msi->ibs_shift = MSIIR1_IBS_SHIFT;
		if (p)
			dev_warn(&dev->dev, "%s: does not support msi-available-ranges property\n",
				 __func__);

		for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
		     irq_index++) {
			err = fsl_msi_setup_hwirq(msi, dev,
						  irq_index, irq_index);
			if (err)
				goto error_out;
		}
	} else {
		static const u32 all_avail[] =
			{ 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };

		msi->srs_shift = MSIIR_SRS_SHIFT;
		msi->ibs_shift = MSIIR_IBS_SHIFT;

		if (p && len % (2 * sizeof(u32)) != 0) {
			dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
				__func__);
			err = -EINVAL;
			goto error_out;
		}

		if (!p) {
			p = all_avail;
			len = sizeof(all_avail);
		}

		for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
			if (p[i * 2] % IRQS_PER_MSI_REG ||
			    p[i * 2 + 1] % IRQS_PER_MSI_REG) {
				pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
					__func__, dev->dev.of_node->full_name,
					p[i * 2 + 1], p[i * 2]);
				err = -EINVAL;
				goto error_out;
			}

			offset = p[i * 2] / IRQS_PER_MSI_REG;
			count = p[i * 2 + 1] / IRQS_PER_MSI_REG;

			for (j = 0; j < count; j++, irq_index++) {
				err = fsl_msi_setup_hwirq(msi, dev, offset + j,
							  irq_index);
				if (err)
					goto error_out;
			}
		}
	}

	list_add_tail(&msi->list, &msi_head);

	/* Installing ppc_md.setup_msi_irqs more than once does no harm */
	if (!ppc_md.setup_msi_irqs) {
		ppc_md.setup_msi_irqs = fsl_setup_msi_irqs;
		ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs;
	} else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) {
		dev_err(&dev->dev, "Different MSI driver already installed!\n");
		err = -ENODEV;
		goto error_out;
	}
	return 0;
error_out:
	fsl_of_msi_remove(dev);
	return err;
}

static const struct fsl_msi_feature mpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_MPIC,
	.msiir_offset = 0x140,
};

static const struct fsl_msi_feature ipic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_IPIC,
	.msiir_offset = 0x38,
};

static const struct fsl_msi_feature vmpic_msi_feature = {
	.fsl_pic_ip = FSL_PIC_IP_VMPIC,
	.msiir_offset = 0,
};

static const struct of_device_id fsl_of_msi_ids[] = {
	{
		.compatible = "fsl,mpic-msi",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,mpic-msi-v4.3",
		.data = &mpic_msi_feature,
	},
	{
		.compatible = "fsl,ipic-msi",
		.data = &ipic_msi_feature,
	},
#ifdef CONFIG_EPAPR_PARAVIRT
	{
		.compatible = "fsl,vmpic-msi",
		.data = &vmpic_msi_feature,
	},
	{
		.compatible = "fsl,vmpic-msi-v4.3",
		.data = &vmpic_msi_feature,
	},
#endif
	{}
};

static struct platform_driver fsl_of_msi_driver = {
	.driver = {
		.name = "fsl-msi",
		.owner = THIS_MODULE,
		.of_match_table = fsl_of_msi_ids,
	},
	.probe = fsl_of_msi_probe,
	.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
{
	return platform_driver_register(&fsl_of_msi_driver);
}

subsys_initcall(fsl_of_msi_init);