1 /* pci_sun4v.c: SUN4V specific PCI controller support.
2 *
3 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/types.h>
8 #include <linux/pci.h>
9 #include <linux/init.h>
10 #include <linux/slab.h>
11 #include <linux/interrupt.h>
12 #include <linux/percpu.h>
13 #include <linux/irq.h>
14 #include <linux/msi.h>
15 #include <linux/log2.h>
16
17 #include <asm/iommu.h>
18 #include <asm/irq.h>
19 #include <asm/upa.h>
20 #include <asm/pstate.h>
21 #include <asm/oplib.h>
22 #include <asm/hypervisor.h>
23 #include <asm/prom.h>
24
25 #include "pci_impl.h"
26 #include "iommu_common.h"
27
28 #include "pci_sun4v.h"
29
30 static unsigned long vpci_major = 1;
31 static unsigned long vpci_minor = 1;
32
33 #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
34
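/* Mappings are established in batches: physical page addresses are
 * accumulated in a per-cpu, page-sized list and handed to the
 * hypervisor in as few pci_iommu_map calls as possible.  With the
 * usual 8KB sparc64 kernel page size this gives room for 1024
 * (8192 / 8) addresses per batch.
 */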
35 struct iommu_batch {
36 struct device *dev; /* Device mapping is for. */
37 unsigned long prot; /* IOMMU page protections */
38 unsigned long entry; /* Index into IOTSB. */
39 u64 *pglist; /* List of physical pages */
40 unsigned long npages; /* Number of pages in list. */
41 };
42
43 static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
44
45 /* Interrupts must be disabled. */
46 static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
47 {
48 struct iommu_batch *p = &__get_cpu_var(iommu_batch);
49
50 p->dev = dev;
51 p->prot = prot;
52 p->entry = entry;
53 p->npages = 0;
54 }
55
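/* The hypervisor is allowed to map fewer pages than requested, in
 * which case pci_sun4v_iommu_map() returns the number of entries it
 * did establish; the loop below simply resubmits the remainder.  A
 * negative return indicates a real error and aborts the batch.
 */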
56 /* Interrupts must be disabled. */
57 static long iommu_batch_flush(struct iommu_batch *p)
58 {
59 struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
60 unsigned long devhandle = pbm->devhandle;
61 unsigned long prot = p->prot;
62 unsigned long entry = p->entry;
63 u64 *pglist = p->pglist;
64 unsigned long npages = p->npages;
65
66 while (npages != 0) {
67 long num;
68
69 num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
70 npages, prot, __pa(pglist));
71 if (unlikely(num < 0)) {
72 if (printk_ratelimit())
73 printk("iommu_batch_flush: IOMMU map of "
74 "[%08lx:%08lx:%lx:%lx:%lx] failed with "
75 "status %ld\n",
76 devhandle, HV_PCI_TSBID(0, entry),
77 npages, prot, __pa(pglist), num);
78 return -1;
79 }
80
81 entry += num;
82 npages -= num;
83 pglist += num;
84 }
85
86 p->entry = entry;
87 p->npages = 0;
88
89 return 0;
90 }
91
92 /* Interrupts must be disabled. */
93 static inline long iommu_batch_add(u64 phys_page)
94 {
95 struct iommu_batch *p = &__get_cpu_var(iommu_batch);
96
97 BUG_ON(p->npages >= PGLIST_NENTS);
98
99 p->pglist[p->npages++] = phys_page;
100 if (p->npages == PGLIST_NENTS)
101 return iommu_batch_flush(p);
102
103 return 0;
104 }
105
106 /* Interrupts must be disabled. */
107 static inline long iommu_batch_end(void)
108 {
109 struct iommu_batch *p = &__get_cpu_var(iommu_batch);
110
111 BUG_ON(p->npages >= PGLIST_NENTS);
112
113 return iommu_batch_flush(p);
114 }
115
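/* Simple bitmap allocator for IOTSB entries.  The search starts at
 * the rotating hint and wraps around once (pass 0, then pass 1)
 * before giving up, so allocations tend to cycle through the arena
 * rather than immediately reusing just-freed entries.
 */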
116 static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
117 {
118 unsigned long n, i, start, end, limit;
119 int pass;
120
121 limit = arena->limit;
122 start = arena->hint;
123 pass = 0;
124
125 again:
126 n = find_next_zero_bit(arena->map, limit, start);
127 end = n + npages;
128 if (unlikely(end >= limit)) {
129 if (likely(pass < 1)) {
130 limit = start;
131 start = 0;
132 pass++;
133 goto again;
134 } else {
135 /* Scanned the whole thing, give up. */
136 return -1;
137 }
138 }
139
140 for (i = n; i < end; i++) {
141 if (test_bit(i, arena->map)) {
142 start = i + 1;
143 goto again;
144 }
145 }
146
147 for (i = n; i < end; i++)
148 __set_bit(i, arena->map);
149
150 arena->hint = end;
151
152 return n;
153 }
154
155 static void arena_free(struct iommu_arena *arena, unsigned long base,
156 unsigned long npages)
157 {
158 unsigned long i;
159
160 for (i = base; i < (base + npages); i++)
161 __clear_bit(i, arena->map);
162 }
163
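/* Coherent allocations: grab physically contiguous pages, reserve a
 * matching run of IOTSB entries, then batch-map every page with both
 * READ and WRITE attributes.  The returned DMA address is simply
 * page_table_map_base plus the entry index shifted by IO_PAGE_SHIFT.
 */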
164 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
165 dma_addr_t *dma_addrp, gfp_t gfp)
166 {
167 struct iommu *iommu;
168 unsigned long flags, order, first_page, npages, n;
169 void *ret;
170 long entry;
171
172 size = IO_PAGE_ALIGN(size);
173 order = get_order(size);
174 if (unlikely(order >= MAX_ORDER))
175 return NULL;
176
177 npages = size >> IO_PAGE_SHIFT;
178
179 first_page = __get_free_pages(gfp, order);
180 if (unlikely(first_page == 0UL))
181 return NULL;
182
183 memset((char *)first_page, 0, PAGE_SIZE << order);
184
185 iommu = dev->archdata.iommu;
186
187 spin_lock_irqsave(&iommu->lock, flags);
188 entry = arena_alloc(&iommu->arena, npages);
189 spin_unlock_irqrestore(&iommu->lock, flags);
190
191 if (unlikely(entry < 0L))
192 goto arena_alloc_fail;
193
194 *dma_addrp = (iommu->page_table_map_base +
195 (entry << IO_PAGE_SHIFT));
196 ret = (void *) first_page;
197 first_page = __pa(first_page);
198
199 local_irq_save(flags);
200
201 iommu_batch_start(dev,
202 (HV_PCI_MAP_ATTR_READ |
203 HV_PCI_MAP_ATTR_WRITE),
204 entry);
205
206 for (n = 0; n < npages; n++) {
207 long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
208 if (unlikely(err < 0L))
209 goto iommu_map_fail;
210 }
211
212 if (unlikely(iommu_batch_end() < 0L))
213 goto iommu_map_fail;
214
215 local_irq_restore(flags);
216
217 return ret;
218
219 iommu_map_fail:
220 /* Interrupts are disabled. */
221 spin_lock(&iommu->lock);
222 arena_free(&iommu->arena, entry, npages);
223 spin_unlock_irqrestore(&iommu->lock, flags);
224
225 arena_alloc_fail:
226 free_pages(first_page, order);
227 return NULL;
228 }
229
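/* Like the map side, pci_sun4v_iommu_demap() may tear down fewer
 * entries than asked for in a single call, so the unmap paths below
 * loop until the whole range has been demapped.
 */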
230 static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
231 dma_addr_t dvma)
232 {
233 struct pci_pbm_info *pbm;
234 struct iommu *iommu;
235 unsigned long flags, order, npages, entry;
236 u32 devhandle;
237
238 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
239 iommu = dev->archdata.iommu;
240 pbm = dev->archdata.host_controller;
241 devhandle = pbm->devhandle;
242 entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
243
244 spin_lock_irqsave(&iommu->lock, flags);
245
246 arena_free(&iommu->arena, entry, npages);
247
248 do {
249 unsigned long num;
250
251 num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
252 npages);
253 entry += num;
254 npages -= num;
255 } while (npages != 0);
256
257 spin_unlock_irqrestore(&iommu->lock, flags);
258
259 order = get_order(size);
260 if (order < 10)
261 free_pages((unsigned long)cpu, order);
262 }
263
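/* Streaming single mappings.  The page count covers the full span of
 * the buffer, including its offset within the first IO page; e.g.
 * with the usual 8KB IO pages, a 0x300 byte buffer starting at page
 * offset 0x1f00 crosses a page boundary and needs npages == 2.
 */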
264 static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
265 enum dma_data_direction direction)
266 {
267 struct iommu *iommu;
268 unsigned long flags, npages, oaddr;
269 unsigned long i, base_paddr;
270 u32 bus_addr, ret;
271 unsigned long prot;
272 long entry;
273
274 iommu = dev->archdata.iommu;
275
276 if (unlikely(direction == DMA_NONE))
277 goto bad;
278
279 oaddr = (unsigned long)ptr;
280 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
281 npages >>= IO_PAGE_SHIFT;
282
283 spin_lock_irqsave(&iommu->lock, flags);
284 entry = arena_alloc(&iommu->arena, npages);
285 spin_unlock_irqrestore(&iommu->lock, flags);
286
287 if (unlikely(entry < 0L))
288 goto bad;
289
290 bus_addr = (iommu->page_table_map_base +
291 (entry << IO_PAGE_SHIFT));
292 ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
293 base_paddr = __pa(oaddr & IO_PAGE_MASK);
294 prot = HV_PCI_MAP_ATTR_READ;
295 if (direction != DMA_TO_DEVICE)
296 prot |= HV_PCI_MAP_ATTR_WRITE;
297
298 local_irq_save(flags);
299
300 iommu_batch_start(dev, prot, entry);
301
302 for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
303 long err = iommu_batch_add(base_paddr);
304 if (unlikely(err < 0L))
305 goto iommu_map_fail;
306 }
307 if (unlikely(iommu_batch_end() < 0L))
308 goto iommu_map_fail;
309
310 local_irq_restore(flags);
311
312 return ret;
313
314 bad:
315 if (printk_ratelimit())
316 WARN_ON(1);
317 return DMA_ERROR_CODE;
318
319 iommu_map_fail:
320 /* Interrupts are disabled. */
321 spin_lock(&iommu->lock);
322 arena_free(&iommu->arena, entry, npages);
323 spin_unlock_irqrestore(&iommu->lock, flags);
324
325 return DMA_ERROR_CODE;
326 }
327
328 static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
329 size_t sz, enum dma_data_direction direction)
330 {
331 struct pci_pbm_info *pbm;
332 struct iommu *iommu;
333 unsigned long flags, npages;
334 long entry;
335 u32 devhandle;
336
337 if (unlikely(direction == DMA_NONE)) {
338 if (printk_ratelimit())
339 WARN_ON(1);
340 return;
341 }
342
343 iommu = dev->archdata.iommu;
344 pbm = dev->archdata.host_controller;
345 devhandle = pbm->devhandle;
346
347 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
348 npages >>= IO_PAGE_SHIFT;
349 bus_addr &= IO_PAGE_MASK;
350
351 spin_lock_irqsave(&iommu->lock, flags);
352
353 entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
354 arena_free(&iommu->arena, entry, npages);
355
356 do {
357 unsigned long num;
358
359 num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
360 npages);
361 entry += num;
362 npages -= num;
363 } while (npages != 0);
364
365 spin_unlock_irqrestore(&iommu->lock, flags);
366 }
367
368 #define SG_ENT_PHYS_ADDRESS(SG) \
369 (__pa(page_address((SG)->page)) + (SG)->offset)
370
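/* fill_sg() walks the scatterlist and programs one IOMMU entry per IO
 * page backing the coalesced DMA segments laid out by prepare_sg(),
 * batching the hypervisor calls just like the single-page paths.
 */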
371 static inline long fill_sg(long entry, struct device *dev,
372 struct scatterlist *sg,
373 int nused, int nelems, unsigned long prot)
374 {
375 struct scatterlist *dma_sg = sg;
376 struct scatterlist *sg_end = sg_last(sg, nelems);
377 unsigned long flags;
378 int i;
379
380 local_irq_save(flags);
381
382 iommu_batch_start(dev, prot, entry);
383
384 for (i = 0; i < nused; i++) {
385 unsigned long pteval = ~0UL;
386 u32 dma_npages;
387
388 dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
389 dma_sg->dma_length +
390 ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
391 do {
392 unsigned long offset;
393 signed int len;
394
395 /* If we are here, we know we have at least one
396 * more page to map. So walk forward until we
397 * hit a page crossing, and begin creating new
398 * mappings from that spot.
399 */
400 for (;;) {
401 unsigned long tmp;
402
403 tmp = SG_ENT_PHYS_ADDRESS(sg);
404 len = sg->length;
405 if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
406 pteval = tmp & IO_PAGE_MASK;
407 offset = tmp & (IO_PAGE_SIZE - 1UL);
408 break;
409 }
410 if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
411 pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
412 offset = 0UL;
413 len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
414 break;
415 }
416 sg = sg_next(sg);
417 }
418
419 pteval = (pteval & IOPTE_PAGE);
420 while (len > 0) {
421 long err;
422
423 err = iommu_batch_add(pteval);
424 if (unlikely(err < 0L))
425 goto iommu_map_failed;
426
427 pteval += IO_PAGE_SIZE;
428 len -= (IO_PAGE_SIZE - offset);
429 offset = 0;
430 dma_npages--;
431 }
432
433 pteval = (pteval & IOPTE_PAGE) + len;
434 sg = sg_next(sg);
435
436 /* Skip over any tail mappings we've fully mapped,
437 * adjusting pteval along the way. Stop when we
438 * detect a page crossing event.
439 */
440 while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
441 (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
442 ((pteval ^
443 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
444 pteval += sg->length;
445 if (sg == sg_end)
446 break;
447 sg = sg_next(sg);
448 }
449 if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
450 pteval = ~0UL;
451 } while (dma_npages != 0);
452 dma_sg = sg_next(dma_sg);
453 }
454
455 if (unlikely(iommu_batch_end() < 0L))
456 goto iommu_map_failed;
457
458 local_irq_restore(flags);
459 return 0;
460
461 iommu_map_failed:
462 local_irq_restore(flags);
463 return -1L;
464 }
465
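/* Multi-entry scatterlists go through prepare_sg()/fill_sg(); a
 * single-entry list is just handed to dma_4v_map_single().  The
 * return value is the number of coalesced DMA segments actually
 * used, which may be smaller than nelems.
 */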
466 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
467 int nelems, enum dma_data_direction direction)
468 {
469 struct iommu *iommu;
470 unsigned long flags, npages, prot;
471 u32 dma_base;
472 struct scatterlist *sgtmp;
473 long entry, err;
474 int used;
475
476 /* Fast path single entry scatterlists. */
477 if (nelems == 1) {
478 sglist->dma_address =
479 dma_4v_map_single(dev,
480 (page_address(sglist->page) +
481 sglist->offset),
482 sglist->length, direction);
483 if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
484 return 0;
485 sglist->dma_length = sglist->length;
486 return 1;
487 }
488
489 iommu = dev->archdata.iommu;
490
491 if (unlikely(direction == DMA_NONE))
492 goto bad;
493
494 /* Step 1: Prepare scatter list. */
495 npages = prepare_sg(sglist, nelems);
496
497 /* Step 2: Allocate a cluster and context, if necessary. */
498 spin_lock_irqsave(&iommu->lock, flags);
499 entry = arena_alloc(&iommu->arena, npages);
500 spin_unlock_irqrestore(&iommu->lock, flags);
501
502 if (unlikely(entry < 0L))
503 goto bad;
504
505 dma_base = iommu->page_table_map_base +
506 (entry << IO_PAGE_SHIFT);
507
508 /* Step 3: Normalize DMA addresses. */
509 used = nelems;
510
511 sgtmp = sglist;
512 while (used && sgtmp->dma_length) {
513 sgtmp->dma_address += dma_base;
514 sgtmp = sg_next(sgtmp);
515 used--;
516 }
517 used = nelems - used;
518
519 /* Step 4: Create the mappings. */
520 prot = HV_PCI_MAP_ATTR_READ;
521 if (direction != DMA_TO_DEVICE)
522 prot |= HV_PCI_MAP_ATTR_WRITE;
523
524 err = fill_sg(entry, dev, sglist, used, nelems, prot);
525 if (unlikely(err < 0L))
526 goto iommu_map_failed;
527
528 return used;
529
530 bad:
531 if (printk_ratelimit())
532 WARN_ON(1);
533 return 0;
534
535 iommu_map_failed:
536 spin_lock_irqsave(&iommu->lock, flags);
537 arena_free(&iommu->arena, entry, npages);
538 spin_unlock_irqrestore(&iommu->lock, flags);
539
540 return 0;
541 }
542
543 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
544 int nelems, enum dma_data_direction direction)
545 {
546 struct pci_pbm_info *pbm;
547 struct iommu *iommu;
548 unsigned long flags, i, npages;
549 struct scatterlist *sg, *sgprv;
550 long entry;
551 u32 devhandle, bus_addr;
552
553 if (unlikely(direction == DMA_NONE)) {
554 if (printk_ratelimit())
555 WARN_ON(1);
556 }
557
558 iommu = dev->archdata.iommu;
559 pbm = dev->archdata.host_controller;
560 devhandle = pbm->devhandle;
561
562 bus_addr = sglist->dma_address & IO_PAGE_MASK;
563 sgprv = NULL;
564 for_each_sg(sglist, sg, nelems, i) {
565 if (sg->dma_length == 0)
566 break;
567
568 sgprv = sg;
569 }
570
571 npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
572 bus_addr) >> IO_PAGE_SHIFT;
573
574 entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
575
576 spin_lock_irqsave(&iommu->lock, flags);
577
578 arena_free(&iommu->arena, entry, npages);
579
580 do {
581 unsigned long num;
582
583 num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
584 npages);
585 entry += num;
586 npages -= num;
587 } while (npages != 0);
588
589 spin_unlock_irqrestore(&iommu->lock, flags);
590 }
591
592 static void dma_4v_sync_single_for_cpu(struct device *dev,
593 dma_addr_t bus_addr, size_t sz,
594 enum dma_data_direction direction)
595 {
596 /* Nothing to do... */
597 }
598
599 static void dma_4v_sync_sg_for_cpu(struct device *dev,
600 struct scatterlist *sglist, int nelems,
601 enum dma_data_direction direction)
602 {
603 /* Nothing to do... */
604 }
605
606 const struct dma_ops sun4v_dma_ops = {
607 .alloc_coherent = dma_4v_alloc_coherent,
608 .free_coherent = dma_4v_free_coherent,
609 .map_single = dma_4v_map_single,
610 .unmap_single = dma_4v_unmap_single,
611 .map_sg = dma_4v_map_sg,
612 .unmap_sg = dma_4v_unmap_sg,
613 .sync_single_for_cpu = dma_4v_sync_single_for_cpu,
614 .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu,
615 };
616
617 static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
618 {
619 struct property *prop;
620 struct device_node *dp;
621
622 dp = pbm->prom_node;
623 prop = of_find_property(dp, "66mhz-capable", NULL);
624 pbm->is_66mhz_capable = (prop != NULL);
625 pbm->pci_bus = pci_scan_one_pbm(pbm);
626
627 /* XXX register error interrupt handlers XXX */
628 }
629
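/* The firmware may leave IOMMU translations behind (e.g. for the
 * console or boot device).  Any entry whose target page lies in
 * normally available memory is treated as stale and demapped; the
 * rest are imported into the arena so we never hand them out again.
 */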
630 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
631 struct iommu *iommu)
632 {
633 struct iommu_arena *arena = &iommu->arena;
634 unsigned long i, cnt = 0;
635 u32 devhandle;
636
637 devhandle = pbm->devhandle;
638 for (i = 0; i < arena->limit; i++) {
639 unsigned long ret, io_attrs, ra;
640
641 ret = pci_sun4v_iommu_getmap(devhandle,
642 HV_PCI_TSBID(0, i),
643 &io_attrs, &ra);
644 if (ret == HV_EOK) {
645 if (page_in_phys_avail(ra)) {
646 pci_sun4v_iommu_demap(devhandle,
647 HV_PCI_TSBID(0, i), 1);
648 } else {
649 cnt++;
650 __set_bit(i, arena->map);
651 }
652 }
653 }
654
655 return cnt;
656 }
657
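/* The "virtual-dma" property gives the DVMA window as [base, size].
 * With the default 0x80000000/0x80000000 pair and 8KB IO pages that
 * is a 2GB window, i.e. 0x40000 (262144) TSB entries and a 32KB
 * arena bitmap.
 */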
658 static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
659 {
660 struct iommu *iommu = pbm->iommu;
661 struct property *prop;
662 unsigned long num_tsb_entries, sz, tsbsize;
663 u32 vdma[2], dma_mask, dma_offset;
664
665 prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
666 if (prop) {
667 u32 *val = prop->value;
668
669 vdma[0] = val[0];
670 vdma[1] = val[1];
671 } else {
672 /* No property, use default values. */
673 vdma[0] = 0x80000000;
674 vdma[1] = 0x80000000;
675 }
676
677 if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
678 prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
679 vdma[0], vdma[1]);
680 prom_halt();
681 }
682
683 dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
684 num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
685 tsbsize = num_tsb_entries * sizeof(iopte_t);
686
687 dma_offset = vdma[0];
688
689 /* Setup initial software IOMMU state. */
690 spin_lock_init(&iommu->lock);
691 iommu->ctx_lowest_free = 1;
692 iommu->page_table_map_base = dma_offset;
693 iommu->dma_addr_mask = dma_mask;
694
695 /* Allocate and initialize the free area map. */
696 sz = (num_tsb_entries + 7) / 8;
697 sz = (sz + 7UL) & ~7UL;
698 iommu->arena.map = kzalloc(sz, GFP_KERNEL);
699 if (!iommu->arena.map) {
700 prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
701 prom_halt();
702 }
703 iommu->arena.limit = num_tsb_entries;
704
705 sz = probe_existing_entries(pbm, iommu);
706 if (sz)
707 printk("%s: Imported %lu TSB entries from OBP\n",
708 pbm->name, sz);
709 }
710
711 #ifdef CONFIG_PCI_MSI
712 struct pci_sun4v_msiq_entry {
713 u64 version_type;
714 #define MSIQ_VERSION_MASK 0xffffffff00000000UL
715 #define MSIQ_VERSION_SHIFT 32
716 #define MSIQ_TYPE_MASK 0x00000000000000ffUL
717 #define MSIQ_TYPE_SHIFT 0
718 #define MSIQ_TYPE_NONE 0x00
719 #define MSIQ_TYPE_MSG 0x01
720 #define MSIQ_TYPE_MSI32 0x02
721 #define MSIQ_TYPE_MSI64 0x03
722 #define MSIQ_TYPE_INTX 0x08
723 #define MSIQ_TYPE_NONE2 0xff
724
725 u64 intx_sysino;
726 u64 reserved1;
727 u64 stick;
728 u64 req_id; /* bus/device/func */
729 #define MSIQ_REQID_BUS_MASK 0xff00UL
730 #define MSIQ_REQID_BUS_SHIFT 8
731 #define MSIQ_REQID_DEVICE_MASK 0x00f8UL
732 #define MSIQ_REQID_DEVICE_SHIFT 3
733 #define MSIQ_REQID_FUNC_MASK 0x0007UL
734 #define MSIQ_REQID_FUNC_SHIFT 0
735
736 u64 msi_address;
737
738 /* The format of this value is message type dependent.
739 * For MSI bits 15:0 are the data from the MSI packet.
740 * For MSI-X bits 31:0 are the data from the MSI packet.
741 * For MSG, the message code and message routing code where:
742 * bits 39:32 is the bus/device/fn of the msg target-id
743 * bits 18:16 is the message routing code
744 * bits 7:0 is the message code
745 * For INTx the low order 2-bits are:
746 * 00 - INTA
747 * 01 - INTB
748 * 10 - INTC
749 * 11 - INTD
750 */
751 u64 msi_data;
752
753 u64 reserved2;
754 };
755
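/* MSI event queue head/tail values are byte offsets into the queue,
 * not entry indices, so they are compared against and advanced by
 * sizeof(struct pci_sun4v_msiq_entry).
 */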
756 static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
757 unsigned long *head)
758 {
759 unsigned long err, limit;
760
761 err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
762 if (unlikely(err))
763 return -ENXIO;
764
765 limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
766 if (unlikely(*head >= limit))
767 return -EFBIG;
768
769 return 0;
770 }
771
772 static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
773 unsigned long msiqid, unsigned long *head,
774 unsigned long *msi)
775 {
776 struct pci_sun4v_msiq_entry *ep;
777 unsigned long err, type;
778
779 /* Note: void pointer arithmetic, 'head' is a byte offset */
780 ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
781 (pbm->msiq_ent_count *
782 sizeof(struct pci_sun4v_msiq_entry))) +
783 *head);
784
785 if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
786 return 0;
787
788 type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
789 if (unlikely(type != MSIQ_TYPE_MSI32 &&
790 type != MSIQ_TYPE_MSI64))
791 return -EINVAL;
792
793 *msi = ep->msi_data;
794
795 err = pci_sun4v_msi_setstate(pbm->devhandle,
796 ep->msi_data /* msi_num */,
797 HV_MSISTATE_IDLE);
798 if (unlikely(err))
799 return -ENXIO;
800
801 /* Clear the entry. */
802 ep->version_type &= ~MSIQ_TYPE_MASK;
803
804 (*head) += sizeof(struct pci_sun4v_msiq_entry);
805 if (*head >=
806 (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
807 *head = 0;
808
809 return 1;
810 }
811
812 static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
813 unsigned long head)
814 {
815 unsigned long err;
816
817 err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
818 if (unlikely(err))
819 return -EINVAL;
820
821 return 0;
822 }
823
824 static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
825 unsigned long msi, int is_msi64)
826 {
827 if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
828 (is_msi64 ?
829 HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
830 return -ENXIO;
831 if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
832 return -ENXIO;
833 if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
834 return -ENXIO;
835 return 0;
836 }
837
838 static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
839 {
840 unsigned long err, msiqid;
841
842 err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
843 if (err)
844 return -ENXIO;
845
846 pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);
847
848 return 0;
849 }
850
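/* All MSI event queues share one physically contiguous allocation;
 * each queue is registered with the hypervisor individually and the
 * configuration is read back to verify that the base address and
 * entry count took effect.
 */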
851 static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
852 {
853 unsigned long q_size, alloc_size, pages, order;
854 int i;
855
856 q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
857 alloc_size = (pbm->msiq_num * q_size);
858 order = get_order(alloc_size);
859 pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
860 if (pages == 0UL) {
861 printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
862 order);
863 return -ENOMEM;
864 }
865 memset((char *)pages, 0, PAGE_SIZE << order);
866 pbm->msi_queues = (void *) pages;
867
868 for (i = 0; i < pbm->msiq_num; i++) {
869 unsigned long err, base = __pa(pages + (i * q_size));
870 unsigned long ret1, ret2;
871
872 err = pci_sun4v_msiq_conf(pbm->devhandle,
873 pbm->msiq_first + i,
874 base, pbm->msiq_ent_count);
875 if (err) {
876 printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
877 err);
878 goto h_error;
879 }
880
881 err = pci_sun4v_msiq_info(pbm->devhandle,
882 pbm->msiq_first + i,
883 &ret1, &ret2);
884 if (err) {
885 printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
886 err);
887 goto h_error;
888 }
889 if (ret1 != base || ret2 != pbm->msiq_ent_count) {
890 printk(KERN_ERR "MSI: Bogus qconf "
891 "expected[%lx:%x] got[%lx:%lx]\n",
892 base, pbm->msiq_ent_count,
893 ret1, ret2);
894 goto h_error;
895 }
896 }
897
898 return 0;
899
900 h_error:
901 free_pages(pages, order);
902 return -EINVAL;
903 }
904
905 static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
906 {
907 unsigned long q_size, alloc_size, pages, order;
908 int i;
909
910 for (i = 0; i < pbm->msiq_num; i++) {
911 unsigned long msiqid = pbm->msiq_first + i;
912
913 (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
914 }
915
916 q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
917 alloc_size = (pbm->msiq_num * q_size);
918 order = get_order(alloc_size);
919
920 pages = (unsigned long) pbm->msi_queues;
921
922 free_pages(pages, order);
923
924 pbm->msi_queues = NULL;
925 }
926
927 static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
928 unsigned long msiqid,
929 unsigned long devino)
930 {
931 unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);
932
933 if (!virt_irq)
934 return -ENOMEM;
935
936 if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
937 return -EINVAL;
938 if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
939 return -EINVAL;
940
941 return virt_irq;
942 }
943
944 static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
945 .get_head = pci_sun4v_get_head,
946 .dequeue_msi = pci_sun4v_dequeue_msi,
947 .set_head = pci_sun4v_set_head,
948 .msi_setup = pci_sun4v_msi_setup,
949 .msi_teardown = pci_sun4v_msi_teardown,
950 .msiq_alloc = pci_sun4v_msiq_alloc,
951 .msiq_free = pci_sun4v_msiq_free,
952 .msiq_build_irq = pci_sun4v_msiq_build_irq,
953 };
954
955 static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
956 {
957 sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
958 }
959 #else /* CONFIG_PCI_MSI */
960 static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
961 {
962 }
963 #endif /* !(CONFIG_PCI_MSI) */
964
965 static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
966 {
967 struct pci_pbm_info *pbm;
968
969 if (devhandle & 0x40)
970 pbm = &p->pbm_B;
971 else
972 pbm = &p->pbm_A;
973
974 pbm->next = pci_pbm_root;
975 pci_pbm_root = pbm;
976
977 pbm->scan_bus = pci_sun4v_scan_bus;
978 pbm->pci_ops = &sun4v_pci_ops;
979 pbm->config_space_reg_bits = 12;
980
981 pbm->index = pci_num_pbms++;
982
983 pbm->parent = p;
984 pbm->prom_node = dp;
985
986 pbm->devhandle = devhandle;
987
988 pbm->name = dp->full_name;
989
990 printk("%s: SUN4V PCI Bus Module\n", pbm->name);
991
992 pci_determine_mem_io_space(pbm);
993
994 pci_get_pbm_props(pbm);
995 pci_sun4v_iommu_init(pbm);
996 pci_sun4v_msi_init(pbm);
997 }
998
999 void __init sun4v_pci_init(struct device_node *dp, char *model_name)
1000 {
1001 static int hvapi_negotiated = 0;
1002 struct pci_controller_info *p;
1003 struct pci_pbm_info *pbm;
1004 struct iommu *iommu;
1005 struct property *prop;
1006 struct linux_prom64_registers *regs;
1007 u32 devhandle;
1008 int i;
1009
1010 if (!hvapi_negotiated++) {
1011 int err = sun4v_hvapi_register(HV_GRP_PCI,
1012 vpci_major,
1013 &vpci_minor);
1014
1015 if (err) {
1016 prom_printf("SUN4V_PCI: Could not register hvapi, "
1017 "err=%d\n", err);
1018 prom_halt();
1019 }
1020 printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
1021 vpci_major, vpci_minor);
1022
1023 dma_ops = &sun4v_dma_ops;
1024 }
1025
1026 prop = of_find_property(dp, "reg", NULL);
1027 regs = prop->value;
1028
1029 devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
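/* The two PBMs of one controller share all bits of the device
 * handle except 0x40, so look for an already-probed sibling
 * before allocating a new pci_controller_info.
 */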
1030
1031 for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
1032 if (pbm->devhandle == (devhandle ^ 0x40)) {
1033 pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
1034 return;
1035 }
1036 }
1037
1038 for_each_possible_cpu(i) {
1039 unsigned long page = get_zeroed_page(GFP_ATOMIC);
1040
1041 if (!page)
1042 goto fatal_memory_error;
1043
1044 per_cpu(iommu_batch, i).pglist = (u64 *) page;
1045 }
1046
1047 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
1048 if (!p)
1049 goto fatal_memory_error;
1050
1051 iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
1052 if (!iommu)
1053 goto fatal_memory_error;
1054
1055 p->pbm_A.iommu = iommu;
1056
1057 iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
1058 if (!iommu)
1059 goto fatal_memory_error;
1060
1061 p->pbm_B.iommu = iommu;
1062
1063 pci_sun4v_pbm_init(p, dp, devhandle);
1064 return;
1065
1066 fatal_memory_error:
1067 prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
1068 prom_halt();
1069 }