1 /* pci_sun4v.c: SUN4V specific PCI controller support.
2 *
3 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/types.h>
8 #include <linux/pci.h>
9 #include <linux/init.h>
10 #include <linux/slab.h>
11 #include <linux/interrupt.h>
12 #include <linux/percpu.h>
13 #include <linux/irq.h>
14 #include <linux/msi.h>
15
16 #include <asm/iommu.h>
17 #include <asm/irq.h>
18 #include <asm/upa.h>
19 #include <asm/pstate.h>
20 #include <asm/oplib.h>
21 #include <asm/hypervisor.h>
22 #include <asm/prom.h>
23
24 #include "pci_impl.h"
25 #include "iommu_common.h"
26
27 #include "pci_sun4v.h"
28
29 #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
30
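/* Mappings are batched per cpu so that a run of consecutive IOTSB
 * entries can be programmed with a single pci_sun4v_iommu_map()
 * hypervisor call.  The pglist page holds up to PGLIST_NENTS physical
 * page addresses awaiting a flush.
 */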
31 struct iommu_batch {
32 struct pci_dev *pdev; /* Device mapping is for. */
33 unsigned long prot; /* IOMMU page protections */
34 unsigned long entry; /* Index into IOTSB. */
35 u64 *pglist; /* List of physical pages */
36 unsigned long npages; /* Number of pages in list. */
37 };
38
39 static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch);
40
41 /* Interrupts must be disabled. */
42 static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
43 {
44 struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
45
46 p->pdev = pdev;
47 p->prot = prot;
48 p->entry = entry;
49 p->npages = 0;
50 }
51
52 /* Interrupts must be disabled. */
53 static long pci_iommu_batch_flush(struct iommu_batch *p)
54 {
55 struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
56 unsigned long devhandle = pbm->devhandle;
57 unsigned long prot = p->prot;
58 unsigned long entry = p->entry;
59 u64 *pglist = p->pglist;
60 unsigned long npages = p->npages;
61
62 while (npages != 0) {
63 long num;
64
65 num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
66 npages, prot, __pa(pglist));
67 if (unlikely(num < 0)) {
68 if (printk_ratelimit())
69 printk("pci_iommu_batch_flush: IOMMU map of "
70 "[%08lx:%08lx:%lx:%lx:%lx] failed with "
71 "status %ld\n",
72 devhandle, HV_PCI_TSBID(0, entry),
73 npages, prot, __pa(pglist), num);
74 return -1;
75 }
76
77 entry += num;
78 npages -= num;
79 pglist += num;
80 }
81
82 p->entry = entry;
83 p->npages = 0;
84
85 return 0;
86 }
87
88 /* Interrupts must be disabled. */
89 static inline long pci_iommu_batch_add(u64 phys_page)
90 {
91 struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
92
93 BUG_ON(p->npages >= PGLIST_NENTS);
94
95 p->pglist[p->npages++] = phys_page;
96 if (p->npages == PGLIST_NENTS)
97 return pci_iommu_batch_flush(p);
98
99 return 0;
100 }
101
102 /* Interrupts must be disabled. */
103 static inline long pci_iommu_batch_end(void)
104 {
105 struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
106
107 BUG_ON(p->npages >= PGLIST_NENTS);
108
109 return pci_iommu_batch_flush(p);
110 }
111
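/* First-fit allocator for IOTSB entries.  The search starts at the
 * rotating hint and wraps around to the start of the map once before
 * giving up.  Returns the first entry index of a free run of 'npages'
 * entries, or -1 on failure.  Caller must hold iommu->lock.
 */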
112 static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages)
113 {
114 unsigned long n, i, start, end, limit;
115 int pass;
116
117 limit = arena->limit;
118 start = arena->hint;
119 pass = 0;
120
121 again:
122 n = find_next_zero_bit(arena->map, limit, start);
123 end = n + npages;
124 if (unlikely(end >= limit)) {
125 if (likely(pass < 1)) {
126 limit = start;
127 start = 0;
128 pass++;
129 goto again;
130 } else {
131 /* Scanned the whole thing, give up. */
132 return -1;
133 }
134 }
135
136 for (i = n; i < end; i++) {
137 if (test_bit(i, arena->map)) {
138 start = i + 1;
139 goto again;
140 }
141 }
142
143 for (i = n; i < end; i++)
144 __set_bit(i, arena->map);
145
146 arena->hint = end;
147
148 return n;
149 }
150
151 static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
152 {
153 unsigned long i;
154
155 for (i = base; i < (base + npages); i++)
156 __clear_bit(i, arena->map);
157 }
158
159 static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
160 {
161 struct iommu *iommu;
162 unsigned long flags, order, first_page, npages, n;
163 void *ret;
164 long entry;
165
166 size = IO_PAGE_ALIGN(size);
167 order = get_order(size);
168 if (unlikely(order >= MAX_ORDER))
169 return NULL;
170
171 npages = size >> IO_PAGE_SHIFT;
172
173 first_page = __get_free_pages(gfp, order);
174 if (unlikely(first_page == 0UL))
175 return NULL;
176
177 memset((char *)first_page, 0, PAGE_SIZE << order);
178
179 iommu = pdev->dev.archdata.iommu;
180
181 spin_lock_irqsave(&iommu->lock, flags);
182 entry = pci_arena_alloc(&iommu->arena, npages);
183 spin_unlock_irqrestore(&iommu->lock, flags);
184
185 if (unlikely(entry < 0L))
186 goto arena_alloc_fail;
187
188 *dma_addrp = (iommu->page_table_map_base +
189 (entry << IO_PAGE_SHIFT));
190 ret = (void *) first_page;
191 first_page = __pa(first_page);
192
193 local_irq_save(flags);
194
195 pci_iommu_batch_start(pdev,
196 (HV_PCI_MAP_ATTR_READ |
197 HV_PCI_MAP_ATTR_WRITE),
198 entry);
199
200 for (n = 0; n < npages; n++) {
201 long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
202 if (unlikely(err < 0L))
203 goto iommu_map_fail;
204 }
205
206 if (unlikely(pci_iommu_batch_end() < 0L))
207 goto iommu_map_fail;
208
209 local_irq_restore(flags);
210
211 return ret;
212
213 iommu_map_fail:
214 /* Interrupts are disabled. */
215 spin_lock(&iommu->lock);
216 pci_arena_free(&iommu->arena, entry, npages);
217 spin_unlock_irqrestore(&iommu->lock, flags);
218
219 arena_alloc_fail:
220 free_pages(first_page, order);
221 return NULL;
222 }
223
224 static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
225 {
226 struct pci_pbm_info *pbm;
227 struct iommu *iommu;
228 unsigned long flags, order, npages, entry;
229 u32 devhandle;
230
231 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
232 iommu = pdev->dev.archdata.iommu;
233 pbm = pdev->dev.archdata.host_controller;
234 devhandle = pbm->devhandle;
235 entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
236
237 spin_lock_irqsave(&iommu->lock, flags);
238
239 pci_arena_free(&iommu->arena, entry, npages);
240
241 do {
242 unsigned long num;
243
244 num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
245 npages);
246 entry += num;
247 npages -= num;
248 } while (npages != 0);
249
250 spin_unlock_irqrestore(&iommu->lock, flags);
251
252 order = get_order(size);
253 if (order < 10)
254 free_pages((unsigned long)cpu, order);
255 }
256
257 static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
258 {
259 struct iommu *iommu;
260 unsigned long flags, npages, oaddr;
261 unsigned long i, base_paddr;
262 u32 bus_addr, ret;
263 unsigned long prot;
264 long entry;
265
266 iommu = pdev->dev.archdata.iommu;
267
268 if (unlikely(direction == PCI_DMA_NONE))
269 goto bad;
270
271 oaddr = (unsigned long)ptr;
272 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
273 npages >>= IO_PAGE_SHIFT;
274
275 spin_lock_irqsave(&iommu->lock, flags);
276 entry = pci_arena_alloc(&iommu->arena, npages);
277 spin_unlock_irqrestore(&iommu->lock, flags);
278
279 if (unlikely(entry < 0L))
280 goto bad;
281
282 bus_addr = (iommu->page_table_map_base +
283 (entry << IO_PAGE_SHIFT));
284 ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
285 base_paddr = __pa(oaddr & IO_PAGE_MASK);
286 prot = HV_PCI_MAP_ATTR_READ;
287 if (direction != PCI_DMA_TODEVICE)
288 prot |= HV_PCI_MAP_ATTR_WRITE;
289
290 local_irq_save(flags);
291
292 pci_iommu_batch_start(pdev, prot, entry);
293
294 for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
295 long err = pci_iommu_batch_add(base_paddr);
296 if (unlikely(err < 0L))
297 goto iommu_map_fail;
298 }
299 if (unlikely(pci_iommu_batch_end() < 0L))
300 goto iommu_map_fail;
301
302 local_irq_restore(flags);
303
304 return ret;
305
306 bad:
307 if (printk_ratelimit())
308 WARN_ON(1);
309 return PCI_DMA_ERROR_CODE;
310
311 iommu_map_fail:
312 /* Interrupts are disabled. */
313 spin_lock(&iommu->lock);
314 pci_arena_free(&iommu->arena, entry, npages);
315 spin_unlock_irqrestore(&iommu->lock, flags);
316
317 return PCI_DMA_ERROR_CODE;
318 }
319
320 static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
321 {
322 struct pci_pbm_info *pbm;
323 struct iommu *iommu;
324 unsigned long flags, npages;
325 long entry;
326 u32 devhandle;
327
328 if (unlikely(direction == PCI_DMA_NONE)) {
329 if (printk_ratelimit())
330 WARN_ON(1);
331 return;
332 }
333
334 iommu = pdev->dev.archdata.iommu;
335 pbm = pdev->dev.archdata.host_controller;
336 devhandle = pbm->devhandle;
337
338 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
339 npages >>= IO_PAGE_SHIFT;
340 bus_addr &= IO_PAGE_MASK;
341
342 spin_lock_irqsave(&iommu->lock, flags);
343
344 entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
345 pci_arena_free(&iommu->arena, entry, npages);
346
347 do {
348 unsigned long num;
349
350 num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
351 npages);
352 entry += num;
353 npages -= num;
354 } while (npages != 0);
355
356 spin_unlock_irqrestore(&iommu->lock, flags);
357 }
358
359 #define SG_ENT_PHYS_ADDRESS(SG) \
360 (__pa(page_address((SG)->page)) + (SG)->offset)
361
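/* Program the IOTSB for a scatterlist that prepare_sg() has already
 * coalesced into 'nused' DMA segments.  Each segment is walked one IO
 * page at a time and the physical addresses are handed to the batching
 * helpers above.  Returns 0 on success, -1 if a hypervisor map fails.
 */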
362 static inline long fill_sg(long entry, struct pci_dev *pdev,
363 struct scatterlist *sg,
364 int nused, int nelems, unsigned long prot)
365 {
366 struct scatterlist *dma_sg = sg;
367 struct scatterlist *sg_end = sg + nelems;
368 unsigned long flags;
369 int i;
370
371 local_irq_save(flags);
372
373 pci_iommu_batch_start(pdev, prot, entry);
374
375 for (i = 0; i < nused; i++) {
376 unsigned long pteval = ~0UL;
377 u32 dma_npages;
378
379 dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
380 dma_sg->dma_length +
381 ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
382 do {
383 unsigned long offset;
384 signed int len;
385
386 /* If we are here, we know we have at least one
387 * more page to map. So walk forward until we
388 * hit a page crossing, and begin creating new
389 * mappings from that spot.
390 */
391 for (;;) {
392 unsigned long tmp;
393
394 tmp = SG_ENT_PHYS_ADDRESS(sg);
395 len = sg->length;
396 if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
397 pteval = tmp & IO_PAGE_MASK;
398 offset = tmp & (IO_PAGE_SIZE - 1UL);
399 break;
400 }
401 if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
402 pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
403 offset = 0UL;
404 len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
405 break;
406 }
407 sg++;
408 }
409
410 pteval = (pteval & IOPTE_PAGE);
411 while (len > 0) {
412 long err;
413
414 err = pci_iommu_batch_add(pteval);
415 if (unlikely(err < 0L))
416 goto iommu_map_failed;
417
418 pteval += IO_PAGE_SIZE;
419 len -= (IO_PAGE_SIZE - offset);
420 offset = 0;
421 dma_npages--;
422 }
423
424 pteval = (pteval & IOPTE_PAGE) + len;
425 sg++;
426
427 /* Skip over any tail mappings we've fully mapped,
428 * adjusting pteval along the way. Stop when we
429 * detect a page crossing event.
430 */
431 while (sg < sg_end &&
432 (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
433 (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
434 ((pteval ^
435 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
436 pteval += sg->length;
437 sg++;
438 }
439 if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
440 pteval = ~0UL;
441 } while (dma_npages != 0);
442 dma_sg++;
443 }
444
445 if (unlikely(pci_iommu_batch_end() < 0L))
446 goto iommu_map_failed;
447
448 local_irq_restore(flags);
449 return 0;
450
451 iommu_map_failed:
452 local_irq_restore(flags);
453 return -1L;
454 }
455
456 static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
457 {
458 struct iommu *iommu;
459 unsigned long flags, npages, prot;
460 u32 dma_base;
461 struct scatterlist *sgtmp;
462 long entry, err;
463 int used;
464
465 /* Fast path single entry scatterlists. */
466 if (nelems == 1) {
467 sglist->dma_address =
468 pci_4v_map_single(pdev,
469 (page_address(sglist->page) + sglist->offset),
470 sglist->length, direction);
471 if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
472 return 0;
473 sglist->dma_length = sglist->length;
474 return 1;
475 }
476
477 iommu = pdev->dev.archdata.iommu;
478
479 if (unlikely(direction == PCI_DMA_NONE))
480 goto bad;
481
482 /* Step 1: Prepare scatter list. */
483 npages = prepare_sg(sglist, nelems);
484
485 	/* Step 2: Allocate a range of IOMMU entries. */
486 spin_lock_irqsave(&iommu->lock, flags);
487 entry = pci_arena_alloc(&iommu->arena, npages);
488 spin_unlock_irqrestore(&iommu->lock, flags);
489
490 if (unlikely(entry < 0L))
491 goto bad;
492
493 dma_base = iommu->page_table_map_base +
494 (entry << IO_PAGE_SHIFT);
495
496 /* Step 3: Normalize DMA addresses. */
497 used = nelems;
498
499 sgtmp = sglist;
500 while (used && sgtmp->dma_length) {
501 sgtmp->dma_address += dma_base;
502 sgtmp++;
503 used--;
504 }
505 used = nelems - used;
506
507 /* Step 4: Create the mappings. */
508 prot = HV_PCI_MAP_ATTR_READ;
509 if (direction != PCI_DMA_TODEVICE)
510 prot |= HV_PCI_MAP_ATTR_WRITE;
511
512 err = fill_sg(entry, pdev, sglist, used, nelems, prot);
513 if (unlikely(err < 0L))
514 goto iommu_map_failed;
515
516 return used;
517
518 bad:
519 if (printk_ratelimit())
520 WARN_ON(1);
521 return 0;
522
523 iommu_map_failed:
524 spin_lock_irqsave(&iommu->lock, flags);
525 pci_arena_free(&iommu->arena, entry, npages);
526 spin_unlock_irqrestore(&iommu->lock, flags);
527
528 return 0;
529 }
530
531 static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
532 {
533 struct pci_pbm_info *pbm;
534 struct iommu *iommu;
535 unsigned long flags, i, npages;
536 long entry;
537 u32 devhandle, bus_addr;
538
539 if (unlikely(direction == PCI_DMA_NONE)) {
540 if (printk_ratelimit())
541 WARN_ON(1);
542 }
543
544 iommu = pdev->dev.archdata.iommu;
545 pbm = pdev->dev.archdata.host_controller;
546 devhandle = pbm->devhandle;
547
548 bus_addr = sglist->dma_address & IO_PAGE_MASK;
549
550 for (i = 1; i < nelems; i++)
551 if (sglist[i].dma_length == 0)
552 break;
553 i--;
554 npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
555 bus_addr) >> IO_PAGE_SHIFT;
556
557 entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
558
559 spin_lock_irqsave(&iommu->lock, flags);
560
561 pci_arena_free(&iommu->arena, entry, npages);
562
563 do {
564 unsigned long num;
565
566 num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
567 npages);
568 entry += num;
569 npages -= num;
570 } while (npages != 0);
571
572 spin_unlock_irqrestore(&iommu->lock, flags);
573 }
574
575 static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
576 {
577 /* Nothing to do... */
578 }
579
580 static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
581 {
582 /* Nothing to do... */
583 }
584
585 const struct pci_iommu_ops pci_sun4v_iommu_ops = {
586 .alloc_consistent = pci_4v_alloc_consistent,
587 .free_consistent = pci_4v_free_consistent,
588 .map_single = pci_4v_map_single,
589 .unmap_single = pci_4v_unmap_single,
590 .map_sg = pci_4v_map_sg,
591 .unmap_sg = pci_4v_unmap_sg,
592 .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
593 .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
594 };
595
596 static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
597 {
598 if (bus < pbm->pci_first_busno ||
599 bus > pbm->pci_last_busno)
600 return 1;
601 return 0;
602 }
603
604 static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
605 int where, int size, u32 *value)
606 {
607 struct pci_pbm_info *pbm = bus_dev->sysdata;
608 u32 devhandle = pbm->devhandle;
609 unsigned int bus = bus_dev->number;
610 unsigned int device = PCI_SLOT(devfn);
611 unsigned int func = PCI_FUNC(devfn);
612 unsigned long ret;
613
614 if (bus_dev == pbm->pci_bus && devfn == 0x00)
615 return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
616 size, value);
617 if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
618 ret = ~0UL;
619 } else {
620 ret = pci_sun4v_config_get(devhandle,
621 HV_PCI_DEVICE_BUILD(bus, device, func),
622 where, size);
623 #if 0
624 printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
625 devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
626 where, size, ret);
627 #endif
628 }
629 switch (size) {
630 case 1:
631 *value = ret & 0xff;
632 break;
633 case 2:
634 *value = ret & 0xffff;
635 break;
636 case 4:
637 *value = ret & 0xffffffff;
638 break;
639 	}
640
641
642 return PCIBIOS_SUCCESSFUL;
643 }
644
645 static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
646 int where, int size, u32 value)
647 {
648 struct pci_pbm_info *pbm = bus_dev->sysdata;
649 u32 devhandle = pbm->devhandle;
650 unsigned int bus = bus_dev->number;
651 unsigned int device = PCI_SLOT(devfn);
652 unsigned int func = PCI_FUNC(devfn);
653 unsigned long ret;
654
655 if (bus_dev == pbm->pci_bus && devfn == 0x00)
656 return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
657 size, value);
658 if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
659 /* Do nothing. */
660 } else {
661 ret = pci_sun4v_config_put(devhandle,
662 HV_PCI_DEVICE_BUILD(bus, device, func),
663 where, size, value);
664 #if 0
665 printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
666 devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
667 where, size, value, ret);
668 #endif
669 }
670 return PCIBIOS_SUCCESSFUL;
671 }
672
673 static struct pci_ops pci_sun4v_ops = {
674 .read = pci_sun4v_read_pci_cfg,
675 .write = pci_sun4v_write_pci_cfg,
676 };
677
678
679 static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
680 {
681 struct property *prop;
682 struct device_node *dp;
683
684 dp = pbm->prom_node;
685 prop = of_find_property(dp, "66mhz-capable", NULL);
686 pbm->is_66mhz_capable = (prop != NULL);
687 pbm->pci_bus = pci_scan_one_pbm(pbm);
688
689 /* XXX register error interrupt handlers XXX */
690 }
691
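/* Walk the entire IOTSB and account for translations the firmware (OBP)
 * left behind.  Entries mapping RAM that the kernel now owns are torn
 * down; the rest are kept and marked busy in the arena so they are never
 * handed out again.  Returns the number of preserved entries.
 */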
692 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
693 struct iommu *iommu)
694 {
695 struct iommu_arena *arena = &iommu->arena;
696 unsigned long i, cnt = 0;
697 u32 devhandle;
698
699 devhandle = pbm->devhandle;
700 for (i = 0; i < arena->limit; i++) {
701 unsigned long ret, io_attrs, ra;
702
703 ret = pci_sun4v_iommu_getmap(devhandle,
704 HV_PCI_TSBID(0, i),
705 &io_attrs, &ra);
706 if (ret == HV_EOK) {
707 if (page_in_phys_avail(ra)) {
708 pci_sun4v_iommu_demap(devhandle,
709 HV_PCI_TSBID(0, i), 1);
710 } else {
711 cnt++;
712 __set_bit(i, arena->map);
713 }
714 }
715 }
716
717 return cnt;
718 }
719
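/* Size the IOTSB and the DMA address mask from the OBP "virtual-dma"
 * property (DVMA base and size), allocate the arena bitmap, and then
 * import any translations the firmware has already established.
 */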
720 static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
721 {
722 struct iommu *iommu = pbm->iommu;
723 struct property *prop;
724 unsigned long num_tsb_entries, sz;
725 u32 vdma[2], dma_mask, dma_offset;
726 int tsbsize;
727
728 prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
729 if (prop) {
730 u32 *val = prop->value;
731
732 vdma[0] = val[0];
733 vdma[1] = val[1];
734 } else {
735 /* No property, use default values. */
736 vdma[0] = 0x80000000;
737 vdma[1] = 0x80000000;
738 }
739
740 dma_mask = vdma[0];
741 switch (vdma[1]) {
742 case 0x20000000:
743 dma_mask |= 0x1fffffff;
744 tsbsize = 64;
745 break;
746
747 case 0x40000000:
748 dma_mask |= 0x3fffffff;
749 tsbsize = 128;
750 break;
751
752 case 0x80000000:
753 dma_mask |= 0x7fffffff;
754 tsbsize = 256;
755 break;
756
757 default:
758 prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
759 prom_halt();
760 	}
761
762 tsbsize *= (8 * 1024);
763
764 num_tsb_entries = tsbsize / sizeof(iopte_t);
765
766 dma_offset = vdma[0];
767
768 /* Setup initial software IOMMU state. */
769 spin_lock_init(&iommu->lock);
770 iommu->ctx_lowest_free = 1;
771 iommu->page_table_map_base = dma_offset;
772 iommu->dma_addr_mask = dma_mask;
773
774 /* Allocate and initialize the free area map. */
775 sz = num_tsb_entries / 8;
776 sz = (sz + 7UL) & ~7UL;
777 iommu->arena.map = kzalloc(sz, GFP_KERNEL);
778 if (!iommu->arena.map) {
779 		prom_printf("PCI_IOMMU: Error, kzalloc(arena.map) failed.\n");
780 prom_halt();
781 }
782 iommu->arena.limit = num_tsb_entries;
783
784 sz = probe_existing_entries(pbm, iommu);
785 if (sz)
786 printk("%s: Imported %lu TSB entries from OBP\n",
787 pbm->name, sz);
788 }
789
790 #ifdef CONFIG_PCI_MSI
791 struct pci_sun4v_msiq_entry {
792 u64 version_type;
793 #define MSIQ_VERSION_MASK 0xffffffff00000000UL
794 #define MSIQ_VERSION_SHIFT 32
795 #define MSIQ_TYPE_MASK 0x00000000000000ffUL
796 #define MSIQ_TYPE_SHIFT 0
797 #define MSIQ_TYPE_NONE 0x00
798 #define MSIQ_TYPE_MSG 0x01
799 #define MSIQ_TYPE_MSI32 0x02
800 #define MSIQ_TYPE_MSI64 0x03
801 #define MSIQ_TYPE_INTX 0x08
802 #define MSIQ_TYPE_NONE2 0xff
803
804 u64 intx_sysino;
805 u64 reserved1;
806 u64 stick;
807 u64 req_id; /* bus/device/func */
808 #define MSIQ_REQID_BUS_MASK 0xff00UL
809 #define MSIQ_REQID_BUS_SHIFT 8
810 #define MSIQ_REQID_DEVICE_MASK 0x00f8UL
811 #define MSIQ_REQID_DEVICE_SHIFT 3
812 #define MSIQ_REQID_FUNC_MASK 0x0007UL
813 #define MSIQ_REQID_FUNC_SHIFT 0
814
815 u64 msi_address;
816
817 	/* The format of this value is message type dependent.
818 	 * For MSI bits 15:0 are the data from the MSI packet.
819 	 * For MSI-X bits 31:0 are the data from the MSI packet.
820 	 * For MSG, the message code and message routing code are encoded as:
821 	 *  bits 39:32 are the bus/device/fn of the msg target-id
822 	 *  bits 18:16 are the message routing code
823 	 *  bits 7:0 are the message code
824 * For INTx the low order 2-bits are:
825 * 00 - INTA
826 * 01 - INTB
827 * 10 - INTC
828 * 11 - INTD
829 */
830 u64 msi_data;
831
832 u64 reserved2;
833 };
834
835 /* For now this just runs as a pre-handler for the real interrupt handler.
836 * So we just walk through the queue and ACK all the entries, update the
837 * head pointer, and return.
838 *
839 * In the longer term it would be nice to do something more integrated
840 * wherein we can pass in some of this MSI info to the drivers. This
841 * would be most useful for PCIe fabric error messages, although we could
842 * invoke those directly from the loop here in order to pass the info around.
843 */
844 static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2)
845 {
846 struct pci_pbm_info *pbm = data1;
847 struct pci_sun4v_msiq_entry *base, *ep;
848 unsigned long msiqid, orig_head, head, type, err;
849
850 msiqid = (unsigned long) data2;
851
852 head = 0xdeadbeef;
853 err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head);
854 if (unlikely(err))
855 goto hv_error_get;
856
857 if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))))
858 goto bad_offset;
859
860 head /= sizeof(struct pci_sun4v_msiq_entry);
861 orig_head = head;
862 base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
863 (pbm->msiq_ent_count *
864 sizeof(struct pci_sun4v_msiq_entry))));
865 ep = &base[head];
866 while ((ep->version_type & MSIQ_TYPE_MASK) != 0) {
867 type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
868 if (unlikely(type != MSIQ_TYPE_MSI32 &&
869 type != MSIQ_TYPE_MSI64))
870 goto bad_type;
871
872 pci_sun4v_msi_setstate(pbm->devhandle,
873 ep->msi_data /* msi_num */,
874 HV_MSISTATE_IDLE);
875
876 /* Clear the entry. */
877 ep->version_type &= ~MSIQ_TYPE_MASK;
878
879 /* Go to next entry in ring. */
880 head++;
881 if (head >= pbm->msiq_ent_count)
882 head = 0;
883 ep = &base[head];
884 }
885
886 if (likely(head != orig_head)) {
887 /* ACK entries by updating head pointer. */
888 head *= sizeof(struct pci_sun4v_msiq_entry);
889 err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
890 if (unlikely(err))
891 goto hv_error_set;
892 }
893 return;
894
895 hv_error_set:
896 printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err);
897 goto hv_error_cont;
898
899 hv_error_get:
900 printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err);
901
902 hv_error_cont:
903 printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n",
904 pbm->devhandle, msiqid, head);
905 return;
906
907 bad_offset:
908 printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n",
909 head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry));
910 return;
911
912 bad_type:
913 printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
914 return;
915 }
916
917 static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
918 {
919 unsigned long size, bits_per_ulong;
920
921 bits_per_ulong = sizeof(unsigned long) * 8;
922 size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
923 size /= 8;
924 BUG_ON(size % sizeof(unsigned long));
925
926 pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
927 if (!pbm->msi_bitmap)
928 return -ENOMEM;
929
930 return 0;
931 }
932
933 static void msi_bitmap_free(struct pci_pbm_info *pbm)
934 {
935 kfree(pbm->msi_bitmap);
936 pbm->msi_bitmap = NULL;
937 }
938
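/* Allocate one physically contiguous block holding every MSI event
 * queue, register each queue's base RA and entry count with the
 * hypervisor, and read the configuration back to verify it took effect.
 */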
939 static int msi_queue_alloc(struct pci_pbm_info *pbm)
940 {
941 unsigned long q_size, alloc_size, pages, order;
942 int i;
943
944 q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
945 alloc_size = (pbm->msiq_num * q_size);
946 order = get_order(alloc_size);
947 pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
948 if (pages == 0UL) {
949 printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
950 order);
951 return -ENOMEM;
952 }
953 memset((char *)pages, 0, PAGE_SIZE << order);
954 pbm->msi_queues = (void *) pages;
955
956 for (i = 0; i < pbm->msiq_num; i++) {
957 unsigned long err, base = __pa(pages + (i * q_size));
958 unsigned long ret1, ret2;
959
960 err = pci_sun4v_msiq_conf(pbm->devhandle,
961 pbm->msiq_first + i,
962 base, pbm->msiq_ent_count);
963 if (err) {
964 printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
965 err);
966 goto h_error;
967 }
968
969 err = pci_sun4v_msiq_info(pbm->devhandle,
970 pbm->msiq_first + i,
971 &ret1, &ret2);
972 if (err) {
973 printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
974 err);
975 goto h_error;
976 }
977 if (ret1 != base || ret2 != pbm->msiq_ent_count) {
978 printk(KERN_ERR "MSI: Bogus qconf "
979 "expected[%lx:%x] got[%lx:%lx]\n",
980 base, pbm->msiq_ent_count,
981 ret1, ret2);
982 goto h_error;
983 }
984 }
985
986 return 0;
987
988 h_error:
989 free_pages(pages, order);
990 return -EINVAL;
991 }
992
993
994 static int alloc_msi(struct pci_pbm_info *pbm)
995 {
996 int i;
997
998 for (i = 0; i < pbm->msi_num; i++) {
999 if (!test_and_set_bit(i, pbm->msi_bitmap))
1000 return i + pbm->msi_first;
1001 }
1002
1003 return -ENOENT;
1004 }
1005
1006 static void free_msi(struct pci_pbm_info *pbm, int msi_num)
1007 {
1008 msi_num -= pbm->msi_first;
1009 clear_bit(msi_num, pbm->msi_bitmap);
1010 }
1011
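/* Bring up a single MSI: allocate an MSI number and a device interrupt
 * (devino), bind the MSI to its event queue and mark both valid via
 * hypervisor calls, then program the address/data pair the device will
 * write and install the queue-draining pre-handler.
 */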
1012 static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
1013 struct pci_dev *pdev,
1014 struct msi_desc *entry)
1015 {
1016 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
1017 unsigned long devino, msiqid;
1018 struct msi_msg msg;
1019 int msi_num, err;
1020
1021 *virt_irq_p = 0;
1022
1023 msi_num = alloc_msi(pbm);
1024 if (msi_num < 0)
1025 return msi_num;
1026
1027 devino = sun4v_build_msi(pbm->devhandle, virt_irq_p,
1028 pbm->msiq_first_devino,
1029 (pbm->msiq_first_devino +
1030 pbm->msiq_num));
1031 err = -ENOMEM;
1032 if (!devino)
1033 goto out_err;
1034
1035 msiqid = ((devino - pbm->msiq_first_devino) +
1036 pbm->msiq_first);
1037
1038 err = -EINVAL;
1039 	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid,
1040 				    HV_MSIQSTATE_IDLE))
1041 		goto out_err;
1042
1043 if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
1044 goto out_err;
1045
1046 if (pci_sun4v_msi_setmsiq(pbm->devhandle,
1047 msi_num, msiqid,
1048 (entry->msi_attrib.is_64 ?
1049 HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
1050 goto out_err;
1051
1052 if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE))
1053 goto out_err;
1054
1055 if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
1056 goto out_err;
1057
1058 pdev->dev.archdata.msi_num = msi_num;
1059
1060 if (entry->msi_attrib.is_64) {
1061 msg.address_hi = pbm->msi64_start >> 32;
1062 msg.address_lo = pbm->msi64_start & 0xffffffff;
1063 } else {
1064 msg.address_hi = 0;
1065 msg.address_lo = pbm->msi32_start;
1066 }
1067 msg.data = msi_num;
1068
1069 set_irq_msi(*virt_irq_p, entry);
1070 write_msi_msg(*virt_irq_p, &msg);
1071
1072 irq_install_pre_handler(*virt_irq_p,
1073 pci_sun4v_msi_prehandler,
1074 pbm, (void *) msiqid);
1075
1076 return 0;
1077
1078 out_err:
1079 free_msi(pbm, msi_num);
1080 sun4v_destroy_msi(*virt_irq_p);
1081 *virt_irq_p = 0;
1082 return err;
1083
1084 }
1085
1086 static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
1087 struct pci_dev *pdev)
1088 {
1089 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
1090 unsigned long msiqid, err;
1091 unsigned int msi_num;
1092
1093 msi_num = pdev->dev.archdata.msi_num;
1094 err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
1095 if (err) {
1096 printk(KERN_ERR "%s: getmsiq gives error %lu\n",
1097 pbm->name, err);
1098 return;
1099 }
1100
1101 pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID);
1102 pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID);
1103
1104 free_msi(pbm, msi_num);
1105
1106 /* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ
1107 * allocation.
1108 */
1109 sun4v_destroy_msi(virt_irq);
1110 }
1111
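/* Parse the OBP MSI properties (queue counts, MSI ranges, data mask and
 * address ranges), allocate the MSI bitmap and event queues, and hook up
 * the setup/teardown methods.  If any property is missing, fall back to
 * running without MSI support.
 */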
1112 static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1113 {
1114 const u32 *val;
1115 int len;
1116
1117 val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
1118 if (!val || len != 4)
1119 goto no_msi;
1120 pbm->msiq_num = *val;
1121 if (pbm->msiq_num) {
1122 const struct msiq_prop {
1123 u32 first_msiq;
1124 u32 num_msiq;
1125 u32 first_devino;
1126 } *mqp;
1127 const struct msi_range_prop {
1128 u32 first_msi;
1129 u32 num_msi;
1130 } *mrng;
1131 const struct addr_range_prop {
1132 u32 msi32_high;
1133 u32 msi32_low;
1134 u32 msi32_len;
1135 u32 msi64_high;
1136 u32 msi64_low;
1137 u32 msi64_len;
1138 } *arng;
1139
1140 val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
1141 if (!val || len != 4)
1142 goto no_msi;
1143
1144 pbm->msiq_ent_count = *val;
1145
1146 mqp = of_get_property(pbm->prom_node,
1147 "msi-eq-to-devino", &len);
1148 if (!mqp || len != sizeof(struct msiq_prop))
1149 goto no_msi;
1150
1151 pbm->msiq_first = mqp->first_msiq;
1152 pbm->msiq_first_devino = mqp->first_devino;
1153
1154 val = of_get_property(pbm->prom_node, "#msi", &len);
1155 if (!val || len != 4)
1156 goto no_msi;
1157 pbm->msi_num = *val;
1158
1159 mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
1160 if (!mrng || len != sizeof(struct msi_range_prop))
1161 goto no_msi;
1162 pbm->msi_first = mrng->first_msi;
1163
1164 val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
1165 if (!val || len != 4)
1166 goto no_msi;
1167 pbm->msi_data_mask = *val;
1168
1169 val = of_get_property(pbm->prom_node, "msix-data-width", &len);
1170 if (!val || len != 4)
1171 goto no_msi;
1172 pbm->msix_data_width = *val;
1173
1174 arng = of_get_property(pbm->prom_node, "msi-address-ranges",
1175 &len);
1176 if (!arng || len != sizeof(struct addr_range_prop))
1177 goto no_msi;
1178 pbm->msi32_start = ((u64)arng->msi32_high << 32) |
1179 (u64) arng->msi32_low;
1180 pbm->msi64_start = ((u64)arng->msi64_high << 32) |
1181 (u64) arng->msi64_low;
1182 pbm->msi32_len = arng->msi32_len;
1183 pbm->msi64_len = arng->msi64_len;
1184
1185 if (msi_bitmap_alloc(pbm))
1186 goto no_msi;
1187
1188 if (msi_queue_alloc(pbm)) {
1189 msi_bitmap_free(pbm);
1190 goto no_msi;
1191 }
1192
1193 printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
1194 "devino[0x%x]\n",
1195 pbm->name,
1196 pbm->msiq_first, pbm->msiq_num,
1197 pbm->msiq_ent_count,
1198 pbm->msiq_first_devino);
1199 printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
1200 "width[%u]\n",
1201 pbm->name,
1202 pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
1203 pbm->msix_data_width);
1204 printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
1205 "addr64[0x%lx:0x%x]\n",
1206 pbm->name,
1207 pbm->msi32_start, pbm->msi32_len,
1208 pbm->msi64_start, pbm->msi64_len);
1209 printk(KERN_INFO "%s: MSI queues at RA [%p]\n",
1210 pbm->name,
1211 pbm->msi_queues);
1212 }
1213 pbm->setup_msi_irq = pci_sun4v_setup_msi_irq;
1214 pbm->teardown_msi_irq = pci_sun4v_teardown_msi_irq;
1215
1216 return;
1217
1218 no_msi:
1219 pbm->msiq_num = 0;
1220 printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
1221 }
1222 #else /* CONFIG_PCI_MSI */
1223 static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
1224 {
1225 }
1226 #endif /* !(CONFIG_PCI_MSI) */
1227
1228 static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
1229 {
1230 struct pci_pbm_info *pbm;
1231
1232 if (devhandle & 0x40)
1233 pbm = &p->pbm_B;
1234 else
1235 pbm = &p->pbm_A;
1236
1237 pbm->next = pci_pbm_root;
1238 pci_pbm_root = pbm;
1239
1240 pbm->scan_bus = pci_sun4v_scan_bus;
1241 pbm->pci_ops = &pci_sun4v_ops;
1242
1243 pbm->index = pci_num_pbms++;
1244
1245 pbm->parent = p;
1246 pbm->prom_node = dp;
1247
1248 pbm->devhandle = devhandle;
1249
1250 pbm->name = dp->full_name;
1251
1252 printk("%s: SUN4V PCI Bus Module\n", pbm->name);
1253
1254 pci_determine_mem_io_space(pbm);
1255
1256 pci_get_pbm_props(pbm);
1257 pci_sun4v_iommu_init(pbm);
1258 pci_sun4v_msi_init(pbm);
1259 }
1260
1261 void sun4v_pci_init(struct device_node *dp, char *model_name)
1262 {
1263 struct pci_controller_info *p;
1264 struct pci_pbm_info *pbm;
1265 struct iommu *iommu;
1266 struct property *prop;
1267 struct linux_prom64_registers *regs;
1268 u32 devhandle;
1269 int i;
1270
1271 prop = of_find_property(dp, "reg", NULL);
1272 regs = prop->value;
1273
1274 devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1275
1276 for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
1277 if (pbm->devhandle == (devhandle ^ 0x40)) {
1278 pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
1279 return;
1280 }
1281 }
1282
1283 for_each_possible_cpu(i) {
1284 unsigned long page = get_zeroed_page(GFP_ATOMIC);
1285
1286 if (!page)
1287 goto fatal_memory_error;
1288
1289 per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
1290 }
1291
1292 p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
1293 if (!p)
1294 goto fatal_memory_error;
1295
1296 iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
1297 if (!iommu)
1298 goto fatal_memory_error;
1299
1300 p->pbm_A.iommu = iommu;
1301
1302 iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
1303 if (!iommu)
1304 goto fatal_memory_error;
1305
1306 p->pbm_B.iommu = iommu;
1307
1308 /* Like PSYCHO and SCHIZO we have a 2GB aligned area
1309 * for memory space.
1310 */
1311 pci_memspace_mask = 0x7fffffffUL;
1312
1313 pci_sun4v_pbm_init(p, dp, devhandle);
1314 return;
1315
1316 fatal_memory_error:
1317 prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
1318 prom_halt();
1319 }