[SPARC64]: Use in-kernel OBP device tree for PCI controller probing.
arch/sparc64/kernel/pci_sun4v.c
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

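/* The hypervisor call pci_sun4v_iommu_map() can establish several
 * IOTSB entries per trap.  Rather than mapping one page at a time,
 * we accumulate up to PGLIST_NENTS physical page addresses in a
 * per-cpu page list and hand the whole list to the hypervisor at
 * once, amortizing the trap overhead.
 */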
struct pci_iommu_batch {
        struct pci_dev  *pdev;          /* Device mapping is for.       */
        unsigned long   prot;           /* IOMMU page protections       */
        unsigned long   entry;          /* Index into IOTSB.            */
        u64             *pglist;        /* List of physical pages       */
        unsigned long   npages;         /* Number of pages in list.     */
};

static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);

/* Interrupts must be disabled. */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

        p->pdev = pdev;
        p->prot = prot;
        p->entry = entry;
        p->npages = 0;
}

/* Interrupts must be disabled. */
static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
{
        struct pcidev_cookie *pcp = p->pdev->sysdata;
        unsigned long devhandle = pcp->pbm->devhandle;
        unsigned long prot = p->prot;
        unsigned long entry = p->entry;
        u64 *pglist = p->pglist;
        unsigned long npages = p->npages;

        while (npages != 0) {
                long num;

                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
                                          npages, prot, __pa(pglist));
                if (unlikely(num < 0)) {
                        if (printk_ratelimit())
                                printk("pci_iommu_batch_flush: IOMMU map of "
                                       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
                                       "status %ld\n",
                                       devhandle, HV_PCI_TSBID(0, entry),
                                       npages, prot, __pa(pglist), num);
                        return -1;
                }

                entry += num;
                npages -= num;
                pglist += num;
        }

        p->entry = entry;
        p->npages = 0;

        return 0;
}

/* Interrupts must be disabled. */
static inline long pci_iommu_batch_add(u64 phys_page)
{
        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        p->pglist[p->npages++] = phys_page;
        if (p->npages == PGLIST_NENTS)
                return pci_iommu_batch_flush(p);

        return 0;
}

/* Interrupts must be disabled. */
static inline long pci_iommu_batch_end(void)
{
        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

        BUG_ON(p->npages >= PGLIST_NENTS);

        return pci_iommu_batch_flush(p);
}

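/* Allocate a run of 'npages' consecutive entries from the arena
 * bitmap.  The search starts at the rotating allocation hint and
 * wraps to the bottom of the map exactly once before giving up,
 * so at most two passes scan the table.
 */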
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
        unsigned long n, i, start, end, limit;
        int pass;

        limit = arena->limit;
        start = arena->hint;
        pass = 0;

again:
        n = find_next_zero_bit(arena->map, limit, start);
        end = n + npages;
        if (unlikely(end >= limit)) {
                if (likely(pass < 1)) {
                        limit = start;
                        start = 0;
                        pass++;
                        goto again;
                } else {
                        /* Scanned the whole thing, give up. */
                        return -1;
                }
        }

        for (i = n; i < end; i++) {
                if (test_bit(i, arena->map)) {
                        start = i + 1;
                        goto again;
                }
        }

        for (i = n; i < end; i++)
                __set_bit(i, arena->map);

        arena->hint = end;

        return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
        unsigned long i;

        for (i = base; i < (base + npages); i++)
                __clear_bit(i, arena->map);
}

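/* Allocate 'size' bytes of consistent (coherent) DMA memory: grab
 * physically contiguous pages, reserve a matching range of IOTSB
 * entries, then map each page through the batching helpers with
 * both READ and WRITE attributes set.
 */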
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, order, first_page, npages, n;
        void *ret;
        long entry;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (unlikely(order >= MAX_ORDER))
                return NULL;

        npages = size >> IO_PAGE_SHIFT;

        first_page = __get_free_pages(gfp, order);
        if (unlikely(first_page == 0UL))
                return NULL;

        memset((char *)first_page, 0, PAGE_SIZE << order);

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto arena_alloc_fail;

        *dma_addrp = (iommu->page_table_map_base +
                      (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);

        local_irq_save(flags);

        pci_iommu_batch_start(pdev,
                              (HV_PCI_MAP_ATTR_READ |
                               HV_PCI_MAP_ATTR_WRITE),
                              entry);

        for (n = 0; n < npages; n++) {
                long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }

        if (unlikely(pci_iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

iommu_map_fail:
        /* Interrupts are disabled. */
        spin_lock(&iommu->lock);
        pci_arena_free(&iommu->arena, entry, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        /* first_page was converted to a physical address above, so
         * free the pages via the virtual address saved in 'ret'.
         */
        free_pages((unsigned long)ret, order);
        return NULL;

arena_alloc_fail:
        free_pages(first_page, order);
        return NULL;
}

static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, order, npages, entry;
        u32 devhandle;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;
        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

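/* Map a single CPU buffer for streaming DMA.  Note that only the
 * WRITE attribute depends on the DMA direction; READ permission is
 * always set in the IOMMU entry.
 */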
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr;
        u32 bus_addr, ret;
        unsigned long prot;
        long entry;

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad;

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != PCI_DMA_TODEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        local_irq_save(flags);

        pci_iommu_batch_start(pdev, prot, entry);

        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
                long err = pci_iommu_batch_add(base_paddr);
                if (unlikely(err < 0L))
                        goto iommu_map_fail;
        }
        if (unlikely(pci_iommu_batch_end() < 0L))
                goto iommu_map_fail;

        local_irq_restore(flags);

        return ret;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return PCI_DMA_ERROR_CODE;

iommu_map_fail:
        /* Interrupts are disabled. */
        spin_lock(&iommu->lock);
        pci_arena_free(&iommu->arena, entry, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return PCI_DMA_ERROR_CODE;
}

static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages;
        long entry;
        u32 devhandle;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG) \
        (__pa(page_address((SG)->page)) + (SG)->offset)

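/* Fill in IOTSB entries for a scatterlist that prepare_sg() has
 * already coalesced into 'nused' DMA segments.  We walk the
 * original 'nelems' entries in parallel with the DMA segments,
 * emitting one mapping per IO page and splitting entries that
 * cross IO page boundaries.
 */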
static inline long fill_sg(long entry, struct pci_dev *pdev,
                           struct scatterlist *sg,
                           int nused, int nelems, unsigned long prot)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg + nelems;
        unsigned long flags;
        int i;

        local_irq_save(flags);

        pci_iommu_batch_start(pdev, prot, entry);

        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                long err;

                                err = pci_iommu_batch_add(pteval);
                                if (unlikely(err < 0L))
                                        goto iommu_map_failed;

                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (sg < sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }

        if (unlikely(pci_iommu_batch_end() < 0L))
                goto iommu_map_failed;

        local_irq_restore(flags);
        return 0;

iommu_map_failed:
        local_irq_restore(flags);
        return -1L;
}

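/* Map a scatterlist for streaming DMA.  Single-entry lists take the
 * map_single() fast path.  Otherwise prepare_sg() coalesces the
 * virtual entries into DMA segments, one arena allocation covers
 * the whole request, and fill_sg() programs the IOMMU entries.
 */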
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, npages, prot;
        u32 dma_base;
        struct scatterlist *sgtmp;
        long entry, err;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        pci_4v_map_single(pdev,
                                          (page_address(sglist->page) + sglist->offset),
                                          sglist->length, direction);
                if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
                        return 0;
                sglist->dma_length = sglist->length;
                return 1;
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad;

        /* Step 1: Prepare scatter list. */
        npages = prepare_sg(sglist, nelems);

        /* Step 2: Allocate a cluster and context, if necessary. */
        spin_lock_irqsave(&iommu->lock, flags);
        entry = pci_arena_alloc(&iommu->arena, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(entry < 0L))
                goto bad;

        dma_base = iommu->page_table_map_base +
                (entry << IO_PAGE_SHIFT);

        /* Step 3: Normalize DMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nelems - used;

        /* Step 4: Create the mappings. */
        prot = HV_PCI_MAP_ATTR_READ;
        if (direction != PCI_DMA_TODEVICE)
                prot |= HV_PCI_MAP_ATTR_WRITE;

        err = fill_sg(entry, pdev, sglist, used, nelems, prot);
        if (unlikely(err < 0L))
                goto iommu_map_failed;

        return used;

bad:
        if (printk_ratelimit())
                WARN_ON(1);
        return 0;

iommu_map_failed:
        spin_lock_irqsave(&iommu->lock, flags);
        pci_arena_free(&iommu->arena, entry, npages);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        unsigned long flags, i, npages;
        long entry;
        u32 devhandle, bus_addr;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
        }

        pcp = pdev->sysdata;
        iommu = pcp->pbm->iommu;
        devhandle = pcp->pbm->devhandle;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;

        for (i = 1; i < nelems; i++)
                if (sglist[i].dma_length == 0)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;

        entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        pci_arena_free(&iommu->arena, entry, npages);

        do {
                unsigned long num;

                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
                                            npages);
                entry += num;
                npages -= num;
        } while (npages != 0);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        /* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        /* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
        .alloc_consistent = pci_4v_alloc_consistent,
        .free_consistent = pci_4v_free_consistent,
        .map_single = pci_4v_map_single,
        .unmap_single = pci_4v_unmap_single,
        .map_sg = pci_4v_map_sg,
        .unmap_sg = pci_4v_unmap_sg,
        .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
        .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
};

/* SUN4V PCI configuration space accessors. */

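/* The hypervisor config space calls below only make sense for
 * devices that actually exist.  We build a small hash table of
 * (devhandle, bus, device, function) tuples, populated from the
 * in-kernel OBP device tree, and treat anything not found there as
 * out of range, so probing reads return all-ones without a trip
 * into the hypervisor.
 */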
struct pdev_entry {
        struct pdev_entry       *next;
        u32                     devhandle;
        unsigned int            bus;
        unsigned int            device;
        unsigned int            func;
};

#define PDEV_HTAB_SIZE  16
#define PDEV_HTAB_MASK  (PDEV_HTAB_SIZE - 1)
static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];

static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
        unsigned int val;

        val = (devhandle ^ (devhandle >> 4));
        val ^= bus;
        val ^= device;
        val ^= func;

        return val & PDEV_HTAB_MASK;
}

static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
        struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
        struct pdev_entry **slot;

        if (!p)
                return -ENOMEM;

        slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
        p->next = *slot;
        *slot = p;

        p->devhandle = devhandle;
        p->bus = bus;
        p->device = device;
        p->func = func;

        return 0;
}

/* Recursively descend into the OBP device tree, rooted at toplevel_node,
 * looking for a PCI device matching bus and devfn.
 */
static int obp_find(struct device_node *toplevel_node, unsigned int bus, unsigned int devfn)
{
        toplevel_node = toplevel_node->child;

        while (toplevel_node != NULL) {
                struct linux_prom_pci_registers *regs;
                struct property *prop;
                int ret;

                ret = obp_find(toplevel_node, bus, devfn);
                if (ret != 0)
                        return ret;

                prop = of_find_property(toplevel_node, "reg", NULL);
                if (!prop)
                        goto next_sibling;

                regs = prop->value;
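                /* Per the OF PCI bus binding, the first "reg" entry's
                 * phys.hi cell encodes the bus number in bits 23:16 and
                 * devfn (device << 3 | function) in bits 15:8.
                 */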
                if (((regs->phys_hi >> 16) & 0xff) == bus &&
                    ((regs->phys_hi >> 8) & 0xff) == devfn)
                        break;

        next_sibling:
                toplevel_node = toplevel_node->sibling;
        }

        return toplevel_node != NULL;
}

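/* Brute force: for every possible devfn on every bus behind this
 * PBM, search the PBM's OBP subtree for a matching node and record
 * each device found in the hash table.
 */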
static int pdev_htab_populate(struct pci_pbm_info *pbm)
{
        u32 devhandle = pbm->devhandle;
        unsigned int bus;

        for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
                unsigned int devfn;

                for (devfn = 0; devfn < 256; devfn++) {
                        unsigned int device = PCI_SLOT(devfn);
                        unsigned int func = PCI_FUNC(devfn);

                        if (obp_find(pbm->prom_node, bus, devfn)) {
                                int err = pdev_htab_add(devhandle, bus,
                                                        device, func);
                                if (err)
                                        return err;
                        }
                }
        }

        return 0;
}

static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
        struct pdev_entry *p;

        p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
        while (p) {
                if (p->devhandle == devhandle &&
                    p->bus == bus &&
                    p->device == device &&
                    p->func == func)
                        break;

                p = p->next;
        }

        return p;
}

static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
        if (bus < pbm->pci_first_busno ||
            bus > pbm->pci_last_busno)
                return 1;
        return pdev_find(pbm->devhandle, bus, device, func) == NULL;
}

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                                  int where, int size, u32 *value)
{
        struct pci_pbm_info *pbm = bus_dev->sysdata;
        u32 devhandle = pbm->devhandle;
        unsigned int bus = bus_dev->number;
        unsigned int device = PCI_SLOT(devfn);
        unsigned int func = PCI_FUNC(devfn);
        unsigned long ret;

        if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
                ret = ~0UL;
        } else {
                ret = pci_sun4v_config_get(devhandle,
                                           HV_PCI_DEVICE_BUILD(bus, device, func),
                                           where, size);
#if 0
                printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
                       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
                       where, size, ret);
#endif
        }
        switch (size) {
        case 1:
                *value = ret & 0xff;
                break;
        case 2:
                *value = ret & 0xffff;
                break;
        case 4:
                *value = ret & 0xffffffff;
                break;
        };

        return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
                                   int where, int size, u32 value)
{
        struct pci_pbm_info *pbm = bus_dev->sysdata;
        u32 devhandle = pbm->devhandle;
        unsigned int bus = bus_dev->number;
        unsigned int device = PCI_SLOT(devfn);
        unsigned int func = PCI_FUNC(devfn);
        unsigned long ret;

        if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
                /* Do nothing. */
        } else {
                ret = pci_sun4v_config_put(devhandle,
                                           HV_PCI_DEVICE_BUILD(bus, device, func),
                                           where, size, value);
#if 0
                printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
                       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
                       where, size, value, ret);
#endif
        }
        return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
        .read = pci_sun4v_read_pci_cfg,
        .write = pci_sun4v_write_pci_cfg,
};

static void pbm_scan_bus(struct pci_controller_info *p,
                         struct pci_pbm_info *pbm)
{
        struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

        if (!cookie) {
                prom_printf("%s: Critical allocation failure.\n", pbm->name);
                prom_halt();
        }

        /* All we care about is the PBM. */
        memset(cookie, 0, sizeof(*cookie));
        cookie->pbm = pbm;

        pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
        pci_fixup_host_bridge_self(pbm->pci_bus);
        pbm->pci_bus->self->sysdata = cookie;
#endif
        pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
                                pbm->prom_node->node);
        pci_record_assignments(pbm, pbm->pci_bus);
        pci_assign_unassigned(pbm, pbm->pci_bus);
        pci_fixup_irq(pbm, pbm->pci_bus);
        pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
        pci_setup_busmastering(pbm, pbm->pci_bus);
}

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
        struct property *prop;
        struct device_node *dp;

        if ((dp = p->pbm_A.prom_node) != NULL) {
                prop = of_find_property(dp, "66mhz-capable", NULL);
                p->pbm_A.is_66mhz_capable = (prop != NULL);

                pbm_scan_bus(p, &p->pbm_A);
        }
        if ((dp = p->pbm_B.prom_node) != NULL) {
                prop = of_find_property(dp, "66mhz-capable", NULL);
                p->pbm_B.is_66mhz_capable = (prop != NULL);

                pbm_scan_bus(p, &p->pbm_B);
        }

        /* XXX register error interrupt handlers XXX */
}

static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
                                        struct pci_dev *pdev,
                                        unsigned int devino)
{
        u32 devhandle = pbm->devhandle;

        return sun4v_build_irq(devhandle, devino);
}

static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_pbm_info *pbm = pcp->pbm;
        struct resource *res, *root;
        u32 reg;
        int where, size, is_64bit;

        res = &pdev->resource[resource];
        if (resource < 6) {
                where = PCI_BASE_ADDRESS_0 + (resource * 4);
        } else if (resource == PCI_ROM_RESOURCE) {
                where = pdev->rom_base_reg;
        } else {
                /* Somebody might have requested allocation of a
                 * non-standard resource.
                 */
                return;
        }

        /* XXX 64-bit MEM handling is not 100% correct... XXX */
        is_64bit = 0;
        if (res->flags & IORESOURCE_IO)
                root = &pbm->io_space;
        else {
                root = &pbm->mem_space;
                if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
                    == PCI_BASE_ADDRESS_MEM_TYPE_64)
                        is_64bit = 1;
        }

        size = res->end - res->start;
        pci_read_config_dword(pdev, where, &reg);
        reg = ((reg & size) |
               (((u32)(res->start - root->start)) & ~size));
        if (resource == PCI_ROM_RESOURCE) {
                reg |= PCI_ROM_ADDRESS_ENABLE;
                res->flags |= IORESOURCE_ROM_ENABLE;
        }
        pci_write_config_dword(pdev, where, reg);

        /* This knows that the upper 32-bits of the address
         * must be zero.  Our PCI common layer enforces this.
         */
        if (is_64bit)
                pci_write_config_dword(pdev, where + 4, 0);
}

static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
                                      struct resource *res,
                                      struct resource *root)
{
        res->start += root->start;
        res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
        int i, saw_mem, saw_io;

        saw_mem = saw_io = 0;
        for (i = 0; i < pbm->num_pbm_ranges; i++) {
                struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
                unsigned long a;
                int type;

                type = (pr->child_phys_hi >> 24) & 0x3;
                a = (((unsigned long)pr->parent_phys_hi << 32UL) |
                     ((unsigned long)pr->parent_phys_lo << 0UL));

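                /* Bits 25:24 of child_phys.hi select the address space:
                 * 0 config, 1 I/O, 2 mem32, 3 mem64.
                 */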
                switch (type) {
                case 1:
                        /* 16-bit IO space, 16MB */
                        pbm->io_space.start = a;
                        pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
                        pbm->io_space.flags = IORESOURCE_IO;
                        saw_io = 1;
                        break;

                case 2:
                        /* 32-bit MEM space, 2GB */
                        pbm->mem_space.start = a;
                        pbm->mem_space.end = a + (0x80000000UL - 1UL);
                        pbm->mem_space.flags = IORESOURCE_MEM;
                        saw_mem = 1;
                        break;

                case 3:
                        /* XXX 64-bit MEM handling XXX */

                default:
                        break;
                };
        }

        if (!saw_io || !saw_mem) {
                prom_printf("%s: Fatal error, missing %s PBM range.\n",
                            pbm->name,
                            (!saw_io ? "IO" : "MEM"));
                prom_halt();
        }

        printk("%s: PCI IO[%lx] MEM[%lx]\n",
               pbm->name,
               pbm->io_space.start,
               pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
                                            struct pci_pbm_info *pbm)
{
        pbm->io_space.name = pbm->mem_space.name = pbm->name;

        request_resource(&ioport_resource, &pbm->io_space);
        request_resource(&iomem_resource, &pbm->mem_space);
        pci_register_legacy_regions(&pbm->io_space,
                                    &pbm->mem_space);
}

static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
                                            struct pci_iommu *iommu)
{
        struct pci_iommu_arena *arena = &iommu->arena;
        unsigned long i, cnt = 0;
        u32 devhandle;

        devhandle = pbm->devhandle;
        for (i = 0; i < arena->limit; i++) {
                unsigned long ret, io_attrs, ra;

                ret = pci_sun4v_iommu_getmap(devhandle,
                                             HV_PCI_TSBID(0, i),
                                             &io_attrs, &ra);
                if (ret == HV_EOK) {
                        cnt++;
                        __set_bit(i, arena->map);
                }
        }

        return cnt;
}

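/* The "virtual-dma" property holds two 32-bit cells: the base and
 * the size of this PBM's DVMA window.  The TSB needs one entry per
 * 8K IO page in that window, which is what the switch below works
 * out (e.g. a 2GB window yields 256K entries).
 */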
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
        struct pci_iommu *iommu = pbm->iommu;
        struct property *prop;
        unsigned long num_tsb_entries, sz;
        u32 vdma[2], dma_mask, dma_offset;
        int tsbsize;

        prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
        if (prop) {
                u32 *val = prop->value;

                vdma[0] = val[0];
                vdma[1] = val[1];
        } else {
                /* No property, use default values. */
                vdma[0] = 0x80000000;
                vdma[1] = 0x80000000;
        }

        dma_mask = vdma[0];
        switch (vdma[1]) {
        case 0x20000000:
                dma_mask |= 0x1fffffff;
                tsbsize = 64;
                break;

        case 0x40000000:
                dma_mask |= 0x3fffffff;
                tsbsize = 128;
                break;

        case 0x80000000:
                dma_mask |= 0x7fffffff;
                tsbsize = 256;
                break;

        default:
                prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
                prom_halt();
        };

        tsbsize *= (8 * 1024);

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        dma_offset = vdma[0];

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;

        /* Allocate and initialize the free area map. */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kmalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
                prom_halt();
        }
        memset(iommu->arena.map, 0, sz);
        iommu->arena.limit = num_tsb_entries;

        sz = probe_existing_entries(pbm, iommu);

        printk("%s: TSB entries [%lu], existing mappings [%lu]\n",
               pbm->name, num_tsb_entries, sz);
}

static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
        struct property *prop;
        unsigned int *busrange;

        prop = of_find_property(pbm->prom_node, "bus-range", NULL);

        busrange = prop->value;

        pbm->pci_first_busno = busrange[0];
        pbm->pci_last_busno = busrange[1];
}

static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
        struct pci_pbm_info *pbm;
        struct property *prop;
        int len, i;

        if (devhandle & 0x40)
                pbm = &p->pbm_B;
        else
                pbm = &p->pbm_A;

        pbm->parent = p;
        pbm->prom_node = dp;
        pbm->pci_first_slot = 1;

        pbm->devhandle = devhandle;

        pbm->name = dp->full_name;

        printk("%s: SUN4V PCI Bus Module\n", pbm->name);

        prop = of_find_property(dp, "ranges", &len);
        pbm->pbm_ranges = prop->value;
        pbm->num_pbm_ranges =
                (len / sizeof(struct linux_prom_pci_ranges));

        /* Mask out the top 8 bits of the ranges, leaving the real
         * physical address.
         */
        for (i = 0; i < pbm->num_pbm_ranges; i++)
                pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

        pci_sun4v_determine_mem_io_space(pbm);
        pbm_register_toplevel_resources(p, pbm);

        prop = of_find_property(dp, "interrupt-map", &len);
        pbm->pbm_intmap = prop->value;
        pbm->num_pbm_intmap =
                (len / sizeof(struct linux_prom_pci_intmap));

        prop = of_find_property(dp, "interrupt-map-mask", NULL);
        pbm->pbm_intmask = prop->value;

        pci_sun4v_get_bus_range(pbm);
        pci_sun4v_iommu_init(pbm);

        pdev_htab_populate(pbm);
}

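/* Top-level probe entry.  The devhandle lives in the upper 32 bits
 * of the first "reg" property entry; bit 0x40 distinguishes the two
 * bus modules of one controller pair, which is why a partially
 * initialized controller is matched via (devhandle ^ 0x40) before a
 * fresh pci_controller_info is allocated.
 */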
void sun4v_pci_init(struct device_node *dp, char *model_name)
{
        struct pci_controller_info *p;
        struct pci_iommu *iommu;
        struct property *prop;
        struct linux_prom64_registers *regs;
        u32 devhandle;
        int i;

        prop = of_find_property(dp, "reg", NULL);
        regs = prop->value;

        devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

        for (p = pci_controller_root; p; p = p->next) {
                struct pci_pbm_info *pbm;

                if (p->pbm_A.prom_node && p->pbm_B.prom_node)
                        continue;

                pbm = (p->pbm_A.prom_node ?
                       &p->pbm_A :
                       &p->pbm_B);

                if (pbm->devhandle == (devhandle ^ 0x40)) {
                        pci_sun4v_pbm_init(p, dp, devhandle);
                        return;
                }
        }

        for_each_possible_cpu(i) {
                unsigned long page = get_zeroed_page(GFP_ATOMIC);

                if (!page)
                        goto fatal_memory_error;

                per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
        }

        p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
        if (!p)
                goto fatal_memory_error;

        memset(p, 0, sizeof(*p));

        iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        memset(iommu, 0, sizeof(*iommu));
        p->pbm_A.iommu = iommu;

        iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
        if (!iommu)
                goto fatal_memory_error;

        memset(iommu, 0, sizeof(*iommu));
        p->pbm_B.iommu = iommu;

        p->next = pci_controller_root;
        pci_controller_root = p;

        p->index = pci_num_controllers++;
        p->pbms_same_domain = 0;

        p->scan_bus = pci_sun4v_scan_bus;
        p->irq_build = pci_sun4v_irq_build;
        p->base_address_update = pci_sun4v_base_address_update;
        p->resource_adjust = pci_sun4v_resource_adjust;
        p->pci_ops = &pci_sun4v_ops;

        /* Like PSYCHO and SCHIZO we have a 2GB aligned area
         * for memory space.
         */
        pci_memspace_mask = 0x7fffffffUL;

        pci_sun4v_pbm_init(p, dp, devhandle);
        return;

fatal_memory_error:
        prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
        prom_halt();
}