[SPARC64]: Use KERN_EMERG in dump_tl1_traplog() and sun4v TLB errors.
arch/sparc64/kernel/pci_sun4v.c
/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

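/* Each CPU owns one page worth of IOTSB entries (PGLIST_NENTS u64
 * physical addresses).  DMA map operations build their page list in
 * this per-cpu buffer and then hand the whole batch to the hypervisor
 * in as few pci_sun4v_iommu_map() calls as it takes.
 */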
#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct sun4v_pglist {
	u64 *pglist;
};

static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);

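/* Allocate a contiguous range of IOTSB entries from the arena bitmap.
 * The search starts at the rotating hint and makes at most two passes
 * (hint->limit, then 0->hint) before giving up, so a full table fails
 * quickly instead of rescanning forever.
 */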
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

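/* Consistent (coherent) DMA allocation: grab pages, reserve IOTSB
 * entries, then map them read+write through the hypervisor.  The
 * hypervisor may map fewer pages than requested in one call, hence
 * the loop that advances entry/pglist until every page is mapped.
 */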
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;
	u64 *pglist;
	u32 devhandle;
	int cpu;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return NULL;

	npages = size >> IO_PAGE_SHIFT;
	if (npages > PGLIST_NENTS)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	devhandle = pcp->pbm->devhandle;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	cpu = get_cpu();

	pglist = __get_cpu_var(iommu_pglists).pglist;
	for (n = 0; n < npages; n++)
		pglist[n] = first_page + (n * PAGE_SIZE);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages,
					  (HV_PCI_MAP_ATTR_READ |
					   HV_PCI_MAP_ATTR_WRITE),
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;
}

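/* Tear down a consistent allocation: release the arena bits under the
 * IOMMU lock, demap the IOTSB entries via the hypervisor, then hand
 * the pages back to the page allocator.
 */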
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

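/* Map a single virtually-contiguous buffer for streaming DMA.  The
 * returned bus address keeps the sub-page offset of the original
 * pointer; write permission is withheld for PCI_DMA_TODEVICE so the
 * device cannot scribble on buffers it is only supposed to read.
 */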
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 devhandle, bus_addr, ret;
	unsigned long prot;
	long entry;
	u64 *pglist;
	int cpu;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	cpu = get_cpu();

	pglist = __get_cpu_var(iommu_pglists).pglist;
	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
		pglist[i] = base_paddr;

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot,
					  __pa(pglist));
		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	put_cpu();

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;
}

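/* Undo a map_single(): recompute the page span from the bus address
 * and length, release the arena bits, and demap through the
 * hypervisor.  Unlike the map path, the demap loop here runs under
 * the IOMMU lock.
 */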
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

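/* Walk a prepared scatterlist and emit one IOTSB page list entry per
 * IO page, coalescing physically adjacent segments as it goes.  The
 * outer loop follows the DMA segments chosen by prepare_sg(); the
 * inner loops advance through the backing scatterlist entries until
 * each DMA segment's worth of pages has been emitted.
 */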
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(long entry, u32 devhandle,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i, cpu, pglist_ent;
	u64 *pglist;

	cpu = get_cpu();
	pglist = __get_cpu_var(iommu_pglists).pglist;
	pglist_ent = 0;
	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				pglist[pglist_ent++] = pteval;
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	BUG_ON(pglist_ent == 0);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  pglist_ent, prot, __pa(pglist));
		entry += num;
		pglist += num;
		pglist_ent -= num;
	} while (pglist_ent != 0);

	put_cpu();
}

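/* Map a scatterlist for streaming DMA.  Single-entry lists take the
 * map_single() fast path; otherwise prepare_sg() coalesces the list,
 * one arena range covers the whole thing, and fill_sg() programs the
 * IOTSB through the hypervisor.  Returns the number of DMA segments
 * actually used, or 0 on failure.
 */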
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 devhandle, dma_base;
	struct scatterlist *sgtmp;
	long entry;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	fill_sg(entry, devhandle, sglist, used, nelems, prot);

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}

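/* Unmap a scatterlist.  The extent of the original mapping is
 * recomputed from the first entry's bus address and the last used
 * entry's end, then released and demapped in one sweep.
 */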
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent = pci_4v_alloc_consistent,
	.free_consistent = pci_4v_free_consistent,
	.map_single = pci_4v_map_single,
	.unmap_single = pci_4v_unmap_single,
	.map_sg = pci_4v_map_sg,
	.unmap_sg = pci_4v_unmap_sg,
	.dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
};

/* SUN4V PCI configuration space accessors. */

static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
	if (bus == pbm->pci_first_busno) {
		if (device == 0 && func == 0)
			return 0;
		return 1;
	}

	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;
	return 0;
}

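/* Config space reads and writes go through the hypervisor.  Accesses
 * outside the PBM's bus range (or to any device but 0,0 on the first
 * bus) are rejected up front; a rejected read returns all-ones, the
 * conventional "no device here" value.
 */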
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, device, func),
					   where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	};

	return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, device, func),
					   where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
	.read = pci_sun4v_read_pci_cfg,
	.write = pci_sun4v_write_pci_cfg,
};

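/* Scan one PBM's bus: hand the PBM to the generic sparc64 PCI layer
 * to enumerate devices, record OBP resource assignments, fix up IRQs,
 * and enable bus mastering.
 */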
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif
	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
				pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	if (p->pbm_A.prom_node) {
		p->pbm_A.is_66mhz_capable =
			prom_getbool(p->pbm_A.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_A);
	}
	if (p->pbm_B.prom_node) {
		p->pbm_B.is_66mhz_capable =
			prom_getbool(p->pbm_B.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}

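/* Build an IRQ for a device interrupt.  The PIL (priority level) is
 * chosen from the device's base class so that, for example, network
 * interrupts preempt storage interrupts; everything else defaults to
 * PIL 4.
 */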
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
					struct pci_dev *pdev,
					unsigned int devino)
{
	u32 devhandle = pbm->devhandle;
	int pil;

	pil = 4;
	if (pdev) {
		switch ((pdev->class >> 16) & 0xff) {
		case PCI_BASE_CLASS_STORAGE:
			pil = 4;
			break;

		case PCI_BASE_CLASS_NETWORK:
			pil = 6;
			break;

		case PCI_BASE_CLASS_DISPLAY:
			pil = 9;
			break;

		case PCI_BASE_CLASS_MULTIMEDIA:
		case PCI_BASE_CLASS_MEMORY:
		case PCI_BASE_CLASS_BRIDGE:
		case PCI_BASE_CLASS_SERIAL:
			pil = 10;
			break;

		default:
			pil = 4;
			break;
		};
	}
	BUG_ON(PIL_RESERVED(pil));

	return sun4v_build_irq(devhandle, devino, pil, IBF_PCI);
}

static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not 100% correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}

static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo << 0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		};
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}

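/* The hypervisor may already have live IOTSB entries (for example,
 * mappings set up by OBP for the console).  Mark them busy in the
 * arena bitmap so those slots are never handed out again, and report
 * how many were found.
 */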
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			cnt++;
			__set_bit(i, arena->map);
		}
	}

	return cnt;
}

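/* Size the software IOMMU state from the "virtual-dma" OBP property:
 * vdma[0] is the base of the DVMA window, vdma[1] its size, and the
 * TSB is scaled so there is one entry per IO page in the window.
 */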
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int err, tsbsize;

	err = prom_getproperty(pbm->prom_node, "virtual-dma",
			       (char *)&vdma[0], sizeof(vdma));
	if (err == 0 || err == -1) {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
	case 0x20000000:
		dma_mask |= 0x1fffffff;
		tsbsize = 64;
		break;

	case 0x40000000:
		dma_mask |= 0x3fffffff;
		tsbsize = 128;
		break;

	case 0x80000000:
		dma_mask |= 0x7fffffff;
		tsbsize = 256;
		break;

	default:
		prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
		prom_halt();
	};

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);

	printk("%s: TSB entries [%lu], existing mappings [%lu]\n",
	       pbm->name, num_tsb_entries, sz);
}

static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	unsigned int busrange[2];
	int prom_node = pbm->prom_node;
	int err;

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
		prom_halt();
	}

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}

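/* Initialize one PBM (PCI Bus Module).  Bit 0x40 of the device handle
 * selects which of the controller's two PBMs this node is; everything
 * else is pulled from the OBP properties of the node.
 */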
static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	int err, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: devhandle[%x] prom_node[%x:%x]\n",
	       pbm->name, pbm->devhandle,
	       pbm->prom_node, prom_getchild(pbm->prom_node));

	prom_getstring(prom_node, "name",
		       pbm->prom_name, sizeof(pbm->prom_name));

	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 8 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
	err = prom_getproperty(prom_node, "interrupt-map-mask",
			       (char *)&pbm->pbm_intmask,
			       sizeof(pbm->pbm_intmask));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map-mask.\n",
			    pbm->name);
		prom_halt();
	}

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);
}

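/* Top-level probe entry point for one OBP "pci" node.  A physical
 * controller shows up as two nodes whose device handles differ only
 * in bit 0x40: if a controller with the sibling handle is already
 * known, this node becomes its other PBM; otherwise the per-cpu page
 * lists and a fresh controller structure are set up here.
 */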
void sun4v_pci_init(int node, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct linux_prom64_registers regs;
	u32 devhandle;
	int i;

	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
	devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, node, devhandle);
			return;
		}
	}

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_pglists, i).pglist = (u64 *) page;
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->irq_build = pci_sun4v_irq_build;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, node, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}