/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct pci_iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);

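/* Mapping requests are accumulated in the per-cpu batch above and handed
 * to the hypervisor in chunks of at most PGLIST_NENTS pages per
 * pci_sun4v_iommu_map() call.  A minimal caller sketch (illustrative
 * only; the real callers are pci_4v_alloc_consistent(),
 * pci_4v_map_single() and fill_sg() below):
 *
 *	local_irq_save(flags);
 *	pci_iommu_batch_start(pdev, prot, entry);
 *	for (n = 0; n < npages; n++)
 *		if (pci_iommu_batch_add(base_paddr + (n * IO_PAGE_SIZE)) < 0L)
 *			goto fail;
 *	if (pci_iommu_batch_end() < 0L)
 *		goto fail;
 *	local_irq_restore(flags);
 */
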
/* Interrupts must be disabled. */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	p->pdev = pdev;
	p->prot = prot;
	p->entry = entry;
	p->npages = 0;
}

/* Interrupts must be disabled. */
static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
{
	struct pcidev_cookie *pcp = p->pdev->sysdata;
	unsigned long devhandle = pcp->pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled. */
static inline long pci_iommu_batch_add(u64 phys_page)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return pci_iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled. */
static inline long pci_iommu_batch_end(void)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return pci_iommu_batch_flush(p);
}

static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

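/* The arena is a simple bitmap allocator over IOTSB entries.  A typical
 * allocation (sketch; this is exactly the pattern the DMA entry points
 * below follow) looks like:
 *
 *	spin_lock_irqsave(&iommu->lock, flags);
 *	entry = pci_arena_alloc(&iommu->arena, npages);
 *	spin_unlock_irqrestore(&iommu->lock, flags);
 *	if (entry >= 0L)
 *		bus_addr = iommu->page_table_map_base +
 *			(entry << IO_PAGE_SHIFT);
 *
 * and is undone with pci_arena_free(&iommu->arena, entry, npages).
 */
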
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, first_paddr, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;

	/* Keep the virtual address in first_page so that the error paths
	 * below can free_pages() it; hand a separate physical address
	 * variable to the IOMMU batching code.
	 */
	first_paddr = __pa(first_page);

	local_irq_save(flags);

	pci_iommu_batch_start(pdev,
			      (HV_PCI_MAP_ATTR_READ |
			       HV_PCI_MAP_ATTR_WRITE),
			      entry);

	for (n = 0; n < npages; n++) {
		long err = pci_iommu_batch_add(first_paddr + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

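/* Drivers reach the allocator above through the generic sparc64 DMA
 * wrappers (dispatched via pci_sun4v_iommu_ops at the bottom of this
 * file).  A driver-side sketch, assuming a bound struct pci_dev *pdev:
 *
 *	void *cpu;
 *	dma_addr_t dvma;
 *
 *	cpu = pci_alloc_consistent(pdev, size, &dvma);
 *	if (cpu == NULL)
 *		return -ENOMEM;
 *	...
 *	pci_free_consistent(pdev, size, cpu, dvma);
 */
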
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}

static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

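/* Streaming mappings pair pci_4v_map_single() with the unmap routine
 * above.  Driver-side sketch via the generic wrappers, checking against
 * PCI_DMA_ERROR_CODE since that is what map_single returns on failure:
 *
 *	dma_addr_t dvma;
 *
 *	dvma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	if (dvma == PCI_DMA_ERROR_CODE)
 *		goto fail;
 *	...
 *	pci_unmap_single(pdev, dvma, len, PCI_DMA_TODEVICE);
 */
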
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline long fill_sg(long entry, struct pci_dev *pdev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = pci_iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1L;
}

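/* A sketch of the intended behavior, assuming IO_PAGE_SIZE is 8K:
 * prepare_sg() (in the common sparc64 IOMMU code) folds scatterlist
 * entries that share or abut IO pages into a single dma_sg with a
 * combined dma_length, and fill_sg() then walks the original entries,
 * emitting one IOTSB entry per IO page.  Two physically contiguous,
 * 8K-aligned 8K buffers thus cost two IOTSB entries but only a single
 * batched hypervisor call.
 */
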
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent = pci_4v_alloc_consistent,
	.free_consistent = pci_4v_free_consistent,
	.map_single = pci_4v_map_single,
	.unmap_single = pci_4v_unmap_single,
	.map_sg = pci_4v_map_sg,
	.unmap_sg = pci_4v_unmap_sg,
	.dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
};

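/* All DMA entry points in this file are exported through the ops table
 * above; the common sparc64 PCI code dispatches pci_map_single() and
 * friends through a pci_iommu_ops pointer, which is pointed at this
 * table when a sun4v controller is probed.  The sync-for-cpu hooks are
 * empty because the hypervisor keeps DMA coherent on sun4v.
 */
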
/* SUN4V PCI configuration space accessors. */

struct pdev_entry {
	struct pdev_entry *next;
	u32 devhandle;
	unsigned int bus;
	unsigned int device;
	unsigned int func;
};

#define PDEV_HTAB_SIZE	16
#define PDEV_HTAB_MASK	(PDEV_HTAB_SIZE - 1)
static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];

static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	unsigned int val;

	val = (devhandle ^ (devhandle >> 4));
	val ^= bus;
	val ^= device;
	val ^= func;

	return val & PDEV_HTAB_MASK;
}

static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
	struct pdev_entry **slot;

	if (!p)
		return -ENOMEM;

	slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	p->next = *slot;
	*slot = p;

	p->devhandle = devhandle;
	p->bus = bus;
	p->device = device;
	p->func = func;

	return 0;
}

/* Recursively descend into the OBP device tree, rooted at toplevel_node,
 * looking for a PCI device matching bus and devfn.
 */
static int obp_find(struct linux_prom_pci_registers *pregs, int toplevel_node, unsigned int bus, unsigned int devfn)
{
	toplevel_node = prom_getchild(toplevel_node);

	while (toplevel_node != 0) {
		int ret = obp_find(pregs, toplevel_node, bus, devfn);

		if (ret != 0)
			return ret;

		ret = prom_getproperty(toplevel_node, "reg", (char *) pregs,
				       sizeof(*pregs) * PROMREG_MAX);
		if (ret == 0 || ret == -1)
			goto next_sibling;

		if (((pregs[0].phys_hi >> 16) & 0xff) == bus &&
		    ((pregs[0].phys_hi >> 8) & 0xff) == devfn)
			break;

	next_sibling:
		toplevel_node = prom_getsibling(toplevel_node);
	}

	return toplevel_node;
}

static int pdev_htab_populate(struct pci_pbm_info *pbm)
{
	struct linux_prom_pci_registers pr[PROMREG_MAX];
	u32 devhandle = pbm->devhandle;
	unsigned int bus;

	for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
		unsigned int devfn;

		for (devfn = 0; devfn < 256; devfn++) {
			unsigned int device = PCI_SLOT(devfn);
			unsigned int func = PCI_FUNC(devfn);

			if (obp_find(pr, pbm->prom_node, bus, devfn)) {
				int err = pdev_htab_add(devhandle, bus,
							device, func);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
	struct pdev_entry *p;

	p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
	while (p) {
		if (p->devhandle == devhandle &&
		    p->bus == bus &&
		    p->device == device &&
		    p->func == func)
			break;

		p = p->next;
	}

	return p;
}

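/* The hash table exists so that pci_sun4v_out_of_range() below can
 * cheaply reject config accesses to devices OBP never reported, instead
 * of handing a bogus device coordinate to the hypervisor.  Lookup
 * sketch:
 *
 *	if (!pdev_find(pbm->devhandle, bus,
 *		       PCI_SLOT(devfn), PCI_FUNC(devfn)))
 *		... treat the (bus, devfn) slot as out of range ...
 */
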
static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;
	return pdev_find(pbm->devhandle, bus, device, func) == NULL;
}

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, device, func),
					   where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
					   HV_PCI_DEVICE_BUILD(bus, device, func),
					   where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
	.read = pci_sun4v_read_pci_cfg,
	.write = pci_sun4v_write_pci_cfg,
};

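/* pci_sun4v_ops is handed to pci_scan_bus() via p->pci_ops (see
 * pbm_scan_bus() below), so the generic PCI core's config helpers,
 * e.g.:
 *
 *	u32 val;
 *	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &val);
 *
 * resolve to the hypervisor accessors above.
 */
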
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif
	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
				pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	if (p->pbm_A.prom_node) {
		p->pbm_A.is_66mhz_capable =
			prom_getbool(p->pbm_A.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_A);
	}
	if (p->pbm_B.prom_node) {
		p->pbm_B.is_66mhz_capable =
			prom_getbool(p->pbm_B.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}

static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
					struct pci_dev *pdev,
					unsigned int devino)
{
	u32 devhandle = pbm->devhandle;

	return sun4v_build_irq(devhandle, devino, IBF_PCI);
}

static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not 100% correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}

static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo << 0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		}
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}

static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			cnt++;
			__set_bit(i, arena->map);
		}
	}

	return cnt;
}

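/* Marking the slots that pci_sun4v_iommu_getmap() reports as live keeps
 * pci_arena_alloc() from handing out IOTSB entries that already carry
 * translations (e.g. mappings left installed by firmware), which would
 * otherwise be silently clobbered by the first DMA allocation.
 */
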
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int err, tsbsize;

	err = prom_getproperty(pbm->prom_node, "virtual-dma",
			       (char *)&vdma[0], sizeof(vdma));
	if (err == 0 || err == -1) {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
	case 0x20000000:
		dma_mask |= 0x1fffffff;
		tsbsize = 64;
		break;

	case 0x40000000:
		dma_mask |= 0x3fffffff;
		tsbsize = 128;
		break;

	case 0x80000000:
		dma_mask |= 0x7fffffff;
		tsbsize = 256;
		break;

	default:
		prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
		prom_halt();
	}

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);

	printk("%s: TSB entries [%lu], existing mappings [%lu]\n",
	       pbm->name, num_tsb_entries, sz);
}

static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	unsigned int busrange[2];
	int prom_node = pbm->prom_node;
	int err;

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
		prom_halt();
	}

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];
}

static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	int err, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: devhandle[%x] prom_node[%x:%x]\n",
	       pbm->name, pbm->devhandle,
	       pbm->prom_node, prom_getchild(pbm->prom_node));

	prom_getstring(prom_node, "name",
		       pbm->prom_name, sizeof(pbm->prom_name));

	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 8 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
	err = prom_getproperty(prom_node, "interrupt-map-mask",
			       (char *)&pbm->pbm_intmask,
			       sizeof(pbm->pbm_intmask));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map-mask.\n",
			    pbm->name);
		prom_halt();
	}

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);

	pdev_htab_populate(pbm);
}

void sun4v_pci_init(int node, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct linux_prom64_registers regs;
	u32 devhandle;
	int i;

	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
	devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, node, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->irq_build = pci_sun4v_irq_build;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, node, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}