/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

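/* To amortize the cost of the pci_sun4v_iommu_map() hypervisor call,
 * mappings are staged in a per-cpu list of physical page addresses and
 * pushed to the hypervisor in batches of up to PGLIST_NENTS pages.
 */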
struct iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, pci_iommu_batch);

/* Interrupts must be disabled.  */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	p->pdev		= pdev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long pci_iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return pci_iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return pci_iommu_batch_flush(p);
}

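/* Simple first-fit allocator over the IOTSB bitmap.  The search begins
 * at the rotating hint and wraps around to the start of the map exactly
 * once before giving up.  Callers hold iommu->lock.
 */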
static long pci_arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = pdev->dev.archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	pci_iommu_batch_start(pdev,
			      (HV_PCI_MAP_ATTR_READ |
			       HV_PCI_MAP_ATTR_WRITE),
			      entry);

	for (n = 0; n < npages; n++) {
		long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = pdev->dev.archdata.iommu;
	pbm = pdev->dev.archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = pdev->dev.archdata.iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}

static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = pdev->dev.archdata.iommu;
	pbm = pdev->dev.archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

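/* Walk the scatterlist prepared by prepare_sg() and emit one IOMMU entry
 * per IO page through the per-cpu batching helpers above, coalescing
 * scatterlist entries that turn out to be physically contiguous.
 */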
static inline long fill_sg(long entry, struct pci_dev *pdev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = pci_iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1L;
}

static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = pdev->dev.archdata.iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list. */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = pdev->dev.archdata.iommu;
	pbm = pdev->dev.archdata.host_controller;
	devhandle = pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}

const struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent		= pci_4v_alloc_consistent,
	.free_consistent		= pci_4v_free_consistent,
	.map_single			= pci_4v_map_single,
	.unmap_single			= pci_4v_unmap_single,
	.map_sg				= pci_4v_map_sg,
	.unmap_sg			= pci_4v_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4v_dma_sync_sg_for_cpu,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->prom_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}

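/* Scan the IOTSB for translations left installed by the firmware.
 * Entries whose real address falls outside the physical memory list are
 * preserved and accounted in the arena; the rest are demapped.
 */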
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}

static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 vdma[2], dma_mask, dma_offset;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
			    vdma[0], vdma[1]);
		prom_halt();
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

/* For now this just runs as a pre-handler for the real interrupt handler.
 * So we just walk through the queue and ACK all the entries, update the
 * head pointer, and return.
 *
 * In the longer term it would be nice to do something more integrated
 * wherein we can pass in some of this MSI info to the drivers.  This
 * would be most useful for PCIe fabric error messages, although we could
 * invoke those directly from the loop here in order to pass the info around.
 */
static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2)
{
	struct pci_pbm_info *pbm = data1;
	struct pci_sun4v_msiq_entry *base, *ep;
	unsigned long msiqid, orig_head, head, type, err;

	msiqid = (unsigned long) data2;

	head = 0xdeadbeef;
	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head);
	if (unlikely(err))
		goto hv_error_get;

	if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))))
		goto bad_offset;

	head /= sizeof(struct pci_sun4v_msiq_entry);
	orig_head = head;
	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				   (pbm->msiq_ent_count *
				    sizeof(struct pci_sun4v_msiq_entry))));
	ep = &base[head];
	while ((ep->version_type & MSIQ_TYPE_MASK) != 0) {
		type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
		if (unlikely(type != MSIQ_TYPE_MSI32 &&
			     type != MSIQ_TYPE_MSI64))
			goto bad_type;

		pci_sun4v_msi_setstate(pbm->devhandle,
				       ep->msi_data /* msi_num */,
				       HV_MSISTATE_IDLE);

		/* Clear the entry.  */
		ep->version_type &= ~MSIQ_TYPE_MASK;

		/* Go to next entry in ring.  */
		head++;
		if (head >= pbm->msiq_ent_count)
			head = 0;
		ep = &base[head];
	}

	if (likely(head != orig_head)) {
		/* ACK entries by updating head pointer.  */
		head *= sizeof(struct pci_sun4v_msiq_entry);
		err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
		if (unlikely(err))
			goto hv_error_set;
	}
	return;

hv_error_set:
	printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err);
	goto hv_error_cont;

hv_error_get:
	printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err);

hv_error_cont:
	printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n",
	       pbm->devhandle, msiqid, head);
	return;

bad_offset:
	printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n",
	       head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry));
	return;

bad_type:
	printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
	return;
}

static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
	unsigned long size, bits_per_ulong;

	bits_per_ulong = sizeof(unsigned long) * 8;
	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
	size /= 8;
	BUG_ON(size % sizeof(unsigned long));

	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_bitmap)
		return -ENOMEM;

	return 0;
}

static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msi_bitmap);
	pbm->msi_bitmap = NULL;
}

static int msi_queue_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}


static int alloc_msi(struct pci_pbm_info *pbm)
{
	int i;

	for (i = 0; i < pbm->msi_num; i++) {
		if (!test_and_set_bit(i, pbm->msi_bitmap))
			return i + pbm->msi_first;
	}

	return -ENOENT;
}

static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
	msi_num -= pbm->msi_first;
	clear_bit(msi_num, pbm->msi_bitmap);
}

static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p,
				   struct pci_dev *pdev,
				   struct msi_desc *entry)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned long devino, msiqid;
	struct msi_msg msg;
	int msi_num, err;

	*virt_irq_p = 0;

	msi_num = alloc_msi(pbm);
	if (msi_num < 0)
		return msi_num;

	devino = sun4v_build_msi(pbm->devhandle, virt_irq_p,
				 pbm->msiq_first_devino,
				 (pbm->msiq_first_devino +
				  pbm->msiq_num));
	err = -ENOMEM;
	if (!devino)
		goto out_err;

	msiqid = ((devino - pbm->msiq_first_devino) +
		  pbm->msiq_first);

	err = -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		goto out_err;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		goto out_err;

	if (pci_sun4v_msi_setmsiq(pbm->devhandle,
				  msi_num, msiqid,
				  (entry->msi_attrib.is_64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		goto out_err;

	if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE))
		goto out_err;

	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID))
		goto out_err;

	pdev->dev.archdata.msi_num = msi_num;

	if (entry->msi_attrib.is_64) {
		msg.address_hi = pbm->msi64_start >> 32;
		msg.address_lo = pbm->msi64_start & 0xffffffff;
	} else {
		msg.address_hi = 0;
		msg.address_lo = pbm->msi32_start;
	}
	msg.data = msi_num;

	set_irq_msi(*virt_irq_p, entry);
	write_msi_msg(*virt_irq_p, &msg);

	irq_install_pre_handler(*virt_irq_p,
				pci_sun4v_msi_prehandler,
				pbm, (void *) msiqid);

	return 0;

out_err:
	free_msi(pbm, msi_num);
	sun4v_destroy_msi(*virt_irq_p);
	*virt_irq_p = 0;
	return err;

}

static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq,
				       struct pci_dev *pdev)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned long msiqid, err;
	unsigned int msi_num;

	msi_num = pdev->dev.archdata.msi_num;
	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid);
	if (err) {
		printk(KERN_ERR "%s: getmsiq gives error %lu\n",
		       pbm->name, err);
		return;
	}

	pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID);
	pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID);

	free_msi(pbm, msi_num);

	/* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ
	 * allocation.
	 */
	sun4v_destroy_msi(virt_irq);
}

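/* Read the MSI/MSI-X layout from the OBP device node and bring up the
 * MSI event queues.  Any missing property or allocation failure drops
 * the PBM back to "no MSI support".
 */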
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	const u32 *val;
	int len;

	val = of_get_property(pbm->prom_node, "#msi-eqs", &len);
	if (!val || len != 4)
		goto no_msi;
	pbm->msiq_num = *val;
	if (pbm->msiq_num) {
		const struct msiq_prop {
			u32 first_msiq;
			u32 num_msiq;
			u32 first_devino;
		} *mqp;
		const struct msi_range_prop {
			u32 first_msi;
			u32 num_msi;
		} *mrng;
		const struct addr_range_prop {
			u32 msi32_high;
			u32 msi32_low;
			u32 msi32_len;
			u32 msi64_high;
			u32 msi64_low;
			u32 msi64_len;
		} *arng;

		val = of_get_property(pbm->prom_node, "msi-eq-size", &len);
		if (!val || len != 4)
			goto no_msi;

		pbm->msiq_ent_count = *val;

		mqp = of_get_property(pbm->prom_node,
				      "msi-eq-to-devino", &len);
		if (!mqp || len != sizeof(struct msiq_prop))
			goto no_msi;

		pbm->msiq_first = mqp->first_msiq;
		pbm->msiq_first_devino = mqp->first_devino;

		val = of_get_property(pbm->prom_node, "#msi", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_num = *val;

		mrng = of_get_property(pbm->prom_node, "msi-ranges", &len);
		if (!mrng || len != sizeof(struct msi_range_prop))
			goto no_msi;
		pbm->msi_first = mrng->first_msi;

		val = of_get_property(pbm->prom_node, "msi-data-mask", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_data_mask = *val;

		val = of_get_property(pbm->prom_node, "msix-data-width", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msix_data_width = *val;

		arng = of_get_property(pbm->prom_node, "msi-address-ranges",
				       &len);
		if (!arng || len != sizeof(struct addr_range_prop))
			goto no_msi;
		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
			(u64) arng->msi32_low;
		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
			(u64) arng->msi64_low;
		pbm->msi32_len = arng->msi32_len;
		pbm->msi64_len = arng->msi64_len;

		if (msi_bitmap_alloc(pbm))
			goto no_msi;

		if (msi_queue_alloc(pbm)) {
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
		       "devino[0x%x]\n",
		       pbm->name,
		       pbm->msiq_first, pbm->msiq_num,
		       pbm->msiq_ent_count,
		       pbm->msiq_first_devino);
		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
		       "width[%u]\n",
		       pbm->name,
		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
		       pbm->msix_data_width);
		printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
		       "addr64[0x%lx:0x%x]\n",
		       pbm->name,
		       pbm->msi32_start, pbm->msi32_len,
		       pbm->msi64_start, pbm->msi64_len);
		printk(KERN_INFO "%s: MSI queues at RA [%p]\n",
		       pbm->name,
		       pbm->msi_queues);
	}
	pbm->setup_msi_irq = pci_sun4v_setup_msi_irq;
	pbm->teardown_msi_irq = pci_sun4v_teardown_msi_irq;

	return;

no_msi:
	pbm->msiq_num = 0;
	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->scan_bus = pci_sun4v_scan_bus;
	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);
	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);
}

void sun4v_pci_init(struct device_node *dp, char *model_name)
{
	static int hvapi_negotiated = 0;
	struct pci_controller_info *p;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	if (!hvapi_negotiated++) {
		int err = sun4v_hvapi_register(HV_GRP_PCI,
					       vpci_major,
					       &vpci_minor);

		if (err) {
			prom_printf("SUN4V_PCI: Could not register hvapi, "
				    "err=%d\n", err);
			prom_halt();
		}
		printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);
	}

	prop = of_find_property(dp, "reg", NULL);
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}