Commit | Line | Data |
---|---|---|
8f6a93a1 DM |
1 | /* pci_sun4v.c: SUN4V specific PCI controller support. |
2 | * |
9fd8b647 | 3 | * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) |
8f6a93a1 DM |
4 | */ |
5 | ||
6 | #include <linux/kernel.h> | |
7 | #include <linux/types.h> | |
8 | #include <linux/pci.h> | |
9 | #include <linux/init.h> | |
10 | #include <linux/slab.h> | |
11 | #include <linux/interrupt.h> | |
18397944 | 12 | #include <linux/percpu.h> |
35a17eb6 DM |
13 | #include <linux/irq.h> |
14 | #include <linux/msi.h> | |
8f6a93a1 DM |
15 | |
16 | #include <asm/pbm.h> | |
17 | #include <asm/iommu.h> | |
18 | #include <asm/irq.h> | |
19 | #include <asm/upa.h> | |
20 | #include <asm/pstate.h> | |
21 | #include <asm/oplib.h> | |
22 | #include <asm/hypervisor.h> | |
e87dc350 | 23 | #include <asm/prom.h> |
8f6a93a1 DM |
24 | |
25 | #include "pci_impl.h" | |
26 | #include "iommu_common.h" | |
27 | ||
bade5622 DM |
28 | #include "pci_sun4v.h" |
29 | ||
7c8f486a | 30 | #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) |
18397944 | 31 | |
6a32fd4d DM |
32 | struct pci_iommu_batch { |
33 | struct pci_dev *pdev; /* Device mapping is for. */ | |
34 | unsigned long prot; /* IOMMU page protections */ | |
35 | unsigned long entry; /* Index into IOTSB. */ | |
36 | u64 *pglist; /* List of physical pages */ | |
37 | unsigned long npages; /* Number of pages in list. */ | |
18397944 DM |
38 | }; |
39 | ||
6a32fd4d DM |
40 | static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch); |
41 | ||
42 | /* Interrupts must be disabled. */ | |
43 | static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry) | |
44 | { | |
45 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | |
46 | ||
47 | p->pdev = pdev; | |
48 | p->prot = prot; | |
49 | p->entry = entry; | |
50 | p->npages = 0; | |
51 | } | |
52 | ||
53 | /* Interrupts must be disabled. */ | |
54 | static long pci_iommu_batch_flush(struct pci_iommu_batch *p) | |
55 | { | |
a2fb23af DM |
56 | struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller; |
57 | unsigned long devhandle = pbm->devhandle; | |
6a32fd4d DM |
58 | unsigned long prot = p->prot; |
59 | unsigned long entry = p->entry; | |
60 | u64 *pglist = p->pglist; | |
61 | unsigned long npages = p->npages; | |
62 | ||
d82965c1 | 63 | while (npages != 0) { |
6a32fd4d DM |
64 | long num; |
65 | ||
66 | num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), | |
67 | npages, prot, __pa(pglist)); | |
68 | if (unlikely(num < 0)) { | |
69 | if (printk_ratelimit()) | |
70 | printk("pci_iommu_batch_flush: IOMMU map of " | |
71 | "[%08lx:%08lx:%lx:%lx:%lx] failed with " | |
72 | "status %ld\n", | |
73 | devhandle, HV_PCI_TSBID(0, entry), | |
74 | npages, prot, __pa(pglist), num); | |
75 | return -1; | |
76 | } | |
77 | ||
78 | entry += num; | |
79 | npages -= num; | |
80 | pglist += num; | |
d82965c1 | 81 | } |
6a32fd4d DM |
82 | |
83 | p->entry = entry; | |
84 | p->npages = 0; | |
85 | ||
86 | return 0; | |
87 | } | |
88 | ||
89 | /* Interrupts must be disabled. */ | |
90 | static inline long pci_iommu_batch_add(u64 phys_page) | |
91 | { | |
92 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | |
93 | ||
94 | BUG_ON(p->npages >= PGLIST_NENTS); | |
95 | ||
96 | p->pglist[p->npages++] = phys_page; | |
97 | if (p->npages == PGLIST_NENTS) | |
98 | return pci_iommu_batch_flush(p); | |
99 | ||
100 | return 0; | |
101 | } | |
102 | ||
103 | /* Interrupts must be disabled. */ | |
104 | static inline long pci_iommu_batch_end(void) | |
105 | { | |
106 | struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); | |
107 | ||
108 | BUG_ON(p->npages >= PGLIST_NENTS); | |
109 | ||
110 | return pci_iommu_batch_flush(p); | |
111 | } | |
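
The three helpers above give each CPU a staging list of up to PGLIST_NENTS physical page addresses, which pci_iommu_batch_flush() hands to the hypervisor via pci_sun4v_iommu_map(). Every mapping path below drives them in the same start/add/end pattern with interrupts disabled; a minimal sketch of that pattern, mirroring pci_4v_alloc_consistent() further down (the function name and arguments here are illustrative, not part of this file):

```c
/* Illustrative sketch of the batching pattern used by the callers below.
 * 'entry' is assumed to have been allocated from the IOMMU arena already.
 */
static long example_batch_map(struct pci_dev *pdev, unsigned long prot,
			      unsigned long entry, unsigned long first_page,
			      unsigned long npages)
{
	unsigned long flags, n;
	long err = 0;

	local_irq_save(flags);			/* batch state is per-CPU */
	pci_iommu_batch_start(pdev, prot, entry);
	for (n = 0; n < npages; n++) {
		err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))		/* a flush inside _add failed */
			break;
	}
	if (err >= 0L)				/* flush the final partial batch */
		err = pci_iommu_batch_end();
	local_irq_restore(flags);

	return err;
}
```
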
18397944 DM |
112 | |
113 | static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages) | |
114 | { | |
115 | unsigned long n, i, start, end, limit; | |
116 | int pass; | |
117 | ||
118 | limit = arena->limit; | |
119 | start = arena->hint; | |
120 | pass = 0; | |
121 | ||
122 | again: | |
123 | n = find_next_zero_bit(arena->map, limit, start); | |
124 | end = n + npages; | |
125 | if (unlikely(end >= limit)) { | |
126 | if (likely(pass < 1)) { | |
127 | limit = start; | |
128 | start = 0; | |
129 | pass++; | |
130 | goto again; | |
131 | } else { | |
132 | /* Scanned the whole thing, give up. */ | |
133 | return -1; | |
134 | } | |
135 | } | |
136 | ||
137 | for (i = n; i < end; i++) { | |
138 | if (test_bit(i, arena->map)) { | |
139 | start = i + 1; | |
140 | goto again; | |
141 | } | |
142 | } | |
143 | ||
144 | for (i = n; i < end; i++) | |
145 | __set_bit(i, arena->map); | |
146 | ||
147 | arena->hint = end; | |
148 | ||
149 | return n; | |
150 | } | |
151 | ||
152 | static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) | |
153 | { | |
154 | unsigned long i; | |
155 | ||
156 | for (i = base; i < (base + npages); i++) | |
157 | __clear_bit(i, arena->map); | |
158 | } | |
159 | ||
42f14237 | 160 | static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) |
8f6a93a1 | 161 | { |
18397944 | 162 | struct pci_iommu *iommu; |
7c8f486a | 163 | unsigned long flags, order, first_page, npages, n; |
18397944 DM |
164 | void *ret; |
165 | long entry; | |
18397944 DM |
166 | |
167 | size = IO_PAGE_ALIGN(size); | |
168 | order = get_order(size); | |
6a32fd4d | 169 | if (unlikely(order >= MAX_ORDER)) |
18397944 DM |
170 | return NULL; |
171 | ||
172 | npages = size >> IO_PAGE_SHIFT; | |
18397944 | 173 | |
42f14237 | 174 | first_page = __get_free_pages(gfp, order); |
6a32fd4d | 175 | if (unlikely(first_page == 0UL)) |
18397944 | 176 | return NULL; |
e7a0453e | 177 | |
18397944 DM |
178 | memset((char *)first_page, 0, PAGE_SIZE << order); |
179 | ||
a2fb23af | 180 | iommu = pdev->dev.archdata.iommu; |
18397944 DM |
181 | |
182 | spin_lock_irqsave(&iommu->lock, flags); | |
183 | entry = pci_arena_alloc(&iommu->arena, npages); | |
184 | spin_unlock_irqrestore(&iommu->lock, flags); | |
185 | ||
6a32fd4d DM |
186 | if (unlikely(entry < 0L)) |
187 | goto arena_alloc_fail; | |
18397944 DM |
188 | |
189 | *dma_addrp = (iommu->page_table_map_base + | |
190 | (entry << IO_PAGE_SHIFT)); | |
191 | ret = (void *) first_page; | |
192 | first_page = __pa(first_page); | |
193 | ||
6a32fd4d | 194 | local_irq_save(flags); |
18397944 | 195 | |
6a32fd4d DM |
196 | pci_iommu_batch_start(pdev, |
197 | (HV_PCI_MAP_ATTR_READ | | |
198 | HV_PCI_MAP_ATTR_WRITE), | |
199 | entry); | |
18397944 | 200 | |
6a32fd4d DM |
201 | for (n = 0; n < npages; n++) { |
202 | long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE)); | |
203 | if (unlikely(err < 0L)) | |
204 | goto iommu_map_fail; | |
205 | } | |
18397944 | 206 | |
6a32fd4d DM |
207 | if (unlikely(pci_iommu_batch_end() < 0L)) |
208 | goto iommu_map_fail; | |
18397944 | 209 | |
6a32fd4d | 210 | local_irq_restore(flags); |
18397944 DM |
211 | |
212 | return ret; | |
6a32fd4d DM |
213 | |
214 | iommu_map_fail: | |
215 | /* Interrupts are disabled. */ | |
216 | spin_lock(&iommu->lock); | |
217 | pci_arena_free(&iommu->arena, entry, npages); | |
218 | spin_unlock_irqrestore(&iommu->lock, flags); | |
219 | ||
220 | arena_alloc_fail: | |
221 | free_pages(first_page, order); | |
222 | return NULL; | |
8f6a93a1 DM |
223 | } |
224 | ||
225 | static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) | |
226 | { | |
a2fb23af | 227 | struct pci_pbm_info *pbm; |
18397944 | 228 | struct pci_iommu *iommu; |
7c8f486a DM |
229 | unsigned long flags, order, npages, entry; |
230 | u32 devhandle; | |
18397944 DM |
231 | |
232 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; | |
a2fb23af DM |
233 | iommu = pdev->dev.archdata.iommu; |
234 | pbm = pdev->dev.archdata.host_controller; | |
235 | devhandle = pbm->devhandle; | |
18397944 DM |
236 | entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); |
237 | ||
238 | spin_lock_irqsave(&iommu->lock, flags); | |
239 | ||
240 | pci_arena_free(&iommu->arena, entry, npages); | |
241 | ||
242 | do { | |
243 | unsigned long num; | |
244 | ||
245 | num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), | |
246 | npages); | |
247 | entry += num; | |
248 | npages -= num; | |
249 | } while (npages != 0); | |
250 | ||
251 | spin_unlock_irqrestore(&iommu->lock, flags); | |
252 | ||
253 | order = get_order(size); | |
254 | if (order < 10) | |
255 | free_pages((unsigned long)cpu, order); | |
8f6a93a1 DM |
256 | } |
257 | ||
258 | static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) | |
259 | { | |
18397944 DM |
260 | struct pci_iommu *iommu; |
261 | unsigned long flags, npages, oaddr; | |
7c8f486a | 262 | unsigned long i, base_paddr; |
6a32fd4d | 263 | u32 bus_addr, ret; |
18397944 DM |
264 | unsigned long prot; |
265 | long entry; | |
18397944 | 266 | |
a2fb23af | 267 | iommu = pdev->dev.archdata.iommu; |
18397944 DM |
268 | |
269 | if (unlikely(direction == PCI_DMA_NONE)) | |
270 | goto bad; | |
271 | ||
272 | oaddr = (unsigned long)ptr; | |
273 | npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); | |
274 | npages >>= IO_PAGE_SHIFT; | |
18397944 DM |
275 | |
276 | spin_lock_irqsave(&iommu->lock, flags); | |
277 | entry = pci_arena_alloc(&iommu->arena, npages); | |
278 | spin_unlock_irqrestore(&iommu->lock, flags); | |
279 | ||
280 | if (unlikely(entry < 0L)) | |
281 | goto bad; | |
282 | ||
283 | bus_addr = (iommu->page_table_map_base + | |
284 | (entry << IO_PAGE_SHIFT)); | |
285 | ret = bus_addr | (oaddr & ~IO_PAGE_MASK); | |
286 | base_paddr = __pa(oaddr & IO_PAGE_MASK); | |
287 | prot = HV_PCI_MAP_ATTR_READ; | |
288 | if (direction != PCI_DMA_TODEVICE) | |
289 | prot |= HV_PCI_MAP_ATTR_WRITE; | |
290 | ||
6a32fd4d | 291 | local_irq_save(flags); |
18397944 | 292 | |
6a32fd4d | 293 | pci_iommu_batch_start(pdev, prot, entry); |
18397944 | 294 | |
6a32fd4d DM |
295 | for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { |
296 | long err = pci_iommu_batch_add(base_paddr); | |
297 | if (unlikely(err < 0L)) | |
298 | goto iommu_map_fail; | |
299 | } | |
300 | if (unlikely(pci_iommu_batch_end() < 0L)) | |
301 | goto iommu_map_fail; | |
18397944 | 302 | |
6a32fd4d | 303 | local_irq_restore(flags); |
18397944 DM |
304 | |
305 | return ret; | |
306 | ||
307 | bad: | |
308 | if (printk_ratelimit()) | |
309 | WARN_ON(1); | |
310 | return PCI_DMA_ERROR_CODE; | |
6a32fd4d DM |
311 | |
312 | iommu_map_fail: | |
313 | /* Interrupts are disabled. */ | |
314 | spin_lock(&iommu->lock); | |
315 | pci_arena_free(&iommu->arena, entry, npages); | |
316 | spin_unlock_irqrestore(&iommu->lock, flags); | |
317 | ||
318 | return PCI_DMA_ERROR_CODE; | |
8f6a93a1 DM |
319 | } |
320 | ||
321 | static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) | |
322 | { | |
a2fb23af | 323 | struct pci_pbm_info *pbm; |
18397944 | 324 | struct pci_iommu *iommu; |
7c8f486a | 325 | unsigned long flags, npages; |
18397944 | 326 | long entry; |
7c8f486a | 327 | u32 devhandle; |
18397944 DM |
328 | |
329 | if (unlikely(direction == PCI_DMA_NONE)) { | |
330 | if (printk_ratelimit()) | |
331 | WARN_ON(1); | |
332 | return; | |
333 | } | |
334 | ||
a2fb23af DM |
335 | iommu = pdev->dev.archdata.iommu; |
336 | pbm = pdev->dev.archdata.host_controller; | |
337 | devhandle = pbm->devhandle; | |
18397944 DM |
338 | |
339 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); | |
340 | npages >>= IO_PAGE_SHIFT; | |
341 | bus_addr &= IO_PAGE_MASK; | |
342 | ||
343 | spin_lock_irqsave(&iommu->lock, flags); | |
344 | ||
345 | entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; | |
346 | pci_arena_free(&iommu->arena, entry, npages); | |
347 | ||
348 | do { | |
349 | unsigned long num; | |
350 | ||
351 | num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), | |
352 | npages); | |
353 | entry += num; | |
354 | npages -= num; | |
355 | } while (npages != 0); | |
356 | ||
357 | spin_unlock_irqrestore(&iommu->lock, flags); | |
358 | } | |
359 | ||
360 | #define SG_ENT_PHYS_ADDRESS(SG) \ | |
361 | (__pa(page_address((SG)->page)) + (SG)->offset) | |
362 | ||
6a32fd4d | 363 | static inline long fill_sg(long entry, struct pci_dev *pdev, |
18397944 DM |
364 | struct scatterlist *sg, |
365 | int nused, int nelems, unsigned long prot) | |
366 | { | |
367 | struct scatterlist *dma_sg = sg; | |
368 | struct scatterlist *sg_end = sg + nelems; | |
6a32fd4d DM |
369 | unsigned long flags; |
370 | int i; | |
371 | ||
372 | local_irq_save(flags); | |
373 | ||
374 | pci_iommu_batch_start(pdev, prot, entry); | |
18397944 | 375 | |
18397944 DM |
376 | for (i = 0; i < nused; i++) { |
377 | unsigned long pteval = ~0UL; | |
378 | u32 dma_npages; | |
379 | ||
380 | dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) + | |
381 | dma_sg->dma_length + | |
382 | ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT; | |
383 | do { | |
384 | unsigned long offset; | |
385 | signed int len; | |
386 | ||
387 | /* If we are here, we know we have at least one | |
388 | * more page to map. So walk forward until we | |
389 | * hit a page crossing, and begin creating new | |
390 | * mappings from that spot. | |
391 | */ | |
392 | for (;;) { | |
393 | unsigned long tmp; | |
394 | ||
395 | tmp = SG_ENT_PHYS_ADDRESS(sg); | |
396 | len = sg->length; | |
397 | if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) { | |
398 | pteval = tmp & IO_PAGE_MASK; | |
399 | offset = tmp & (IO_PAGE_SIZE - 1UL); | |
400 | break; | |
401 | } | |
402 | if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) { | |
403 | pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK; | |
404 | offset = 0UL; | |
405 | len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL))); | |
406 | break; | |
407 | } | |
408 | sg++; | |
409 | } | |
410 | ||
411 | pteval = (pteval & IOPTE_PAGE); | |
412 | while (len > 0) { | |
6a32fd4d DM |
413 | long err; |
414 | ||
415 | err = pci_iommu_batch_add(pteval); | |
416 | if (unlikely(err < 0L)) | |
417 | goto iommu_map_failed; | |
418 | ||
18397944 DM |
419 | pteval += IO_PAGE_SIZE; |
420 | len -= (IO_PAGE_SIZE - offset); | |
421 | offset = 0; | |
422 | dma_npages--; | |
423 | } | |
424 | ||
425 | pteval = (pteval & IOPTE_PAGE) + len; | |
426 | sg++; | |
427 | ||
428 | /* Skip over any tail mappings we've fully mapped, | |
429 | * adjusting pteval along the way. Stop when we | |
430 | * detect a page crossing event. | |
431 | */ | |
432 | while (sg < sg_end && | |
433 | (pteval << (64 - IO_PAGE_SHIFT)) != 0UL && | |
434 | (pteval == SG_ENT_PHYS_ADDRESS(sg)) && | |
435 | ((pteval ^ | |
436 | (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) { | |
437 | pteval += sg->length; | |
438 | sg++; | |
439 | } | |
440 | if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL) | |
441 | pteval = ~0UL; | |
442 | } while (dma_npages != 0); | |
443 | dma_sg++; | |
444 | } | |
445 | ||
6a32fd4d DM |
446 | if (unlikely(pci_iommu_batch_end() < 0L)) |
447 | goto iommu_map_failed; | |
18397944 | 448 | |
6a32fd4d DM |
449 | local_irq_restore(flags); |
450 | return 0; | |
18397944 | 451 | |
6a32fd4d DM |
452 | iommu_map_failed: |
453 | local_irq_restore(flags); | |
454 | return -1L; | |
8f6a93a1 DM |
455 | } |
456 | ||
457 | static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | |
458 | { | |
18397944 | 459 | struct pci_iommu *iommu; |
7c8f486a | 460 | unsigned long flags, npages, prot; |
6a32fd4d | 461 | u32 dma_base; |
18397944 | 462 | struct scatterlist *sgtmp; |
6a32fd4d | 463 | long entry, err; |
18397944 DM |
464 | int used; |
465 | ||
466 | /* Fast path single entry scatterlists. */ | |
467 | if (nelems == 1) { | |
468 | sglist->dma_address = | |
469 | pci_4v_map_single(pdev, | |
470 | (page_address(sglist->page) + sglist->offset), | |
471 | sglist->length, direction); | |
472 | if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE)) | |
473 | return 0; | |
474 | sglist->dma_length = sglist->length; | |
475 | return 1; | |
476 | } | |
477 | ||
a2fb23af | 478 | iommu = pdev->dev.archdata.iommu; |
18397944 DM |
479 | |
480 | if (unlikely(direction == PCI_DMA_NONE)) | |
481 | goto bad; | |
482 | ||
483 | /* Step 1: Prepare scatter list. */ | |
484 | npages = prepare_sg(sglist, nelems); | |
18397944 DM |
485 | |
486 | /* Step 2: Allocate a cluster and context, if necessary. */ | |
487 | spin_lock_irqsave(&iommu->lock, flags); | |
488 | entry = pci_arena_alloc(&iommu->arena, npages); | |
489 | spin_unlock_irqrestore(&iommu->lock, flags); | |
490 | ||
491 | if (unlikely(entry < 0L)) | |
492 | goto bad; | |
493 | ||
494 | dma_base = iommu->page_table_map_base + | |
495 | (entry << IO_PAGE_SHIFT); | |
496 | ||
497 | /* Step 3: Normalize DMA addresses. */ | |
498 | used = nelems; | |
499 | ||
500 | sgtmp = sglist; | |
501 | while (used && sgtmp->dma_length) { | |
502 | sgtmp->dma_address += dma_base; | |
503 | sgtmp++; | |
504 | used--; | |
505 | } | |
506 | used = nelems - used; | |
507 | ||
508 | /* Step 4: Create the mappings. */ | |
509 | prot = HV_PCI_MAP_ATTR_READ; | |
510 | if (direction != PCI_DMA_TODEVICE) | |
511 | prot |= HV_PCI_MAP_ATTR_WRITE; | |
512 | ||
6a32fd4d DM |
513 | err = fill_sg(entry, pdev, sglist, used, nelems, prot); |
514 | if (unlikely(err < 0L)) | |
515 | goto iommu_map_failed; | |
18397944 DM |
516 | |
517 | return used; | |
518 | ||
519 | bad: | |
520 | if (printk_ratelimit()) | |
521 | WARN_ON(1); | |
522 | return 0; | |
6a32fd4d DM |
523 | |
524 | iommu_map_failed: | |
525 | spin_lock_irqsave(&iommu->lock, flags); | |
526 | pci_arena_free(&iommu->arena, entry, npages); | |
527 | spin_unlock_irqrestore(&iommu->lock, flags); | |
528 | ||
529 | return 0; | |
8f6a93a1 DM |
530 | } |
531 | ||
532 | static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | |
533 | { | |
a2fb23af | 534 | struct pci_pbm_info *pbm; |
18397944 | 535 | struct pci_iommu *iommu; |
7c8f486a | 536 | unsigned long flags, i, npages; |
18397944 | 537 | long entry; |
7c8f486a | 538 | u32 devhandle, bus_addr; |
18397944 DM |
539 | |
540 | if (unlikely(direction == PCI_DMA_NONE)) { | |
541 | if (printk_ratelimit()) | |
542 | WARN_ON(1); | |
543 | } | |
544 | ||
a2fb23af DM |
545 | iommu = pdev->dev.archdata.iommu; |
546 | pbm = pdev->dev.archdata.host_controller; | |
547 | devhandle = pbm->devhandle; | |
18397944 DM |
548 | |
549 | bus_addr = sglist->dma_address & IO_PAGE_MASK; | |
550 | ||
551 | for (i = 1; i < nelems; i++) | |
552 | if (sglist[i].dma_length == 0) | |
553 | break; | |
554 | i--; | |
555 | npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - | |
556 | bus_addr) >> IO_PAGE_SHIFT; | |
557 | ||
558 | entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | |
559 | ||
560 | spin_lock_irqsave(&iommu->lock, flags); | |
561 | ||
562 | pci_arena_free(&iommu->arena, entry, npages); | |
563 | ||
564 | do { | |
565 | unsigned long num; | |
566 | ||
567 | num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), | |
568 | npages); | |
569 | entry += num; | |
570 | npages -= num; | |
571 | } while (npages != 0); | |
572 | ||
573 | spin_unlock_irqrestore(&iommu->lock, flags); | |
8f6a93a1 DM |
574 | } |
575 | ||
576 | static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) | |
577 | { | |
18397944 | 578 | /* Nothing to do... */ |
8f6a93a1 DM |
579 | } |
580 | ||
581 | static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) | |
582 | { | |
18397944 | 583 | /* Nothing to do... */ |
8f6a93a1 DM |
584 | } |
585 | ||
586 | struct pci_iommu_ops pci_sun4v_iommu_ops = { | |
587 | .alloc_consistent = pci_4v_alloc_consistent, | |
588 | .free_consistent = pci_4v_free_consistent, | |
589 | .map_single = pci_4v_map_single, | |
590 | .unmap_single = pci_4v_unmap_single, | |
591 | .map_sg = pci_4v_map_sg, | |
592 | .unmap_sg = pci_4v_unmap_sg, | |
593 | .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu, | |
594 | .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu, | |
595 | }; | |
596 | ||
46b30493 DM |
597 | static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func) |
598 | { | |
059833eb DM |
599 | if (bus < pbm->pci_first_busno || |
600 | bus > pbm->pci_last_busno) | |
601 | return 1; | |
a2fb23af | 602 | return 0; |
059833eb DM |
603 | } |
604 | ||
bade5622 DM |
605 | static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, |
606 | int where, int size, u32 *value) | |
607 | { | |
7eae642f | 608 | struct pci_pbm_info *pbm = bus_dev->sysdata; |
059833eb | 609 | u32 devhandle = pbm->devhandle; |
7eae642f DM |
610 | unsigned int bus = bus_dev->number; |
611 | unsigned int device = PCI_SLOT(devfn); | |
612 | unsigned int func = PCI_FUNC(devfn); | |
613 | unsigned long ret; | |
614 | ||
987b6de7 | 615 | if (pci_sun4v_out_of_range(pbm, bus, device, func)) { |
059833eb DM |
616 | ret = ~0UL; |
617 | } else { | |
618 | ret = pci_sun4v_config_get(devhandle, | |
619 | HV_PCI_DEVICE_BUILD(bus, device, func), | |
620 | where, size); | |
10804828 | 621 | #if 0 |
987b6de7 | 622 | printk("rcfg: [%x:%x:%x:%d]=[%lx]\n", |
10804828 DM |
623 | devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), |
624 | where, size, ret); | |
625 | #endif | |
059833eb | 626 | } |
7eae642f DM |
627 | switch (size) { |
628 | case 1: | |
629 | *value = ret & 0xff; | |
630 | break; | |
631 | case 2: | |
632 | *value = ret & 0xffff; | |
633 | break; | |
634 | case 4: | |
635 | *value = ret & 0xffffffff; | |
636 | break; | |
637 | }; | |
638 | ||
639 | ||
640 | return PCIBIOS_SUCCESSFUL; | |
bade5622 DM |
641 | } |
642 | ||
643 | static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, | |
644 | int where, int size, u32 value) | |
645 | { | |
7eae642f | 646 | struct pci_pbm_info *pbm = bus_dev->sysdata; |
059833eb | 647 | u32 devhandle = pbm->devhandle; |
7eae642f DM |
648 | unsigned int bus = bus_dev->number; |
649 | unsigned int device = PCI_SLOT(devfn); | |
650 | unsigned int func = PCI_FUNC(devfn); | |
651 | unsigned long ret; | |
652 | ||
987b6de7 | 653 | if (pci_sun4v_out_of_range(pbm, bus, device, func)) { |
059833eb DM |
654 | /* Do nothing. */ |
655 | } else { | |
656 | ret = pci_sun4v_config_put(devhandle, | |
657 | HV_PCI_DEVICE_BUILD(bus, device, func), | |
658 | where, size, value); | |
10804828 | 659 | #if 0 |
987b6de7 | 660 | printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n", |
10804828 DM |
661 | devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), |
662 | where, size, value, ret); | |
663 | #endif | |
059833eb | 664 | } |
7eae642f | 665 | return PCIBIOS_SUCCESSFUL; |
bade5622 DM |
666 | } |
667 | ||
668 | static struct pci_ops pci_sun4v_ops = { | |
669 | .read = pci_sun4v_read_pci_cfg, | |
670 | .write = pci_sun4v_write_pci_cfg, | |
671 | }; | |
672 | ||
673 | ||
c2609267 DM |
674 | static void pbm_scan_bus(struct pci_controller_info *p, |
675 | struct pci_pbm_info *pbm) | |
676 | { | |
a2fb23af | 677 | pbm->pci_bus = pci_scan_one_pbm(pbm); |
c2609267 DM |
678 | } |
679 | ||
bade5622 DM |
680 | static void pci_sun4v_scan_bus(struct pci_controller_info *p) |
681 | { | |
e87dc350 DM |
682 | struct property *prop; |
683 | struct device_node *dp; | |
684 | ||
685 | if ((dp = p->pbm_A.prom_node) != NULL) { | |
686 | prop = of_find_property(dp, "66mhz-capable", NULL); | |
687 | p->pbm_A.is_66mhz_capable = (prop != NULL); | |
c2609267 DM |
688 | |
689 | pbm_scan_bus(p, &p->pbm_A); | |
690 | } | |
e87dc350 DM |
691 | if ((dp = p->pbm_B.prom_node) != NULL) { |
692 | prop = of_find_property(dp, "66mhz-capable", NULL); | |
693 | p->pbm_B.is_66mhz_capable = (prop != NULL); | |
c2609267 DM |
694 | |
695 | pbm_scan_bus(p, &p->pbm_B); | |
696 | } | |
697 | ||
698 | /* XXX register error interrupt handlers XXX */ | |
bade5622 DM |
699 | } |
700 | ||
e7a0453e DM |
701 | static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, |
702 | struct pci_iommu *iommu) | |
18397944 DM |
703 | { |
704 | struct pci_iommu_arena *arena = &iommu->arena; | |
e7a0453e | 705 | unsigned long i, cnt = 0; |
7c8f486a | 706 | u32 devhandle; |
18397944 DM |
707 | |
708 | devhandle = pbm->devhandle; | |
709 | for (i = 0; i < arena->limit; i++) { | |
710 | unsigned long ret, io_attrs, ra; | |
711 | ||
712 | ret = pci_sun4v_iommu_getmap(devhandle, | |
713 | HV_PCI_TSBID(0, i), | |
714 | &io_attrs, &ra); | |
e7a0453e | 715 | if (ret == HV_EOK) { |
c2a5a46b DM |
716 | if (page_in_phys_avail(ra)) { |
717 | pci_sun4v_iommu_demap(devhandle, | |
718 | HV_PCI_TSBID(0, i), 1); | |
719 | } else { | |
720 | cnt++; | |
721 | __set_bit(i, arena->map); | |
722 | } | |
e7a0453e | 723 | } |
18397944 | 724 | } |
e7a0453e DM |
725 | |
726 | return cnt; | |
18397944 DM |
727 | } |
728 | ||
bade5622 DM |
729 | static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) |
730 | { | |
18397944 | 731 | struct pci_iommu *iommu = pbm->iommu; |
e87dc350 | 732 | struct property *prop; |
18397944 DM |
733 | unsigned long num_tsb_entries, sz; |
734 | u32 vdma[2], dma_mask, dma_offset; | |
e87dc350 DM |
735 | int tsbsize; |
736 | ||
737 | prop = of_find_property(pbm->prom_node, "virtual-dma", NULL); | |
738 | if (prop) { | |
739 | u32 *val = prop->value; | |
18397944 | 740 | |
e87dc350 DM |
741 | vdma[0] = val[0]; |
742 | vdma[1] = val[1]; | |
743 | } else { | |
18397944 DM |
744 | /* No property, use default values. */ |
745 | vdma[0] = 0x80000000; | |
746 | vdma[1] = 0x80000000; | |
747 | } | |
748 | ||
749 | dma_mask = vdma[0]; | |
750 | switch (vdma[1]) { | |
751 | case 0x20000000: | |
752 | dma_mask |= 0x1fffffff; | |
753 | tsbsize = 64; | |
754 | break; | |
755 | ||
756 | case 0x40000000: | |
757 | dma_mask |= 0x3fffffff; | |
758 | tsbsize = 128; | |
759 | break; | |
760 | ||
761 | case 0x80000000: | |
762 | dma_mask |= 0x7fffffff; | |
e7a0453e | 763 | tsbsize = 256; |
18397944 DM |
764 | break; |
765 | ||
766 | default: | |
767 | prom_printf("PCI-SUN4V: strange virtual-dma size.\n"); | |
768 | prom_halt(); | |
769 | }; | |
770 | ||
e7a0453e DM |
771 | tsbsize *= (8 * 1024); |
772 | ||
18397944 DM |
773 | num_tsb_entries = tsbsize / sizeof(iopte_t); |
774 | ||
775 | dma_offset = vdma[0]; | |
776 | ||
777 | /* Setup initial software IOMMU state. */ | |
778 | spin_lock_init(&iommu->lock); | |
779 | iommu->ctx_lowest_free = 1; | |
780 | iommu->page_table_map_base = dma_offset; | |
781 | iommu->dma_addr_mask = dma_mask; | |
782 | ||
783 | /* Allocate and initialize the free area map. */ | |
784 | sz = num_tsb_entries / 8; | |
785 | sz = (sz + 7UL) & ~7UL; | |
982c2064 | 786 | iommu->arena.map = kzalloc(sz, GFP_KERNEL); |
18397944 DM |
787 | if (!iommu->arena.map) { |
788 | prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); | |
789 | prom_halt(); | |
790 | } | |
18397944 DM |
791 | iommu->arena.limit = num_tsb_entries; |
792 | ||
e7a0453e | 793 | sz = probe_existing_entries(pbm, iommu); |
c2a5a46b DM |
794 | if (sz) |
795 | printk("%s: Imported %lu TSB entries from OBP\n", | |
796 | pbm->name, sz); | |
bade5622 DM |
797 | } |
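
As a worked example of the IOMMU sizing done above (assuming 8K IO pages and an 8-byte iopte_t, as on sparc64): with the default virtual-dma property of {0x80000000, 0x80000000}, dma_mask becomes 0x80000000 | 0x7fffffff = 0xffffffff, tsbsize is 256 * 8 * 1024 = 2,097,152 bytes, so num_tsb_entries = 2,097,152 / 8 = 262,144 IOTSB entries, which at 8K per entry covers exactly the 2GB (0x80000000-byte) DVMA window advertised by the property.
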
798 | ||
10804828 DM |
799 | static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm) |
800 | { | |
e87dc350 DM |
801 | struct property *prop; |
802 | unsigned int *busrange; | |
803 | ||
804 | prop = of_find_property(pbm->prom_node, "bus-range", NULL); | |
805 | ||
806 | busrange = prop->value; | |
10804828 DM |
807 | |
808 | pbm->pci_first_busno = busrange[0]; | |
809 | pbm->pci_last_busno = busrange[1]; | |
810 | ||
811 | } | |
812 | ||
35a17eb6 DM |
813 | #ifdef CONFIG_PCI_MSI |
814 | struct pci_sun4v_msiq_entry { | |
815 | u64 version_type; | |
816 | #define MSIQ_VERSION_MASK 0xffffffff00000000UL | |
817 | #define MSIQ_VERSION_SHIFT 32 | |
818 | #define MSIQ_TYPE_MASK 0x00000000000000ffUL | |
819 | #define MSIQ_TYPE_SHIFT 0 | |
820 | #define MSIQ_TYPE_NONE 0x00 | |
821 | #define MSIQ_TYPE_MSG 0x01 | |
822 | #define MSIQ_TYPE_MSI32 0x02 | |
823 | #define MSIQ_TYPE_MSI64 0x03 | |
824 | #define MSIQ_TYPE_INTX 0x08 | |
825 | #define MSIQ_TYPE_NONE2 0xff | |
826 | ||
827 | u64 intx_sysino; | |
828 | u64 reserved1; | |
829 | u64 stick; | |
830 | u64 req_id; /* bus/device/func */ | |
831 | #define MSIQ_REQID_BUS_MASK 0xff00UL | |
832 | #define MSIQ_REQID_BUS_SHIFT 8 | |
833 | #define MSIQ_REQID_DEVICE_MASK 0x00f8UL | |
834 | #define MSIQ_REQID_DEVICE_SHIFT 3 | |
835 | #define MSIQ_REQID_FUNC_MASK 0x0007UL | |
836 | #define MSIQ_REQID_FUNC_SHIFT 0 | |
837 | ||
838 | u64 msi_address; | |
839 | ||
840 | /* The format of this value is message type dependent. |
841 | * For MSI bits 15:0 are the data from the MSI packet. | |
842 | * For MSI-X bits 31:0 are the data from the MSI packet. | |
843 | * For MSG, the message code and message routing code where: | |
844 | * bits 39:32 is the bus/device/fn of the msg target-id | |
845 | * bits 18:16 is the message routing code | |
846 | * bits 7:0 is the message code | |
847 | * For INTx the low order 2-bits are: | |
848 | * 00 - INTA | |
849 | * 01 - INTB | |
850 | * 10 - INTC | |
851 | * 11 - INTD | |
852 | */ | |
853 | u64 msi_data; | |
854 | ||
855 | u64 reserved2; | |
856 | }; | |
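
The MSIQ_REQID_* masks above describe how the requester's bus/device/function is packed into req_id; this file never decodes that field itself, but a consumer would pull the pieces apart roughly like this (a minimal sketch, not taken from the kernel):

```c
/* Illustrative decode of the req_id field using the masks defined above. */
static void example_decode_reqid(u64 req_id,
				 unsigned int *bus,
				 unsigned int *device,
				 unsigned int *func)
{
	*bus    = (req_id & MSIQ_REQID_BUS_MASK)    >> MSIQ_REQID_BUS_SHIFT;
	*device = (req_id & MSIQ_REQID_DEVICE_MASK) >> MSIQ_REQID_DEVICE_SHIFT;
	*func   = (req_id & MSIQ_REQID_FUNC_MASK)   >> MSIQ_REQID_FUNC_SHIFT;
}
```
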
857 | ||
858 | /* For now this just runs as a pre-handler for the real interrupt handler. | |
859 | * So we just walk through the queue and ACK all the entries, update the | |
860 | * head pointer, and return. | |
861 | * | |
862 | * In the longer term it would be nice to do something more integrated | |
863 | * wherein we can pass in some of this MSI info to the drivers. This | |
864 | * would be most useful for PCIe fabric error messages, although we could | |
865 | * invoke those directly from the loop here in order to pass the info around. | |
866 | */ | |
867 | static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2) | |
868 | { | |
869 | struct pci_pbm_info *pbm = data1; | |
870 | struct pci_sun4v_msiq_entry *base, *ep; | |
871 | unsigned long msiqid, orig_head, head, type, err; | |
872 | ||
873 | msiqid = (unsigned long) data2; | |
874 | ||
875 | head = 0xdeadbeef; | |
876 | err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head); | |
877 | if (unlikely(err)) | |
878 | goto hv_error_get; | |
879 | ||
880 | if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))) | |
881 | goto bad_offset; | |
882 | ||
883 | head /= sizeof(struct pci_sun4v_msiq_entry); | |
884 | orig_head = head; | |
885 | base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * | |
886 | (pbm->msiq_ent_count * | |
887 | sizeof(struct pci_sun4v_msiq_entry)))); | |
888 | ep = &base[head]; | |
889 | while ((ep->version_type & MSIQ_TYPE_MASK) != 0) { | |
890 | type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT; | |
891 | if (unlikely(type != MSIQ_TYPE_MSI32 && | |
892 | type != MSIQ_TYPE_MSI64)) | |
893 | goto bad_type; | |
894 | ||
895 | pci_sun4v_msi_setstate(pbm->devhandle, | |
896 | ep->msi_data /* msi_num */, | |
897 | HV_MSISTATE_IDLE); | |
898 | ||
899 | /* Clear the entry. */ | |
900 | ep->version_type &= ~MSIQ_TYPE_MASK; | |
901 | ||
902 | /* Go to next entry in ring. */ | |
903 | head++; | |
904 | if (head >= pbm->msiq_ent_count) | |
905 | head = 0; | |
906 | ep = &base[head]; | |
907 | } | |
908 | ||
909 | if (likely(head != orig_head)) { | |
910 | /* ACK entries by updating head pointer. */ | |
911 | head *= sizeof(struct pci_sun4v_msiq_entry); | |
912 | err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head); | |
913 | if (unlikely(err)) | |
914 | goto hv_error_set; | |
915 | } | |
916 | return; | |
917 | ||
918 | hv_error_set: | |
919 | printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err); | |
920 | goto hv_error_cont; | |
921 | ||
922 | hv_error_get: | |
923 | printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err); | |
924 | ||
925 | hv_error_cont: | |
926 | printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n", | |
927 | pbm->devhandle, msiqid, head); | |
928 | return; | |
929 | ||
930 | bad_offset: | |
931 | printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n", | |
932 | head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)); | |
933 | return; | |
934 | ||
935 | bad_type: | |
936 | printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type); | |
937 | return; | |
938 | } | |
939 | ||
940 | static int msi_bitmap_alloc(struct pci_pbm_info *pbm) | |
941 | { | |
942 | unsigned long size, bits_per_ulong; | |
943 | ||
944 | bits_per_ulong = sizeof(unsigned long) * 8; | |
945 | size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1); | |
946 | size /= 8; | |
947 | BUG_ON(size % sizeof(unsigned long)); | |
948 | ||
949 | pbm->msi_bitmap = kzalloc(size, GFP_KERNEL); | |
950 | if (!pbm->msi_bitmap) | |
951 | return -ENOMEM; | |
952 | ||
953 | return 0; | |
954 | } | |
955 | ||
956 | static void msi_bitmap_free(struct pci_pbm_info *pbm) | |
957 | { | |
958 | kfree(pbm->msi_bitmap); | |
959 | pbm->msi_bitmap = NULL; | |
960 | } | |
961 | ||
962 | static int msi_queue_alloc(struct pci_pbm_info *pbm) | |
963 | { | |
964 | unsigned long q_size, alloc_size, pages, order; | |
965 | int i; | |
966 | ||
967 | q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry); | |
968 | alloc_size = (pbm->msiq_num * q_size); | |
969 | order = get_order(alloc_size); | |
970 | pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order); | |
971 | if (pages == 0UL) { | |
972 | printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n", | |
973 | order); | |
974 | return -ENOMEM; | |
975 | } | |
976 | memset((char *)pages, 0, PAGE_SIZE << order); | |
977 | pbm->msi_queues = (void *) pages; | |
978 | ||
979 | for (i = 0; i < pbm->msiq_num; i++) { | |
980 | unsigned long err, base = __pa(pages + (i * q_size)); | |
981 | unsigned long ret1, ret2; | |
982 | ||
983 | err = pci_sun4v_msiq_conf(pbm->devhandle, | |
984 | pbm->msiq_first + i, | |
985 | base, pbm->msiq_ent_count); | |
986 | if (err) { | |
987 | printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n", | |
988 | err); | |
989 | goto h_error; | |
990 | } | |
991 | ||
992 | err = pci_sun4v_msiq_info(pbm->devhandle, | |
993 | pbm->msiq_first + i, | |
994 | &ret1, &ret2); | |
995 | if (err) { | |
996 | printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n", | |
997 | err); | |
998 | goto h_error; | |
999 | } | |
1000 | if (ret1 != base || ret2 != pbm->msiq_ent_count) { | |
1001 | printk(KERN_ERR "MSI: Bogus qconf " | |
1002 | "expected[%lx:%x] got[%lx:%lx]\n", | |
1003 | base, pbm->msiq_ent_count, | |
1004 | ret1, ret2); | |
1005 | goto h_error; | |
1006 | } | |
1007 | } | |
1008 | ||
1009 | return 0; | |
1010 | ||
1011 | h_error: | |
1012 | free_pages(pages, order); | |
1013 | return -EINVAL; | |
1014 | } | |
1015 | ||
1016 | static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) | |
1017 | { | |
6a23acf3 | 1018 | const u32 *val; |
35a17eb6 DM |
1019 | int len; |
1020 | ||
1021 | val = of_get_property(pbm->prom_node, "#msi-eqs", &len); | |
1022 | if (!val || len != 4) | |
1023 | goto no_msi; | |
1024 | pbm->msiq_num = *val; | |
1025 | if (pbm->msiq_num) { | |
6a23acf3 | 1026 | const struct msiq_prop { |
35a17eb6 DM |
1027 | u32 first_msiq; |
1028 | u32 num_msiq; | |
1029 | u32 first_devino; | |
1030 | } *mqp; | |
6a23acf3 | 1031 | const struct msi_range_prop { |
35a17eb6 DM |
1032 | u32 first_msi; |
1033 | u32 num_msi; | |
1034 | } *mrng; | |
6a23acf3 | 1035 | const struct addr_range_prop { |
35a17eb6 DM |
1036 | u32 msi32_high; |
1037 | u32 msi32_low; | |
1038 | u32 msi32_len; | |
1039 | u32 msi64_high; | |
1040 | u32 msi64_low; | |
1041 | u32 msi64_len; | |
1042 | } *arng; | |
1043 | ||
1044 | val = of_get_property(pbm->prom_node, "msi-eq-size", &len); | |
1045 | if (!val || len != 4) | |
1046 | goto no_msi; | |
1047 | ||
1048 | pbm->msiq_ent_count = *val; | |
1049 | ||
1050 | mqp = of_get_property(pbm->prom_node, | |
1051 | "msi-eq-to-devino", &len); | |
1052 | if (!mqp || len != sizeof(struct msiq_prop)) | |
1053 | goto no_msi; | |
1054 | ||
1055 | pbm->msiq_first = mqp->first_msiq; | |
1056 | pbm->msiq_first_devino = mqp->first_devino; | |
1057 | ||
1058 | val = of_get_property(pbm->prom_node, "#msi", &len); | |
1059 | if (!val || len != 4) | |
1060 | goto no_msi; | |
1061 | pbm->msi_num = *val; | |
1062 | ||
1063 | mrng = of_get_property(pbm->prom_node, "msi-ranges", &len); | |
1064 | if (!mrng || len != sizeof(struct msi_range_prop)) | |
1065 | goto no_msi; | |
1066 | pbm->msi_first = mrng->first_msi; | |
1067 | ||
1068 | val = of_get_property(pbm->prom_node, "msi-data-mask", &len); | |
1069 | if (!val || len != 4) | |
1070 | goto no_msi; | |
1071 | pbm->msi_data_mask = *val; | |
1072 | ||
1073 | val = of_get_property(pbm->prom_node, "msix-data-width", &len); | |
1074 | if (!val || len != 4) | |
1075 | goto no_msi; | |
1076 | pbm->msix_data_width = *val; | |
1077 | ||
1078 | arng = of_get_property(pbm->prom_node, "msi-address-ranges", | |
1079 | &len); | |
1080 | if (!arng || len != sizeof(struct addr_range_prop)) | |
1081 | goto no_msi; | |
1082 | pbm->msi32_start = ((u64)arng->msi32_high << 32) | | |
1083 | (u64) arng->msi32_low; | |
1084 | pbm->msi64_start = ((u64)arng->msi64_high << 32) | | |
1085 | (u64) arng->msi64_low; | |
1086 | pbm->msi32_len = arng->msi32_len; | |
1087 | pbm->msi64_len = arng->msi64_len; | |
1088 | ||
1089 | if (msi_bitmap_alloc(pbm)) | |
1090 | goto no_msi; | |
1091 | ||
1092 | if (msi_queue_alloc(pbm)) { | |
1093 | msi_bitmap_free(pbm); | |
1094 | goto no_msi; | |
1095 | } | |
1096 | ||
1097 | printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] " | |
1098 | "devino[0x%x]\n", | |
1099 | pbm->name, | |
1100 | pbm->msiq_first, pbm->msiq_num, | |
1101 | pbm->msiq_ent_count, | |
1102 | pbm->msiq_first_devino); | |
1103 | printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] " | |
1104 | "width[%u]\n", | |
1105 | pbm->name, | |
1106 | pbm->msi_first, pbm->msi_num, pbm->msi_data_mask, | |
1107 | pbm->msix_data_width); | |
1108 | printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] " | |
1109 | "addr64[0x%lx:0x%x]\n", | |
1110 | pbm->name, | |
1111 | pbm->msi32_start, pbm->msi32_len, | |
1112 | pbm->msi64_start, pbm->msi64_len); | |
1113 | printk(KERN_INFO "%s: MSI queues at RA [%p]\n", | |
1114 | pbm->name, | |
1115 | pbm->msi_queues); | |
1116 | } | |
1117 | ||
1118 | return; | |
1119 | ||
1120 | no_msi: | |
1121 | pbm->msiq_num = 0; | |
1122 | printk(KERN_INFO "%s: No MSI support.\n", pbm->name); | |
1123 | } | |
1124 | ||
1125 | static int alloc_msi(struct pci_pbm_info *pbm) | |
1126 | { | |
1127 | int i; | |
1128 | ||
1129 | for (i = 0; i < pbm->msi_num; i++) { | |
1130 | if (!test_and_set_bit(i, pbm->msi_bitmap)) | |
1131 | return i + pbm->msi_first; | |
1132 | } | |
1133 | ||
1134 | return -ENOENT; | |
1135 | } | |
1136 | ||
1137 | static void free_msi(struct pci_pbm_info *pbm, int msi_num) | |
1138 | { | |
1139 | msi_num -= pbm->msi_first; | |
1140 | clear_bit(msi_num, pbm->msi_bitmap); | |
1141 | } | |
1142 | ||
1143 | static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p, | |
1144 | struct pci_dev *pdev, | |
1145 | struct msi_desc *entry) | |
1146 | { | |
a2fb23af | 1147 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; |
35a17eb6 DM |
1148 | unsigned long devino, msiqid; |
1149 | struct msi_msg msg; | |
1150 | int msi_num, err; | |
1151 | ||
1152 | *virt_irq_p = 0; | |
1153 | ||
1154 | msi_num = alloc_msi(pbm); | |
1155 | if (msi_num < 0) | |
1156 | return msi_num; | |
1157 | ||
1158 | devino = sun4v_build_msi(pbm->devhandle, virt_irq_p, | |
1159 | pbm->msiq_first_devino, | |
1160 | (pbm->msiq_first_devino + | |
1161 | pbm->msiq_num)); | |
1162 | err = -ENOMEM; | |
1163 | if (!devino) | |
1164 | goto out_err; | |
1165 | ||
1166 | set_irq_msi(*virt_irq_p, entry); | |
1167 | ||
1168 | msiqid = ((devino - pbm->msiq_first_devino) + | |
1169 | pbm->msiq_first); | |
1170 | ||
1171 | err = -EINVAL; | |
1172 | if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) | |
1173 | if (err) | |
1174 | goto out_err; | |
1175 | ||
1176 | if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) | |
1177 | goto out_err; | |
1178 | ||
1179 | if (pci_sun4v_msi_setmsiq(pbm->devhandle, | |
1180 | msi_num, msiqid, | |
1181 | (entry->msi_attrib.is_64 ? | |
1182 | HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32))) | |
1183 | goto out_err; | |
1184 | ||
1185 | if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE)) | |
1186 | goto out_err; | |
1187 | ||
1188 | if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID)) | |
1189 | goto out_err; | |
1190 | ||
a2fb23af | 1191 | pdev->dev.archdata.msi_num = msi_num; |
35a17eb6 DM |
1192 | |
1193 | if (entry->msi_attrib.is_64) { | |
1194 | msg.address_hi = pbm->msi64_start >> 32; | |
1195 | msg.address_lo = pbm->msi64_start & 0xffffffff; | |
1196 | } else { | |
1197 | msg.address_hi = 0; | |
1198 | msg.address_lo = pbm->msi32_start; | |
1199 | } | |
1200 | msg.data = msi_num; | |
1201 | write_msi_msg(*virt_irq_p, &msg); | |
1202 | ||
1203 | irq_install_pre_handler(*virt_irq_p, | |
1204 | pci_sun4v_msi_prehandler, | |
1205 | pbm, (void *) msiqid); | |
1206 | ||
1207 | return 0; | |
1208 | ||
1209 | out_err: | |
1210 | free_msi(pbm, msi_num); | |
1211 | sun4v_destroy_msi(*virt_irq_p); | |
1212 | *virt_irq_p = 0; | |
1213 | return err; | |
1214 | ||
1215 | } | |
1216 | ||
1217 | static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq, | |
1218 | struct pci_dev *pdev) | |
1219 | { | |
a2fb23af | 1220 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; |
35a17eb6 DM |
1221 | unsigned long msiqid, err; |
1222 | unsigned int msi_num; | |
1223 | ||
a2fb23af | 1224 | msi_num = pdev->dev.archdata.msi_num; |
35a17eb6 DM |
1225 | err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid); |
1226 | if (err) { | |
1227 | printk(KERN_ERR "%s: getmsiq gives error %lu\n", | |
1228 | pbm->name, err); | |
1229 | return; | |
1230 | } | |
1231 | ||
1232 | pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID); | |
1233 | pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID); | |
1234 | ||
1235 | free_msi(pbm, msi_num); | |
1236 | ||
1237 | /* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ | |
1238 | * allocation. | |
1239 | */ | |
1240 | sun4v_destroy_msi(virt_irq); | |
1241 | } | |
1242 | #else /* CONFIG_PCI_MSI */ | |
1243 | static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) | |
1244 | { | |
1245 | } | |
1246 | #endif /* !(CONFIG_PCI_MSI) */ | |
1247 | ||
e87dc350 | 1248 | static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle) |
bade5622 DM |
1249 | { |
1250 | struct pci_pbm_info *pbm; | |
bade5622 | 1251 | |
3833789b DM |
1252 | if (devhandle & 0x40) |
1253 | pbm = &p->pbm_B; | |
1254 | else | |
1255 | pbm = &p->pbm_A; | |
bade5622 DM |
1256 | |
1257 | pbm->parent = p; | |
e87dc350 | 1258 | pbm->prom_node = dp; |
bade5622 | 1259 | |
3833789b | 1260 | pbm->devhandle = devhandle; |
bade5622 | 1261 | |
e87dc350 | 1262 | pbm->name = dp->full_name; |
bade5622 | 1263 | |
e87dc350 | 1264 | printk("%s: SUN4V PCI Bus Module\n", pbm->name); |
bade5622 | 1265 | |
9fd8b647 | 1266 | pci_determine_mem_io_space(pbm); |
bade5622 | 1267 | |
10804828 | 1268 | pci_sun4v_get_bus_range(pbm); |
bade5622 | 1269 | pci_sun4v_iommu_init(pbm); |
35a17eb6 | 1270 | pci_sun4v_msi_init(pbm); |
bade5622 DM |
1271 | } |
1272 | ||
e87dc350 | 1273 | void sun4v_pci_init(struct device_node *dp, char *model_name) |
8f6a93a1 | 1274 | { |
bade5622 DM |
1275 | struct pci_controller_info *p; |
1276 | struct pci_iommu *iommu; | |
e87dc350 DM |
1277 | struct property *prop; |
1278 | struct linux_prom64_registers *regs; | |
7c8f486a DM |
1279 | u32 devhandle; |
1280 | int i; | |
3833789b | 1281 | |
e87dc350 DM |
1282 | prop = of_find_property(dp, "reg", NULL); |
1283 | regs = prop->value; | |
1284 | ||
1285 | devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff; | |
3833789b DM |
1286 | |
1287 | for (p = pci_controller_root; p; p = p->next) { | |
1288 | struct pci_pbm_info *pbm; | |
1289 | ||
1290 | if (p->pbm_A.prom_node && p->pbm_B.prom_node) | |
1291 | continue; | |
1292 | ||
1293 | pbm = (p->pbm_A.prom_node ? | |
1294 | &p->pbm_A : | |
1295 | &p->pbm_B); | |
1296 | ||
0b522497 | 1297 | if (pbm->devhandle == (devhandle ^ 0x40)) { |
e87dc350 | 1298 | pci_sun4v_pbm_init(p, dp, devhandle); |
0b522497 DM |
1299 | return; |
1300 | } | |
3833789b | 1301 | } |
bade5622 | 1302 | |
a283a525 | 1303 | for_each_possible_cpu(i) { |
7c8f486a DM |
1304 | unsigned long page = get_zeroed_page(GFP_ATOMIC); |
1305 | ||
1306 | if (!page) | |
1307 | goto fatal_memory_error; | |
1308 | ||
6a32fd4d | 1309 | per_cpu(pci_iommu_batch, i).pglist = (u64 *) page; |
bade5622 | 1310 | } |
7c8f486a | 1311 | |
982c2064 | 1312 | p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); |
7c8f486a DM |
1313 | if (!p) |
1314 | goto fatal_memory_error; | |
1315 | ||
982c2064 | 1316 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
7c8f486a DM |
1317 | if (!iommu) |
1318 | goto fatal_memory_error; | |
1319 | ||
bade5622 DM |
1320 | p->pbm_A.iommu = iommu; |
1321 | ||
982c2064 | 1322 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
7c8f486a DM |
1323 | if (!iommu) |
1324 | goto fatal_memory_error; | |
1325 | ||
bade5622 DM |
1326 | p->pbm_B.iommu = iommu; |
1327 | ||
1328 | p->next = pci_controller_root; | |
1329 | pci_controller_root = p; | |
1330 | ||
1331 | p->index = pci_num_controllers++; | |
bade5622 DM |
1332 | |
1333 | p->scan_bus = pci_sun4v_scan_bus; | |
35a17eb6 DM |
1334 | #ifdef CONFIG_PCI_MSI |
1335 | p->setup_msi_irq = pci_sun4v_setup_msi_irq; | |
1336 | p->teardown_msi_irq = pci_sun4v_teardown_msi_irq; | |
1337 | #endif | |
bade5622 DM |
1338 | p->pci_ops = &pci_sun4v_ops; |
1339 | ||
1340 | /* Like PSYCHO and SCHIZO we have a 2GB aligned area | |
1341 | * for memory space. | |
1342 | */ | |
1343 | pci_memspace_mask = 0x7fffffffUL; | |
1344 | ||
e87dc350 | 1345 | pci_sun4v_pbm_init(p, dp, devhandle); |
7c8f486a DM |
1346 | return; |
1347 | ||
1348 | fatal_memory_error: | |
1349 | prom_printf("SUN4V_PCI: Fatal memory allocation error.\n"); | |
1350 | prom_halt(); | |
8f6a93a1 | 1351 | } |