/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
 */
5 | ||
6 | #include <linux/kernel.h> | |
7 | #include <linux/types.h> | |
8 | #include <linux/pci.h> | |
9 | #include <linux/init.h> | |
10 | #include <linux/slab.h> | |
11 | #include <linux/interrupt.h> | |
18397944 | 12 | #include <linux/percpu.h> |
35a17eb6 DM |
13 | #include <linux/irq.h> |
14 | #include <linux/msi.h> | |
8f6a93a1 DM |
15 | |
16 | #include <asm/pbm.h> | |
17 | #include <asm/iommu.h> | |
18 | #include <asm/irq.h> | |
19 | #include <asm/upa.h> | |
20 | #include <asm/pstate.h> | |
21 | #include <asm/oplib.h> | |
22 | #include <asm/hypervisor.h> | |
e87dc350 | 23 | #include <asm/prom.h> |
8f6a93a1 DM |
24 | |
25 | #include "pci_impl.h" | |
26 | #include "iommu_common.h" | |
27 | ||
bade5622 DM |
28 | #include "pci_sun4v.h" |
29 | ||
7c8f486a | 30 | #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) |
18397944 | 31 | |
6a32fd4d DM |
/* Per-cpu state used to batch IOMMU mapping requests so they can be
 * handed to the hypervisor in large chunks (see pci_iommu_batch_*()
 * below) instead of one page at a time.
 */
struct pci_iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for.  */
	unsigned long	prot;		/* IOMMU page protections  */
	unsigned long	entry;		/* Index into IOTSB.  */
	u64		*pglist;	/* List of physical pages  */
	unsigned long	npages;		/* Number of pages in list.  */
};
39 | ||
6a32fd4d DM |
40 | static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch); |
41 | ||
/* Begin a new mapping batch for PDEV with protections PROT starting
 * at IOTSB index ENTRY.  Pages are then accumulated with
 * pci_iommu_batch_add() and pushed out by pci_iommu_batch_end().
 *
 * Interrupts must be disabled, since the batch state is per-cpu.
 */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	p->pdev = pdev;
	p->prot = prot;
	p->entry = entry;
	p->npages = 0;
}
52 | ||
/* Push the accumulated page list to the hypervisor, establishing the
 * IOMMU mappings.  Returns 0 on success, -1 on failure.
 *
 * Interrupts must be disabled (per-cpu batch state).
 */
static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->pdev->dev.archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	/* The hypervisor may map fewer pages than asked for in one
	 * call, so keep retrying until the whole list is consumed.
	 */
	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	/* Batch drained; remember where the next batch continues.  */
	p->entry = entry;
	p->npages = 0;

	return 0;
}
88 | ||
/* Append physical page PHYS_PAGE to the current per-cpu batch,
 * flushing to the hypervisor automatically when the page list fills.
 * Returns 0 on success, negative on hypervisor mapping failure.
 *
 * Interrupts must be disabled.
 */
static inline long pci_iommu_batch_add(u64 phys_page)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return pci_iommu_batch_flush(p);

	return 0;
}
102 | ||
/* Finish a batch: flush out any pages still queued on this cpu.
 * Returns 0 on success, negative on hypervisor mapping failure.
 *
 * Interrupts must be disabled.
 */
static inline long pci_iommu_batch_end(void)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return pci_iommu_batch_flush(p);
}
18397944 DM |
112 | |
/* Allocate NPAGES contiguous entries from ARENA's bitmap and return
 * the starting index, or -1 if no run is available.  The search
 * starts at the rotating hint and wraps around once (two passes).
 *
 * Caller must hold the owning IOMMU's lock.
 */
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			/* First pass exhausted; restart from index 0
			 * and only scan up to the original hint.
			 */
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	/* find_next_zero_bit() only guarantees bit N is clear; verify
	 * the rest of the run, resuming the search past any set bit.
	 */
	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}
151 | ||
152 | static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) | |
153 | { | |
154 | unsigned long i; | |
155 | ||
156 | for (i = base; i < (base + npages); i++) | |
157 | __clear_bit(i, arena->map); | |
158 | } | |
159 | ||
42f14237 | 160 | static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp) |
8f6a93a1 | 161 | { |
18397944 | 162 | struct pci_iommu *iommu; |
7c8f486a | 163 | unsigned long flags, order, first_page, npages, n; |
18397944 DM |
164 | void *ret; |
165 | long entry; | |
18397944 DM |
166 | |
167 | size = IO_PAGE_ALIGN(size); | |
168 | order = get_order(size); | |
6a32fd4d | 169 | if (unlikely(order >= MAX_ORDER)) |
18397944 DM |
170 | return NULL; |
171 | ||
172 | npages = size >> IO_PAGE_SHIFT; | |
18397944 | 173 | |
42f14237 | 174 | first_page = __get_free_pages(gfp, order); |
6a32fd4d | 175 | if (unlikely(first_page == 0UL)) |
18397944 | 176 | return NULL; |
e7a0453e | 177 | |
18397944 DM |
178 | memset((char *)first_page, 0, PAGE_SIZE << order); |
179 | ||
a2fb23af | 180 | iommu = pdev->dev.archdata.iommu; |
18397944 DM |
181 | |
182 | spin_lock_irqsave(&iommu->lock, flags); | |
183 | entry = pci_arena_alloc(&iommu->arena, npages); | |
184 | spin_unlock_irqrestore(&iommu->lock, flags); | |
185 | ||
6a32fd4d DM |
186 | if (unlikely(entry < 0L)) |
187 | goto arena_alloc_fail; | |
18397944 DM |
188 | |
189 | *dma_addrp = (iommu->page_table_map_base + | |
190 | (entry << IO_PAGE_SHIFT)); | |
191 | ret = (void *) first_page; | |
192 | first_page = __pa(first_page); | |
193 | ||
6a32fd4d | 194 | local_irq_save(flags); |
18397944 | 195 | |
6a32fd4d DM |
196 | pci_iommu_batch_start(pdev, |
197 | (HV_PCI_MAP_ATTR_READ | | |
198 | HV_PCI_MAP_ATTR_WRITE), | |
199 | entry); | |
18397944 | 200 | |
6a32fd4d DM |
201 | for (n = 0; n < npages; n++) { |
202 | long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE)); | |
203 | if (unlikely(err < 0L)) | |
204 | goto iommu_map_fail; | |
205 | } | |
18397944 | 206 | |
6a32fd4d DM |
207 | if (unlikely(pci_iommu_batch_end() < 0L)) |
208 | goto iommu_map_fail; | |
18397944 | 209 | |
6a32fd4d | 210 | local_irq_restore(flags); |
18397944 DM |
211 | |
212 | return ret; | |
6a32fd4d DM |
213 | |
214 | iommu_map_fail: | |
215 | /* Interrupts are disabled. */ | |
216 | spin_lock(&iommu->lock); | |
217 | pci_arena_free(&iommu->arena, entry, npages); | |
218 | spin_unlock_irqrestore(&iommu->lock, flags); | |
219 | ||
220 | arena_alloc_fail: | |
221 | free_pages(first_page, order); | |
222 | return NULL; | |
8f6a93a1 DM |
223 | } |
224 | ||
/* Free consistent DMA memory previously obtained from
 * pci_4v_alloc_consistent(): release the arena entries, demap them
 * in the hypervisor, then free the backing pages.
 */
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = pdev->dev.archdata.iommu;
	pbm = pdev->dev.archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	/* The hypervisor may demap fewer entries than requested per
	 * call; loop until everything is torn down.
	 */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	/* NOTE(review): guard presumably protects against a bogus
	 * order computed from a corrupted size -- confirm intent.
	 */
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
257 | ||
/* Map a single CPU buffer [PTR, PTR+SZ) for streaming DMA in the
 * given DIRECTION.  Returns the bus address, or PCI_DMA_ERROR_CODE
 * on failure.
 */
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = pdev->dev.archdata.iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Number of IO pages the (possibly unaligned) buffer spans.  */
	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	/* Preserve the sub-page offset in the returned bus address.  */
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled. */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	/* Releases the lock and re-enables interrupts saved by
	 * local_irq_save() above.
	 */
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}
320 | ||
/* Tear down a streaming DMA mapping made by pci_4v_map_single():
 * free the arena entries and demap them in the hypervisor.
 */
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pci_pbm_info *pbm;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = pdev->dev.archdata.iommu;
	pbm = pdev->dev.archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	/* Hypervisor may demap fewer entries than requested per call.  */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
359 | ||
/* Physical address of the memory described by scatterlist entry SG.  */
#define SG_ENT_PHYS_ADDRESS(SG) \
	(__pa(page_address((SG)->page)) + (SG)->offset)
362 | ||
6a32fd4d | 363 | static inline long fill_sg(long entry, struct pci_dev *pdev, |
18397944 DM |
364 | struct scatterlist *sg, |
365 | int nused, int nelems, unsigned long prot) | |
366 | { | |
367 | struct scatterlist *dma_sg = sg; | |
368 | struct scatterlist *sg_end = sg + nelems; | |
6a32fd4d DM |
369 | unsigned long flags; |
370 | int i; | |
371 | ||
372 | local_irq_save(flags); | |
373 | ||
374 | pci_iommu_batch_start(pdev, prot, entry); | |
18397944 | 375 | |
18397944 DM |
376 | for (i = 0; i < nused; i++) { |
377 | unsigned long pteval = ~0UL; | |
378 | u32 dma_npages; | |
379 | ||
380 | dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) + | |
381 | dma_sg->dma_length + | |
382 | ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT; | |
383 | do { | |
384 | unsigned long offset; | |
385 | signed int len; | |
386 | ||
387 | /* If we are here, we know we have at least one | |
388 | * more page to map. So walk forward until we | |
389 | * hit a page crossing, and begin creating new | |
390 | * mappings from that spot. | |
391 | */ | |
392 | for (;;) { | |
393 | unsigned long tmp; | |
394 | ||
395 | tmp = SG_ENT_PHYS_ADDRESS(sg); | |
396 | len = sg->length; | |
397 | if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) { | |
398 | pteval = tmp & IO_PAGE_MASK; | |
399 | offset = tmp & (IO_PAGE_SIZE - 1UL); | |
400 | break; | |
401 | } | |
402 | if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) { | |
403 | pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK; | |
404 | offset = 0UL; | |
405 | len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL))); | |
406 | break; | |
407 | } | |
408 | sg++; | |
409 | } | |
410 | ||
411 | pteval = (pteval & IOPTE_PAGE); | |
412 | while (len > 0) { | |
6a32fd4d DM |
413 | long err; |
414 | ||
415 | err = pci_iommu_batch_add(pteval); | |
416 | if (unlikely(err < 0L)) | |
417 | goto iommu_map_failed; | |
418 | ||
18397944 DM |
419 | pteval += IO_PAGE_SIZE; |
420 | len -= (IO_PAGE_SIZE - offset); | |
421 | offset = 0; | |
422 | dma_npages--; | |
423 | } | |
424 | ||
425 | pteval = (pteval & IOPTE_PAGE) + len; | |
426 | sg++; | |
427 | ||
428 | /* Skip over any tail mappings we've fully mapped, | |
429 | * adjusting pteval along the way. Stop when we | |
430 | * detect a page crossing event. | |
431 | */ | |
432 | while (sg < sg_end && | |
433 | (pteval << (64 - IO_PAGE_SHIFT)) != 0UL && | |
434 | (pteval == SG_ENT_PHYS_ADDRESS(sg)) && | |
435 | ((pteval ^ | |
436 | (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) { | |
437 | pteval += sg->length; | |
438 | sg++; | |
439 | } | |
440 | if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL) | |
441 | pteval = ~0UL; | |
442 | } while (dma_npages != 0); | |
443 | dma_sg++; | |
444 | } | |
445 | ||
6a32fd4d DM |
446 | if (unlikely(pci_iommu_batch_end() < 0L)) |
447 | goto iommu_map_failed; | |
18397944 | 448 | |
6a32fd4d DM |
449 | local_irq_restore(flags); |
450 | return 0; | |
18397944 | 451 | |
6a32fd4d DM |
452 | iommu_map_failed: |
453 | local_irq_restore(flags); | |
454 | return -1L; | |
8f6a93a1 DM |
455 | } |
456 | ||
/* Map a scatterlist of NELEMS entries for streaming DMA.  Returns
 * the number of coalesced DMA segments actually used, or 0 on
 * failure.
 */
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists.  */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = pdev->dev.archdata.iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list.  */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary.  */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses.  prepare_sg() left
	 * segment-relative offsets in dma_address; rebase them onto
	 * the allocated DVMA window.
	 */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings.  */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
531 | ||
/* Tear down a scatterlist mapping made by pci_4v_map_sg().  */
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pci_pbm_info *pbm;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	/* NOTE(review): unlike pci_4v_unmap_single() this only warns
	 * and still performs the unmap on PCI_DMA_NONE -- confirm
	 * whether that is intentional.
	 */
	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = pdev->dev.archdata.iommu;
	pbm = pdev->dev.archdata.host_controller;
	devhandle = pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	/* Find the last used DMA segment (dma_length == 0 terminates
	 * the used portion) to compute the total mapped span.
	 */
	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	/* Hypervisor may demap fewer entries than requested per call.  */
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
575 | ||
/* On sun4v the hypervisor keeps DMA coherent, so syncs are no-ops.  */
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}
580 | ||
/* On sun4v the hypervisor keeps DMA coherent, so syncs are no-ops.  */
static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}
585 | ||
c6e87566 | 586 | const struct pci_iommu_ops pci_sun4v_iommu_ops = { |
8f6a93a1 DM |
587 | .alloc_consistent = pci_4v_alloc_consistent, |
588 | .free_consistent = pci_4v_free_consistent, | |
589 | .map_single = pci_4v_map_single, | |
590 | .unmap_single = pci_4v_unmap_single, | |
591 | .map_sg = pci_4v_map_sg, | |
592 | .unmap_sg = pci_4v_unmap_sg, | |
593 | .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu, | |
594 | .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu, | |
595 | }; | |
596 | ||
46b30493 DM |
597 | static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func) |
598 | { | |
059833eb DM |
599 | if (bus < pbm->pci_first_busno || |
600 | bus > pbm->pci_last_busno) | |
601 | return 1; | |
a2fb23af | 602 | return 0; |
059833eb DM |
603 | } |
604 | ||
bade5622 DM |
/* PCI config space read, routed through the sun4v hypervisor.
 * Out-of-range accesses read back as all-ones, per PCI convention.
 */
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	/* Accesses to the (virtual) host bridge are emulated.  */
	if (bus_dev == pbm->pci_bus && devfn == 0x00)
		return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
						    size, value);
	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	/* Mask the result down to the access width.  */
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	};

	return PCIBIOS_SUCCESSFUL;
}
645 | ||
/* PCI config space write, routed through the sun4v hypervisor.
 * Out-of-range accesses are silently dropped.
 */
static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	/* Accesses to the (virtual) host bridge are emulated.  */
	if (bus_dev == pbm->pci_bus && devfn == 0x00)
		return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
						     size, value);
	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}
673 | ||
/* Config space accessors handed to the generic PCI layer.  */
static struct pci_ops pci_sun4v_ops = {
	.read =		pci_sun4v_read_pci_cfg,
	.write =	pci_sun4v_write_pci_cfg,
};
678 | ||
679 | ||
c2609267 DM |
/* Probe PBM's root bus and record the resulting pci_bus.  */
static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	pbm->pci_bus = pci_scan_one_pbm(pbm);
}
685 | ||
bade5622 DM |
/* Scan both PBMs of controller P (where present), noting 66MHz
 * capability from the firmware device tree before probing.
 */
static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	struct property *prop;
	struct device_node *dp;

	if ((dp = p->pbm_A.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_A.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_A);
	}
	if ((dp = p->pbm_B.prom_node) != NULL) {
		prop = of_find_property(dp, "66mhz-capable", NULL);
		p->pbm_B.is_66mhz_capable = (prop != NULL);

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}
706 | ||
e7a0453e DM |
/* Walk the IOTSB and reserve any entries the firmware (OBP) left
 * mapped so the arena allocator will not reuse them; mappings whose
 * target RA lies in memory the kernel owns are stale and get
 * demapped instead.  Returns the number of imported entries.
 */
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}
734 | ||
bade5622 DM |
735 | static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) |
736 | { | |
18397944 | 737 | struct pci_iommu *iommu = pbm->iommu; |
e87dc350 | 738 | struct property *prop; |
18397944 DM |
739 | unsigned long num_tsb_entries, sz; |
740 | u32 vdma[2], dma_mask, dma_offset; | |
e87dc350 DM |
741 | int tsbsize; |
742 | ||
743 | prop = of_find_property(pbm->prom_node, "virtual-dma", NULL); | |
744 | if (prop) { | |
745 | u32 *val = prop->value; | |
18397944 | 746 | |
e87dc350 DM |
747 | vdma[0] = val[0]; |
748 | vdma[1] = val[1]; | |
749 | } else { | |
18397944 DM |
750 | /* No property, use default values. */ |
751 | vdma[0] = 0x80000000; | |
752 | vdma[1] = 0x80000000; | |
753 | } | |
754 | ||
755 | dma_mask = vdma[0]; | |
756 | switch (vdma[1]) { | |
757 | case 0x20000000: | |
758 | dma_mask |= 0x1fffffff; | |
759 | tsbsize = 64; | |
760 | break; | |
761 | ||
762 | case 0x40000000: | |
763 | dma_mask |= 0x3fffffff; | |
764 | tsbsize = 128; | |
765 | break; | |
766 | ||
767 | case 0x80000000: | |
768 | dma_mask |= 0x7fffffff; | |
e7a0453e | 769 | tsbsize = 256; |
18397944 DM |
770 | break; |
771 | ||
772 | default: | |
773 | prom_printf("PCI-SUN4V: strange virtual-dma size.\n"); | |
774 | prom_halt(); | |
775 | }; | |
776 | ||
e7a0453e DM |
777 | tsbsize *= (8 * 1024); |
778 | ||
18397944 DM |
779 | num_tsb_entries = tsbsize / sizeof(iopte_t); |
780 | ||
781 | dma_offset = vdma[0]; | |
782 | ||
783 | /* Setup initial software IOMMU state. */ | |
784 | spin_lock_init(&iommu->lock); | |
785 | iommu->ctx_lowest_free = 1; | |
786 | iommu->page_table_map_base = dma_offset; | |
787 | iommu->dma_addr_mask = dma_mask; | |
788 | ||
789 | /* Allocate and initialize the free area map. */ | |
790 | sz = num_tsb_entries / 8; | |
791 | sz = (sz + 7UL) & ~7UL; | |
982c2064 | 792 | iommu->arena.map = kzalloc(sz, GFP_KERNEL); |
18397944 DM |
793 | if (!iommu->arena.map) { |
794 | prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); | |
795 | prom_halt(); | |
796 | } | |
18397944 DM |
797 | iommu->arena.limit = num_tsb_entries; |
798 | ||
e7a0453e | 799 | sz = probe_existing_entries(pbm, iommu); |
c2a5a46b DM |
800 | if (sz) |
801 | printk("%s: Imported %lu TSB entries from OBP\n", | |
802 | pbm->name, sz); | |
bade5622 DM |
803 | } |
804 | ||
10804828 DM |
805 | static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm) |
806 | { | |
e87dc350 DM |
807 | struct property *prop; |
808 | unsigned int *busrange; | |
809 | ||
810 | prop = of_find_property(pbm->prom_node, "bus-range", NULL); | |
811 | ||
812 | busrange = prop->value; | |
10804828 DM |
813 | |
814 | pbm->pci_first_busno = busrange[0]; | |
815 | pbm->pci_last_busno = busrange[1]; | |
816 | ||
817 | } | |
818 | ||
35a17eb6 DM |
#ifdef CONFIG_PCI_MSI
/* Hardware layout of one sun4v MSI event queue entry.  */
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};
863 | ||
/* For now this just runs as a pre-handler for the real interrupt handler.
 * So we just walk through the queue and ACK all the entries, update the
 * head pointer, and return.
 *
 * In the longer term it would be nice to do something more integrated
 * wherein we can pass in some of this MSI info to the drivers.  This
 * would be most useful for PCIe fabric error messages, although we could
 * invoke those directly from the loop here in order to pass the info around.
 */
static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2)
{
	struct pci_pbm_info *pbm = data1;
	struct pci_sun4v_msiq_entry *base, *ep;
	unsigned long msiqid, orig_head, head, type, err;

	msiqid = (unsigned long) data2;

	/* Sentinel so the error path can show the head was never read.  */
	head = 0xdeadbeef;
	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head);
	if (unlikely(err))
		goto hv_error_get;

	/* The hypervisor returns the head as a byte offset.  */
	if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))))
		goto bad_offset;

	head /= sizeof(struct pci_sun4v_msiq_entry);
	orig_head = head;
	/* Locate this queue within the contiguous block of queues.  */
	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				   (pbm->msiq_ent_count *
				    sizeof(struct pci_sun4v_msiq_entry))));
	ep = &base[head];
	while ((ep->version_type & MSIQ_TYPE_MASK) != 0) {
		type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
		if (unlikely(type != MSIQ_TYPE_MSI32 &&
			     type != MSIQ_TYPE_MSI64))
			goto bad_type;

		pci_sun4v_msi_setstate(pbm->devhandle,
				       ep->msi_data /* msi_num */,
				       HV_MSISTATE_IDLE);

		/* Clear the entry.  */
		ep->version_type &= ~MSIQ_TYPE_MASK;

		/* Go to next entry in ring.  */
		head++;
		if (head >= pbm->msiq_ent_count)
			head = 0;
		ep = &base[head];
	}

	if (likely(head != orig_head)) {
		/* ACK entries by updating head pointer (back in byte
		 * offset units).
		 */
		head *= sizeof(struct pci_sun4v_msiq_entry);
		err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
		if (unlikely(err))
			goto hv_error_set;
	}
	return;

hv_error_set:
	printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err);
	goto hv_error_cont;

hv_error_get:
	printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err);

hv_error_cont:
	printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n",
	       pbm->devhandle, msiqid, head);
	return;

bad_offset:
	printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n",
	       head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry));
	return;

bad_type:
	printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type);
	return;
}
945 | ||
946 | static int msi_bitmap_alloc(struct pci_pbm_info *pbm) | |
947 | { | |
948 | unsigned long size, bits_per_ulong; | |
949 | ||
950 | bits_per_ulong = sizeof(unsigned long) * 8; | |
951 | size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1); | |
952 | size /= 8; | |
953 | BUG_ON(size % sizeof(unsigned long)); | |
954 | ||
955 | pbm->msi_bitmap = kzalloc(size, GFP_KERNEL); | |
956 | if (!pbm->msi_bitmap) | |
957 | return -ENOMEM; | |
958 | ||
959 | return 0; | |
960 | } | |
961 | ||
962 | static void msi_bitmap_free(struct pci_pbm_info *pbm) | |
963 | { | |
964 | kfree(pbm->msi_bitmap); | |
965 | pbm->msi_bitmap = NULL; | |
966 | } | |
967 | ||
968 | static int msi_queue_alloc(struct pci_pbm_info *pbm) | |
969 | { | |
970 | unsigned long q_size, alloc_size, pages, order; | |
971 | int i; | |
972 | ||
973 | q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry); | |
974 | alloc_size = (pbm->msiq_num * q_size); | |
975 | order = get_order(alloc_size); | |
976 | pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order); | |
977 | if (pages == 0UL) { | |
978 | printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n", | |
979 | order); | |
980 | return -ENOMEM; | |
981 | } | |
982 | memset((char *)pages, 0, PAGE_SIZE << order); | |
983 | pbm->msi_queues = (void *) pages; | |
984 | ||
985 | for (i = 0; i < pbm->msiq_num; i++) { | |
986 | unsigned long err, base = __pa(pages + (i * q_size)); | |
987 | unsigned long ret1, ret2; | |
988 | ||
989 | err = pci_sun4v_msiq_conf(pbm->devhandle, | |
990 | pbm->msiq_first + i, | |
991 | base, pbm->msiq_ent_count); | |
992 | if (err) { | |
993 | printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n", | |
994 | err); | |
995 | goto h_error; | |
996 | } | |
997 | ||
998 | err = pci_sun4v_msiq_info(pbm->devhandle, | |
999 | pbm->msiq_first + i, | |
1000 | &ret1, &ret2); | |
1001 | if (err) { | |
1002 | printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n", | |
1003 | err); | |
1004 | goto h_error; | |
1005 | } | |
1006 | if (ret1 != base || ret2 != pbm->msiq_ent_count) { | |
1007 | printk(KERN_ERR "MSI: Bogus qconf " | |
1008 | "expected[%lx:%x] got[%lx:%lx]\n", | |
1009 | base, pbm->msiq_ent_count, | |
1010 | ret1, ret2); | |
1011 | goto h_error; | |
1012 | } | |
1013 | } | |
1014 | ||
1015 | return 0; | |
1016 | ||
1017 | h_error: | |
1018 | free_pages(pages, order); | |
1019 | return -EINVAL; | |
1020 | } | |
1021 | ||
1022 | static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) | |
1023 | { | |
6a23acf3 | 1024 | const u32 *val; |
35a17eb6 DM |
1025 | int len; |
1026 | ||
1027 | val = of_get_property(pbm->prom_node, "#msi-eqs", &len); | |
1028 | if (!val || len != 4) | |
1029 | goto no_msi; | |
1030 | pbm->msiq_num = *val; | |
1031 | if (pbm->msiq_num) { | |
6a23acf3 | 1032 | const struct msiq_prop { |
35a17eb6 DM |
1033 | u32 first_msiq; |
1034 | u32 num_msiq; | |
1035 | u32 first_devino; | |
1036 | } *mqp; | |
6a23acf3 | 1037 | const struct msi_range_prop { |
35a17eb6 DM |
1038 | u32 first_msi; |
1039 | u32 num_msi; | |
1040 | } *mrng; | |
6a23acf3 | 1041 | const struct addr_range_prop { |
35a17eb6 DM |
1042 | u32 msi32_high; |
1043 | u32 msi32_low; | |
1044 | u32 msi32_len; | |
1045 | u32 msi64_high; | |
1046 | u32 msi64_low; | |
1047 | u32 msi64_len; | |
1048 | } *arng; | |
1049 | ||
1050 | val = of_get_property(pbm->prom_node, "msi-eq-size", &len); | |
1051 | if (!val || len != 4) | |
1052 | goto no_msi; | |
1053 | ||
1054 | pbm->msiq_ent_count = *val; | |
1055 | ||
1056 | mqp = of_get_property(pbm->prom_node, | |
1057 | "msi-eq-to-devino", &len); | |
1058 | if (!mqp || len != sizeof(struct msiq_prop)) | |
1059 | goto no_msi; | |
1060 | ||
1061 | pbm->msiq_first = mqp->first_msiq; | |
1062 | pbm->msiq_first_devino = mqp->first_devino; | |
1063 | ||
1064 | val = of_get_property(pbm->prom_node, "#msi", &len); | |
1065 | if (!val || len != 4) | |
1066 | goto no_msi; | |
1067 | pbm->msi_num = *val; | |
1068 | ||
1069 | mrng = of_get_property(pbm->prom_node, "msi-ranges", &len); | |
1070 | if (!mrng || len != sizeof(struct msi_range_prop)) | |
1071 | goto no_msi; | |
1072 | pbm->msi_first = mrng->first_msi; | |
1073 | ||
1074 | val = of_get_property(pbm->prom_node, "msi-data-mask", &len); | |
1075 | if (!val || len != 4) | |
1076 | goto no_msi; | |
1077 | pbm->msi_data_mask = *val; | |
1078 | ||
1079 | val = of_get_property(pbm->prom_node, "msix-data-width", &len); | |
1080 | if (!val || len != 4) | |
1081 | goto no_msi; | |
1082 | pbm->msix_data_width = *val; | |
1083 | ||
1084 | arng = of_get_property(pbm->prom_node, "msi-address-ranges", | |
1085 | &len); | |
1086 | if (!arng || len != sizeof(struct addr_range_prop)) | |
1087 | goto no_msi; | |
1088 | pbm->msi32_start = ((u64)arng->msi32_high << 32) | | |
1089 | (u64) arng->msi32_low; | |
1090 | pbm->msi64_start = ((u64)arng->msi64_high << 32) | | |
1091 | (u64) arng->msi64_low; | |
1092 | pbm->msi32_len = arng->msi32_len; | |
1093 | pbm->msi64_len = arng->msi64_len; | |
1094 | ||
1095 | if (msi_bitmap_alloc(pbm)) | |
1096 | goto no_msi; | |
1097 | ||
1098 | if (msi_queue_alloc(pbm)) { | |
1099 | msi_bitmap_free(pbm); | |
1100 | goto no_msi; | |
1101 | } | |
1102 | ||
1103 | printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] " | |
1104 | "devino[0x%x]\n", | |
1105 | pbm->name, | |
1106 | pbm->msiq_first, pbm->msiq_num, | |
1107 | pbm->msiq_ent_count, | |
1108 | pbm->msiq_first_devino); | |
1109 | printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] " | |
1110 | "width[%u]\n", | |
1111 | pbm->name, | |
1112 | pbm->msi_first, pbm->msi_num, pbm->msi_data_mask, | |
1113 | pbm->msix_data_width); | |
1114 | printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] " | |
1115 | "addr64[0x%lx:0x%x]\n", | |
1116 | pbm->name, | |
1117 | pbm->msi32_start, pbm->msi32_len, | |
1118 | pbm->msi64_start, pbm->msi64_len); | |
1119 | printk(KERN_INFO "%s: MSI queues at RA [%p]\n", | |
1120 | pbm->name, | |
1121 | pbm->msi_queues); | |
1122 | } | |
1123 | ||
1124 | return; | |
1125 | ||
1126 | no_msi: | |
1127 | pbm->msiq_num = 0; | |
1128 | printk(KERN_INFO "%s: No MSI support.\n", pbm->name); | |
1129 | } | |
1130 | ||
1131 | static int alloc_msi(struct pci_pbm_info *pbm) | |
1132 | { | |
1133 | int i; | |
1134 | ||
1135 | for (i = 0; i < pbm->msi_num; i++) { | |
1136 | if (!test_and_set_bit(i, pbm->msi_bitmap)) | |
1137 | return i + pbm->msi_first; | |
1138 | } | |
1139 | ||
1140 | return -ENOENT; | |
1141 | } | |
1142 | ||
1143 | static void free_msi(struct pci_pbm_info *pbm, int msi_num) | |
1144 | { | |
1145 | msi_num -= pbm->msi_first; | |
1146 | clear_bit(msi_num, pbm->msi_bitmap); | |
1147 | } | |
1148 | ||
1149 | static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p, | |
1150 | struct pci_dev *pdev, | |
1151 | struct msi_desc *entry) | |
1152 | { | |
a2fb23af | 1153 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; |
35a17eb6 DM |
1154 | unsigned long devino, msiqid; |
1155 | struct msi_msg msg; | |
1156 | int msi_num, err; | |
1157 | ||
1158 | *virt_irq_p = 0; | |
1159 | ||
1160 | msi_num = alloc_msi(pbm); | |
1161 | if (msi_num < 0) | |
1162 | return msi_num; | |
1163 | ||
1164 | devino = sun4v_build_msi(pbm->devhandle, virt_irq_p, | |
1165 | pbm->msiq_first_devino, | |
1166 | (pbm->msiq_first_devino + | |
1167 | pbm->msiq_num)); | |
1168 | err = -ENOMEM; | |
1169 | if (!devino) | |
1170 | goto out_err; | |
1171 | ||
1172 | set_irq_msi(*virt_irq_p, entry); | |
1173 | ||
1174 | msiqid = ((devino - pbm->msiq_first_devino) + | |
1175 | pbm->msiq_first); | |
1176 | ||
1177 | err = -EINVAL; | |
1178 | if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) | |
1179 | if (err) | |
1180 | goto out_err; | |
1181 | ||
1182 | if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) | |
1183 | goto out_err; | |
1184 | ||
1185 | if (pci_sun4v_msi_setmsiq(pbm->devhandle, | |
1186 | msi_num, msiqid, | |
1187 | (entry->msi_attrib.is_64 ? | |
1188 | HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32))) | |
1189 | goto out_err; | |
1190 | ||
1191 | if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE)) | |
1192 | goto out_err; | |
1193 | ||
1194 | if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID)) | |
1195 | goto out_err; | |
1196 | ||
a2fb23af | 1197 | pdev->dev.archdata.msi_num = msi_num; |
35a17eb6 DM |
1198 | |
1199 | if (entry->msi_attrib.is_64) { | |
1200 | msg.address_hi = pbm->msi64_start >> 32; | |
1201 | msg.address_lo = pbm->msi64_start & 0xffffffff; | |
1202 | } else { | |
1203 | msg.address_hi = 0; | |
1204 | msg.address_lo = pbm->msi32_start; | |
1205 | } | |
1206 | msg.data = msi_num; | |
1207 | write_msi_msg(*virt_irq_p, &msg); | |
1208 | ||
1209 | irq_install_pre_handler(*virt_irq_p, | |
1210 | pci_sun4v_msi_prehandler, | |
1211 | pbm, (void *) msiqid); | |
1212 | ||
1213 | return 0; | |
1214 | ||
1215 | out_err: | |
1216 | free_msi(pbm, msi_num); | |
1217 | sun4v_destroy_msi(*virt_irq_p); | |
1218 | *virt_irq_p = 0; | |
1219 | return err; | |
1220 | ||
1221 | } | |
1222 | ||
1223 | static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq, | |
1224 | struct pci_dev *pdev) | |
1225 | { | |
a2fb23af | 1226 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; |
35a17eb6 DM |
1227 | unsigned long msiqid, err; |
1228 | unsigned int msi_num; | |
1229 | ||
a2fb23af | 1230 | msi_num = pdev->dev.archdata.msi_num; |
35a17eb6 DM |
1231 | err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid); |
1232 | if (err) { | |
1233 | printk(KERN_ERR "%s: getmsiq gives error %lu\n", | |
1234 | pbm->name, err); | |
1235 | return; | |
1236 | } | |
1237 | ||
1238 | pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID); | |
1239 | pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID); | |
1240 | ||
1241 | free_msi(pbm, msi_num); | |
1242 | ||
1243 | /* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ | |
1244 | * allocation. | |
1245 | */ | |
1246 | sun4v_destroy_msi(virt_irq); | |
1247 | } | |
#else /* CONFIG_PCI_MSI */
/* MSI support compiled out: empty init hook so pci_sun4v_pbm_init()
 * needs no #ifdef at its call site.
 */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
1253 | ||
e87dc350 | 1254 | static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle) |
bade5622 DM |
1255 | { |
1256 | struct pci_pbm_info *pbm; | |
bade5622 | 1257 | |
3833789b DM |
1258 | if (devhandle & 0x40) |
1259 | pbm = &p->pbm_B; | |
1260 | else | |
1261 | pbm = &p->pbm_A; | |
bade5622 DM |
1262 | |
1263 | pbm->parent = p; | |
e87dc350 | 1264 | pbm->prom_node = dp; |
bade5622 | 1265 | |
3833789b | 1266 | pbm->devhandle = devhandle; |
bade5622 | 1267 | |
e87dc350 | 1268 | pbm->name = dp->full_name; |
bade5622 | 1269 | |
e87dc350 | 1270 | printk("%s: SUN4V PCI Bus Module\n", pbm->name); |
bade5622 | 1271 | |
9fd8b647 | 1272 | pci_determine_mem_io_space(pbm); |
bade5622 | 1273 | |
10804828 | 1274 | pci_sun4v_get_bus_range(pbm); |
bade5622 | 1275 | pci_sun4v_iommu_init(pbm); |
35a17eb6 | 1276 | pci_sun4v_msi_init(pbm); |
bade5622 DM |
1277 | } |
1278 | ||
e87dc350 | 1279 | void sun4v_pci_init(struct device_node *dp, char *model_name) |
8f6a93a1 | 1280 | { |
bade5622 DM |
1281 | struct pci_controller_info *p; |
1282 | struct pci_iommu *iommu; | |
e87dc350 DM |
1283 | struct property *prop; |
1284 | struct linux_prom64_registers *regs; | |
7c8f486a DM |
1285 | u32 devhandle; |
1286 | int i; | |
3833789b | 1287 | |
e87dc350 DM |
1288 | prop = of_find_property(dp, "reg", NULL); |
1289 | regs = prop->value; | |
1290 | ||
1291 | devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff; | |
3833789b DM |
1292 | |
1293 | for (p = pci_controller_root; p; p = p->next) { | |
1294 | struct pci_pbm_info *pbm; | |
1295 | ||
1296 | if (p->pbm_A.prom_node && p->pbm_B.prom_node) | |
1297 | continue; | |
1298 | ||
1299 | pbm = (p->pbm_A.prom_node ? | |
1300 | &p->pbm_A : | |
1301 | &p->pbm_B); | |
1302 | ||
0b522497 | 1303 | if (pbm->devhandle == (devhandle ^ 0x40)) { |
e87dc350 | 1304 | pci_sun4v_pbm_init(p, dp, devhandle); |
0b522497 DM |
1305 | return; |
1306 | } | |
3833789b | 1307 | } |
bade5622 | 1308 | |
a283a525 | 1309 | for_each_possible_cpu(i) { |
7c8f486a DM |
1310 | unsigned long page = get_zeroed_page(GFP_ATOMIC); |
1311 | ||
1312 | if (!page) | |
1313 | goto fatal_memory_error; | |
1314 | ||
6a32fd4d | 1315 | per_cpu(pci_iommu_batch, i).pglist = (u64 *) page; |
bade5622 | 1316 | } |
7c8f486a | 1317 | |
982c2064 | 1318 | p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); |
7c8f486a DM |
1319 | if (!p) |
1320 | goto fatal_memory_error; | |
1321 | ||
982c2064 | 1322 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
7c8f486a DM |
1323 | if (!iommu) |
1324 | goto fatal_memory_error; | |
1325 | ||
bade5622 DM |
1326 | p->pbm_A.iommu = iommu; |
1327 | ||
982c2064 | 1328 | iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); |
7c8f486a DM |
1329 | if (!iommu) |
1330 | goto fatal_memory_error; | |
1331 | ||
bade5622 DM |
1332 | p->pbm_B.iommu = iommu; |
1333 | ||
1334 | p->next = pci_controller_root; | |
1335 | pci_controller_root = p; | |
1336 | ||
1337 | p->index = pci_num_controllers++; | |
bade5622 DM |
1338 | |
1339 | p->scan_bus = pci_sun4v_scan_bus; | |
35a17eb6 DM |
1340 | #ifdef CONFIG_PCI_MSI |
1341 | p->setup_msi_irq = pci_sun4v_setup_msi_irq; | |
1342 | p->teardown_msi_irq = pci_sun4v_teardown_msi_irq; | |
1343 | #endif | |
bade5622 DM |
1344 | p->pci_ops = &pci_sun4v_ops; |
1345 | ||
1346 | /* Like PSYCHO and SCHIZO we have a 2GB aligned area | |
1347 | * for memory space. | |
1348 | */ | |
1349 | pci_memspace_mask = 0x7fffffffUL; | |
1350 | ||
e87dc350 | 1351 | pci_sun4v_pbm_init(p, dp, devhandle); |
7c8f486a DM |
1352 | return; |
1353 | ||
1354 | fatal_memory_error: | |
1355 | prom_printf("SUN4V_PCI: Fatal memory allocation error.\n"); | |
1356 | prom_halt(); | |
8f6a93a1 | 1357 | } |