/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);

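/* IOMMU mappings are built up in per-CPU batches: physical page
 * addresses are collected into the per-CPU page list and then handed
 * to the hypervisor in bulk via pci_sun4v_iommu_map(), which cuts
 * down on the number of hypervisor calls per DMA mapping operation.
 */
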
/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}

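/* The "arena" is a simple bitmap allocator over the IOTSB: each bit
 * represents one IO page table entry.  arena_alloc() does a two-pass
 * first-fit search for a run of 'npages' clear bits starting at the
 * rotating hint, and arena_free() clears the bits again.
 */
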
static long arena_alloc(struct iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void arena_free(struct iommu_arena *arena, unsigned long base,
		       unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

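/* Coherent allocations grab page-aligned memory, reserve a run of
 * IOTSB entries from the arena, and then map each physical page
 * through the batching interface with both the READ and WRITE
 * attributes set.
 */
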
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(gfp, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

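/* Streaming mappings: a single buffer is rounded out to IO page
 * boundaries, given a run of IOTSB entries, and mapped with the READ
 * attribute, plus WRITE as well unless the transfer is DMA_TO_DEVICE.
 */
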
static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

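/* fill_sg() walks the scatterlist one coalesced DMA segment at a
 * time, locating page-crossing points in the underlying buffers and
 * feeding each IO page's physical address to the batching code, so
 * that the whole segment ends up virtually contiguous in DMA space.
 */
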
static long fill_sg(long entry, struct device *dev,
		    struct scatterlist *sg,
		    int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg = sg_next(sg);
				nelems--;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg = sg_next(sg);
			nelems--;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (nelems &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg = sg_next(sg);
				nelems--;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg = sg_next(dma_sg);
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1L;
}

static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists.  */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4v_map_single(dev,
					  (page_address(sglist->page) +
					   sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list.  */
	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary.  */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp = sg_next(sgtmp);
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings.  */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, dev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

544 | static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, |
545 | int nelems, enum dma_data_direction direction) | |
8f6a93a1 | 546 | { |
a2fb23af | 547 | struct pci_pbm_info *pbm; |
16ce82d8 | 548 | struct iommu *iommu; |
7c8f486a | 549 | unsigned long flags, i, npages; |
2c941a20 | 550 | struct scatterlist *sg, *sgprv; |
18397944 | 551 | long entry; |
7c8f486a | 552 | u32 devhandle, bus_addr; |
18397944 | 553 | |
ad7ad57c | 554 | if (unlikely(direction == DMA_NONE)) { |
18397944 DM |
555 | if (printk_ratelimit()) |
556 | WARN_ON(1); | |
557 | } | |
558 | ||
ad7ad57c DM |
559 | iommu = dev->archdata.iommu; |
560 | pbm = dev->archdata.host_controller; | |
a2fb23af | 561 | devhandle = pbm->devhandle; |
18397944 DM |
562 | |
563 | bus_addr = sglist->dma_address & IO_PAGE_MASK; | |
2c941a20 JA |
564 | sgprv = NULL; |
565 | for_each_sg(sglist, sg, nelems, i) { | |
566 | if (sg->dma_length == 0) | |
18397944 | 567 | break; |
2c941a20 JA |
568 | |
569 | sgprv = sg; | |
570 | } | |
571 | ||
572 | npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) - | |
18397944 DM |
573 | bus_addr) >> IO_PAGE_SHIFT; |
574 | ||
575 | entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); | |
576 | ||
577 | spin_lock_irqsave(&iommu->lock, flags); | |
578 | ||
ad7ad57c | 579 | arena_free(&iommu->arena, entry, npages); |
18397944 DM |
580 | |
581 | do { | |
582 | unsigned long num; | |
583 | ||
584 | num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), | |
585 | npages); | |
586 | entry += num; | |
587 | npages -= num; | |
588 | } while (npages != 0); | |
589 | ||
590 | spin_unlock_irqrestore(&iommu->lock, flags); | |
8f6a93a1 DM |
591 | } |
592 | ||
ad7ad57c DM |
593 | static void dma_4v_sync_single_for_cpu(struct device *dev, |
594 | dma_addr_t bus_addr, size_t sz, | |
595 | enum dma_data_direction direction) | |
8f6a93a1 | 596 | { |
18397944 | 597 | /* Nothing to do... */ |
8f6a93a1 DM |
598 | } |
599 | ||
ad7ad57c DM |
600 | static void dma_4v_sync_sg_for_cpu(struct device *dev, |
601 | struct scatterlist *sglist, int nelems, | |
602 | enum dma_data_direction direction) | |
8f6a93a1 | 603 | { |
18397944 | 604 | /* Nothing to do... */ |
8f6a93a1 DM |
605 | } |
606 | ||
ad7ad57c DM |
607 | const struct dma_ops sun4v_dma_ops = { |
608 | .alloc_coherent = dma_4v_alloc_coherent, | |
609 | .free_coherent = dma_4v_free_coherent, | |
610 | .map_single = dma_4v_map_single, | |
611 | .unmap_single = dma_4v_unmap_single, | |
612 | .map_sg = dma_4v_map_sg, | |
613 | .unmap_sg = dma_4v_unmap_sg, | |
614 | .sync_single_for_cpu = dma_4v_sync_single_for_cpu, | |
615 | .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu, | |
8f6a93a1 DM |
616 | }; |
617 | ||
34768bc8 | 618 | static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm) |
bade5622 | 619 | { |
e87dc350 DM |
620 | struct property *prop; |
621 | struct device_node *dp; | |
622 | ||
34768bc8 DM |
623 | dp = pbm->prom_node; |
624 | prop = of_find_property(dp, "66mhz-capable", NULL); | |
625 | pbm->is_66mhz_capable = (prop != NULL); | |
626 | pbm->pci_bus = pci_scan_one_pbm(pbm); | |
c2609267 DM |
627 | |
628 | /* XXX register error interrupt handlers XXX */ | |
bade5622 DM |
629 | } |
630 | ||
e7a0453e | 631 | static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, |
16ce82d8 | 632 | struct iommu *iommu) |
18397944 | 633 | { |
9b3627f3 | 634 | struct iommu_arena *arena = &iommu->arena; |
e7a0453e | 635 | unsigned long i, cnt = 0; |
7c8f486a | 636 | u32 devhandle; |
18397944 DM |
637 | |
638 | devhandle = pbm->devhandle; | |
639 | for (i = 0; i < arena->limit; i++) { | |
640 | unsigned long ret, io_attrs, ra; | |
641 | ||
642 | ret = pci_sun4v_iommu_getmap(devhandle, | |
643 | HV_PCI_TSBID(0, i), | |
644 | &io_attrs, &ra); | |
e7a0453e | 645 | if (ret == HV_EOK) { |
c2a5a46b DM |
646 | if (page_in_phys_avail(ra)) { |
647 | pci_sun4v_iommu_demap(devhandle, | |
648 | HV_PCI_TSBID(0, i), 1); | |
649 | } else { | |
650 | cnt++; | |
651 | __set_bit(i, arena->map); | |
652 | } | |
e7a0453e | 653 | } |
18397944 | 654 | } |
e7a0453e DM |
655 | |
656 | return cnt; | |
18397944 DM |
657 | } |
658 | ||
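/* Per-PBM IOMMU setup: the virtual DMA range comes from the
 * "virtual-dma" OBP property (with a fallback default), the arena
 * bitmap is sized from it, and any translations already installed by
 * the firmware are either reclaimed or imported into the arena.
 */
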
static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 vdma[2], dma_mask, dma_offset;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
			    vdma[0], vdma[1]);
		prom_halt();
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map.  */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

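/* MSI event queue handling: each queue lives in a block of kernel
 * pages registered with the hypervisor.  The common sparc64 MSI layer
 * drives the callbacks below to read the queue head, pull MSI entries
 * off the ring, and advance the head pointer.
 */
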
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

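/* A SUN4V PCI controller node carries two PBMs; the devhandle taken
 * from the "reg" property selects which one a given OF node describes.
 * sun4v_pci_init() negotiates the PCI hypervisor API group once,
 * allocates the per-CPU page lists used for mapping batches, and then
 * sets up the PBM along with its IOMMU and MSI support.
 */
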
static void __init pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->scan_bus = pci_sun4v_scan_bus;
	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);
	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);
}

void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
	static int hvapi_negotiated = 0;
	struct pci_controller_info *p;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	if (!hvapi_negotiated++) {
		int err = sun4v_hvapi_register(HV_GRP_PCI,
					       vpci_major,
					       &vpci_minor);

		if (err) {
			prom_printf("SUN4V_PCI: Could not register hvapi, "
				    "err=%d\n", err);
			prom_halt();
		}
		printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	prop = of_find_property(dp, "reg", NULL);
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	pci_sun4v_pbm_init(p, dp, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}