/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;

static int zpci_refresh_global(struct zpci_dev *zdev)
{
        return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
                                  zdev->iommu_pages * PAGE_SIZE);
}
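
/*
 * Note: zpci_refresh_trans() drives the RPCIT (refresh PCI translations)
 * instruction; the device's function handle occupies the upper 32 bits of
 * the first operand, hence the "(u64) zdev->fh << 32" above. This helper
 * refreshes the whole aperture (start_dma, iommu_pages * PAGE_SIZE) at
 * once and is used after an iommu bitmap wrap-around.
 */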

unsigned long *dma_alloc_cpu_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
                *entry = ZPCI_TABLE_INVALID;
        return table;
}

static void dma_free_cpu_table(void *table)
{
        kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
                *entry = ZPCI_PTE_INVALID;
        return table;
}

static void dma_free_page_table(void *table)
{
        kmem_cache_free(dma_page_table_cache, table);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
        unsigned long *sto;

        if (reg_entry_isvalid(*entry))
                sto = get_rt_sto(*entry);
        else {
                sto = dma_alloc_cpu_table();
                if (!sto)
                        return NULL;

                set_rt_sto(entry, sto);
                validate_rt_entry(entry);
                entry_clr_protected(entry);
        }
        return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
        unsigned long *pto;

        if (reg_entry_isvalid(*entry))
                pto = get_st_pto(*entry);
        else {
                pto = dma_alloc_page_table();
                if (!pto)
                        return NULL;
                set_st_pto(entry, pto);
                validate_st_entry(entry);
                entry_clr_protected(entry);
        }
        return pto;
}

unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
        unsigned long *sto, *pto;
        unsigned int rtx, sx, px;

        rtx = calc_rtx(dma_addr);
        sto = dma_get_seg_table_origin(&rto[rtx]);
        if (!sto)
                return NULL;

        sx = calc_sx(dma_addr);
        pto = dma_get_page_table_origin(&sto[sx]);
        if (!pto)
                return NULL;

        px = calc_px(dma_addr);
        return &pto[px];
}
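
/*
 * The walk above decomposes a DMA address into three table indices,
 * region table (rtx), segment table (sx) and page table (px), and
 * allocates intermediate tables on demand. Assuming the usual layout
 * from asm/pci_dma.h (11-bit region and segment indices, 8-bit page
 * index, 4K pages), a 42-bit DMA address would split as:
 *
 *      rtx = (dma_addr >> 31) & 0x7ff; // bits 41..31
 *      sx  = (dma_addr >> 20) & 0x7ff; // bits 30..20
 *      px  = (dma_addr >> 12) & 0xff;  // bits 19..12
 *
 * The authoritative shifts and masks live in calc_rtx/calc_sx/calc_px.
 */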
116 | ||
66728eee | 117 | void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags) |
828b35f6 | 118 | { |
828b35f6 JG |
119 | if (flags & ZPCI_PTE_INVALID) { |
120 | invalidate_pt_entry(entry); | |
828b35f6 JG |
121 | } else { |
122 | set_pt_pfaa(entry, page_addr); | |
123 | validate_pt_entry(entry); | |
124 | } | |
125 | ||
126 | if (flags & ZPCI_TABLE_PROTECTED) | |
127 | entry_set_protected(entry); | |
128 | else | |
129 | entry_clr_protected(entry); | |
130 | } | |
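
/*
 * The callers in this file use the flags as follows: mapping passes
 * ZPCI_PTE_VALID, optionally ORed with ZPCI_TABLE_PROTECTED for
 * DMA_NONE/DMA_TO_DEVICE transfers (shielding the entry from device
 * writes), while unmapping passes ZPCI_PTE_INVALID to clear the entry.
 */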
131 | ||
132 | static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, | |
133 | dma_addr_t dma_addr, size_t size, int flags) | |
134 | { | |
135 | unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; | |
136 | u8 *page_addr = (u8 *) (pa & PAGE_MASK); | |
137 | dma_addr_t start_dma_addr = dma_addr; | |
138 | unsigned long irq_flags; | |
66728eee | 139 | unsigned long *entry; |
828b35f6 JG |
140 | int i, rc = 0; |
141 | ||
142 | if (!nr_pages) | |
143 | return -EINVAL; | |
144 | ||
145 | spin_lock_irqsave(&zdev->dma_table_lock, irq_flags); | |
66728eee SO |
146 | if (!zdev->dma_table) { |
147 | rc = -EINVAL; | |
828b35f6 | 148 | goto no_refresh; |
66728eee | 149 | } |
828b35f6 JG |
150 | |
151 | for (i = 0; i < nr_pages; i++) { | |
66728eee SO |
152 | entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); |
153 | if (!entry) { | |
154 | rc = -ENOMEM; | |
155 | goto undo_cpu_trans; | |
156 | } | |
157 | dma_update_cpu_trans(entry, page_addr, flags); | |
828b35f6 JG |
158 | page_addr += PAGE_SIZE; |
159 | dma_addr += PAGE_SIZE; | |
160 | } | |
161 | ||
162 | /* | |
c60d1ae4 GS |
163 | * With zdev->tlb_refresh == 0, rpcit is not required to establish new |
164 | * translations when previously invalid translation-table entries are | |
165 | * validated. With lazy unmap, it also is skipped for previously valid | |
166 | * entries, but a global rpcit is then required before any address can | |
167 | * be re-used, i.e. after each iommu bitmap wrap-around. | |
828b35f6 JG |
168 | */ |
169 | if (!zdev->tlb_refresh && | |
c60d1ae4 GS |
170 | (!s390_iommu_strict || |
171 | ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))) | |
828b35f6 | 172 | goto no_refresh; |
b2a9e87d | 173 | |
9389339f MS |
174 | rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, |
175 | nr_pages * PAGE_SIZE); | |
66728eee SO |
176 | undo_cpu_trans: |
177 | if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) { | |
178 | flags = ZPCI_PTE_INVALID; | |
179 | while (i-- > 0) { | |
180 | page_addr -= PAGE_SIZE; | |
181 | dma_addr -= PAGE_SIZE; | |
182 | entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); | |
183 | if (!entry) | |
184 | break; | |
185 | dma_update_cpu_trans(entry, page_addr, flags); | |
186 | } | |
187 | } | |
828b35f6 JG |
188 | |
189 | no_refresh: | |
190 | spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); | |
191 | return rc; | |
192 | } | |
193 | ||
8128f23c | 194 | void dma_free_seg_table(unsigned long entry) |
828b35f6 JG |
195 | { |
196 | unsigned long *sto = get_rt_sto(entry); | |
197 | int sx; | |
198 | ||
199 | for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++) | |
200 | if (reg_entry_isvalid(sto[sx])) | |
201 | dma_free_page_table(get_st_pto(sto[sx])); | |
202 | ||
203 | dma_free_cpu_table(sto); | |
204 | } | |
205 | ||
8128f23c | 206 | void dma_cleanup_tables(unsigned long *table) |
828b35f6 | 207 | { |
828b35f6 JG |
208 | int rtx; |
209 | ||
8128f23c | 210 | if (!table) |
828b35f6 JG |
211 | return; |
212 | ||
213 | for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++) | |
214 | if (reg_entry_isvalid(table[rtx])) | |
215 | dma_free_seg_table(table[rtx]); | |
216 | ||
217 | dma_free_cpu_table(table); | |
828b35f6 JG |
218 | } |
219 | ||
9a99649f | 220 | static unsigned long __dma_alloc_iommu(struct device *dev, |
5ec6d491 | 221 | unsigned long start, int size) |
828b35f6 | 222 | { |
9a99649f | 223 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
5ec6d491 | 224 | unsigned long boundary_size; |
828b35f6 | 225 | |
9a99649f | 226 | boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, |
5ec6d491 | 227 | PAGE_SIZE) >> PAGE_SHIFT; |
828b35f6 JG |
228 | return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages, |
229 | start, size, 0, boundary_size, 0); | |
230 | } | |
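
/*
 * Example, assuming the common default segment boundary of 4 GiB - 1
 * (i.e. no dma_parms override): boundary_size becomes
 * ALIGN(0xffffffff + 1, PAGE_SIZE) >> PAGE_SHIFT = 0x100000 pages, so
 * iommu_area_alloc() will never hand out a range that crosses a 4 GiB
 * boundary in DMA address space.
 */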
231 | ||
9a99649f | 232 | static unsigned long dma_alloc_iommu(struct device *dev, int size) |
828b35f6 | 233 | { |
9a99649f | 234 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
828b35f6 | 235 | unsigned long offset, flags; |
c60d1ae4 | 236 | int wrap = 0; |
828b35f6 JG |
237 | |
238 | spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags); | |
9a99649f | 239 | offset = __dma_alloc_iommu(dev, zdev->next_bit, size); |
c60d1ae4 GS |
240 | if (offset == -1) { |
241 | /* wrap-around */ | |
9a99649f | 242 | offset = __dma_alloc_iommu(dev, 0, size); |
c60d1ae4 GS |
243 | wrap = 1; |
244 | } | |
828b35f6 JG |
245 | |
246 | if (offset != -1) { | |
247 | zdev->next_bit = offset + size; | |
c60d1ae4 GS |
248 | if (!zdev->tlb_refresh && !s390_iommu_strict && wrap) |
249 | /* global flush after wrap-around with lazy unmap */ | |
250 | zpci_refresh_global(zdev); | |
828b35f6 JG |
251 | } |
252 | spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); | |
253 | return offset; | |
254 | } | |
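
/*
 * This is a next-fit allocator: the search starts at next_bit, where
 * the previous allocation ended, and only falls back to bit 0 once the
 * tail of the bitmap is exhausted. Combined with lazy unmap (see
 * dma_free_iommu() below), freed addresses are not handed out again
 * until after a wrap-around, which is what makes the single global
 * refresh at the wrap point sufficient.
 */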
255 | ||
9a99649f | 256 | static void dma_free_iommu(struct device *dev, unsigned long offset, int size) |
828b35f6 | 257 | { |
9a99649f | 258 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
828b35f6 JG |
259 | unsigned long flags; |
260 | ||
261 | spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags); | |
262 | if (!zdev->iommu_bitmap) | |
263 | goto out; | |
264 | bitmap_clear(zdev->iommu_bitmap, offset, size); | |
c60d1ae4 GS |
265 | /* |
266 | * Lazy flush for unmap: need to move next_bit to avoid address re-use | |
267 | * until wrap-around. | |
268 | */ | |
269 | if (!s390_iommu_strict && offset >= zdev->next_bit) | |
828b35f6 JG |
270 | zdev->next_bit = offset + size; |
271 | out: | |
272 | spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); | |
273 | } | |
274 | ||
52d43d81 SO |
275 | static inline void zpci_err_dma(unsigned long rc, unsigned long addr) |
276 | { | |
277 | struct { | |
278 | unsigned long rc; | |
279 | unsigned long addr; | |
280 | } __packed data = {rc, addr}; | |
281 | ||
282 | zpci_err_hex(&data, sizeof(data)); | |
283 | } | |
284 | ||
828b35f6 JG |
285 | static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, |
286 | unsigned long offset, size_t size, | |
287 | enum dma_data_direction direction, | |
288 | struct dma_attrs *attrs) | |
289 | { | |
198a5278 | 290 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
828b35f6 JG |
291 | unsigned long nr_pages, iommu_page_index; |
292 | unsigned long pa = page_to_phys(page) + offset; | |
293 | int flags = ZPCI_PTE_VALID; | |
294 | dma_addr_t dma_addr; | |
52d43d81 | 295 | int ret; |
828b35f6 | 296 | |
828b35f6 JG |
297 | /* This rounds up number of pages based on size and offset */ |
298 | nr_pages = iommu_num_pages(pa, size, PAGE_SIZE); | |
9a99649f | 299 | iommu_page_index = dma_alloc_iommu(dev, nr_pages); |
52d43d81 SO |
300 | if (iommu_page_index == -1) { |
301 | ret = -ENOSPC; | |
828b35f6 | 302 | goto out_err; |
52d43d81 | 303 | } |
828b35f6 JG |
304 | |
305 | /* Use rounded up size */ | |
306 | size = nr_pages * PAGE_SIZE; | |
307 | ||
308 | dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE; | |
52d43d81 SO |
309 | if (dma_addr + size > zdev->end_dma) { |
310 | ret = -ERANGE; | |
828b35f6 | 311 | goto out_free; |
52d43d81 | 312 | } |
828b35f6 JG |
313 | |
314 | if (direction == DMA_NONE || direction == DMA_TO_DEVICE) | |
315 | flags |= ZPCI_TABLE_PROTECTED; | |
316 | ||
52d43d81 SO |
317 | ret = dma_update_trans(zdev, pa, dma_addr, size, flags); |
318 | if (ret) | |
319 | goto out_free; | |
320 | ||
321 | atomic64_add(nr_pages, &zdev->mapped_pages); | |
322 | return dma_addr + (offset & ~PAGE_MASK); | |
828b35f6 JG |
323 | |
324 | out_free: | |
9a99649f | 325 | dma_free_iommu(dev, iommu_page_index, nr_pages); |
828b35f6 | 326 | out_err: |
1f1dcbd4 | 327 | zpci_err("map error:\n"); |
52d43d81 | 328 | zpci_err_dma(ret, pa); |
828b35f6 JG |
329 | return DMA_ERROR_CODE; |
330 | } | |
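
/*
 * The returned handle is built in two steps: the page-granular part,
 * zdev->start_dma + iommu_page_index * PAGE_SIZE, selects the slot in
 * the aperture, and the sub-page part, offset & ~PAGE_MASK, restores
 * the caller's offset within the first page. For example, mapping 100
 * bytes at page offset 0x234 occupies one iommu page and returns
 * <slot address> + 0x234.
 */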
331 | ||
332 | static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, | |
333 | size_t size, enum dma_data_direction direction, | |
334 | struct dma_attrs *attrs) | |
335 | { | |
198a5278 | 336 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
828b35f6 | 337 | unsigned long iommu_page_index; |
52d43d81 | 338 | int npages, ret; |
828b35f6 JG |
339 | |
340 | npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); | |
341 | dma_addr = dma_addr & PAGE_MASK; | |
52d43d81 SO |
342 | ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, |
343 | ZPCI_PTE_INVALID); | |
344 | if (ret) { | |
1f1dcbd4 | 345 | zpci_err("unmap error:\n"); |
52d43d81 SO |
346 | zpci_err_dma(ret, dma_addr); |
347 | return; | |
1f1dcbd4 | 348 | } |
828b35f6 | 349 | |
6001018a | 350 | atomic64_add(npages, &zdev->unmapped_pages); |
828b35f6 | 351 | iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT; |
9a99649f | 352 | dma_free_iommu(dev, iommu_page_index, npages); |
828b35f6 JG |
353 | } |
354 | ||
355 | static void *s390_dma_alloc(struct device *dev, size_t size, | |
356 | dma_addr_t *dma_handle, gfp_t flag, | |
357 | struct dma_attrs *attrs) | |
358 | { | |
198a5278 | 359 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
828b35f6 JG |
360 | struct page *page; |
361 | unsigned long pa; | |
362 | dma_addr_t map; | |
363 | ||
364 | size = PAGE_ALIGN(size); | |
365 | page = alloc_pages(flag, get_order(size)); | |
366 | if (!page) | |
367 | return NULL; | |
d0b08853 | 368 | |
828b35f6 JG |
369 | pa = page_to_phys(page); |
370 | memset((void *) pa, 0, size); | |
371 | ||
bdb97e91 | 372 | map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, NULL); |
828b35f6 JG |
373 | if (dma_mapping_error(dev, map)) { |
374 | free_pages(pa, get_order(size)); | |
375 | return NULL; | |
376 | } | |
377 | ||
6001018a | 378 | atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages); |
828b35f6 JG |
379 | if (dma_handle) |
380 | *dma_handle = map; | |
381 | return (void *) pa; | |
382 | } | |
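
/*
 * Note that coherent allocations are satisfied with plain alloc_pages()
 * memory pushed through the regular map path above; no uncached
 * remapping is done, which relies on the platform's DMA being
 * cache-coherent.
 */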
383 | ||
384 | static void s390_dma_free(struct device *dev, size_t size, | |
385 | void *pa, dma_addr_t dma_handle, | |
386 | struct dma_attrs *attrs) | |
387 | { | |
198a5278 | 388 | struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); |
f7038b7c SO |
389 | |
390 | size = PAGE_ALIGN(size); | |
6001018a | 391 | atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages); |
f7038b7c | 392 | s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); |
828b35f6 JG |
393 | free_pages((unsigned long) pa, get_order(size)); |
394 | } | |
395 | ||
396 | static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, | |
397 | int nr_elements, enum dma_data_direction dir, | |
398 | struct dma_attrs *attrs) | |
399 | { | |
400 | int mapped_elements = 0; | |
401 | struct scatterlist *s; | |
402 | int i; | |
403 | ||
404 | for_each_sg(sg, s, nr_elements, i) { | |
405 | struct page *page = sg_page(s); | |
406 | s->dma_address = s390_dma_map_pages(dev, page, s->offset, | |
407 | s->length, dir, NULL); | |
408 | if (!dma_mapping_error(dev, s->dma_address)) { | |
409 | s->dma_length = s->length; | |
410 | mapped_elements++; | |
411 | } else | |
412 | goto unmap; | |
413 | } | |
414 | out: | |
415 | return mapped_elements; | |
416 | ||
417 | unmap: | |
418 | for_each_sg(sg, s, mapped_elements, i) { | |
419 | if (s->dma_address) | |
420 | s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, | |
421 | dir, NULL); | |
422 | s->dma_address = 0; | |
423 | s->dma_length = 0; | |
424 | } | |
425 | mapped_elements = 0; | |
426 | goto out; | |
427 | } | |
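
/*
 * Each scatterlist element is mapped on its own, so a list of N
 * elements costs N translation updates and N iommu-bitmap allocations;
 * elements are not merged into one contiguous DMA range. On failure,
 * the unmap loop walks only the mapped_elements already processed and
 * rolls them back, so a partially mapped list is never returned.
 */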
428 | ||
429 | static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, | |
430 | int nr_elements, enum dma_data_direction dir, | |
431 | struct dma_attrs *attrs) | |
432 | { | |
433 | struct scatterlist *s; | |
434 | int i; | |
435 | ||
436 | for_each_sg(sg, s, nr_elements, i) { | |
437 | s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL); | |
438 | s->dma_address = 0; | |
439 | s->dma_length = 0; | |
440 | } | |
441 | } | |
442 | ||
443 | int zpci_dma_init_device(struct zpci_dev *zdev) | |
444 | { | |
828b35f6 JG |
445 | int rc; |
446 | ||
8128f23c GS |
447 | /* |
448 | * At this point, if the device is part of an IOMMU domain, this would | |
449 | * be a strong hint towards a bug in the IOMMU API (common) code and/or | |
450 | * simultaneous access via IOMMU and DMA API. So let's issue a warning. | |
451 | */ | |
452 | WARN_ON(zdev->s390_domain); | |
453 | ||
828b35f6 JG |
454 | spin_lock_init(&zdev->iommu_bitmap_lock); |
455 | spin_lock_init(&zdev->dma_table_lock); | |
456 | ||
457 | zdev->dma_table = dma_alloc_cpu_table(); | |
458 | if (!zdev->dma_table) { | |
459 | rc = -ENOMEM; | |
460 | goto out_clean; | |
461 | } | |
462 | ||
69eea95c GS |
463 | /* |
464 | * Restrict the iommu bitmap size to the minimum of the following: | |
465 | * - main memory size | |
466 | * - 3-level pagetable address limit minus start_dma offset | |
467 | * - DMA address range allowed by the hardware (clp query pci fn) | |
468 | * | |
469 | * Also set zdev->end_dma to the actual end address of the usable | |
470 | * range, instead of the theoretical maximum as reported by hardware. | |
471 | */ | |
472 | zdev->iommu_size = min3((u64) high_memory, | |
473 | ZPCI_TABLE_SIZE_RT - zdev->start_dma, | |
474 | zdev->end_dma - zdev->start_dma + 1); | |
475 | zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1; | |
828b35f6 | 476 | zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT; |
22459321 | 477 | zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); |
828b35f6 JG |
478 | if (!zdev->iommu_bitmap) { |
479 | rc = -ENOMEM; | |
480 | goto out_reg; | |
481 | } | |
482 | ||
69eea95c | 483 | rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, |
828b35f6 JG |
484 | (u64) zdev->dma_table); |
485 | if (rc) | |
486 | goto out_reg; | |
487 | return 0; | |
488 | ||
489 | out_reg: | |
490 | dma_free_cpu_table(zdev->dma_table); | |
491 | out_clean: | |
492 | return rc; | |
493 | } | |
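
/*
 * Worked example for the bitmap sizing, assuming an iommu_size of
 * 4 GiB after the min3() clamp: 4 GiB >> PAGE_SHIFT = 1048576 iommu
 * pages, tracked with one bit each, so vzalloc() receives
 * 1048576 / 8 = 131072 bytes (128 KiB) for the bitmap.
 */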
494 | ||
495 | void zpci_dma_exit_device(struct zpci_dev *zdev) | |
496 | { | |
8128f23c GS |
497 | /* |
498 | * At this point, if the device is part of an IOMMU domain, this would | |
499 | * be a strong hint towards a bug in the IOMMU API (common) code and/or | |
500 | * simultaneous access via IOMMU and DMA API. So let's issue a warning. | |
501 | */ | |
502 | WARN_ON(zdev->s390_domain); | |
503 | ||
828b35f6 | 504 | zpci_unregister_ioat(zdev, 0); |
8128f23c GS |
505 | dma_cleanup_tables(zdev->dma_table); |
506 | zdev->dma_table = NULL; | |
22459321 | 507 | vfree(zdev->iommu_bitmap); |
828b35f6 JG |
508 | zdev->iommu_bitmap = NULL; |
509 | zdev->next_bit = 0; | |
510 | } | |
511 | ||
512 | static int __init dma_alloc_cpu_table_caches(void) | |
513 | { | |
514 | dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables", | |
515 | ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN, | |
516 | 0, NULL); | |
517 | if (!dma_region_table_cache) | |
518 | return -ENOMEM; | |
519 | ||
520 | dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables", | |
521 | ZPCI_PT_SIZE, ZPCI_PT_ALIGN, | |
522 | 0, NULL); | |
523 | if (!dma_page_table_cache) { | |
524 | kmem_cache_destroy(dma_region_table_cache); | |
525 | return -ENOMEM; | |
526 | } | |
527 | return 0; | |
528 | } | |
529 | ||
530 | int __init zpci_dma_init(void) | |
531 | { | |
532 | return dma_alloc_cpu_table_caches(); | |
533 | } | |
534 | ||
535 | void zpci_dma_exit(void) | |
536 | { | |
537 | kmem_cache_destroy(dma_page_table_cache); | |
538 | kmem_cache_destroy(dma_region_table_cache); | |
539 | } | |
540 | ||
541 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | |
542 | ||
543 | static int __init dma_debug_do_init(void) | |
544 | { | |
545 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | |
546 | return 0; | |
547 | } | |
548 | fs_initcall(dma_debug_do_init); | |
549 | ||
e82becfc | 550 | struct dma_map_ops s390_pci_dma_ops = { |
828b35f6 JG |
551 | .alloc = s390_dma_alloc, |
552 | .free = s390_dma_free, | |
553 | .map_sg = s390_dma_map_sg, | |
554 | .unmap_sg = s390_dma_unmap_sg, | |
555 | .map_page = s390_dma_map_pages, | |
556 | .unmap_page = s390_dma_unmap_pages, | |
557 | /* if we support direct DMA this must be conditional */ | |
558 | .is_phys = 0, | |
559 | /* dma_supported is unconditionally true without a callback */ | |
560 | }; | |
e82becfc | 561 | EXPORT_SYMBOL_GPL(s390_pci_dma_ops); |
c60d1ae4 GS |
562 | |
563 | static int __init s390_iommu_setup(char *str) | |
564 | { | |
565 | if (!strncmp(str, "strict", 6)) | |
566 | s390_iommu_strict = 1; | |
567 | return 0; | |
568 | } | |
569 | ||
570 | __setup("s390_iommu=", s390_iommu_setup); |
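
/*
 * Usage: booting with "s390_iommu=strict" on the kernel command line
 * sets s390_iommu_strict, which disables the lazy unmap optimization:
 * every unmap then triggers an rpcit refresh instead of deferring the
 * flush until the iommu bitmap wraps around.
 */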