xen-swiotlb: Fix wrong panic.
drivers/xen/swiotlb-xen.c
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice-versa, and
 * also for providing a mechanism to have contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */
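/*
 * Illustrative sketch (not part of the original code): the discontiguity
 * described above is why two adjacent guest frames cannot be assumed to be
 * adjacent machine frames. With the pfn_to_mfn() lookup this file already
 * relies on, that means:
 *
 *	unsigned long mfn0 = pfn_to_mfn(pfn);
 *	unsigned long mfn1 = pfn_to_mfn(pfn + 1);
 *
 * mfn1 == mfn0 + 1 must not be assumed; that is exactly what
 * check_pages_physically_contiguous() below verifies.
 */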

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

u64 start_dma_addr;

static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

static int check_pages_physically_contiguous(unsigned long pfn,
					      unsigned int offset,
					      size_t length)
{
	unsigned long next_mfn;
	int i;
	int nr_pages;

	next_mfn = pfn_to_mfn(pfn);
	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_mfn(++pfn) != ++next_mfn)
			return 0;
	}
	return 1;
}

static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long pfn = PFN_DOWN(p);
	unsigned int offset = p & ~PAGE_MASK;

	if (offset + size <= PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(pfn, offset, size))
		return 0;
	return 1;
}

static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long mfn = PFN_DOWN(dma_addr);
	unsigned long pfn = mfn_to_local_pfn(mfn);
	phys_addr_t paddr;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(pfn)) {
		paddr = PFN_PHYS(pfn);
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				(unsigned long)buf + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}

void __init xen_swiotlb_init(int verbose)
{
	unsigned long bytes;
	int rc = -ENOMEM;
	unsigned long nr_tbl;
	char *m = NULL;
	unsigned int repeat = 3;

	nr_tbl = swiotlb_nr_tbl();
	if (nr_tbl)
		xen_io_tlb_nslabs = nr_tbl;
	else {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
retry:
	bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from any location.
	 */
	xen_io_tlb_start = alloc_bootmem(bytes);
	if (!xen_io_tlb_start) {
		m = "Cannot allocate Xen-SWIOTLB buffer!\n";
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		free_bootmem(__pa(xen_io_tlb_start), bytes);
		m = "Failed to get contiguous memory for DMA from Xen!\n"
		    "You either: don't have the permissions, do not have"
		    " enough free memory under 4GB, or the hypervisor memory"
		    " is too fragmented!";
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);

	return;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
		       (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	xen_raw_printk("%s (rc:%d)", m, rc);
	panic("%s (rc:%d)", m, rc);
}

void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	unsigned long vstart;

	/*
	 * Ignore region specifiers - the kernel's idea of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout. We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
		return ret;

	vstart = __get_free_pages(flags, order);
	ret = (void *)vstart;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = dma_alloc_coherent_mask(hwdev, flags);

	if (ret) {
		if (xen_create_contiguous_region(vstart, order,
						 fls64(dma_mask)) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		memset(ret, 0, size);
		*dma_handle = virt_to_machine(ret).maddr;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr)
{
	int order = get_order(size);

	if (dma_release_from_coherent(hwdev, order, vaddr))
		return;

	xen_destroy_contiguous_region((unsigned long)vaddr, order);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
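/*
 * Illustrative (hypothetical) driver usage, not taken from this file; it
 * assumes a struct device *dev and a kmalloc()ed buffer 'buf' of 'len'
 * bytes that is to be sent to the hardware:
 *
 *	dma_addr_t dma = xen_swiotlb_map_page(dev, virt_to_page(buf),
 *					      offset_in_page(buf), len,
 *					      DMA_TO_DEVICE, NULL);
 *	if (xen_swiotlb_dma_mapping_error(dev, dma))
 *		return -EIO;
 *	... hand 'dma' to the device ...
 *	xen_swiotlb_unmap_page(dev, dma, len, DMA_TO_DEVICE, NULL);
 */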
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (!map)
		return DMA_ERROR_CODE;

	dev_addr = xen_virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		dev_addr = 0;
	}
	return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so. At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device(), and then the device again owns the
 * buffer.
 */
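/*
 * Hypothetical sketch of that ownership hand-off (not from this file),
 * assuming 'dev', 'dma' and 'len' from a prior xen_swiotlb_map_page()
 * done with DMA_FROM_DEVICE:
 *
 *	xen_swiotlb_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now inspect the received data ...
 *	xen_swiotlb_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... the device may DMA into the buffer again ...
 */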
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
					target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface. Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
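/*
 * Hypothetical caller sketch (not from this file), assuming a scatterlist
 * 'sgl' with 'nents' entries that was already set up with sg_init_table()
 * and sg_set_page(); program_hw_segment() stands in for whatever the
 * device actually needs:
 *
 *	struct scatterlist *sg;
 *	int i, mapped;
 *
 *	mapped = xen_swiotlb_map_sg_attrs(dev, sgl, nents,
 *					  DMA_TO_DEVICE, NULL);
 *	for_each_sg(sgl, sg, mapped, i)
 *		program_hw_segment(sg_dma_address(sg), sg_dma_len(sg));
 */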
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			void *map = swiotlb_tbl_map_single(hwdev,
							   start_dma_addr,
							   sg_phys(sg),
							   sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sgl[0].dma_length = 0;
				return DMA_ERROR_CODE;
			}
			sg->dma_address = xen_virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

int
xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		   enum dma_data_direction dir)
{
	return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);

}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir)
{
	return xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg->dma_length, dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
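/*
 * For instance (illustrative only, mirroring the 24-bit case above), a
 * driver for such a device might check:
 *
 *	if (!xen_swiotlb_dma_supported(dev, DMA_BIT_MASK(24)))
 *		return -EIO;
 */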
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);