| 1 | /* |
| 2 | * Copyright 2010 |
| 3 | * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> |
| 4 | * |
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License v2.0 as published by |
| 9 | * the Free Software Foundation |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | * GNU General Public License for more details. |
| 15 | * |
 * PV guests under Xen run in a non-contiguous memory architecture.
| 17 | * |
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa,
 * and also a mechanism to provide contiguous pages for device driver
 * operations (say, DMA operations).
| 22 | * |
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFNs) to machine frame numbers (MFNs)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
| 33 | * |
| 34 | */ |
| 35 | |
| 36 | #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt |
| 37 | |
| 38 | #include <linux/bootmem.h> |
| 39 | #include <linux/dma-mapping.h> |
| 40 | #include <linux/export.h> |
| 41 | #include <xen/swiotlb-xen.h> |
| 42 | #include <xen/page.h> |
| 43 | #include <xen/xen-ops.h> |
| 44 | #include <xen/hvc-console.h> |
| 45 | |
| 46 | #include <asm/dma-mapping.h> |
| 47 | #include <asm/xen/page-coherent.h> |
| 48 | |
| 49 | #include <trace/events/swiotlb.h> |
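
/*
 * x86 provides its own dma_alloc_coherent_mask() helper in its DMA
 * mapping headers; supply an equivalent fallback here for other
 * architectures so the coherent allocation path can pick a default mask.
 */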
| 50 | /* |
| 51 | * Used to do a quick range check in swiotlb_tbl_unmap_single and |
| 52 | * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this |
| 53 | * API. |
| 54 | */ |
| 55 | |
| 56 | #ifndef CONFIG_X86 |
| 57 | static unsigned long dma_alloc_coherent_mask(struct device *dev, |
| 58 | gfp_t gfp) |
| 59 | { |
	unsigned long dma_mask = dev->coherent_dma_mask;

| 63 | if (!dma_mask) |
| 64 | dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32); |
| 65 | |
| 66 | return dma_mask; |
| 67 | } |
| 68 | #endif |
| 69 | |
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *xen_io_tlb_start, *xen_io_tlb_end;
| 71 | static unsigned long xen_io_tlb_nslabs; |
| 72 | /* |
| 73 | * Quick lookup value of the bus address of the IOTLB. |
| 74 | */ |
| 75 | |
| 76 | static u64 start_dma_addr; |
| 77 | |
| 78 | /* |
| 79 | * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t |
| 80 | * can be 32bit when dma_addr_t is 64bit leading to a loss in |
| 81 | * information if the shift is done before casting to 64bit. |
| 82 | */ |
| 83 | static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr) |
| 84 | { |
| 85 | unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr)); |
| 86 | dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT; |
| 87 | |
| 88 | dma |= paddr & ~XEN_PAGE_MASK; |
| 89 | |
| 90 | return dma; |
| 91 | } |
| 92 | |
| 93 | static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr) |
| 94 | { |
| 95 | unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr)); |
| 96 | dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT; |
| 97 | phys_addr_t paddr = dma; |
| 98 | |
| 99 | paddr |= baddr & ~XEN_PAGE_MASK; |
| 100 | |
| 101 | return paddr; |
| 102 | } |
| 103 | |
| 104 | static inline dma_addr_t xen_virt_to_bus(void *address) |
| 105 | { |
| 106 | return xen_phys_to_bus(virt_to_phys(address)); |
| 107 | } |
| 108 | |
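/*
 * Returns 1 if the Xen pages spanning [offset, offset + length) starting
 * at @xen_pfn are backed by consecutive machine (bus) frames, 0 otherwise.
 */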
| 109 | static int check_pages_physically_contiguous(unsigned long xen_pfn, |
| 110 | unsigned int offset, |
| 111 | size_t length) |
| 112 | { |
| 113 | unsigned long next_bfn; |
| 114 | int i; |
| 115 | int nr_pages; |
| 116 | |
| 117 | next_bfn = pfn_to_bfn(xen_pfn); |
| 118 | nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT; |
| 119 | |
| 120 | for (i = 1; i < nr_pages; i++) { |
| 121 | if (pfn_to_bfn(++xen_pfn) != ++next_bfn) |
| 122 | return 0; |
| 123 | } |
| 124 | return 1; |
| 125 | } |
| 126 | |
| 127 | static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) |
| 128 | { |
| 129 | unsigned long xen_pfn = XEN_PFN_DOWN(p); |
| 130 | unsigned int offset = p & ~XEN_PAGE_MASK; |
| 131 | |
| 132 | if (offset + size <= XEN_PAGE_SIZE) |
| 133 | return 0; |
| 134 | if (check_pages_physically_contiguous(xen_pfn, offset, size)) |
| 135 | return 0; |
| 136 | return 1; |
| 137 | } |
| 138 | |
| 139 | static int is_xen_swiotlb_buffer(dma_addr_t dma_addr) |
| 140 | { |
| 141 | unsigned long bfn = XEN_PFN_DOWN(dma_addr); |
| 142 | unsigned long xen_pfn = bfn_to_local_pfn(bfn); |
| 143 | phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn); |
| 144 | |
	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check addresses within our domain.
	 */
| 149 | if (pfn_valid(PFN_DOWN(paddr))) { |
| 150 | return paddr >= virt_to_phys(xen_io_tlb_start) && |
| 151 | paddr < virt_to_phys(xen_io_tlb_end); |
| 152 | } |
| 153 | return 0; |
| 154 | } |
| 155 | |
| 156 | static int max_dma_bits = 32; |
| 157 | |
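/*
 * Swap the pseudo-physical pages backing the IO TLB, IO_TLB_SEGSIZE slabs
 * at a time, for machine pages that are contiguous and addressable below
 * the requested DMA mask, retrying with a wider mask (up to max_dma_bits)
 * if the hypervisor cannot satisfy a narrower one.
 */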
| 158 | static int |
| 159 | xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs) |
| 160 | { |
| 161 | int i, rc; |
| 162 | int dma_bits; |
| 163 | dma_addr_t dma_handle; |
| 164 | phys_addr_t p = virt_to_phys(buf); |
| 165 | |
| 166 | dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT; |
| 167 | |
| 168 | i = 0; |
| 169 | do { |
| 170 | int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE); |
| 171 | |
| 172 | do { |
| 173 | rc = xen_create_contiguous_region( |
| 174 | p + (i << IO_TLB_SHIFT), |
| 175 | get_order(slabs << IO_TLB_SHIFT), |
| 176 | dma_bits, &dma_handle); |
| 177 | } while (rc && dma_bits++ < max_dma_bits); |
| 178 | if (rc) |
| 179 | return rc; |
| 180 | |
| 181 | i += slabs; |
| 182 | } while (i < nslabs); |
| 183 | return 0; |
| 184 | } |
| 185 | static unsigned long xen_set_nslabs(unsigned long nr_tbl) |
| 186 | { |
| 187 | if (!nr_tbl) { |
| 188 | xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT); |
| 189 | xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE); |
| 190 | } else |
| 191 | xen_io_tlb_nslabs = nr_tbl; |
| 192 | |
| 193 | return xen_io_tlb_nslabs << IO_TLB_SHIFT; |
| 194 | } |
| 195 | |
| 196 | enum xen_swiotlb_err { |
| 197 | XEN_SWIOTLB_UNKNOWN = 0, |
| 198 | XEN_SWIOTLB_ENOMEM, |
| 199 | XEN_SWIOTLB_EFIXUP |
| 200 | }; |
| 201 | |
| 202 | static const char *xen_swiotlb_error(enum xen_swiotlb_err err) |
| 203 | { |
| 204 | switch (err) { |
| 205 | case XEN_SWIOTLB_ENOMEM: |
| 206 | return "Cannot allocate Xen-SWIOTLB buffer\n"; |
| 207 | case XEN_SWIOTLB_EFIXUP: |
| 208 | return "Failed to get contiguous memory for DMA from Xen!\n"\ |
| 209 | "You either: don't have the permissions, do not have"\ |
| 210 | " enough free memory under 4GB, or the hypervisor memory"\ |
| 211 | " is too fragmented!"; |
| 212 | default: |
| 213 | break; |
| 214 | } |
| 215 | return ""; |
| 216 | } |
| 217 | int __ref xen_swiotlb_init(int verbose, bool early) |
| 218 | { |
| 219 | unsigned long bytes, order; |
| 220 | int rc = -ENOMEM; |
| 221 | enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN; |
| 222 | unsigned int repeat = 3; |
| 223 | |
| 224 | xen_io_tlb_nslabs = swiotlb_nr_tbl(); |
| 225 | retry: |
| 226 | bytes = xen_set_nslabs(xen_io_tlb_nslabs); |
| 227 | order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT); |
| 228 | /* |
| 229 | * Get IO TLB memory from any location. |
| 230 | */ |
| 231 | if (early) |
| 232 | xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes)); |
| 233 | else { |
| 234 | #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) |
| 235 | #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) |
| 236 | while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { |
| 237 | xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order); |
| 238 | if (xen_io_tlb_start) |
| 239 | break; |
| 240 | order--; |
| 241 | } |
| 242 | if (order != get_order(bytes)) { |
| 243 | pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n", |
| 244 | (PAGE_SIZE << order) >> 20); |
| 245 | xen_io_tlb_nslabs = SLABS_PER_PAGE << order; |
| 246 | bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT; |
| 247 | } |
| 248 | } |
| 249 | if (!xen_io_tlb_start) { |
| 250 | m_ret = XEN_SWIOTLB_ENOMEM; |
| 251 | goto error; |
| 252 | } |
| 253 | xen_io_tlb_end = xen_io_tlb_start + bytes; |
| 254 | /* |
| 255 | * And replace that memory with pages under 4GB. |
| 256 | */ |
| 257 | rc = xen_swiotlb_fixup(xen_io_tlb_start, |
| 258 | bytes, |
| 259 | xen_io_tlb_nslabs); |
| 260 | if (rc) { |
| 261 | if (early) |
| 262 | free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes)); |
| 263 | else { |
| 264 | free_pages((unsigned long)xen_io_tlb_start, order); |
| 265 | xen_io_tlb_start = NULL; |
| 266 | } |
| 267 | m_ret = XEN_SWIOTLB_EFIXUP; |
| 268 | goto error; |
| 269 | } |
| 270 | start_dma_addr = xen_virt_to_bus(xen_io_tlb_start); |
| 271 | if (early) { |
| 272 | if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, |
| 273 | verbose)) |
| 274 | panic("Cannot allocate SWIOTLB buffer"); |
| 275 | rc = 0; |
| 276 | } else |
| 277 | rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs); |
| 278 | return rc; |
| 279 | error: |
| 280 | if (repeat--) { |
| 281 | xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */ |
| 282 | (xen_io_tlb_nslabs >> 1)); |
| 283 | pr_info("Lowering to %luMB\n", |
| 284 | (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20); |
| 285 | goto retry; |
| 286 | } |
| 287 | pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc); |
| 288 | if (early) |
| 289 | panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc); |
| 290 | else |
| 291 | free_pages((unsigned long)xen_io_tlb_start, order); |
| 292 | return rc; |
| 293 | } |
| 294 | void * |
| 295 | xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
| 296 | dma_addr_t *dma_handle, gfp_t flags, |
| 297 | struct dma_attrs *attrs) |
| 298 | { |
| 299 | void *ret; |
| 300 | int order = get_order(size); |
| 301 | u64 dma_mask = DMA_BIT_MASK(32); |
| 302 | phys_addr_t phys; |
| 303 | dma_addr_t dev_addr; |
| 304 | |
| 305 | /* |
| 306 | * Ignore region specifiers - the kernel's ideas of |
| 307 | * pseudo-phys memory layout has nothing to do with the |
| 308 | * machine physical layout. We can't allocate highmem |
| 309 | * because we can't return a pointer to it. |
| 310 | */ |
| 311 | flags &= ~(__GFP_DMA | __GFP_HIGHMEM); |
| 312 | |
	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel
	 * direct-mapped RAM. Also see the comment below.
	 */
| 318 | ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs); |
| 319 | |
| 320 | if (!ret) |
| 321 | return ret; |
| 322 | |
| 323 | if (hwdev && hwdev->coherent_dma_mask) |
| 324 | dma_mask = dma_alloc_coherent_mask(hwdev, flags); |
| 325 | |
| 326 | /* At this point dma_handle is the physical address, next we are |
| 327 | * going to set it to the machine address. |
| 328 | * Do not use virt_to_phys(ret) because on ARM it doesn't correspond |
| 329 | * to *dma_handle. */ |
| 330 | phys = *dma_handle; |
| 331 | dev_addr = xen_phys_to_bus(phys); |
	if ((dev_addr + size - 1 <= dma_mask) &&
| 333 | !range_straddles_page_boundary(phys, size)) |
| 334 | *dma_handle = dev_addr; |
| 335 | else { |
| 336 | if (xen_create_contiguous_region(phys, order, |
| 337 | fls64(dma_mask), dma_handle) != 0) { |
| 338 | xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs); |
| 339 | return NULL; |
| 340 | } |
| 341 | } |
| 342 | memset(ret, 0, size); |
| 343 | return ret; |
| 344 | } |
| 345 | EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent); |
| 346 | |
| 347 | void |
| 348 | xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
| 349 | dma_addr_t dev_addr, struct dma_attrs *attrs) |
| 350 | { |
| 351 | int order = get_order(size); |
| 352 | phys_addr_t phys; |
| 353 | u64 dma_mask = DMA_BIT_MASK(32); |
| 354 | |
| 355 | if (hwdev && hwdev->coherent_dma_mask) |
| 356 | dma_mask = hwdev->coherent_dma_mask; |
| 357 | |
	/* do not use virt_to_phys because on ARM it doesn't return the
	 * physical address */
| 360 | phys = xen_bus_to_phys(dev_addr); |
| 361 | |
	if ((dev_addr + size - 1 > dma_mask) ||
| 363 | range_straddles_page_boundary(phys, size)) |
| 364 | xen_destroy_contiguous_region(phys, order); |
| 365 | |
| 366 | xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); |
| 367 | } |
| 368 | EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent); |
| 369 | |
| 370 | |
| 371 | /* |
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * DMA (bus) address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_sync_single_for_cpu is
 * performed.
| 377 | */ |
| 378 | dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, |
| 379 | unsigned long offset, size_t size, |
| 380 | enum dma_data_direction dir, |
| 381 | struct dma_attrs *attrs) |
| 382 | { |
| 383 | phys_addr_t map, phys = page_to_phys(page) + offset; |
| 384 | dma_addr_t dev_addr = xen_phys_to_bus(phys); |
| 385 | |
| 386 | BUG_ON(dir == DMA_NONE); |
| 387 | /* |
| 388 | * If the address happens to be in the device's DMA window, |
| 389 | * we can safely return the device addr and not worry about bounce |
| 390 | * buffering it. |
| 391 | */ |
| 392 | if (dma_capable(dev, dev_addr, size) && |
| 393 | !range_straddles_page_boundary(phys, size) && |
| 394 | !xen_arch_need_swiotlb(dev, phys, dev_addr) && |
| 395 | !swiotlb_force) { |
| 396 | /* we are not interested in the dma_addr returned by |
| 397 | * xen_dma_map_page, only in the potential cache flushes executed |
| 398 | * by the function. */ |
| 399 | xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs); |
| 400 | return dev_addr; |
| 401 | } |
| 402 | |
| 403 | /* |
| 404 | * Oh well, have to allocate and map a bounce buffer. |
| 405 | */ |
| 406 | trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); |
| 407 | |
| 408 | map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir); |
| 409 | if (map == SWIOTLB_MAP_ERROR) |
| 410 | return DMA_ERROR_CODE; |
| 411 | |
| 412 | xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), |
| 413 | dev_addr, map & ~PAGE_MASK, size, dir, attrs); |
| 414 | dev_addr = xen_phys_to_bus(map); |
| 415 | |
| 416 | /* |
| 417 | * Ensure that the address returned is DMA'ble |
| 418 | */ |
| 419 | if (!dma_capable(dev, dev_addr, size)) { |
| 420 | swiotlb_tbl_unmap_single(dev, map, size, dir); |
| 421 | dev_addr = 0; |
| 422 | } |
| 423 | return dev_addr; |
| 424 | } |
| 425 | EXPORT_SYMBOL_GPL(xen_swiotlb_map_page); |
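
/*
 * Illustrative (hypothetical) driver-side usage: drivers do not call this
 * entry point directly, but go through the generic DMA API, which
 * dispatches here via the registered dma_map_ops, e.g.:
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... program the device with "handle" ...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */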
| 426 | |
| 427 | /* |
| 428 | * Unmap a single streaming mode DMA translation. The dma_addr and size must |
| 429 | * match what was provided for in a previous xen_swiotlb_map_page call. All |
| 430 | * other usages are undefined. |
| 431 | * |
| 432 | * After this call, reads by the cpu to the buffer are guaranteed to see |
| 433 | * whatever the device wrote there. |
| 434 | */ |
| 435 | static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, |
| 436 | size_t size, enum dma_data_direction dir, |
| 437 | struct dma_attrs *attrs) |
| 438 | { |
| 439 | phys_addr_t paddr = xen_bus_to_phys(dev_addr); |
| 440 | |
| 441 | BUG_ON(dir == DMA_NONE); |
| 442 | |
| 443 | xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs); |
| 444 | |
| 445 | /* NOTE: We use dev_addr here, not paddr! */ |
| 446 | if (is_xen_swiotlb_buffer(dev_addr)) { |
| 447 | swiotlb_tbl_unmap_single(hwdev, paddr, size, dir); |
| 448 | return; |
| 449 | } |
| 450 | |
| 451 | if (dir != DMA_FROM_DEVICE) |
| 452 | return; |
| 453 | |
| 454 | /* |
| 455 | * phys_to_virt doesn't work with hihgmem page but we could |
| 456 | * call dma_mark_clean() with hihgmem page here. However, we |
| 457 | * are fine since dma_mark_clean() is null on POWERPC. We can |
| 458 | * make dma_mark_clean() take a physical address if necessary. |
| 459 | */ |
| 460 | dma_mark_clean(phys_to_virt(paddr), size); |
| 461 | } |
| 462 | |
| 463 | void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, |
| 464 | size_t size, enum dma_data_direction dir, |
| 465 | struct dma_attrs *attrs) |
| 466 | { |
| 467 | xen_unmap_single(hwdev, dev_addr, size, dir, attrs); |
| 468 | } |
| 469 | EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page); |
| 470 | |
| 471 | /* |
| 472 | * Make physical memory consistent for a single streaming mode DMA translation |
| 473 | * after a transfer. |
| 474 | * |
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so. At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the
 * buffer.
| 480 | */ |
| 481 | static void |
| 482 | xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, |
| 483 | size_t size, enum dma_data_direction dir, |
| 484 | enum dma_sync_target target) |
| 485 | { |
| 486 | phys_addr_t paddr = xen_bus_to_phys(dev_addr); |
| 487 | |
| 488 | BUG_ON(dir == DMA_NONE); |
| 489 | |
| 490 | if (target == SYNC_FOR_CPU) |
| 491 | xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir); |
| 492 | |
| 493 | /* NOTE: We use dev_addr here, not paddr! */ |
| 494 | if (is_xen_swiotlb_buffer(dev_addr)) |
| 495 | swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); |
| 496 | |
| 497 | if (target == SYNC_FOR_DEVICE) |
| 498 | xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir); |
| 499 | |
| 500 | if (dir != DMA_FROM_DEVICE) |
| 501 | return; |
| 502 | |
| 503 | dma_mark_clean(phys_to_virt(paddr), size); |
| 504 | } |
| 505 | |
| 506 | void |
| 507 | xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, |
| 508 | size_t size, enum dma_data_direction dir) |
| 509 | { |
| 510 | xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); |
| 511 | } |
| 512 | EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu); |
| 513 | |
| 514 | void |
| 515 | xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, |
| 516 | size_t size, enum dma_data_direction dir) |
| 517 | { |
| 518 | xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); |
| 519 | } |
| 520 | EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device); |
| 521 | |
| 522 | /* |
| 523 | * Map a set of buffers described by scatterlist in streaming mode for DMA. |
| 524 | * This is the scatter-gather version of the above xen_swiotlb_map_page |
| 525 | * interface. Here the scatter gather list elements are each tagged with the |
| 526 | * appropriate dma address and length. They are obtained via |
| 527 | * sg_dma_{address,length}(SG). |
| 528 | * |
| 529 | * NOTE: An implementation may be able to use a smaller number of |
| 530 | * DMA address/length pairs than there are SG table elements. |
| 531 | * (for example via virtual mapping capabilities) |
| 532 | * The routine returns the number of addr/length pairs actually |
| 533 | * used, at most nents. |
| 534 | * |
| 535 | * Device ownership issues as mentioned above for xen_swiotlb_map_page are the |
| 536 | * same here. |
| 537 | */ |
| 538 | int |
| 539 | xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
| 540 | int nelems, enum dma_data_direction dir, |
| 541 | struct dma_attrs *attrs) |
| 542 | { |
| 543 | struct scatterlist *sg; |
| 544 | int i; |
| 545 | |
| 546 | BUG_ON(dir == DMA_NONE); |
| 547 | |
| 548 | for_each_sg(sgl, sg, nelems, i) { |
| 549 | phys_addr_t paddr = sg_phys(sg); |
| 550 | dma_addr_t dev_addr = xen_phys_to_bus(paddr); |
| 551 | |
| 552 | if (swiotlb_force || |
| 553 | xen_arch_need_swiotlb(hwdev, paddr, dev_addr) || |
| 554 | !dma_capable(hwdev, dev_addr, sg->length) || |
| 555 | range_straddles_page_boundary(paddr, sg->length)) { |
| 556 | phys_addr_t map = swiotlb_tbl_map_single(hwdev, |
| 557 | start_dma_addr, |
| 558 | sg_phys(sg), |
| 559 | sg->length, |
| 560 | dir); |
| 561 | if (map == SWIOTLB_MAP_ERROR) { |
| 562 | dev_warn(hwdev, "swiotlb buffer is full\n"); |
| 563 | /* Don't panic here, we expect map_sg users |
| 564 | to do proper error handling. */ |
| 565 | xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, |
| 566 | attrs); |
| 567 | sg_dma_len(sgl) = 0; |
| 568 | return 0; |
| 569 | } |
| 570 | xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), |
| 571 | dev_addr, |
| 572 | map & ~PAGE_MASK, |
| 573 | sg->length, |
| 574 | dir, |
| 575 | attrs); |
| 576 | sg->dma_address = xen_phys_to_bus(map); |
| 577 | } else { |
| 578 | /* we are not interested in the dma_addr returned by |
| 579 | * xen_dma_map_page, only in the potential cache flushes executed |
| 580 | * by the function. */ |
| 581 | xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT), |
| 582 | dev_addr, |
| 583 | paddr & ~PAGE_MASK, |
| 584 | sg->length, |
| 585 | dir, |
| 586 | attrs); |
| 587 | sg->dma_address = dev_addr; |
| 588 | } |
| 589 | sg_dma_len(sg) = sg->length; |
| 590 | } |
| 591 | return nelems; |
| 592 | } |
| 593 | EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs); |
| 594 | |
| 595 | /* |
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
| 598 | */ |
| 599 | void |
| 600 | xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
| 601 | int nelems, enum dma_data_direction dir, |
| 602 | struct dma_attrs *attrs) |
| 603 | { |
| 604 | struct scatterlist *sg; |
| 605 | int i; |
| 606 | |
| 607 | BUG_ON(dir == DMA_NONE); |
| 608 | |
| 609 | for_each_sg(sgl, sg, nelems, i) |
| 610 | xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs); |
| 611 | |
| 612 | } |
| 613 | EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs); |
| 614 | |
| 615 | /* |
| 616 | * Make physical memory consistent for a set of streaming mode DMA translations |
| 617 | * after a transfer. |
| 618 | * |
| 619 | * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules |
| 620 | * and usage. |
| 621 | */ |
| 622 | static void |
| 623 | xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, |
| 624 | int nelems, enum dma_data_direction dir, |
| 625 | enum dma_sync_target target) |
| 626 | { |
| 627 | struct scatterlist *sg; |
| 628 | int i; |
| 629 | |
| 630 | for_each_sg(sgl, sg, nelems, i) |
| 631 | xen_swiotlb_sync_single(hwdev, sg->dma_address, |
| 632 | sg_dma_len(sg), dir, target); |
| 633 | } |
| 634 | |
| 635 | void |
| 636 | xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, |
| 637 | int nelems, enum dma_data_direction dir) |
| 638 | { |
| 639 | xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); |
| 640 | } |
| 641 | EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu); |
| 642 | |
| 643 | void |
| 644 | xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, |
| 645 | int nelems, enum dma_data_direction dir) |
| 646 | { |
| 647 | xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); |
| 648 | } |
| 649 | EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device); |
| 650 | |
| 651 | int |
| 652 | xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) |
| 653 | { |
| 654 | return !dma_addr; |
| 655 | } |
| 656 | EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error); |
| 657 | |
| 658 | /* |
| 659 | * Return whether the given device DMA address mask can be supported |
| 660 | * properly. For example, if your device can only drive the low 24-bits |
| 661 | * during bus mastering, then you would pass 0x00ffffff as the mask to |
| 662 | * this function. |
| 663 | */ |
| 664 | int |
| 665 | xen_swiotlb_dma_supported(struct device *hwdev, u64 mask) |
| 666 | { |
| 667 | return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask; |
| 668 | } |
| 669 | EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported); |
| 670 | |
| 671 | int |
| 672 | xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask) |
| 673 | { |
| 674 | if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask)) |
| 675 | return -EIO; |
| 676 | |
| 677 | *dev->dma_mask = dma_mask; |
| 678 | |
| 679 | return 0; |
| 680 | } |
| 681 | EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask); |