/*
 * arch/arm/include/asm/xen/page-coherent.h
 *
 * Xen DMA page-coherence helpers for ARM: inline wrappers that dispatch
 * between the device's native dma_ops (for local pages) and the
 * Xen-specific implementations (for foreign pages).
 */
1#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
2#define _ASM_ARM_XEN_PAGE_COHERENT_H
3
4#include <asm/page.h>
5#include <linux/dma-attrs.h>
6#include <linux/dma-mapping.h>
7
/*
 * Out-of-line Xen implementations, used by the inline wrappers below
 * when the DMA handle does not correspond to a local page (i.e. when
 * pfn_valid() fails and the generic dma_ops cannot be used).
 */
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

/*
 * Allocate a coherent DMA buffer by delegating directly to the device's
 * native (generic) dma_ops ->alloc callback.  No Xen-specific handling
 * is done on this path.
 */
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags,
		struct dma_attrs *attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

/*
 * Free a buffer obtained from xen_alloc_coherent_pages() by delegating
 * directly to the device's native dma_ops ->free callback.
 */
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle,
		struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}

/*
 * Map a page for DMA by delegating to the native dma_ops ->map_page.
 *
 * NOTE(review): @dev_addr is accepted but not used in this body; it
 * appears to exist so callers can pass the pre-computed bus address —
 * confirm against the callers / later revisions of this header.
 */
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
}

2f91fc33 38static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
7100b077 39 size_t size, enum dma_data_direction dir,
2f91fc33
SS
40 struct dma_attrs *attrs)
41{
42 unsigned long pfn = PFN_DOWN(handle);
43 /* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will
44 * always return false. If the page is local we can safely call the
45 * native dma_ops function, otherwise we call the xen specific
46 * function. */
47 if (pfn_valid(pfn)) {
48 if (__generic_dma_ops(hwdev)->unmap_page)
49 __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
50 } else
51 __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
52}
7100b077 53
2f91fc33
SS
54static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
55 dma_addr_t handle, size_t size, enum dma_data_direction dir)
56{
57 unsigned long pfn = PFN_DOWN(handle);
58 if (pfn_valid(pfn)) {
59 if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
60 __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
61 } else
62 __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
63}
340720be 64
2f91fc33
SS
65static inline void xen_dma_sync_single_for_device(struct device *hwdev,
66 dma_addr_t handle, size_t size, enum dma_data_direction dir)
67{
68 unsigned long pfn = PFN_DOWN(handle);
69 if (pfn_valid(pfn)) {
70 if (__generic_dma_ops(hwdev)->sync_single_for_device)
71 __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
72 } else
73 __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
74}
7100b077 75
d6fe76c5 76#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */