X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=include%2Fasm-arm%2Fdma-mapping.h;h=9bc46b486afba7a5970e93b5c938a3be33c4684e;hb=1fe532685a1984dc9f2603ed20bd5e630ba79709;hp=55eb4dc3253ddfd1dea70d7900b2be0a1caa9ed9;hpb=be883da7594b0a2a02074e683673ae0e522566a4;p=deliverable%2Flinux.git

diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index 55eb4dc3253d..943f23bc99a2 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -5,15 +5,21 @@
 
 #include <linux/mm.h>		/* need struct page */
 
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
+
+#include <asm-generic/dma-coherent.h>
 
 /*
  * DMA-consistent mapping functions. These allocate/free a region of
  * uncached, unwrite-buffered mapped memory space for use with DMA
  * devices. This is the "generic" version. The PCI specific version
  * is in pci.h
+ *
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-extern void consistent_sync(void *kaddr, size_t size, int rw);
+extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -44,7 +50,7 @@ static inline int dma_get_cache_alignment(void)
 	return 32;
 }
 
-static inline int dma_is_consistent(dma_addr_t handle)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
 {
 	return !!arch_is_coherent();
 }
@@ -57,6 +63,22 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
 	return dma_addr == ~0;
 }
 
+/*
+ * Dummy noncoherent implementation. We don't provide a dma_cache_sync
+ * function so drivers using this API are highlighted with build warnings.
+ */
+static inline void *
+dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+{
+	return NULL;
+}
+
+static inline void
+dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
+		     dma_addr_t handle)
+{
+}
+
 /**
  * dma_alloc_coherent - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -145,7 +167,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	       enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync(cpu_addr, size, dir);
+		dma_cache_maint(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, (unsigned long)cpu_addr);
 }
@@ -254,11 +276,11 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt;
 
-		sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
-		virt = page_address(sg->page) + sg->offset;
+		sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
+		virt = sg_virt(sg);
 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}
 
 	return nents;
 }
@@ -314,7 +336,7 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
 }
 
 static inline void
@@ -322,7 +344,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
@@ -351,9 +373,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 	int i;
 
 	for (i = 0; i < nents; i++, sg++) {
-		char *virt = page_address(sg->page) + sg->offset;
+		char *virt = sg_virt(sg);
 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}
 }
 
@@ -364,9 +386,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 	int i;
 
 	for (i = 0; i < nents; i++, sg++) {
-		char *virt = page_address(sg->page) + sg->offset;
+		char *virt = sg_virt(sg);
 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}
 }
 #else
@@ -381,7 +403,7 @@ extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enu
  *
  * On the SA-1111, a bug limits DMA to only certain regions of RAM.
  * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering sytems, PCI inbound window is 32MB (12MB total RAM)
+ * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
  *
  * The following are helper functions used by the dmabounce subystem
  *
@@ -425,7 +447,7 @@ extern void dmabounce_unregister_dev(struct device *);
  *
  * The dmabounce routines call this function whenever a dma-mapping
  * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the the buffer is OK for
+ * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
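
The practical effect of this change for driver authors: consistent_sync() is no
longer a public interface, and cache maintenance now happens only inside the
generic streaming-DMA calls that wrap dma_cache_maint() above. A minimal sketch
of that usage pattern follows; the example_rx() function, its device pointer,
buffer, and length are hypothetical, and error handling is abbreviated:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical driver fragment: receive data from a device by DMA. */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/*
	 * Map the buffer for the device. On non-coherent ARM this is
	 * where dma_cache_maint() runs - not in driver code.
	 */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(handle))	/* single-argument form, as in this header */
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for the transfer ... */

	/* Hand ownership back to the CPU before touching the data. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... read buf ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}

On a coherent platform the arch_is_coherent() tests in the header make the
sync calls no-ops, while on a CONFIG_DMABOUNCE platform the same calls are
routed to the extern bounce-buffer implementations declared behind the #else
branches - which is why the new comment warns drivers away from calling
dma_cache_maint() directly.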