ARM: dma-mapping: introduce the idea of buffer ownership

The DMA API has the notion of buffer ownership; make it explicit in the
ARM implementation of this API.  This gives us a set of hooks to allow
us to deal with CPU cache issues arising from non-cache coherent DMA.
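
For illustration, a hypothetical non-coherent driver already sees these
ownership transitions through the streaming API; the device, buffer and
length below are made-up names, and only the DMA mapping calls are real:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Hypothetical driver helper: "buf" and "len" are placeholders. */
static void example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* CPU owns the buffer: it may be filled in directly. */
	memset(buf, 0xff, len);

	/* CPU -> device: on non-coherent ARM the cache is maintained here. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	/* ... start the transfer and wait for it to complete ... */

	/* device -> CPU: ownership returns; the CPU may touch buf again. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}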

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-By: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-By: Jamie Iles <jamie@jamieiles.com>
commit 18eabe2347
parent bf32eb8549
Author: Russell King
Date:   2009-10-31 16:52:16 +00:00

3 changed files with 58 additions and 23 deletions


@@ -277,7 +277,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		 * We don't need to sync the DMA buffer since
 		 * it was allocated via the coherent allocators.
 		 */
-		dma_cache_maint(ptr, size, dir);
+		__dma_single_cpu_to_dev(ptr, size, dir);
 	}
 
 	return dma_addr;
@@ -315,6 +315,8 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 			__cpuc_flush_kernel_dcache_area(ptr, size);
 		}
 		free_safe_buffer(dev->archdata.dmabounce, buf);
+	} else {
+		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
 	}
 }


@@ -57,19 +57,48 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 #endif
 
 /*
- * DMA-consistent mapping functions.  These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices.  This is the "generic" version.  The PCI specific version
- * is in pci.h
- *
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ * Private support functions: these are not part of the API and are
+ * liable to change.  Drivers must not use these.
  */
 extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
 extern void dma_cache_maint_page(struct page *page, unsigned long offset,
 				 size_t size, int rw);
 
+/*
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * As above, these are private support functions and not part of the API.
+ * Drivers must not use these.
+ */
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	if (!arch_is_coherent())
+		dma_cache_maint(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	if (!arch_is_coherent())
+		dma_cache_maint_page(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	/* nothing to do */
+}
+
 /*
  * Return whether the given device DMA address mask can be supported
  * properly. For example, if your device can only drive the low 24-bits
@@ -304,8 +333,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint(cpu_addr, size, dir);
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, cpu_addr);
 }
@@ -329,8 +357,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint_page(page, offset, size, dir);
+	__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	return page_to_dma(dev, page) + offset;
 }
@@ -352,7 +379,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
 }
 
 /**
@@ -372,7 +399,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
+		size, dir);
 }
 
 #endif /* CONFIG_DMABOUNCE */
@@ -400,7 +428,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -412,8 +443,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
 
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_for_cpu(struct device *dev,


@@ -573,8 +573,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-				       sg_dma_len(s), dir);
+		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+					    sg_dma_len(s), dir))
+			continue;
+
+		__dma_page_dev_to_cpu(sg_page(s), s->offset,
+				      s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -597,9 +601,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 					sg_dma_len(s), dir))
 			continue;
 
-		if (!arch_is_coherent())
-			dma_cache_maint_page(sg_page(s), s->offset,
-				s->length, dir);
+		__dma_page_cpu_to_dev(sg_page(s), s->offset,
+				      s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
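
Note that the dev-to-cpu hooks introduced above are intentionally empty at
this point. A purely illustrative sketch, not part of this commit, of the
kind of maintenance such a hook could eventually carry on a non-coherent
CPU; my_invalidate_dcache_range() is a made-up helper name:

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	/*
	 * Illustrative only: stale cache lines must not shadow data the
	 * device has written once the buffer is handed back to the CPU.
	 * my_invalidate_dcache_range() is hypothetical and would
	 * invalidate the data cache over [kaddr, kaddr + size).
	 */
	if (!arch_is_coherent() && dir != DMA_TO_DEVICE)
		my_invalidate_dcache_range(kaddr, size);
}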