kernel-fxtec-pro1x/include/linux/dma-attrs.h
Marek Szyprowski bdf5e4871f common: DMA-mapping: add DMA_ATTR_SKIP_CPU_SYNC attribute
This patch adds DMA_ATTR_SKIP_CPU_SYNC attribute to the DMA-mapping
subsystem.

By default dma_map_{single,page,sg} functions family transfer a given
buffer from CPU domain to device domain. Some advanced use cases might
require sharing a buffer between more than one device. This requires
having a mapping created separately for each device and is usually
performed by calling dma_map_{single,page,sg} function more than once
for the given buffer with device pointer to each device taking part in
the buffer sharing. The first call transfers a buffer from 'CPU' domain
to 'device' domain, which synchronizes CPU caches for the given region
(usually it means that the cache has been flushed or invalidated
depending on the dma direction). However, next calls to
dma_map_{single,page,sg}() for other devices will perform exactly the
same synchronization operation on the CPU cache. CPU cache synchronization
might be a time consuming operation, especially if the buffers are
large, so it is highly recommended to avoid it if possible.
DMA_ATTR_SKIP_CPU_SYNC allows platform code to skip synchronization of
the CPU cache for the given buffer assuming that it has been already
transferred to 'device' domain. This attribute can be also used for
dma_unmap_{single,page,sg} functions family to force buffer to stay in
device domain after releasing a mapping for it. Use this attribute with
care!

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
2012-07-30 12:25:47 +02:00

79 lines
1.8 KiB
C

#ifndef _DMA_ATTR_H
#define _DMA_ATTR_H
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
/**
 * enum dma_attr - attributes associated with a DMA mapping
 *
 * The semantics of each attribute should be defined in
 * Documentation/DMA-attributes.txt.
 */
enum dma_attr {
DMA_ATTR_WRITE_BARRIER,
DMA_ATTR_WEAK_ORDERING,
DMA_ATTR_WRITE_COMBINE,
DMA_ATTR_NON_CONSISTENT,
DMA_ATTR_NO_KERNEL_MAPPING,
DMA_ATTR_SKIP_CPU_SYNC,		/* skip CPU cache sync on map/unmap */
DMA_ATTR_MAX,			/* sentinel: number of attributes, not a real attribute */
};
/* Number of unsigned longs needed to hold one bit per enum dma_attr value. */
#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX)
/**
 * struct dma_attrs - an opaque container for DMA attributes
 * @flags: bitmap with one bit per enum dma_attr value
 */
struct dma_attrs {
unsigned long flags[__DMA_ATTRS_LONGS];
};
/*
 * DEFINE_DMA_ATTRS - define a struct dma_attrs variable with all
 * attribute bits cleared (GCC range-designated initializer).
 * @x: name of the variable to define
 */
#define DEFINE_DMA_ATTRS(x) \
struct dma_attrs x = { \
.flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 }, \
}
static inline void init_dma_attrs(struct dma_attrs *attrs)
{
bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
}
#ifdef CONFIG_HAVE_DMA_ATTRS
/**
 * dma_set_attr - mark one attribute as present in @attrs
 * @attr: attribute to set
 * @attrs: struct dma_attrs (may be NULL)
 *
 * A NULL @attrs is silently ignored so callers can pass an optional
 * attribute set straight through.  An out-of-range @attr is a caller
 * bug and triggers BUG_ON().
 */
static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
{
	if (!attrs)
		return;

	BUG_ON(attr >= DMA_ATTR_MAX);
	__set_bit(attr, attrs->flags);
}
/**
 * dma_get_attr - check for a specific attribute
 * @attr: attribute to test (the original "attribute to set" was a
 *        copy-paste from dma_set_attr)
 * @attrs: struct dma_attrs (may be NULL)
 *
 * Returns non-zero if @attr is set in @attrs, 0 otherwise — including
 * when @attrs is NULL, which reads as "no attributes".
 */
static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
{
if (attrs == NULL)
return 0;
BUG_ON(attr >= DMA_ATTR_MAX);
return test_bit(attr, attrs->flags);
}
#else /* !CONFIG_HAVE_DMA_ATTRS */
/* !CONFIG_HAVE_DMA_ATTRS stub: attribute requests are silently ignored. */
static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
{
}
/* !CONFIG_HAVE_DMA_ATTRS stub: no attribute ever reads back as set. */
static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
{
return 0;
}
#endif /* CONFIG_HAVE_DMA_ATTRS */
#endif /* _DMA_ATTR_H */