A couple of dma-mapping updates:

- turn dma_cache_sync into a dma_map_ops instance and remove the
  implementations that are purely dead because the architecture
  doesn't support noncoherent allocations

- add a flag for busses that need DMA configuration (Robin Murphy)

-----BEGIN PGP SIGNATURE-----

iQI/BAABCAApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAloLSrYLHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYOMuQ//XXD94uNPYavrgXzGsAtg+I+LEm+xyk4T0dX5fxfj
amXX49MHoGemjsBgzJlkQMMFqwDEdkKyEuFnEuy6OeowYCyD6zW0MJ3MwP9OosNJ
PNTdGZIfSvxPYEW8cR9AdK3iQ2loMBZnYhd+O/oVjSugULLW2DNa7r2VRktcCKoh
8Ob/8gL6Y9xEYJBRszhrBwKTa/hU8IThxxozBFzN7I3LIKyFboSTcwXGLAHow43g
4anCTjWTaDcoU2JwY6UTRKRRTV+gD0ZRcsZfd8lNNb5rtMVZkBVOHbF14SMAmw1r
kSgRcU3+WIFPhK/8wBYqtGZZGnOgFBTHVeqow3AdS728pBWlWl8niTK0DiIgCd3m
qzScF6SqfN1bCZkZAy8FUV2l0DPYKS6lvyNkf00Eb2W/f6LEqAcjCi2QDDxRfaw+
Vm97nPUiM+uXNy/6KtAy6ChdprSqx12/edXPp7Y3H2rS/+Dmr6exeix+wb7QUN8W
JI7ZRHo4JLaJZk/XrZtGX/6jnN1Jo7vfApQOmYDY7kE1iGtOU/LQQj8gcZRVQxML
4soN6ivSmZX2k03LabWHpYQ8QiyCSYChLC+Az7rQH47LDLeu1IdTJu6orpXpaxyo
ymzEWlHbmF7mE66X4g/Up/eAYk2YLUA3rKLGVjAIaWDBzHftSFg5EaAnqMADC1G2
hSo=
=ALJf
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-4.15' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - turn dma_cache_sync into a dma_map_ops instance and remove the
   implementations that are purely dead because the architecture
   doesn't support noncoherent allocations

 - add a flag for busses that need DMA configuration (Robin Murphy)

* tag 'dma-mapping-4.15' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: turn dma_cache_sync into a dma_map_ops method
  sh: make dma_cache_sync a no-op
  xtensa: make dma_cache_sync a no-op
  unicore32: make dma_cache_sync a no-op
  powerpc: make dma_cache_sync a no-op
  mn10300: make dma_cache_sync a no-op
  microblaze: make dma_cache_sync a no-op
  ia64: make dma_cache_sync a no-op
  frv: make dma_cache_sync a no-op
  x86: make dma_cache_sync a no-op
  floppy: consolidate the dummy fd_cacheflush definition
  drivers: flag buses which demand DMA configuration
commit e37e0ee019
42 changed files with 71 additions and 250 deletions

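The core of the first change is easiest to see in one place: all of the per-architecture dma_cache_sync() definitions removed below collapse into a single wrapper in include/linux/dma-mapping.h that dispatches to a new, optional dma_map_ops method, and only architectures that really hand out noncoherent memory (mips and parisc in this series) implement it. The following is a condensed sketch assembled from the include/linux/dma-mapping.h hunks at the end of this diff; the elided members are placeholders, not the full struct.

struct dma_map_ops {
        /* ...map/unmap/sync callbacks elided for brevity... */
        void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
                           enum dma_data_direction direction);
        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
};

/*
 * The former per-arch definitions collapse into this one dispatcher:
 * a NULL .cache_sync makes dma_cache_sync() a no-op, which is exactly
 * what the cache-coherent architectures in this series want.
 */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->cache_sync)
                ops->cache_sync(dev, vaddr, size, dir);
}
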
@@ -9,6 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return dma_ops;
 }
 
-#define dma_cache_sync(dev, va, size, dir)	((void)0)
-
 #endif /* _ALPHA_DMA_MAPPING_H */
@@ -24,7 +24,6 @@
 #define fd_set_dma_count(count)	set_dma_count(FLOPPY_DMA,count)
 #define fd_enable_irq()		enable_irq(FLOPPY_IRQ)
 #define fd_disable_irq()	disable_irq(FLOPPY_IRQ)
-#define fd_cacheflush(addr,size)	/* nothing */
 #define fd_request_irq()	request_irq(FLOPPY_IRQ, floppy_interrupt,\
 					    0, "floppy", NULL)
 #define fd_free_irq()		free_irq(FLOPPY_IRQ, NULL)
@@ -62,7 +61,6 @@ alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
 	prev_dir = dir;
 
 	fd_clear_dma_ff();
-	fd_cacheflush(addr, size);
 	fd_set_dma_mode(mode);
 	set_dma_addr(FLOPPY_DMA, bus_addr);
 	fd_set_dma_count(size);
@@ -17,10 +17,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 }
 #endif
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-}
-
 #endif
@@ -15,11 +15,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &frv_dma_ops;
 }
 
-static inline
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		    enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
 #endif /* _ASM_DMA_MAPPING_H */
@@ -37,9 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return dma_ops;
 }
 
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-			   enum dma_data_direction direction);
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
@@ -45,15 +45,4 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return daddr;
 }
 
-static inline void
-dma_cache_sync (struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction dir)
-{
-	/*
-	 * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
-	 * ensure that dma_cache_sync() enforces order, hence the mb().
-	 */
-	mb();
-}
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
@@ -14,11 +14,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &dma_noop_ops;
 }
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
@@ -9,10 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &m68k_dma_ops;
 }
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction dir)
-{
-	/* we use coherent allocation, so not much to do here. */
-}
-
 #endif /* _M68K_DMA_MAPPING_H */
@@ -9,14 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &metag_dma_ops;
 }
 
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-}
-
 #endif
@@ -15,22 +15,6 @@
 #ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
 #define _ASM_MICROBLAZE_DMA_MAPPING_H
 
-/*
- * See Documentation/DMA-API-HOWTO.txt and
- * Documentation/DMA-API.txt for documentation.
- */
-
-#include <linux/types.h>
-#include <linux/cache.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <asm/io.h>
-#include <asm/cacheflush.h>
-
-#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
-#define __dma_free_coherent(size, addr)		((void)0)
-
 /*
  * Available generic sets of operations
  */
@@ -41,27 +25,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &dma_direct_ops;
 }
 
-static inline void __dma_sync(unsigned long paddr,
-			      size_t size, enum dma_data_direction direction)
-{
-	switch (direction) {
-	case DMA_TO_DEVICE:
-	case DMA_BIDIRECTIONAL:
-		flush_dcache_range(paddr, paddr + size);
-		break;
-	case DMA_FROM_DEVICE:
-		invalidate_dcache_range(paddr, paddr + size);
-		break;
-	default:
-		BUG();
-	}
-}
-
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(virt_to_phys(vaddr), size, (int)direction);
-}
-
 #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
@@ -13,6 +13,7 @@
 #include <linux/dma-debug.h>
 #include <linux/export.h>
 #include <linux/bug.h>
+#include <asm/cacheflush.h>
 
 #define NOT_COHERENT_CACHE
 
@@ -52,6 +53,22 @@ static void dma_direct_free_coherent(struct device *dev, size_t size,
 #endif
 }
 
+static inline void __dma_sync(unsigned long paddr,
+			      size_t size, enum dma_data_direction direction)
+{
+	switch (direction) {
+	case DMA_TO_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		flush_dcache_range(paddr, paddr + size);
+		break;
+	case DMA_FROM_DEVICE:
+		invalidate_dcache_range(paddr, paddr + size);
+		break;
+	default:
+		BUG();
+	}
+}
+
 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 			     int nents, enum dma_data_direction direction,
 			     unsigned long attrs)
@@ -27,9 +27,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-			   enum dma_data_direction direction);
-
 #define arch_setup_dma_ops arch_setup_dma_ops
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
 				      u64 size, const struct iommu_ops *iommu,
@@ -383,7 +383,7 @@ static int mips_dma_supported(struct device *dev, u64 mask)
 	return plat_dma_supported(dev, mask);
 }
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
@@ -392,8 +392,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		__dma_sync_virtual(vaddr, size, direction);
 }
 
-EXPORT_SYMBOL(dma_cache_sync);
-
 static const struct dma_map_ops mips_default_dma_map_ops = {
 	.alloc = mips_dma_alloc_coherent,
 	.free = mips_dma_free_coherent,
@@ -407,7 +405,8 @@ static const struct dma_map_ops mips_default_dma_map_ops = {
 	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = mips_dma_sync_sg_for_device,
 	.mapping_error = mips_dma_mapping_error,
-	.dma_supported = mips_dma_supported
+	.dma_supported = mips_dma_supported,
+	.cache_sync = mips_dma_cache_sync,
 };
 
 const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
@@ -11,9 +11,6 @@
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
 
-#include <asm/cache.h>
-#include <asm/io.h>
-
 extern const struct dma_map_ops mn10300_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
@@ -21,11 +18,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &mn10300_dma_ops;
 }
 
-static inline
-void dma_cache_sync(void *vaddr, size_t size,
-		    enum dma_data_direction direction)
-{
-	mn10300_dcache_flush_inv();
-}
-
 #endif
@@ -17,13 +17,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &nios2_dma_ops;
 }
 
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
-
 #endif /* _ASM_NIOS2_DMA_MAPPING_H */
@@ -33,14 +33,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return hppa_dma_ops;
 }
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	if (hppa_dma_ops->sync_single_for_cpu)
-		flush_kernel_dcache_range((unsigned long)vaddr, size);
-}
-
 static inline void *
 parisc_walk_tree(struct device *dev)
 {
@@ -572,6 +572,12 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
 		flush_kernel_vmap_range(sg_virt(sg), sg->length);
 }
 
+static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction direction)
+{
+	flush_kernel_dcache_range((unsigned long)vaddr, size);
+}
+
 const struct dma_map_ops pcxl_dma_ops = {
 	.dma_supported = pa11_dma_supported,
 	.alloc = pa11_dma_alloc,
@@ -584,6 +590,7 @@ const struct dma_map_ops pcxl_dma_ops = {
 	.sync_single_for_device = pa11_dma_sync_single_for_device,
 	.sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = pa11_dma_sync_sg_for_device,
+	.cache_sync = pa11_dma_cache_sync,
 };
 
 static void *pcx_dma_alloc(struct device *dev, size_t size,
@@ -620,4 +627,5 @@ const struct dma_map_ops pcx_dma_ops = {
 	.sync_single_for_device = pa11_dma_sync_single_for_device,
 	.sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = pa11_dma_sync_sg_for_device,
+	.cache_sync = pa11_dma_cache_sync,
 };
@@ -142,12 +142,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 #define ARCH_HAS_DMA_MMAP_COHERENT
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(vaddr, size, (int)direction);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_DMA_MAPPING_H */
@@ -25,7 +25,6 @@
 #define fd_get_dma_residue()	fd_ops->_get_dma_residue(FLOPPY_DMA)
 #define fd_enable_irq()		enable_irq(FLOPPY_IRQ)
 #define fd_disable_irq()	disable_irq(FLOPPY_IRQ)
-#define fd_cacheflush(addr,size)	/* nothing */
 #define fd_free_irq()		free_irq(FLOPPY_IRQ, NULL);
 
 #include <linux/pci.h>
@@ -152,7 +151,6 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
 	prev_dir = dir;
 
 	fd_clear_dma_ff();
-	fd_cacheflush(addr, size);
 	fd_set_dma_mode(mode);
 	set_dma_addr(FLOPPY_DMA, bus_addr);
 	fd_set_dma_count(size);
@@ -16,11 +16,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &dma_noop_ops;
 }
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
@@ -10,10 +10,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return dma_ops;
 }
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		    enum dma_data_direction dir);
-
 /* arch/sh/mm/consistent.c */
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_addr, gfp_t flag,
 					unsigned long attrs);
@@ -21,4 +17,7 @@ extern void dma_generic_free_coherent(struct device *dev, size_t size,
 					void *vaddr, dma_addr_t dma_handle,
 					unsigned long attrs);
 
+void sh_sync_dma_for_device(void *vaddr, size_t size,
+			    enum dma_data_direction dir);
+
 #endif /* __ASM_SH_DMA_MAPPING_H */
@@ -9,6 +9,7 @@
  */
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
+#include <asm/cacheflush.h>
 
 static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 				 unsigned long offset, size_t size,
@@ -20,7 +21,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 	WARN_ON(size == 0);
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		dma_cache_sync(dev, page_address(page) + offset, size, dir);
+		sh_sync_dma_for_device(page_address(page) + offset, size, dir);
 
 	return addr;
 }
@@ -38,7 +39,7 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
 		BUG_ON(!sg_page(s));
 
 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-			dma_cache_sync(dev, sg_virt(s), s->length, dir);
+			sh_sync_dma_for_device(sg_virt(s), s->length, dir);
 
 		s->dma_address = sg_phys(s);
 		s->dma_length = s->length;
@@ -48,20 +49,20 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
 }
 
 #ifdef CONFIG_DMA_NONCOHERENT
-static void nommu_sync_single(struct device *dev, dma_addr_t addr,
+static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr,
 			      size_t size, enum dma_data_direction dir)
 {
-	dma_cache_sync(dev, phys_to_virt(addr), size, dir);
+	sh_sync_dma_for_device(phys_to_virt(addr), size, dir);
 }
 
-static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
+static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			  int nelems, enum dma_data_direction dir)
 {
 	struct scatterlist *s;
 	int i;
 
 	for_each_sg(sg, s, nelems, i)
-		dma_cache_sync(dev, sg_virt(s), s->length, dir);
+		sh_sync_dma_for_device(sg_virt(s), s->length, dir);
 }
 #endif
 
@@ -71,8 +72,8 @@ const struct dma_map_ops nommu_dma_ops = {
 	.map_page = nommu_map_page,
 	.map_sg = nommu_map_sg,
 #ifdef CONFIG_DMA_NONCOHERENT
-	.sync_single_for_device = nommu_sync_single,
-	.sync_sg_for_device = nommu_sync_sg,
+	.sync_single_for_device = nommu_sync_single_for_device,
+	.sync_sg_for_device = nommu_sync_sg_for_device,
 #endif
 	.is_phys = 1,
 };
@@ -49,7 +49,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 	 * Pages from the page allocator may have data present in
 	 * cache. So flush the cache before using uncached memory.
 	 */
-	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
+	sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL);
 
 	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
 	if (!ret_nocache) {
@@ -78,7 +78,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
 	iounmap(vaddr);
 }
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+void sh_sync_dma_for_device(void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
 	void *addr;
@@ -100,7 +100,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		BUG();
 	}
 }
-EXPORT_SYMBOL(dma_cache_sync);
+EXPORT_SYMBOL(sh_sync_dma_for_device);
 
 static int __init memchunk_setup(char *str)
 {
@@ -6,14 +6,6 @@
 #include <linux/mm.h>
 #include <linux/dma-debug.h>
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction dir)
-{
-	/* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this
-	 * routine can be a nop.
-	 */
-}
-
 extern const struct dma_map_ops *dma_ops;
 extern const struct dma_map_ops pci32_dma_ops;
 
@@ -71,7 +71,6 @@ static struct sun_floppy_ops sun_fdops;
 #define fd_set_dma_count(count)   sun_fd_set_dma_count(count)
 #define fd_enable_irq()           /* nothing... */
 #define fd_disable_irq()          /* nothing... */
-#define fd_cacheflush(addr, size) /* nothing... */
 #define fd_request_irq()          sun_fd_request_irq()
 #define fd_free_irq()             /* nothing... */
 #if 0 /* P3: added by Alain, these cause a MMU corruption. 19960524 XXX */
@@ -73,7 +73,6 @@ static struct sun_floppy_ops sun_fdops;
 #define fd_set_dma_addr(addr)     sun_fdops.fd_set_dma_addr(addr)
 #define fd_set_dma_count(count)   sun_fdops.fd_set_dma_count(count)
 #define get_dma_residue(x)        sun_fdops.get_dma_residue()
-#define fd_cacheflush(addr, size) /* nothing... */
 #define fd_request_irq()          sun_fdops.fd_request_irq()
 #define fd_free_irq()             sun_fdops.fd_free_irq()
 #define fd_eject(drive)           sun_fdops.fd_eject(drive)
@@ -67,13 +67,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 #define HAVE_ARCH_DMA_SET_MASK 1
 int dma_set_mask(struct device *dev, u64 mask);
 
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
-
 #endif /* _ASM_TILE_DMA_MAPPING_H */
@@ -101,15 +101,6 @@ extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
 extern void __cpuc_flush_dcache_area(void *, size_t);
 extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size);
 
-/*
- * These are private to the dma-mapping API. Do not use directly.
- * Their sole purpose is to ensure that data held in the cache
- * is visible to DMA, or data written by DMA to system memory is
- * visible to the CPU.
- */
-extern void __cpuc_dma_clean_range(unsigned long, unsigned long);
-extern void __cpuc_dma_flush_range(unsigned long, unsigned long);
-
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space. Really, we want to allow our "user
@@ -18,9 +18,6 @@
 #include <linux/scatterlist.h>
 #include <linux/swiotlb.h>
 
-#include <asm/memory.h>
-#include <asm/cacheflush.h>
-
 extern const struct dma_map_ops swiotlb_dma_map_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
@@ -48,24 +45,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-static inline void dma_cache_sync(struct device *dev, void *vaddr,
-		size_t size, enum dma_data_direction direction)
-{
-	unsigned long start = (unsigned long)vaddr;
-	unsigned long end = start + size;
-
-	switch (direction) {
-	case DMA_NONE:
-		BUG();
-	case DMA_FROM_DEVICE:
-	case DMA_BIDIRECTIONAL: /* writeback and invalidate */
-		__cpuc_dma_flush_range(start, end);
-		break;
-	case DMA_TO_DEVICE: /* writeback only */
-		__cpuc_dma_clean_range(start, end);
-		break;
-	}
-}
-
 #endif /* __KERNEL__ */
 #endif
@@ -20,6 +20,3 @@ EXPORT_SYMBOL(cpu_dcache_clean_area);
 EXPORT_SYMBOL(cpu_set_pte);
 
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
-
-EXPORT_SYMBOL(__cpuc_dma_flush_range);
-EXPORT_SYMBOL(__cpuc_dma_clean_range);
@@ -68,13 +68,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 }
 #endif /* CONFIG_X86_DMA_REMAP */
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	flush_write_buffers();
-}
-
 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 						    gfp_t gfp)
 {
@@ -23,9 +23,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &xtensa_dma_map_ops;
 }
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		    enum dma_data_direction direction);
-
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	return (dma_addr_t)paddr;
@@ -26,29 +26,6 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		    enum dma_data_direction dir)
-{
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		__flush_invalidate_dcache_range((unsigned long)vaddr, size);
-		break;
-
-	case DMA_FROM_DEVICE:
-		__invalidate_dcache_range((unsigned long)vaddr, size);
-		break;
-
-	case DMA_TO_DEVICE:
-		__flush_dcache_range((unsigned long)vaddr, size);
-		break;
-
-	case DMA_NONE:
-		BUG();
-		break;
-	}
-}
-EXPORT_SYMBOL(dma_cache_sync);
-
 static void do_cache_op(dma_addr_t dma_handle, size_t size,
 			void (*fn)(unsigned long, unsigned long))
 {
@@ -195,6 +195,7 @@ struct bus_type amba_bustype = {
 	.match = amba_match,
 	.uevent = amba_uevent,
 	.pm = &amba_pm,
+	.force_dma = true,
 };
 
 static int __init amba_init(void)
@@ -1143,6 +1143,7 @@ struct bus_type platform_bus_type = {
 	.match = platform_match,
 	.uevent = platform_uevent,
 	.pm = &platform_dev_pm_ops,
+	.force_dma = true,
 };
 EXPORT_SYMBOL_GPL(platform_bus_type);
 
@@ -275,6 +275,10 @@ static int set_next_request(void);
 #define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size))
 #endif
 
+#ifndef fd_cacheflush
+#define fd_cacheflush(addr, size) /* nothing... */
+#endif
+
 static inline void fallback_on_nodma_alloc(char **addr, size_t l)
 {
 #ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
@@ -320,6 +320,7 @@ struct bus_type host1x_bus_type = {
 	.name = "host1x",
 	.match = host1x_device_match,
 	.pm = &host1x_device_pm_ops,
+	.force_dma = true,
 };
 
 static void __host1x_device_del(struct host1x_device *device)
@@ -9,9 +9,7 @@
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
 #include <linux/slab.h>
-#include <linux/pci.h>
 #include <linux/platform_device.h>
-#include <linux/amba/bus.h>
 
 #include <asm/errno.h>
 #include "of_private.h"
@@ -101,11 +99,7 @@ int of_dma_configure(struct device *dev, struct device_node *np)
 	 * DMA configuration regardless of whether "dma-ranges" is
 	 * correctly specified or not.
 	 */
-	if (!dev_is_pci(dev) &&
-#ifdef CONFIG_ARM_AMBA
-	    dev->bus != &amba_bustype &&
-#endif
-	    dev->bus != &platform_bus_type)
+	if (!dev->bus->force_dma)
 		return ret == -ENODEV ? 0 : ret;
 
 	dma_addr = offset = 0;
@@ -1516,6 +1516,7 @@ struct bus_type pci_bus_type = {
 	.drv_groups = pci_drv_groups,
 	.pm = PCI_PM_OPS_PTR,
 	.num_vf = pci_bus_num_vf,
+	.force_dma = true,
 };
 EXPORT_SYMBOL(pci_bus_type);
 
@@ -300,7 +300,7 @@ static void maple_send(void)
 	mutex_unlock(&maple_wlist_lock);
 	if (maple_packets > 0) {
 		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
-			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
+			sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
 				       PAGE_SIZE, DMA_BIDIRECTIONAL);
 	}
 
|
@ -642,8 +642,7 @@ static void maple_dma_handler(struct work_struct *work)
|
|||
list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
|
||||
mdev = mq->dev;
|
||||
recvbuf = mq->recvbuf->buf;
|
||||
dma_cache_sync(&mdev->dev, recvbuf, 0x400,
|
||||
DMA_FROM_DEVICE);
|
||||
sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
|
||||
code = recvbuf[0];
|
||||
kfree(mq->sendbuf);
|
||||
list_del_init(&mq->list);
|
||||
|
|
|
@@ -97,6 +97,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
  * @p:		The private data of the driver core, only the driver core can
  *		touch this.
  * @lock_key:	Lock class key for use by the lock validator
+ * @force_dma:	Assume devices on this bus should be set up by dma_configure()
+ *		even if DMA capability is not explicitly described by firmware.
  *
  * A bus is a channel between the processor and one or more devices. For the
  * purposes of the device model, all devices are connected via a bus, even if
@@ -135,6 +137,8 @@ struct bus_type {
 
 	struct subsys_private *p;
 	struct lock_class_key lock_key;
+
+	bool force_dma;
 };
 
 extern int __must_check bus_register(struct bus_type *bus);
@@ -127,6 +127,8 @@ struct dma_map_ops {
 	void (*sync_sg_for_device)(struct device *dev,
 				   struct scatterlist *sg, int nents,
 				   enum dma_data_direction dir);
+	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+			   enum dma_data_direction direction);
 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 	int (*dma_supported)(struct device *dev, u64 mask);
 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
@@ -437,6 +439,17 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
 
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->cache_sync)
+		ops->cache_sync(dev, vaddr, size, dir);
+}
+
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
 
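The second half of the series is visible in the of/device.c hunk above: instead of of_dma_configure() hard-coding which bus types may have DMA set up without an explicit firmware description, each bus now opts in through the new force_dma field in struct bus_type, as amba, platform, host1x, and pci do in this diff. A minimal sketch of how any other bus would opt in follows; the "foo" bus is hypothetical, invented here purely for illustration.

struct bus_type foo_bus_type = {
        .name = "foo",          /* hypothetical bus, for illustration */
        /*
         * Let dma_configure()/of_dma_configure() proceed even when
         * firmware does not describe the device's DMA capability.
         */
        .force_dma = true,
};

This keeps bus-specific knowledge out of the OF core: drivers/of/device.c no longer needs <linux/pci.h>, <linux/amba/bus.h>, or the CONFIG_ARM_AMBA ifdef removed above, and new buses can opt in without touching it at all.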