kernel-fxtec-pro1x/arch/mips/mm/dma-default.c
FUJITA Tomonori 8d8bb39b9e dma-mapping: add the device argument to dma_mapping_error()
Add per-device dma_mapping_ops support for CONFIG_X86_64, as the POWER
architecture does:

This enables us to cleanly fix the Calgary IOMMU issue where some devices
are not behind the IOMMU (http://lkml.org/lkml/2008/5/8/423).

I think that per-device dma_mapping_ops support would also be helpful for
KVM people to support PCI passthrough, but Andi thinks that this makes it
difficult to support PCI passthrough (see the above thread).  So I CC'ed
this to the KVM camp.  Comments are appreciated.

A pointer to dma_mapping_ops is added to struct dev_archdata.  If the
pointer is non-NULL, DMA operations in asm/dma-mapping.h use it.  If it's
NULL, the system-wide dma_ops pointer is used as before.
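
A minimal sketch of that dispatch, assuming the x86 identifiers of the
time (dev_archdata, dma_ops and get_dma_ops are the assumed names; the
real field layout may differ):

	/* sketch only - names follow the x86 tree of that era */
	struct dev_archdata {
		struct dma_mapping_ops *dma_ops;	/* per-device ops, may be NULL */
	};

	static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
	{
		if (dev && dev->archdata.dma_ops)
			return dev->archdata.dma_ops;	/* per-device override */
		return dma_ops;				/* system-wide default */
	}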

If it's useful for KVM people, I plan to implement a mechanism to register
a hook called when a new PCI (or DMA-capable) device is created (it works
with hotplugging).  This enables IOMMUs to set up an appropriate
dma_mapping_ops per device.

The major obstacle is that dma_mapping_error doesn't take a pointer to the
device, unlike the other DMA operations, so x86 can't have dma_mapping_ops
per device.  Note that all the POWER IOMMUs use the same dma_mapping_error
function, so this is not a problem for POWER, but x86 IOMMUs use different
dma_mapping_error functions.

The first patch adds the device argument to dma_mapping_error.  The patch
is trivial but large since it touches lots of drivers and dma-mapping.h in
all the architectures.

This patch:

dma_mapping_error() doesn't take a pointer to the device, unlike the other
DMA operations, so we can't have dma_mapping_ops per device.

Note that POWER already has dma_mapping_ops per device, but all the POWER
IOMMUs use the same dma_mapping_error function.  x86 IOMMUs use different
dma_mapping_error functions, so they need the device argument.
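
For illustration only, the interface change amounts to the following (the
old prototype is shown for contrast; the MIPS stub further below simply
returns 0 either way):

	/* before */
	int dma_mapping_error(dma_addr_t dma_addr);

	/* after: the device is passed so per-device ops can be consulted */
	int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);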

[akpm@linux-foundation.org: fix sge]
[akpm@linux-foundation.org: fix svc_rdma]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix bnx2x]
[akpm@linux-foundation.org: fix s2io]
[akpm@linux-foundation.org: fix pasemi_mac]
[akpm@linux-foundation.org: fix sdhci]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc]
[akpm@linux-foundation.org: fix ibmvscsi]
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-07-26 12:00:03 -07:00

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
	        current_cpu_type() == CPU_R12000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
		           direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
		                                   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = plat_dma_addr_to_phys(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);
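
For context, a hypothetical driver fragment (not part of the file above;
mydev, buf and len are placeholder names) showing the streaming-DMA
pattern these exports implement on a non-coherent MIPS system:

	/* sketch only - mydev, buf and len are placeholders */
	dma_addr_t handle;

	/* writes the buffer back from the CPU caches, then maps it */
	handle = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(mydev, handle))
		return -ENOMEM;

	/* ... hand 'handle' to the device and wait for it to finish ... */

	/* on R10000/R12000 the unmap also performs the cache maintenance */
	dma_unmap_single(mydev, handle, len, DMA_TO_DEVICE);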