From 2ffe2da3e71652d4f4cae19539b5c78c2a239136 Mon Sep 17 00:00:00 2001
From: Russell King <rmk+kernel@arm.linux.org.uk>
Date: Sat, 31 Oct 2009 16:52:16 +0000
Subject: [PATCH] ARM: dma-mapping: fix for speculative prefetching

ARMv6 and ARMv7 CPUs can perform speculative prefetching, which makes
DMA cache coherency handling slightly more interesting.  Rather than
being able to rely upon the CPU not accessing the DMA buffer until DMA
has completed, we now must expect that the cache could be loaded with
possibly stale data from the DMA buffer.

Where DMA involves data being transferred to the device, we clean the
cache before handing the buffer over for DMA; otherwise we invalidate
it, so that dirty cache lines cannot be written back on top of the
data the device places in memory.  On DMA completion, if data was
transferred from the device, we invalidate the buffer again to discard
any cache lines speculatively prefetched while the DMA was in
progress.
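
To illustrate the resulting policy in plain C (a minimal sketch only:
map_for_dma()/unmap_from_dma() and the cache_*() helpers are made-up
stand-ins for dmac_map_area()/dmac_unmap_area() and the inner/outer
cache primitives, not kernel API):

	enum dma_data_direction {
		DMA_BIDIRECTIONAL,
		DMA_TO_DEVICE,
		DMA_FROM_DEVICE,
	};

	/* stand-ins for the real clean/invalidate operations */
	static void cache_clean(const void *buf, unsigned long len) { }
	static void cache_invalidate(const void *buf, unsigned long len) { }

	/* CPU hands the buffer over to the device (map) */
	static void map_for_dma(const void *buf, unsigned long len,
				enum dma_data_direction dir)
	{
		if (dir == DMA_FROM_DEVICE)
			/* discard dirty lines so they cannot be
			   written back over the device's data */
			cache_invalidate(buf, len);
		else
			/* make the CPU's writes visible to the device */
			cache_clean(buf, len);
	}

	/* device hands the buffer back to the CPU (unmap) */
	static void unmap_from_dma(const void *buf, unsigned long len,
				   enum dma_data_direction dir)
	{
		if (dir != DMA_TO_DEVICE)
			/* discard lines the CPU speculatively
			   prefetched while the DMA was in progress */
			cache_invalidate(buf, len);
	}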

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
---
 arch/arm/mm/cache-v6.S    | 10 +++---
 arch/arm/mm/cache-v7.S    | 10 +++---
 arch/arm/mm/dma-mapping.c | 68 +++++++++++++++++----------------------
 3 files changed, 42 insertions(+), 46 deletions(-)

diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index a11934e53fbd..9d89c67a1cc3 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -271,10 +271,9 @@ ENTRY(v6_dma_flush_range)
  */
 ENTRY(v6_dma_map_area)
 	add	r1, r1, r0
-	cmp	r2, #DMA_TO_DEVICE
-	beq	v6_dma_clean_range
-	bcs	v6_dma_inv_range
-	b	v6_dma_flush_range
+	teq	r2, #DMA_FROM_DEVICE
+	beq	v6_dma_inv_range
+	b	v6_dma_clean_range
 ENDPROC(v6_dma_map_area)
 
 /*
@@ -284,6 +283,9 @@ ENDPROC(v6_dma_map_area)
  *	- dir	- DMA direction
  */
 ENTRY(v6_dma_unmap_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v6_dma_inv_range
 	mov	pc, lr
 ENDPROC(v6_dma_unmap_area)
 
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index b1cd0fd91207..bcd64f265870 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -279,10 +279,9 @@ ENDPROC(v7_dma_flush_range)
  */
 ENTRY(v7_dma_map_area)
 	add	r1, r1, r0
-	cmp	r2, #DMA_TO_DEVICE
-	beq	v7_dma_clean_range
-	bcs	v7_dma_inv_range
-	b	v7_dma_flush_range
+	teq	r2, #DMA_FROM_DEVICE
+	beq	v7_dma_inv_range
+	b	v7_dma_clean_range
 ENDPROC(v7_dma_map_area)
 
 /*
@@ -292,6 +291,9 @@ ENDPROC(v7_dma_map_area)
  *	- dir	- DMA direction
  */
 ENTRY(v7_dma_unmap_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v7_dma_inv_range
 	mov	pc, lr
 ENDPROC(v7_dma_unmap_area)
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index efa8efa33f5e..64daef2173bd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -404,34 +404,22 @@ EXPORT_SYMBOL(dma_free_coherent);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-static void dma_cache_maint(const void *start, size_t size, int direction)
-{
-	void (*outer_op)(unsigned long, unsigned long);
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
-
-	outer_op(__pa(start), __pa(start) + size);
-}
-
 void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
 	enum dma_data_direction dir)
 {
+	unsigned long paddr;
+
 	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
 
 	dmac_map_area(kaddr, size, dir);
-	dma_cache_maint(kaddr, size, dir);
+
+	paddr = __pa(kaddr);
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 EXPORT_SYMBOL(___dma_single_cpu_to_dev);
 
@@ -440,6 +428,13 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
 {
 	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
 
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE) {
+		unsigned long paddr = __pa(kaddr);
+		outer_inv_range(paddr, paddr + size);
+	}
+
 	dmac_unmap_area(kaddr, size, dir);
 }
 EXPORT_SYMBOL(___dma_single_dev_to_cpu);
@@ -487,32 +482,29 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr;
-	void (*outer_op)(unsigned long, unsigned long);
-
-	switch (dir) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
 
 	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
 
 	paddr = page_to_phys(page) + off;
-	outer_op(paddr, paddr + size);
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
 void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
+	unsigned long paddr = page_to_phys(page) + off;
+
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE)
+		outer_inv_range(paddr, paddr + size);
+
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
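
As an aside, the driver-visible sequence that ends up in these hooks
looks like this on the receive path (an illustrative sketch: my_rx(),
dev, buf and len are hypothetical, while dma_map_single(),
dma_unmap_single() and dma_mapping_error() are the kernel's real
streaming DMA API):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int my_rx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* program the device with 'handle' and wait for the
		   transfer; while it runs, the CPU may speculatively
		   prefetch stale lines from buf into the cache */

		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);

		/* the unmap invalidated any such lines, so the CPU
		   now sees the data the device wrote */
		return 0;
	}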