m68k: add cache support for V4e ColdFire cores running with MMU enabled

Add code to deal with instruction, data and branch caches of the V4e
ColdFire cores when they are running with the MMU enabled.

This code is loosely based on Freescale's changes for the caches of the
V4e ColdFire in the 2.6.25 kernel BSP. That code was originally by
Kurt Mahan <kmahan@freescale.com> (now <kmahan@xmission.com>).

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Matt Waddel <mwaddel@yahoo.com>
Acked-by: Kurt Mahan <kmahan@xmission.com>
This commit is contained in:
Greg Ungerer 2011-10-17 14:38:09 +10:00
parent 78d705e3be
commit ae2eca724a
2 changed files with 104 additions and 8 deletions

View file

@ -2,23 +2,89 @@
#define _M68K_CACHEFLUSH_H #define _M68K_CACHEFLUSH_H
#include <linux/mm.h> #include <linux/mm.h>
#ifdef CONFIG_COLDFIRE
#include <asm/mcfsim.h>
#endif
/* cache code */ /* cache code */
#define FLUSH_I_AND_D (0x00000808) #define FLUSH_I_AND_D (0x00000808)
#define FLUSH_I (0x00000008) #define FLUSH_I (0x00000008)
#ifndef ICACHE_MAX_ADDR
#define ICACHE_MAX_ADDR 0
#define ICACHE_SET_MASK 0
#define DCACHE_MAX_ADDR 0
#define DCACHE_SETMASK 0
#endif
/*
 * flush_cf_icache - push/invalidate V4e ColdFire instruction cache lines
 * @start: first cache set address to flush
 * @end:   last cache set address to flush (inclusive)
 *
 * Walks the given range of instruction cache sets, issuing four cpushl
 * instructions per iteration with the address incremented by 1 between
 * them (presumably stepping the ways of each set -- confirm against the
 * V4e cache geometry).  The asm itself advances the pointer by 3, so the
 * loop adds (0x10 - 3) to move one full 0x10 stride per iteration.
 */
static inline void flush_cf_icache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		/*
		 * "+a" ties input and output to one address register.
		 * The template only references %0 as an address, so the
		 * original separate "=a" output / "a" input pair was not
		 * guaranteed by GCC to land in the same register, leaving
		 * %0 potentially uninitialized on entry.
		 */
		__asm__ __volatile__ (
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)"
			: "+a" (set));
	}
}
/*
 * flush_cf_dcache - push/invalidate V4e ColdFire data cache lines
 * @start: first cache set address to flush
 * @end:   last cache set address to flush (inclusive)
 *
 * Same walk as flush_cf_icache but targeting the data cache (%dc).
 * Four cpushl per iteration, address bumped by 1 between them; the loop
 * adds (0x10 - 3) on top of the asm's +3 to advance one 0x10 stride.
 */
static inline void flush_cf_dcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		/*
		 * "+a": read-write operand.  A separate "=a" output and
		 * "a" input are not guaranteed the same register by GCC,
		 * yet the template reads %0 as an address -- tie them.
		 */
		__asm__ __volatile__ (
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)"
			: "+a" (set));
	}
}
/*
 * flush_cf_bcache - push/invalidate V4e ColdFire branch/both cache lines
 * @start: first cache set address to flush
 * @end:   last cache set address to flush (inclusive)
 *
 * Same walk as flush_cf_icache/flush_cf_dcache but using the %bc cache
 * specifier (both instruction and data caches on ColdFire -- confirm
 * against the V4e programmer's reference).  Four cpushl per iteration;
 * the loop's (0x10 - 3) plus the asm's +3 advances one 0x10 stride.
 */
static inline void flush_cf_bcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		/*
		 * "+a": read-write operand.  A separate "=a" output and
		 * "a" input are not guaranteed the same register by GCC,
		 * yet the template reads %0 as an address -- tie them.
		 */
		__asm__ __volatile__ (
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)"
			: "+a" (set));
	}
}
/* /*
* Cache handling functions * Cache handling functions
*/ */
static inline void flush_icache(void) static inline void flush_icache(void)
{ {
if (CPU_IS_040_OR_060) if (CPU_IS_COLDFIRE) {
flush_cf_icache(0, ICACHE_MAX_ADDR);
} else if (CPU_IS_040_OR_060) {
asm volatile ( "nop\n" asm volatile ( "nop\n"
" .chip 68040\n" " .chip 68040\n"
" cpusha %bc\n" " cpusha %bc\n"
" .chip 68k"); " .chip 68k");
else { } else {
unsigned long tmp; unsigned long tmp;
asm volatile ( "movec %%cacr,%0\n" asm volatile ( "movec %%cacr,%0\n"
" or.w %1,%0\n" " or.w %1,%0\n"
@ -51,12 +117,14 @@ extern void cache_push_v(unsigned long vaddr, int len);
process changes. */ process changes. */
#define __flush_cache_all() \ #define __flush_cache_all() \
({ \ ({ \
if (CPU_IS_040_OR_060) \ if (CPU_IS_COLDFIRE) { \
flush_cf_dcache(0, DCACHE_MAX_ADDR); \
} else if (CPU_IS_040_OR_060) { \
__asm__ __volatile__("nop\n\t" \ __asm__ __volatile__("nop\n\t" \
".chip 68040\n\t" \ ".chip 68040\n\t" \
"cpusha %dc\n\t" \ "cpusha %dc\n\t" \
".chip 68k"); \ ".chip 68k"); \
else { \ } else { \
unsigned long _tmp; \ unsigned long _tmp; \
__asm__ __volatile__("movec %%cacr,%0\n\t" \ __asm__ __volatile__("movec %%cacr,%0\n\t" \
"orw %1,%0\n\t" \ "orw %1,%0\n\t" \
@ -112,7 +180,17 @@ static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vm
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */ /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_page_to_ram(void *vaddr) static inline void __flush_page_to_ram(void *vaddr)
{ {
if (CPU_IS_040_OR_060) { if (CPU_IS_COLDFIRE) {
unsigned long addr, start, end;
addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
start = addr & ICACHE_SET_MASK;
end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
if (start > end) {
flush_cf_bcache(0, end);
end = ICACHE_MAX_ADDR;
}
flush_cf_bcache(start, end);
} else if (CPU_IS_040_OR_060) {
__asm__ __volatile__("nop\n\t" __asm__ __volatile__("nop\n\t"
".chip 68040\n\t" ".chip 68040\n\t"
"cpushp %%bc,(%0)\n\t" "cpushp %%bc,(%0)\n\t"

View file

@ -74,8 +74,16 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */ /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
void flush_icache_range(unsigned long address, unsigned long endaddr) void flush_icache_range(unsigned long address, unsigned long endaddr)
{ {
if (CPU_IS_COLDFIRE) {
if (CPU_IS_040_OR_060) { unsigned long start, end;
start = address & ICACHE_SET_MASK;
end = endaddr & ICACHE_SET_MASK;
if (start > end) {
flush_cf_icache(0, end);
end = ICACHE_MAX_ADDR;
}
flush_cf_icache(start, end);
} else if (CPU_IS_040_OR_060) {
address &= PAGE_MASK; address &= PAGE_MASK;
do { do {
@ -100,7 +108,17 @@ EXPORT_SYMBOL(flush_icache_range);
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len) unsigned long addr, int len)
{ {
if (CPU_IS_040_OR_060) { if (CPU_IS_COLDFIRE) {
unsigned long start, end;
start = addr & ICACHE_SET_MASK;
end = (addr + len) & ICACHE_SET_MASK;
if (start > end) {
flush_cf_icache(0, end);
end = ICACHE_MAX_ADDR;
}
flush_cf_icache(start, end);
} else if (CPU_IS_040_OR_060) {
asm volatile ("nop\n\t" asm volatile ("nop\n\t"
".chip 68040\n\t" ".chip 68040\n\t"
"cpushp %%bc,(%0)\n\t" "cpushp %%bc,(%0)\n\t"