Merge branch 'page_colouring_despair'
commit 541c547731

5 changed files with 73 additions and 137 deletions

@@ -150,48 +150,3 @@ ENTRY(__clear_user)
	.long	8b, .Lbad_clear_user
	.long	9b, .Lbad_clear_user
	.previous

#if defined(CONFIG_CPU_SH4)
/*
 * __clear_user_page
 * @to: P3 address (with same color)
 * @orig_to: P1 address
 *
 * void __clear_user_page(void *to, void *orig_to)
 */

/*
 * r0 --- scratch
 * r4 --- to
 * r5 --- orig_to
 * r6 --- to + PAGE_SIZE
 */
ENTRY(__clear_user_page)
	mov.l	.Lpsz,r0
	mov	r4,r6
	add	r0,r6
	mov	#0,r0
	!
1:	ocbi	@r5
	add	#32,r5
	movca.l	r0,@r4
	mov	r4,r1
	add	#32,r4
	mov.l	r0,@-r4
	mov.l	r0,@-r4
	mov.l	r0,@-r4
	mov.l	r0,@-r4
	mov.l	r0,@-r4
	mov.l	r0,@-r4
	mov.l	r0,@-r4
	add	#28,r4
	cmp/eq	r6,r4
	bf/s	1b
	ocbwb	@r1
	!
	rts
	nop
.Lpsz:	.long	PAGE_SIZE

#endif

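
Note on the removed routine above: SH-4 caches are virtually indexed, so two mappings of the same physical page only share cache lines when their addresses agree in the index bits above the page offset (the page "colour"); the hunk's "@to: P3 address (with same color)" comment refers to exactly that. A minimal standalone sketch of the colour test follows, assuming an illustrative CACHE_ALIAS value (the kernel derives the real mask from the probed cache geometry):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative value only; the real kernel computes CACHE_ALIAS from the
 * detected cache size, line size and associativity. */
#define CACHE_ALIAS	0x3000UL	/* index bits above the page offset */

/* Two virtual addresses of the same physical page hit the same cache lines
 * only when they share the same colour, i.e. the same CACHE_ALIAS bits. */
static bool same_colour(uintptr_t a, uintptr_t b)
{
	return ((a ^ b) & CACHE_ALIAS) == 0;
}

int main(void)
{
	printf("%d\n", same_colour(0x10001000UL, 0x00406000UL));	/* 0: different colour */
	printf("%d\n", same_colour(0x10001000UL, 0x00409000UL));	/* 1: same colour */
	return 0;
}

The clear_user_page()/copy_user_page() code in the pg-sh4.c hunk further below applies the same XOR-and-mask test before deciding whether extra cache maintenance is needed.
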
@@ -68,67 +68,6 @@ ENTRY(copy_page_slow)
	rts
	nop

#if defined(CONFIG_CPU_SH4)
/*
 * __copy_user_page
 * @to: P1 address (with same color)
 * @from: P1 address
 * @orig_to: P1 address
 *
 * void __copy_user_page(void *to, void *from, void *orig_to)
 */

/*
 * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
 * r8 --- from + PAGE_SIZE
 * r9 --- orig_to
 * r10 --- to
 * r11 --- from
 */
ENTRY(__copy_user_page)
	mov.l	r8,@-r15
	mov.l	r9,@-r15
	mov.l	r10,@-r15
	mov.l	r11,@-r15
	mov	r4,r10
	mov	r5,r11
	mov	r6,r9
	mov	r5,r8
	mov.l	.Lpsz,r0
	add	r0,r8
	!
1:	ocbi	@r9
	add	#32,r9
	mov.l	@r11+,r0
	mov.l	@r11+,r1
	mov.l	@r11+,r2
	mov.l	@r11+,r3
	mov.l	@r11+,r4
	mov.l	@r11+,r5
	mov.l	@r11+,r6
	mov.l	@r11+,r7
	movca.l	r0,@r10
	mov	r10,r0
	add	#32,r10
	mov.l	r7,@-r10
	mov.l	r6,@-r10
	mov.l	r5,@-r10
	mov.l	r4,@-r10
	mov.l	r3,@-r10
	mov.l	r2,@-r10
	mov.l	r1,@-r10
	ocbwb	@r0
	cmp/eq	r11,r8
	bf/s	1b
	add	#28,r10
	!
	mov.l	@r15+,r11
	mov.l	@r15+,r10
	mov.l	@r15+,r9
	mov.l	@r15+,r8
	rts
	nop
#endif
	.align 2
.Lpsz:	.long	PAGE_SIZE
/*

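
Both removed assembly routines walk the page one 32-byte cache line per loop iteration: ocbi invalidates the line as seen through the user-coloured alias, movca.l allocates the destination line without fetching it from memory, and ocbwb writes the finished line back. The structure is roughly the following, as a compile-and-run sketch only; the cache-control instructions have no C equivalent, so hypothetical stub helpers stand in for them:

#include <string.h>

#define LINE_BYTES	32	/* SH-4 cache line size stepped by the loop */
#define PAGE_BYTES	4096	/* illustrative page size */

/* Hypothetical stand-ins for the ocbi/ocbwb cache-control instructions. */
static void cache_invalidate_line(void *p) { (void)p; /* ocbi  @p */ }
static void cache_writeback_line(void *p)  { (void)p; /* ocbwb @p */ }

static void copy_user_page_lines(void *to, const void *from, void *orig_to)
{
	unsigned long off;

	for (off = 0; off < PAGE_BYTES; off += LINE_BYTES) {
		/* Drop the stale line seen through the user-coloured alias. */
		cache_invalidate_line((char *)orig_to + off);
		/* Fill one line through the kernel-side mapping... */
		memcpy((char *)to + off, (const char *)from + off, LINE_BYTES);
		/* ...then push it to memory so the alias reads fresh data. */
		cache_writeback_line((char *)to + off);
	}
}

int main(void)
{
	static char src[PAGE_BYTES], dst[PAGE_BYTES];

	/* In the kernel, orig_to is a second mapping of the same page as dst;
	 * a single buffer is enough for this standalone model. */
	copy_user_page_lines(dst, src, dst);
	return 0;
}

The __clear_user_page variant has the same shape but stores zeroes from r0 instead of copying from a source page.
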
@@ -9,6 +9,8 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

@@ -50,35 +52,62 @@ static inline void kunmap_coherent(struct page *page)
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		clear_page(to);
	else {
		void *vto = kmap_coherent(page, address);
		__clear_user_page(vto, to);
		kunmap_coherent(vto);
	}

	clear_page(to);
	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
		__flush_wback_region(to, PAGE_SIZE);
}

/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	void *vto;

	__set_bit(PG_mapped, &page->flags);
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		void *vfrom = kmap_coherent(page, address);
		__copy_user_page(vfrom, from, to);
		kunmap_coherent(vfrom);
	}

	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(vto, src, len);
	kunmap_coherent(vto);

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	void *vfrom;

	__set_bit(PG_mapped, &page->flags);

	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
	memcpy(dst, vfrom, len);
	kunmap_coherent(vfrom);
}

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	__set_bit(PG_mapped, &to->flags);

	vto = kmap_atomic(to, KM_USER1);
	vfrom = kmap_coherent(from, vaddr);
	copy_page(vto, vfrom);
	kunmap_coherent(vfrom);

	if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */

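
The rewritten pg-sh4.c above drops the calls into the dedicated assembly: copy_to_user_page() and copy_from_user_page() take a temporary kernel mapping from kmap_coherent() whose colour matches the user address, so a plain memcpy() stays coherent with the user view and only an icache flush remains for executable mappings. kmap_coherent() itself is not part of this hunk; the colour-matching idea behind it can be modelled in a few lines (window base and alias mask below are made-up values, and actually mapping the page in and out is omitted):

#include <stdint.h>
#include <stdio.h>

/* Made-up constants for illustration; the kernel uses its fixmap area and
 * the probed cache geometry instead. */
#define CACHE_ALIAS		0x3000UL
#define COHERENT_WINDOW_BASE	0xff000000UL

/* Pick a slot in a reserved kernel window whose cache-index bits match the
 * user mapping, so both views of the page occupy the same cache lines. */
static uintptr_t coherent_slot(uintptr_t user_vaddr)
{
	return COHERENT_WINDOW_BASE + (user_vaddr & CACHE_ALIAS);
}

int main(void)
{
	uintptr_t uaddr = 0x0040b000UL;	/* arbitrary user-space page address */

	printf("user %#lx -> window slot %#lx\n",
	       (unsigned long)uaddr, (unsigned long)coherent_slot(uaddr));
	return 0;
}

copy_user_highpage() uses the same trick for the source page only, and falls back to an explicit __flush_wback_region() when the kmap_atomic() destination happens to land on a different colour than vaddr.
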
@@ -43,21 +43,31 @@ extern void __flush_purge_region(void *start, int size);
extern void __flush_invalidate_region(void *start, int size);
#endif

#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
#ifdef CONFIG_CPU_SH4
extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);
#else
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len); \
	} while (0)
#endif

#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()

#define HAVE_ARCH_UNMAPPED_AREA

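
In the cacheflush.h hunk above, SH-4 now gets real out-of-line copy_to_user_page()/copy_from_user_page() functions, while other CPUs keep the macro fallback that flushes around a plain memcpy(). The fallback keeps the usual do { ... } while (0) wrapper so the multi-statement macro still behaves as a single statement at the call site; a quick standalone illustration of why that wrapper matters (copy_buf is a made-up name):

#include <stdio.h>
#include <string.h>

/* Same shape as the fallback macros: several statements wrapped so the
 * expansion acts as exactly one statement. */
#define copy_buf(dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		printf("copied %zu bytes\n", (size_t)(len)); \
	} while (0)

int main(void)
{
	char dst[16], src[16] = "page colour";

	if (sizeof(dst) >= sizeof(src))
		copy_buf(dst, src, sizeof(src));	/* safe even without braces */
	else
		puts("destination too small");

	return 0;
}
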
@@ -73,10 +73,13 @@ extern void copy_page_nommu(void *to, void *from);
#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
struct page;
extern void clear_user_page(void *to, unsigned long address, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
extern void __clear_user_page(void *to, void *orig_to);
extern void __copy_user_page(void *to, void *from, void *orig_to);
struct vm_area_struct;
extern void clear_user_page(void *to, unsigned long address, struct page *page);
#ifdef CONFIG_CPU_SH4
extern void copy_user_highpage(struct page *to, struct page *from,
			       unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#endif
#else
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

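
The page.h hunk above switches the SH-4 declarations over to the new interface and defines __HAVE_ARCH_COPY_USER_HIGHPAGE, which tells the generic highmem code to use the architecture's copy_user_highpage() instead of its default. A tiny standalone model of that opt-out pattern (the function bodies here are obviously fake):

#include <stdio.h>

/* What the page.h hunk does: the architecture claims the hook... */
#define __HAVE_ARCH_COPY_USER_HIGHPAGE

/* ...so a generic header's fallback definition would be compiled out. */
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
static void copy_user_highpage(void) { puts("generic fallback"); }
#else
static void copy_user_highpage(void) { puts("arch-specific version"); }
#endif

int main(void)
{
	copy_user_highpage();	/* prints "arch-specific version" */
	return 0;
}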