sh: Optimized copy_{to,from}_user_page() for SH-4.
This moves copy_{to,from}_user_page() out-of-line on SH-4 and converts them to the kmap_coherent() API. Based on the MIPS implementation.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 7747b9a493
commit ba1789efea

2 changed files with 43 additions and 27 deletions
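For context on how these routines are reached: copy_{to,from}_user_page() are the arch hooks the core VM uses when it writes into (or reads from) another task's pages through a kernel mapping, most visibly on the ptrace path via access_process_vm(). Below is a condensed, illustrative sketch of that caller pattern; poke_user_page() and its locals are hypothetical names for illustration, not code from this commit.

/* Hypothetical access_process_vm()-style caller, heavily condensed;
 * page lookup, locking and error handling are elided. */
static void poke_user_page(struct vm_area_struct *vma, struct page *page,
			   unsigned long addr, void *buf, int bytes, int write)
{
	void *maddr = kmap(page);
	unsigned long offset = addr & (PAGE_SIZE - 1);

	if (write) {
		/* On SH-4 this now resolves to the out-of-line,
		 * kmap_coherent()-based version added below. */
		copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
		set_page_dirty_lock(page);
	} else {
		copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes);
	}

	kunmap(page);
}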
arch/sh/mm/pg-sh4.c
@@ -52,33 +52,39 @@ static inline void kunmap_coherent(struct page *page)
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
 	__set_bit(PG_mapped, &page->flags);
-	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
-		clear_page(to);
-	else {
-		void *vto = kmap_coherent(page, address);
-		__clear_user_page(vto, to);
-		kunmap_coherent(vto);
-	}
+
+	clear_page(to);
+	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
+		__flush_wback_region(to, PAGE_SIZE);
 }
 
-/*
- * copy_user_page
- * @to: P1 address
- * @from: P1 address
- * @address: U0 address to be mapped
- * @page: page (virt_to_page(to))
- */
-void copy_user_page(void *to, void *from, unsigned long address,
-		    struct page *page)
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, const void *src,
+		       unsigned long len)
 {
+	void *vto;
+
 	__set_bit(PG_mapped, &page->flags);
-	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
-		copy_page(to, from);
-	else {
-		void *vfrom = kmap_coherent(page, address);
-		__copy_user_page(vfrom, from, to);
-		kunmap_coherent(vfrom);
-	}
+
+	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+	memcpy(vto, src, len);
+	kunmap_coherent(vto);
+
+	if (vma->vm_flags & VM_EXEC)
+		flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+			 unsigned long vaddr, void *dst, const void *src,
+			 unsigned long len)
+{
+	void *vfrom;
+
+	__set_bit(PG_mapped, &page->flags);
+
+	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+	memcpy(dst, vfrom, len);
+	kunmap_coherent(vfrom);
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
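The CACHE_ALIAS tests above decide whether the kernel-side and user-side mappings of a page land on the same colour of SH-4's virtually-indexed data cache; only when the colours differ can stale aliased lines exist, requiring a writeback. A minimal, runnable userspace sketch of that arithmetic follows. The 0x3000 alias mask and the sample addresses are purely illustrative; the real mask is probed from the CPU, not hardcoded like this.

#include <stdio.h>

#define PAGE_MASK	(~0xfffUL)	/* 4 KiB pages, illustrative */
#define CACHE_ALIAS	0x3000UL	/* illustrative alias mask, not the real SH-4 value */

static int same_cache_colour(unsigned long uaddr, unsigned long kaddr)
{
	/* The test from clear_user_page(): both mappings hit the same
	 * cache lines only when these index bits agree. */
	return (((uaddr & PAGE_MASK) ^ kaddr) & CACHE_ALIAS) == 0;
}

int main(void)
{
	unsigned long u  = 0x10003000UL;	/* user-space address */
	unsigned long k1 = 0x8c003000UL;	/* same colour as u */
	unsigned long k2 = 0x8c001000UL;	/* different colour */

	printf("u vs k1: %s\n", same_cache_colour(u, k1) ?
	       "coherent, no flush needed" : "aliased, flush needed");
	printf("u vs k2: %s\n", same_cache_colour(u, k2) ?
	       "coherent, no flush needed" : "aliased, flush needed");

	/* The offset arithmetic used by copy_{to,from}_user_page(): */
	unsigned long vaddr = 0x10003abcUL;
	printf("in-page offset of %#lx is %#lx\n", vaddr, vaddr & ~PAGE_MASK);
	return 0;
}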
include/asm-sh/cacheflush.h
@@ -43,21 +43,31 @@ extern void __flush_purge_region(void *start, int size);
 extern void __flush_invalidate_region(void *start, int size);
 #endif
 
-#define flush_cache_vmap(start, end)		flush_cache_all()
-#define flush_cache_vunmap(start, end)		flush_cache_all()
+#ifdef CONFIG_CPU_SH4
+extern void copy_to_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len);
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+extern void copy_from_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len);
+#else
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
 	do {							\
 		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 		flush_icache_user_range(vma, page, vaddr, len); \
 	} while (0)
 
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
 	do {							\
 		flush_cache_page(vma, vaddr, page_to_pfn(page));\
 		memcpy(dst, src, len);				\
 	} while (0)
+#endif
+
+#define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vunmap(start, end)		flush_cache_all()
 
 #define HAVE_ARCH_UNMAPPED_AREA
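The generic fallback macros kept in the #else branch encode the required ordering: flush the aliased dcache lines, do the copy, then make the icache coherent for executable mappings. A rough userspace analogue of that last step is sketched below, using the real GCC/Clang __builtin___clear_cache() builtin; the mmap'd buffer and payload bytes are purely illustrative.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	unsigned char payload[16] = { 0x90 };	/* stand-in for real opcodes */

	/* An anonymous executable mapping stands in for the target page. */
	unsigned char *dst = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memcpy(dst, payload, sizeof(payload));

	/* Userspace counterpart of flush_icache_user_range(): make sure the
	 * instruction cache sees the freshly written bytes before they
	 * could ever be executed. */
	__builtin___clear_cache((char *)dst, (char *)(dst + sizeof(payload)));

	puts("copied and made icache-coherent");
	munmap(dst, len);
	return 0;
}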