x86: Use new cache mode type in mm/ioremap.c
Instead of directly using the cache mode bits in the pte, switch to using
the cache mode type.

Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-13-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent c06814d841
commit b14097bd91

4 changed files with 44 additions and 37 deletions
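The conversion runs through the cachemode2protval() and pgprot2cachemode() helpers that appear throughout the diff below: callers now pass an abstract enum page_cache_mode, and the translation to hardware PTE cache bits happens in one place. As a rough illustration of that idea, here is a standalone sketch (not kernel code): the bit values, table contents, and the protval2cachemode() helper name are simplified assumptions, whereas in the kernel the lookup tables are set up to match the PAT MSR programming.

/*
 * Standalone sketch of the pcm <-> pte-bit translation (illustration
 * only, not kernel code).  The bit positions match the classic PWT/PCD
 * layout; the table contents are placeholder assumptions standing in
 * for the kernel's PAT-dependent lookup arrays.
 */
#include <stdio.h>

#define _PAGE_PWT		(1UL << 3)
#define _PAGE_PCD		(1UL << 4)
#define _PAGE_CACHE_MASK	(_PAGE_PWT | _PAGE_PCD)

enum page_cache_mode {
	_PAGE_CACHE_MODE_WB,
	_PAGE_CACHE_MODE_WC,
	_PAGE_CACHE_MODE_UC_MINUS,
	_PAGE_CACHE_MODE_UC,
	_PAGE_CACHE_MODE_NUM
};

/* cache mode -> pte protection bits (one entry per mode) */
static const unsigned long pcm2prot[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB]       = 0,
	[_PAGE_CACHE_MODE_WC]       = _PAGE_PWT,
	[_PAGE_CACHE_MODE_UC_MINUS] = _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC]       = _PAGE_PCD | _PAGE_PWT,
};

static unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	return pcm2prot[pcm];
}

static enum page_cache_mode protval2cachemode(unsigned long prot)
{
	enum page_cache_mode pcm;

	/* reverse lookup over the same table */
	for (pcm = 0; pcm < _PAGE_CACHE_MODE_NUM; pcm++)
		if (pcm2prot[pcm] == (prot & _PAGE_CACHE_MASK))
			return pcm;
	return _PAGE_CACHE_MODE_UC_MINUS;	/* conservative fallback */
}

int main(void)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	unsigned long prot = cachemode2protval(pcm);

	printf("WC -> pte bits 0x%lx -> mode %d\n",
	       prot, protval2cachemode(prot));
	return 0;
}

The benefit of routing everything through the type is that the translation can be adjusted in one spot when the PAT layout differs from the default, instead of each caller hard-coding PWT/PCD bit combinations in the pte, which is the point of the commit message above.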
@@ -314,7 +314,7 @@ extern void *xlate_dev_mem_ptr(unsigned long phys);
 extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-				unsigned long prot_val);
+				enum page_cache_mode pcm);
 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
 
 extern bool is_early_ioremap_ptep(pte_t *ptep);
@@ -17,7 +17,7 @@ extern int reserve_memtype(u64 start, u64 end,
 extern int free_memtype(u64 start, u64 end);
 
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
-				unsigned long flag);
+				enum page_cache_mode pcm);
 
 int io_reserve_memtype(resource_size_t start, resource_size_t end,
 			enum page_cache_mode *pcm);
@@ -29,20 +29,20 @@
  * conflicts.
  */
 int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-			unsigned long prot_val)
+			enum page_cache_mode pcm)
 {
 	unsigned long nrpages = size >> PAGE_SHIFT;
 	int err;
 
-	switch (prot_val) {
-	case _PAGE_CACHE_UC:
+	switch (pcm) {
+	case _PAGE_CACHE_MODE_UC:
 	default:
 		err = _set_memory_uc(vaddr, nrpages);
 		break;
-	case _PAGE_CACHE_WC:
+	case _PAGE_CACHE_MODE_WC:
 		err = _set_memory_wc(vaddr, nrpages);
 		break;
-	case _PAGE_CACHE_WB:
+	case _PAGE_CACHE_MODE_WB:
 		err = _set_memory_wb(vaddr, nrpages);
 		break;
 	}
@@ -75,13 +75,14 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
  * caller shouldn't need to know that small detail.
  */
 static void __iomem *__ioremap_caller(resource_size_t phys_addr,
-		unsigned long size, unsigned long prot_val, void *caller)
+		unsigned long size, enum page_cache_mode pcm, void *caller)
 {
 	unsigned long offset, vaddr;
 	resource_size_t pfn, last_pfn, last_addr;
 	const resource_size_t unaligned_phys_addr = phys_addr;
 	const unsigned long unaligned_size = size;
 	struct vm_struct *area;
+	enum page_cache_mode new_pcm;
 	unsigned long new_prot_val;
 	pgprot_t prot;
 	int retval;
@@ -134,39 +135,42 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
-						prot_val, &new_prot_val);
+					cachemode2protval(pcm), &new_prot_val);
 	if (retval) {
 		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
 		return NULL;
 	}
 
-	if (prot_val != new_prot_val) {
-		if (!is_new_memtype_allowed(phys_addr, size,
-				pgprot2cachemode(__pgprot(prot_val)),
-				pgprot2cachemode(__pgprot(new_prot_val)))) {
+	new_pcm = pgprot2cachemode(__pgprot(new_prot_val));
+
+	if (pcm != new_pcm) {
+		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
 			printk(KERN_ERR
-		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
+		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
 				(unsigned long long)phys_addr,
 				(unsigned long long)(phys_addr + size),
-				prot_val, new_prot_val);
+				pcm, new_pcm);
 			goto err_free_memtype;
 		}
-		prot_val = new_prot_val;
+		pcm = new_pcm;
 	}
 
-	switch (prot_val) {
-	case _PAGE_CACHE_UC:
+	prot = PAGE_KERNEL_IO;
+	switch (pcm) {
+	case _PAGE_CACHE_MODE_UC:
 	default:
-		prot = PAGE_KERNEL_IO_NOCACHE;
+		prot = __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_UC));
 		break;
-	case _PAGE_CACHE_UC_MINUS:
-		prot = PAGE_KERNEL_IO_UC_MINUS;
+	case _PAGE_CACHE_MODE_UC_MINUS:
+		prot = __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
 		break;
-	case _PAGE_CACHE_WC:
-		prot = PAGE_KERNEL_IO_WC;
+	case _PAGE_CACHE_MODE_WC:
+		prot = __pgprot(pgprot_val(prot) |
+				cachemode2protval(_PAGE_CACHE_MODE_WC));
 		break;
-	case _PAGE_CACHE_WB:
-		prot = PAGE_KERNEL_IO;
+	case _PAGE_CACHE_MODE_WB:
 		break;
 	}
 
@@ -179,7 +183,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;
 
-	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
+	if (kernel_map_sync_memtype(phys_addr, size, pcm))
 		goto err_free_area;
 
 	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
@@ -228,14 +232,14 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
 	/*
 	 * Ideally, this should be:
-	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+	 *	pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
 	 *
 	 * Till we fix all X drivers to use ioremap_wc(), we will use
 	 * UC MINUS.
 	 */
-	unsigned long val = _PAGE_CACHE_UC_MINUS;
+	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
-	return __ioremap_caller(phys_addr, size, val,
+	return __ioremap_caller(phys_addr, size, pcm,
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_nocache);
@@ -253,7 +257,7 @@ EXPORT_SYMBOL(ioremap_nocache);
 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
 	if (pat_enabled)
-		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
 					__builtin_return_address(0));
 	else
 		return ioremap_nocache(phys_addr, size);
@@ -262,7 +266,7 @@ EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
@@ -270,7 +274,8 @@ EXPORT_SYMBOL(ioremap_cache);
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
 				unsigned long prot_val)
 {
-	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
+	return __ioremap_caller(phys_addr, size,
+				pgprot2cachemode(__pgprot(prot_val)),
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_prot);
@@ -462,7 +462,7 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
 	if (!is_new_memtype_allowed(start, size, req_type, new_type))
 		goto out_free;
 
-	if (kernel_map_sync_memtype(start, size, new_prot) < 0)
+	if (kernel_map_sync_memtype(start, size, new_type) < 0)
 		goto out_free;
 
 	*type = new_type;
@@ -560,7 +560,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 * Change the memory type for the physial address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
-int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+int kernel_map_sync_memtype(u64 base, unsigned long size,
+			    enum page_cache_mode pcm)
 {
 	unsigned long id_sz;
 
@@ -578,11 +579,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
 				__pa(high_memory) - base :
 				size;
 
-	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
 		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
 			"for [mem %#010Lx-%#010Lx]\n",
 			current->comm, current->pid,
-			cattr_name(flags),
+			cattr_name(cachemode2protval(pcm)),
 			base, (unsigned long long)(base + size-1));
 		return -EINVAL;
 	}
@@ -656,7 +657,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 					 flags);
 	}
 
-	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
+	if (kernel_map_sync_memtype(paddr, size,
+				    pgprot2cachemode(__pgprot(flags))) < 0) {
 		free_memtype(paddr, paddr + size);
 		return -EINVAL;
 	}