Merge branches 'x86/urgent' and 'x86/pat' into x86/core
Conflicts: arch/x86/include/asm/pat.h
commit 801c0be814
5 changed files with 82 additions and 22 deletions
@@ -24,7 +24,10 @@
 #include <asm/tlbflush.h>
 
 int
-is_io_mapping_possible(resource_size_t base, unsigned long size);
+reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot);
+
+void
+free_io_memtype(u64 base, unsigned long size);
 
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
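For orientation, the contract these two new declarations establish, as read from the implementation further down in this diff (the comments are editorial annotations, not part of the header):

/* Reserve a write-combining memtype for the I/O range [base, base + size).
 * Returns 0 on success and stores the protection to use for the mapping in
 * *prot (write-combining when PAT grants it, an uncached protection
 * otherwise); returns -EINVAL if the range cannot be mapped or a suitable
 * memtype cannot be reserved. */
int
reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot);

/* Release a reservation made by reserve_io_memtype_wc(); a no-op when PAT
 * is disabled. */
void
free_io_memtype(u64 base, unsigned long size);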
@@ -15,4 +15,7 @@ extern int reserve_memtype(u64 start, u64 end,
                 unsigned long req_type, unsigned long *ret_type);
 extern int free_memtype(u64 start, u64 end);
 
+extern int kernel_map_sync_memtype(u64 base, unsigned long size,
+                unsigned long flag);
+
 #endif /* _ASM_X86_PAT_H */
@@ -21,13 +21,13 @@
 #include <linux/module.h>
 
 #ifdef CONFIG_X86_PAE
-int
+static int
 is_io_mapping_possible(resource_size_t base, unsigned long size)
 {
         return 1;
 }
 #else
-int
+static int
 is_io_mapping_possible(resource_size_t base, unsigned long size)
 {
         /* There is no way to map greater than 1 << 32 address without PAE */
@@ -38,6 +38,46 @@ is_io_mapping_possible(resource_size_t base, unsigned long size)
 }
 #endif
 
+int
+reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot)
+{
+        unsigned long ret_flag;
+
+        if (!is_io_mapping_possible(base, size))
+                goto out_err;
+
+        if (!pat_enabled) {
+                *prot = pgprot_noncached(PAGE_KERNEL);
+                return 0;
+        }
+
+        if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &ret_flag))
+                goto out_err;
+
+        if (ret_flag == _PAGE_CACHE_WB)
+                goto out_free;
+
+        if (kernel_map_sync_memtype(base, size, ret_flag))
+                goto out_free;
+
+        *prot = __pgprot(__PAGE_KERNEL | ret_flag);
+        return 0;
+
+out_free:
+        free_memtype(base, base + size);
+out_err:
+        return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(reserve_io_memtype_wc);
+
+void
+free_io_memtype(u64 base, unsigned long size)
+{
+        if (pat_enabled)
+                free_memtype(base, base + size);
+}
+EXPORT_SYMBOL_GPL(free_io_memtype);
+
 /* Map 'pfn' using fixed map 'type' and protections 'prot'
  */
 void *
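A minimal sketch of how a caller is expected to pair these two exports, assuming the declarations from the header hunk above (asm/iomap.h); the names my_aperture_init, my_aperture_exit and aperture_prot are hypothetical, not part of this commit:

#include <linux/types.h>
#include <asm/iomap.h>

static pgprot_t aperture_prot;

/* Hypothetical setup: reserve a WC memtype for a device aperture and keep
 * the protection to use for later iomap_atomic_prot_pfn() mappings. */
static int my_aperture_init(u64 base, unsigned long size)
{
        if (reserve_io_memtype_wc(base, size, &aperture_prot))
                return -EINVAL; /* range not mappable or reservation failed */
        return 0;
}

/* Hypothetical teardown: drop the reservation taken above. */
static void my_aperture_exit(u64 base, unsigned long size)
{
        free_io_memtype(base, size);
}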
@@ -633,6 +633,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
         free_memtype(addr, addr + size);
 }
 
+/*
+ * Change the memory type for the physial address range in kernel identity
+ * mapping space if that range is a part of identity map.
+ */
+int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+{
+        unsigned long id_sz;
+
+        if (!pat_enabled || base >= __pa(high_memory))
+                return 0;
+
+        id_sz = (__pa(high_memory) < base + size) ?
+                                __pa(high_memory) - base :
+                                size;
+
+        if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+                printk(KERN_INFO
+                        "%s:%d ioremap_change_attr failed %s "
+                        "for %Lx-%Lx\n",
+                        current->comm, current->pid,
+                        cattr_name(flags),
+                        base, (unsigned long long)(base + size));
+                return -EINVAL;
+        }
+        return 0;
+}
+
 /*
  * Internal interface to reserve a range of physical memory with prot.
  * Reserved non RAM regions only and after successful reserve_memtype,
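The new helper only adjusts the part of the range covered by the kernel identity mapping. A worked example of the id_sz clamp, with made-up numbers (896 MB of lowmem, so __pa(high_memory) == 0x38000000):

/*
 * base = 0x37f00000, size = 0x00200000 (2 MB straddling high_memory)
 * base + size = 0x38100000 > __pa(high_memory)
 *   => id_sz = 0x38000000 - 0x37f00000 = 0x00100000 (1 MB)
 *
 * Only that first 1 MB is handed to ioremap_change_attr(); a range that
 * starts at or above __pa(high_memory) returns 0 without doing any work,
 * as does the !pat_enabled case.
 */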
@@ -642,7 +669,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                 int strict_prot)
 {
         int is_ram = 0;
-        int id_sz, ret;
+        int ret;
         unsigned long flags;
         unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
@@ -679,23 +706,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                       flags);
         }
 
-        /* Need to keep identity mapping in sync */
-        if (paddr >= __pa(high_memory))
-                return 0;
-
-        id_sz = (__pa(high_memory) < paddr + size) ?
-                                __pa(high_memory) - paddr :
-                                size;
-
-        if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+        if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
                 free_memtype(paddr, paddr + size);
-                printk(KERN_ERR
-                        "%s:%d reserve_pfn_range ioremap_change_attr failed %s "
-                        "for %Lx-%Lx\n",
-                        current->comm, current->pid,
-                        cattr_name(flags),
-                        (unsigned long long)paddr,
-                        (unsigned long long)(paddr + size));
                 return -EINVAL;
         }
         return 0;
@@ -49,8 +49,9 @@ static inline struct io_mapping *
 io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
         struct io_mapping *iomap;
+        pgprot_t prot;
 
-        if (!is_io_mapping_possible(base, size))
+        if (!reserve_io_memtype_wc(base, size, &prot))
                 return NULL;
 
         iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
@@ -59,13 +60,14 @@ io_mapping_create_wc(resource_size_t base, unsigned long size)
 
         iomap->base = base;
         iomap->size = size;
-        iomap->prot = pgprot_writecombine(__pgprot(__PAGE_KERNEL));
+        iomap->prot = prot;
         return iomap;
 }
 
 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
+        free_io_memtype(mapping->base, mapping->size);
         kfree(mapping);
 }
 
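For context, a minimal sketch of the io_mapping lifecycle after this change; the driver-side names (gpu_aperture, my_gpu_init, my_gpu_fini) are hypothetical:

#include <linux/errno.h>
#include <linux/io-mapping.h>

static struct io_mapping *gpu_aperture;

static int my_gpu_init(resource_size_t base, unsigned long size)
{
        /* Now also reserves a WC memtype and records the resulting prot. */
        gpu_aperture = io_mapping_create_wc(base, size);
        if (!gpu_aperture)
                return -ENOMEM;
        return 0;
}

static void my_gpu_fini(void)
{
        /* Now also drops the memtype reservation before freeing. */
        io_mapping_free(gpu_aperture);
}

Individual pages of the aperture are still mapped through the existing accessors in this header, which pick up the prot recorded in the struct rather than a hard-coded write-combining protection.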