gpu/drm, x86, PAT: routine to keep identity map in sync
Add a function to check and keep identity maps in sync, when changing any memory type. One of the follow-on patches will also use this routine. Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Cc: Dave Airlie <airlied@redhat.com> Cc: Jesse Barnes <jbarnes@virtuousgeek.org> Cc: Eric Anholt <eric@anholt.net> Cc: Keith Packard <keithp@keithp.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
4ab0d47d0a
commit
7880f74645
2 changed files with 32 additions and 17 deletions
|
@ -19,4 +19,7 @@ extern int free_memtype(u64 start, u64 end);
|
||||||
|
|
||||||
extern void pat_disable(char *reason);
|
extern void pat_disable(char *reason);
|
||||||
|
|
||||||
|
extern int kernel_map_sync_memtype(u64 base, unsigned long size,
|
||||||
|
unsigned long flag);
|
||||||
|
|
||||||
#endif /* _ASM_X86_PAT_H */
|
#endif /* _ASM_X86_PAT_H */
|
||||||
|
|
|
@ -624,6 +624,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
|
||||||
free_memtype(addr, addr + size);
|
free_memtype(addr, addr + size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Change the memory type for the physial address range in kernel identity
|
||||||
|
* mapping space if that range is a part of identity map.
|
||||||
|
*/
|
||||||
|
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
|
||||||
|
{
|
||||||
|
unsigned long id_sz;
|
||||||
|
|
||||||
|
if (!pat_enabled || base >= __pa(high_memory))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
id_sz = (__pa(high_memory) < base + size) ?
|
||||||
|
__pa(high_memory) - base :
|
||||||
|
size;
|
||||||
|
|
||||||
|
if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
|
||||||
|
printk(KERN_INFO
|
||||||
|
"%s:%d ioremap_change_attr failed %s "
|
||||||
|
"for %Lx-%Lx\n",
|
||||||
|
current->comm, current->pid,
|
||||||
|
cattr_name(flags),
|
||||||
|
base, (unsigned long long)(base + size));
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Internal interface to reserve a range of physical memory with prot.
|
* Internal interface to reserve a range of physical memory with prot.
|
||||||
* Reserved non RAM regions only and after successful reserve_memtype,
|
* Reserved non RAM regions only and after successful reserve_memtype,
|
||||||
|
@ -633,7 +660,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
|
||||||
int strict_prot)
|
int strict_prot)
|
||||||
{
|
{
|
||||||
int is_ram = 0;
|
int is_ram = 0;
|
||||||
int id_sz, ret;
|
int ret;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
|
unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
|
||||||
|
|
||||||
|
@ -670,23 +697,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
|
||||||
flags);
|
flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Need to keep identity mapping in sync */
|
if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
|
||||||
if (paddr >= __pa(high_memory))
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
id_sz = (__pa(high_memory) < paddr + size) ?
|
|
||||||
__pa(high_memory) - paddr :
|
|
||||||
size;
|
|
||||||
|
|
||||||
if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
|
|
||||||
free_memtype(paddr, paddr + size);
|
free_memtype(paddr, paddr + size);
|
||||||
printk(KERN_ERR
|
|
||||||
"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
|
|
||||||
"for %Lx-%Lx\n",
|
|
||||||
current->comm, current->pid,
|
|
||||||
cattr_name(flags),
|
|
||||||
(unsigned long long)paddr,
|
|
||||||
(unsigned long long)(paddr + size));
|
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
|
Loading…
Reference in a new issue