Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: cpa, fix out of date comment
  KVM is not seen under X86 config with latest git (32 bit compile)
  x86: cpa: ensure page alignment
  x86: include proper prototypes for rodata_test
  x86: fix gart_iommu_init()
  x86: EFI set_memory_x()/set_memory_uc() fixes
  x86: make dump_pagetable() static
  x86: fix "BUG: sleeping function called from invalid context" in print_vma_addr()

commit 664a1566df
12 changed files with 46 additions and 13 deletions
@@ -21,6 +21,8 @@ config X86
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
+	select HAVE_KVM
+
 
 config GENERIC_LOCKBREAK
 	def_bool n
@@ -119,8 +121,6 @@ config ARCH_HAS_CPU_RELAX
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool X86_64
 
-	select HAVE_KVM
-
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y
 	depends on !SMP || !X86_VOYAGER
@@ -391,7 +391,7 @@ static void __init runtime_code_page_mkexec(void)
 		if (md->type != EFI_RUNTIME_SERVICES_CODE)
 			continue;
 
-		set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
+		set_memory_x(md->virt_addr, md->num_pages);
 	}
 }
@@ -434,7 +434,7 @@ void __init efi_enter_virtual_mode(void)
 		}
 
 		if (!(md->attribute & EFI_MEMORY_WB))
-			set_memory_uc(md->virt_addr, size);
+			set_memory_uc(md->virt_addr, md->num_pages);
 
 		systab = (u64) (unsigned long) efi_phys.systab;
 		if (md->phys_addr <= systab && systab < end) {
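Both EFI hunks above fix the same unit bug: set_memory_x() and set_memory_uc()
take a count of pages, while the old code handed them a byte size
(md->num_pages << EFI_PAGE_SHIFT) or an unrelated byte-sized `size`. A minimal
user-space sketch of the mismatch, with a hypothetical struct standing in for
the EFI memory descriptor:

    #include <assert.h>
    #include <stdio.h>

    #define EFI_PAGE_SHIFT 12              /* EFI pages are fixed at 4 KiB */

    /* Hypothetical stand-in for the EFI memory descriptor used above. */
    struct fake_efi_md {
            unsigned long virt_addr;
            unsigned long num_pages;       /* a count of EFI pages, not bytes */
    };

    int main(void)
    {
            struct fake_efi_md md = { .virt_addr = 0x1000, .num_pages = 4 };

            /* The buggy caller shifted first, turning 4 pages into a
             * byte count -- passed as "numpages", that walks 4096 times
             * as many pages as intended. */
            unsigned long bytes = md.num_pages << EFI_PAGE_SHIFT;
            unsigned long pages = md.num_pages;

            assert(bytes == 16384 && pages == 4);
            printf("bytes=%lu, pages=%lu\n", bytes, pages);
            return 0;
    }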
@@ -749,6 +749,15 @@ void __init gart_iommu_init(void)
 	 */
 	set_memory_np((unsigned long)__va(iommu_bus_base),
 				iommu_size >> PAGE_SHIFT);
+	/*
+	 * Tricky. The GART table remaps the physical memory range,
+	 * so the CPU wont notice potential aliases and if the memory
+	 * is remapped to UC later on, we might surprise the PCI devices
+	 * with a stray writeout of a cacheline. So play it sure and
+	 * do an explicit, full-scale wbinvd() _after_ having marked all
+	 * the pages as Not-Present:
+	 */
+	wbinvd();
 
 	/*
 	 * Try to workaround a bug (thanks to BenH)
@@ -10,8 +10,8 @@
  * of the License.
  */
 #include <linux/module.h>
+#include <asm/cacheflush.h>
 #include <asm/sections.h>
-extern int rodata_test_data;
 
 int rodata_test(void)
 {
@@ -84,7 +84,7 @@ static inline void conditional_sti(struct pt_regs *regs)
 
 static inline void preempt_conditional_sti(struct pt_regs *regs)
 {
-	preempt_disable();
+	inc_preempt_count();
 	if (regs->flags & X86_EFLAGS_IF)
 		local_irq_enable();
 }
@@ -95,7 +95,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 		local_irq_disable();
 	/* Make sure to not schedule here because we could be running
 	   on an exception stack. */
-	preempt_enable_no_resched();
+	dec_preempt_count();
 }
 
 int kstack_depth_to_print = 12;
@@ -186,7 +186,7 @@ static int bad_address(void *p)
 }
 #endif
 
-void dump_pagetable(unsigned long address)
+static void dump_pagetable(unsigned long address)
 {
 #ifdef CONFIG_X86_32
 	__typeof__(pte_val(__pte(0))) page;
@@ -47,6 +47,7 @@
 #include <asm/sections.h>
 #include <asm/paravirt.h>
 #include <asm/setup.h>
+#include <asm/cacheflush.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
@@ -45,6 +45,7 @@
 #include <asm/sections.h>
 #include <asm/kdebug.h>
 #include <asm/numa.h>
+#include <asm/cacheflush.h>
 
 const struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
@@ -688,6 +688,15 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
 		return 0;
 
+	/* Ensure we are PAGE_SIZE aligned */
+	if (addr & ~PAGE_MASK) {
+		addr &= PAGE_MASK;
+		/*
+		 * People should not be passing in unaligned addresses:
+		 */
+		WARN_ON_ONCE(1);
+	}
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
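The guard added above works because PAGE_MASK keeps only the page-number bits:
addr & ~PAGE_MASK isolates the offset inside the page (non-zero means the
caller passed an unaligned address), and addr &= PAGE_MASK rounds down to the
page boundary. A small user-space demonstration of the same masking, assuming
4 KiB pages for the sake of the example:

    #include <assert.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE (1UL << PAGE_SHIFT)  /* 4096 */
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long addr = 0x12345678;

            /* Non-zero offset bits: this is the case where the kernel
             * hunk fires WARN_ON_ONCE(1). */
            assert((addr & ~PAGE_MASK) == 0x678);

            addr &= PAGE_MASK;             /* round down, as the fix does */
            assert(addr == 0x12345000);
            return 0;
    }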
@@ -861,8 +870,12 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		return;
 
 	/*
-	 * The return value is ignored - the calls cannot fail,
-	 * large pages are disabled at boot time:
+	 * The return value is ignored as the calls cannot fail.
+	 * Large pages are kept enabled at boot time, and are
+	 * split up quickly with DEBUG_PAGEALLOC. If a splitup
+	 * fails here (due to temporary memory shortage) no damage
+	 * is done because we just keep the largepage intact up
+	 * to the next attempt when it will likely be split up:
 	 */
 	if (enable)
 		__set_pages_p(page, numpages);
@@ -48,12 +48,15 @@ void cpa_init(void);
 
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
+extern const int rodata_test_data;
 #endif
+
 #ifdef CONFIG_DEBUG_RODATA_TEST
-void rodata_test(void);
+int rodata_test(void);
 #else
-static inline void rodata_test(void)
+static inline int rodata_test(void)
 {
+	return 0;
 }
 #endif
 
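The header change is the usual compile-out idiom: with
CONFIG_DEBUG_RODATA_TEST disabled, callers still see an int-returning
rodata_test() and can act on its result, but the static inline stub returns 0
and the optimizer drops the call entirely. A freestanding sketch of the same
pattern, using a made-up CONFIG_FOO_TEST in place of the real option:

    #include <stdio.h>

    /* #define CONFIG_FOO_TEST 1 -- toggle to compile the real test in */

    #ifdef CONFIG_FOO_TEST
    int foo_test(void);                    /* real implementation elsewhere */
    #else
    static inline int foo_test(void)
    {
            return 0;                      /* stub: report "no failure" */
    }
    #endif

    int main(void)
    {
            /* Call sites stay identical either way; with the stub this
             * branch is folded away at compile time. */
            if (foo_test())
                    fprintf(stderr, "foo test failed\n");
            return 0;
    }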
|
@ -31,7 +31,6 @@ extern void show_trace(struct task_struct *t, struct pt_regs *regs,
|
|||
unsigned long *sp, unsigned long bp);
|
||||
extern void __show_regs(struct pt_regs *regs);
|
||||
extern void show_regs(struct pt_regs *regs);
|
||||
extern void dump_pagetable(unsigned long);
|
||||
extern unsigned long oops_begin(void);
|
||||
extern void oops_end(unsigned long, struct pt_regs *, int signr);
|
||||
|
||||
|
|
|
@@ -2711,6 +2711,13 @@ void print_vma_addr(char *prefix, unsigned long ip)
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 
+	/*
+	 * Do not print if we are in atomic
+	 * contexts (in exception stacks, etc.):
+	 */
+	if (preempt_count())
+		return;
+
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, ip);
 	if (vma && vma->vm_file) {
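print_vma_addr() takes mm->mmap_sem, a lock that can sleep, so calling it from
an atomic context is what produced the "BUG: sleeping function called from
invalid context" splat. Since preempt_conditional_sti() (changed earlier in
this merge) raises the preempt count around the affected exception paths, a
plain preempt_count() test is enough to detect them and bail out quietly. A
user-space sketch of that guard, with fake_preempt_count standing in for the
kernel's preempt_count():

    #include <stdio.h>

    static int fake_preempt_count;         /* stand-in for preempt_count() */

    static void print_vma_addr_sketch(const char *prefix, unsigned long ip)
    {
            /* The fix in a nutshell: refuse to touch a sleeping lock
             * when the context is atomic, instead of BUG-ing later. */
            if (fake_preempt_count)
                    return;

            /* ...down_read()/find_vma()/printing would go here... */
            printf("%s ip=%#lx\n", prefix, ip);
    }

    int main(void)
    {
            fake_preempt_count = 1;        /* as after inc_preempt_count() */
            print_vma_addr_sketch("atomic, skipped:", 0xdeadUL);

            fake_preempt_count = 0;        /* as after dec_preempt_count() */
            print_vma_addr_sketch("normal:", 0xdeadUL);
            return 0;
    }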