Merge branch 'x86/urgent' into x86/core

Conflicts:
	arch/x86/include/asm/fixmap_64.h
Semantic merge:
	arch/x86/include/asm/fixmap.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit a1413c89ae
43 changed files with 356 additions and 162 deletions
Makefile (2 changes)

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 29
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Erotic Pickled Herring
 
 # *DOCUMENTATION*
@@ -233,12 +233,13 @@ static void __init cacheid_init(void)
 	unsigned int cachetype = read_cpuid_cachetype();
 	unsigned int arch = cpu_architecture();
 
-	if (arch >= CPU_ARCH_ARMv7) {
-		cacheid = CACHEID_VIPT_NONALIASING;
-		if ((cachetype & (3 << 14)) == 1 << 14)
-			cacheid |= CACHEID_ASID_TAGGED;
-	} else if (arch >= CPU_ARCH_ARMv6) {
-		if (cachetype & (1 << 23))
+	if (arch >= CPU_ARCH_ARMv6) {
+		if ((cachetype & (7 << 29)) == 4 << 29) {
+			/* ARMv7 register format */
+			cacheid = CACHEID_VIPT_NONALIASING;
+			if ((cachetype & (3 << 14)) == 1 << 14)
+				cacheid |= CACHEID_ASID_TAGGED;
+		} else if (cachetype & (1 << 23))
 			cacheid = CACHEID_VIPT_ALIASING;
 		else
 			cacheid = CACHEID_VIPT_NONALIASING;
 
@@ -332,7 +332,6 @@ static int at91_pm_enter(suspend_state_t state)
 			at91_sys_read(AT91_AIC_IPR) & at91_sys_read(AT91_AIC_IMR));
 
-error:
 	sdram_selfrefresh_disable();
 	target_state = PM_SUSPEND_ON;
 	at91_irq_resume();
 	at91_gpio_resume();
@@ -23,7 +23,8 @@ ENTRY(v6_early_abort)
 #ifdef CONFIG_CPU_32v6K
 	clrex
 #else
-	strex	r0, r1, [sp]		@ Clear the exclusive monitor
+	sub	r1, sp, #4		@ Get unused stack location
+	strex	r0, r1, [r1]		@ Clear the exclusive monitor
 #endif
 	mrc	p15, 0, r1, c5, c0, 0	@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0	@ get FAR
@@ -55,7 +55,7 @@ static void s3c_irq_eint_unmask(unsigned int irq)
 	u32 mask;
 
 	mask = __raw_readl(S3C64XX_EINT0MASK);
-	mask |= eint_irq_to_bit(irq);
+	mask &= ~eint_irq_to_bit(irq);
 	__raw_writel(mask, S3C64XX_EINT0MASK);
 }
 
@@ -142,6 +142,10 @@ static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev)
 {
 	unsigned int val;
 
+	/* Do not do the fixup on other platforms! */
+	if (!machine_is(gef_sbc610))
+		return;
+
 	printk(KERN_INFO "Running NEC uPD720101 Fixup\n");
 
 	/* Ensure ports 1, 2, 3, 4 & 5 are enabled */
@@ -556,7 +556,7 @@ static void __exit aes_s390_fini(void)
 module_init(aes_s390_init);
 module_exit(aes_s390_fini);
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS("aes-all");
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
 MODULE_LICENSE("GPL");
@@ -37,8 +37,6 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 
 #else /* !CONFIG_X86_32 */
 
-#define MAX_EFI_IO_PAGES	100
-
 extern u64 efi_call0(void *fp);
 extern u64 efi_call1(void *fp, u64 arg1);
 extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
@@ -24,9 +24,6 @@
 #include <asm/kmap_types.h>
 #else
 #include <asm/vsyscall.h>
-#ifdef CONFIG_EFI
-#include <asm/efi.h>
-#endif
 #endif
 
 /*
@@ -92,13 +89,6 @@ enum fixed_addresses {
 	FIX_IO_APIC_BASE_0,
 	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
 #endif
-#ifdef CONFIG_X86_64
-#ifdef CONFIG_EFI
-	FIX_EFI_IO_MAP_LAST_PAGE,
-	FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
-				  + MAX_EFI_IO_PAGES - 1,
-#endif
-#endif
 #ifdef CONFIG_X86_VISWS_APIC
 	FIX_CO_CPU,	/* Cobalt timer */
 	FIX_CO_APIC,	/* Cobalt APIC Redirection Table */
@@ -172,7 +172,13 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 
 #else  /* CONFIG_X86_32 */
 
-extern void finit(void);
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_task(struct task_struct *tsk);
+#else
+static inline void finit_task(struct task_struct *tsk)
+{
+}
+#endif
 
 static inline void tolerant_fwait(void)
 {
@@ -469,7 +469,7 @@ void __init efi_enter_virtual_mode(void)
 	efi_memory_desc_t *md;
 	efi_status_t status;
 	unsigned long size;
-	u64 end, systab, addr, npages;
+	u64 end, systab, addr, npages, end_pfn;
 	void *p, *va;
 
 	efi.systab = NULL;
@@ -481,7 +481,10 @@ void __init efi_enter_virtual_mode(void)
 		size = md->num_pages << EFI_PAGE_SHIFT;
 		end = md->phys_addr + size;
 
-		if (PFN_UP(end) <= max_low_pfn_mapped)
+		end_pfn = PFN_UP(end);
+		if (end_pfn <= max_low_pfn_mapped
+		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
+			&& end_pfn <= max_pfn_mapped))
 			va = __va(md->phys_addr);
 		else
 			va = efi_ioremap(md->phys_addr, size);
@@ -100,24 +100,11 @@ void __init efi_call_phys_epilog(void)
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
-	static unsigned pages_mapped __initdata;
-	unsigned i, pages;
-	unsigned long offset;
+	unsigned long last_map_pfn;
 
-	pages = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr);
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-
-	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
+	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
 		return NULL;
 
-	for (i = 0; i < pages; i++) {
-		__set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-			     phys_addr, PAGE_KERNEL);
-		phys_addr += PAGE_SIZE;
-		pages_mapped++;
-	}
-
-	return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
-					     (pages_mapped - pages)) + offset;
+	return (void __iomem *)__va(phys_addr);
 }
@@ -136,7 +136,7 @@ int init_fpu(struct task_struct *tsk)
 #ifdef CONFIG_X86_32
 	if (!HAVE_HWFP) {
 		memset(tsk->thread.xstate, 0, xstate_size);
-		finit();
+		finit_task(tsk);
 		set_stopped_child_used_math(tsk);
 		return 0;
 	}
@@ -216,6 +216,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
 		},
 	},
+	{	/* Handle problems with rebooting on Dell XPS710 */
+		.callback = set_bios_reboot,
+		.ident = "Dell XPS710",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
+		},
+	},
 	{ }
 };
 
@@ -770,6 +770,9 @@ void __init setup_arch(char **cmdline_p)
 
 	finish_e820_parsing();
 
+	if (efi_enabled)
+		efi_init();
+
 	dmi_scan_machine();
 
 	dmi_check_system(bad_bios_dmi_table);
@@ -789,8 +792,6 @@ void __init setup_arch(char **cmdline_p)
 	insert_resource(&iomem_resource, &data_resource);
 	insert_resource(&iomem_resource, &bss_resource);
 
-	if (efi_enabled)
-		efi_init();
-
 #ifdef CONFIG_X86_32
 	if (ppro_with_ram_bug()) {
@@ -30,20 +30,29 @@ static void fclex(void)
 }
 
 /* Needs to be externally visible */
-void finit(void)
+void finit_task(struct task_struct *tsk)
 {
-	control_word = 0x037f;
-	partial_status = 0;
-	top = 0;		/* We don't keep top in the status word internally. */
-	fpu_tag_word = 0xffff;
+	struct i387_soft_struct *soft = &tsk->thread.xstate->soft;
+	struct address *oaddr, *iaddr;
+	soft->cwd = 0x037f;
+	soft->swd = 0;
+	soft->ftop = 0;	/* We don't keep top in the status word internally. */
+	soft->twd = 0xffff;
 	/* The behaviour is different from that detailed in
 	   Section 15.1.6 of the Intel manual */
-	operand_address.offset = 0;
-	operand_address.selector = 0;
-	instruction_address.offset = 0;
-	instruction_address.selector = 0;
-	instruction_address.opcode = 0;
-	no_ip_update = 1;
+	oaddr = (struct address *)&soft->foo;
+	oaddr->offset = 0;
+	oaddr->selector = 0;
+	iaddr = (struct address *)&soft->fip;
+	iaddr->offset = 0;
+	iaddr->selector = 0;
+	iaddr->opcode = 0;
+	soft->no_update = 1;
 }
 
+void finit(void)
+{
+	finit_task(current);
+}
+
 /*
@@ -32,11 +32,14 @@ struct kmmio_fault_page {
 	struct list_head list;
 	struct kmmio_fault_page *release_next;
 	unsigned long page; /* location of the fault page */
+	bool old_presence; /* page presence prior to arming */
+	bool armed;
+
 	/*
 	 * Number of times this page has been registered as a part
 	 * of a probe. If zero, page is disarmed and this may be freed.
-	 * Used only by writers (RCU).
+	 * Used only by writers (RCU) and post_kmmio_handler().
 	 * Protected by kmmio_lock, when linked into kmmio_page_table.
 	 */
 	int count;
 };
@@ -105,57 +108,85 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
 	return NULL;
 }
 
-static void set_page_present(unsigned long addr, bool present,
-			     unsigned int *pglevel)
+static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)
+{
+	pmdval_t v = pmd_val(*pmd);
+	*old = !!(v & _PAGE_PRESENT);
+	v &= ~_PAGE_PRESENT;
+	if (present)
+		v |= _PAGE_PRESENT;
+	set_pmd(pmd, __pmd(v));
+}
+
+static void set_pte_presence(pte_t *pte, bool present, bool *old)
+{
+	pteval_t v = pte_val(*pte);
+	*old = !!(v & _PAGE_PRESENT);
+	v &= ~_PAGE_PRESENT;
+	if (present)
+		v |= _PAGE_PRESENT;
+	set_pte_atomic(pte, __pte(v));
+}
+
+static int set_page_presence(unsigned long addr, bool present, bool *old)
 {
-	pteval_t pteval;
-	pmdval_t pmdval;
 	unsigned int level;
-	pmd_t *pmd;
 	pte_t *pte = lookup_address(addr, &level);
 
 	if (!pte) {
 		pr_err("kmmio: no pte for page 0x%08lx\n", addr);
-		return;
+		return -1;
 	}
 
-	if (pglevel)
-		*pglevel = level;
-
 	switch (level) {
 	case PG_LEVEL_2M:
-		pmd = (pmd_t *)pte;
-		pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
-		if (present)
-			pmdval |= _PAGE_PRESENT;
-		set_pmd(pmd, __pmd(pmdval));
+		set_pmd_presence((pmd_t *)pte, present, old);
 		break;
-
 	case PG_LEVEL_4K:
-		pteval = pte_val(*pte) & ~_PAGE_PRESENT;
-		if (present)
-			pteval |= _PAGE_PRESENT;
-		set_pte_atomic(pte, __pte(pteval));
+		set_pte_presence(pte, present, old);
 		break;
-
 	default:
 		pr_err("kmmio: unexpected page level 0x%x.\n", level);
-		return;
+		return -1;
 	}
 
 	__flush_tlb_one(addr);
+	return 0;
 }
 
-/** Mark the given page as not present. Access to it will trigger a fault. */
-static void arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/*
+ * Mark the given page as not present. Access to it will trigger a fault.
+ *
+ * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
+ * protection is ignored here. RCU read lock is assumed held, so the struct
+ * will not disappear unexpectedly. Furthermore, the caller must guarantee,
+ * that double arming the same virtual address (page) cannot occur.
+ *
+ * Double disarming on the other hand is allowed, and may occur when a fault
+ * and mmiotrace shutdown happen simultaneously.
+ */
+static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-	set_page_present(page & PAGE_MASK, false, pglevel);
+	int ret;
+	WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
+	if (f->armed) {
+		pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
+					f->page, f->count, f->old_presence);
+	}
+	ret = set_page_presence(f->page, false, &f->old_presence);
+	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
+	f->armed = true;
+	return ret;
 }
 
-/** Mark the given page as present. */
-static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/** Restore the given page to saved presence state. */
+static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-	set_page_present(page & PAGE_MASK, true, pglevel);
+	bool tmp;
+	int ret = set_page_presence(f->page, f->old_presence, &tmp);
+	WARN_ONCE(ret < 0,
+			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+	f->armed = false;
 }
 
 /*
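Note: the rework above replaces a blind present-bit toggle with a save-and-restore pattern: arming records whether the page was present, and disarming restores that recorded state instead of unconditionally setting the bit. The following is a minimal userspace C sketch of that pattern; the names and the flat uint64_t "entry" are illustrative stand-ins, not the kernel's pte_t/pmd_t API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT 0x1ULL	/* models _PAGE_PRESENT */

/* Clear or set the present bit, remembering what it was
 * (models set_pte_presence()/set_pmd_presence() above). */
static void set_presence(uint64_t *entry, bool present, bool *old)
{
	*old = !!(*entry & PAGE_PRESENT);
	*entry &= ~PAGE_PRESENT;
	if (present)
		*entry |= PAGE_PRESENT;
}

int main(void)
{
	uint64_t pte = 0x1000 | PAGE_PRESENT;
	bool old;

	set_presence(&pte, false, &old);	/* arm: hide the page, save old state */
	printf("armed: pte=%#llx old=%d\n", (unsigned long long)pte, old);
	set_presence(&pte, old, &old);		/* disarm: restore saved presence */
	printf("disarmed: pte=%#llx\n", (unsigned long long)pte);
	return 0;
}

The point of saving the old state is that a page may legitimately be non-present before arming; restoring "present" unconditionally would corrupt it.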
@@ -202,28 +233,32 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 
 	ctx = &get_cpu_var(kmmio_ctx);
 	if (ctx->active) {
-		disarm_kmmio_fault_page(faultpage->page, NULL);
 		if (addr == ctx->addr) {
 			/*
-			 * On SMP we sometimes get recursive probe hits on the
-			 * same address. Context is already saved, fall out.
+			 * A second fault on the same page means some other
+			 * condition needs handling by do_page_fault(), the
+			 * page really not being present is the most common.
 			 */
-			pr_debug("kmmio: duplicate probe hit on CPU %d, for "
-						"address 0x%08lx.\n",
-						smp_processor_id(), addr);
-			ret = 1;
-			goto no_kmmio_ctx;
-		}
-		/*
-		 * Prevent overwriting already in-flight context.
-		 * This should not happen, let's hope disarming at least
-		 * prevents a panic.
-		 */
-		pr_emerg("kmmio: recursive probe hit on CPU %d, "
+			pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
+					addr, smp_processor_id());
+
+			if (!faultpage->old_presence)
+				pr_info("kmmio: unexpected secondary hit for "
+					"address 0x%08lx on CPU %d.\n", addr,
+					smp_processor_id());
+		} else {
+			/*
+			 * Prevent overwriting already in-flight context.
+			 * This should not happen, let's hope disarming at
+			 * least prevents a panic.
+			 */
+			pr_emerg("kmmio: recursive probe hit on CPU %d, "
 				"for address 0x%08lx. Ignoring.\n",
 				smp_processor_id(), addr);
-		pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
-					ctx->addr);
+			pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
+						ctx->addr);
+			disarm_kmmio_fault_page(faultpage);
+		}
 		goto no_kmmio_ctx;
 	}
 	ctx->active++;
@@ -244,7 +279,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	regs->flags &= ~X86_EFLAGS_IF;
 
 	/* Now we set present bit in PTE and single step. */
-	disarm_kmmio_fault_page(ctx->fpage->page, NULL);
+	disarm_kmmio_fault_page(ctx->fpage);
 
 	/*
 	 * If another cpu accesses the same page while we are stepping,
@@ -275,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
 	if (!ctx->active) {
-		pr_debug("kmmio: spurious debug trap on CPU %d.\n",
+		pr_warning("kmmio: spurious debug trap on CPU %d.\n",
 							smp_processor_id());
 		goto out;
 	}
@@ -283,7 +318,11 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	if (ctx->probe && ctx->probe->post_handler)
 		ctx->probe->post_handler(ctx->probe, condition, regs);
 
-	arm_kmmio_fault_page(ctx->fpage->page, NULL);
+	/* Prevent racing against release_kmmio_fault_page(). */
+	spin_lock(&kmmio_lock);
+	if (ctx->fpage->count)
+		arm_kmmio_fault_page(ctx->fpage);
+	spin_unlock(&kmmio_lock);
 
 	regs->flags &= ~X86_EFLAGS_TF;
 	regs->flags |= ctx->saved_flags;
@@ -315,20 +354,24 @@ static int add_kmmio_fault_page(unsigned long page)
 	f = get_kmmio_fault_page(page);
 	if (f) {
 		if (!f->count)
-			arm_kmmio_fault_page(f->page, NULL);
+			arm_kmmio_fault_page(f);
 		f->count++;
 		return 0;
 	}
 
-	f = kmalloc(sizeof(*f), GFP_ATOMIC);
+	f = kzalloc(sizeof(*f), GFP_ATOMIC);
 	if (!f)
 		return -1;
 
 	f->count = 1;
 	f->page = page;
-	list_add_rcu(&f->list, kmmio_page_list(f->page));
 
-	arm_kmmio_fault_page(f->page, NULL);
+	if (arm_kmmio_fault_page(f)) {
+		kfree(f);
+		return -1;
+	}
+
+	list_add_rcu(&f->list, kmmio_page_list(f->page));
 
 	return 0;
 }
@@ -347,7 +390,7 @@ static void release_kmmio_fault_page(unsigned long page,
 	f->count--;
 	BUG_ON(f->count < 0);
 	if (!f->count) {
-		disarm_kmmio_fault_page(f->page, NULL);
+		disarm_kmmio_fault_page(f);
 		f->release_next = *release_list;
 		*release_list = f;
 	}
@@ -1,5 +1,5 @@
 /*
- * Written by Pekka Paalanen, 2008 <pq@iki.fi>
+ * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
 */
 #include <linux/module.h>
 #include <linux/io.h>
@@ -9,35 +9,74 @@
 
 static unsigned long mmio_address;
 module_param(mmio_address, ulong, 0);
-MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB.");
+MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
+				"(or 8 MB if read_far is non-zero).");
+
+static unsigned long read_far = 0x400100;
+module_param(read_far, ulong, 0);
+MODULE_PARM_DESC(read_far, " Offset of a 32-bit read within 8 MB "
+				"(default: 0x400100).");
+
+static unsigned v16(unsigned i)
+{
+	return i * 12 + 7;
+}
+
+static unsigned v32(unsigned i)
+{
+	return i * 212371 + 13;
+}
 
 static void do_write_test(void __iomem *p)
 {
 	unsigned int i;
+	pr_info(MODULE_NAME ": write test.\n");
 	mmiotrace_printk("Write test.\n");
+
 	for (i = 0; i < 256; i++)
 		iowrite8(i, p + i);
+
 	for (i = 1024; i < (5 * 1024); i += 2)
-		iowrite16(i * 12 + 7, p + i);
+		iowrite16(v16(i), p + i);
+
 	for (i = (5 * 1024); i < (16 * 1024); i += 4)
-		iowrite32(i * 212371 + 13, p + i);
+		iowrite32(v32(i), p + i);
 }
 
 static void do_read_test(void __iomem *p)
 {
 	unsigned int i;
+	unsigned errs[3] = { 0 };
+	pr_info(MODULE_NAME ": read test.\n");
 	mmiotrace_printk("Read test.\n");
+
 	for (i = 0; i < 256; i++)
-		ioread8(p + i);
+		if (ioread8(p + i) != i)
+			++errs[0];
+
 	for (i = 1024; i < (5 * 1024); i += 2)
-		ioread16(p + i);
+		if (ioread16(p + i) != v16(i))
+			++errs[1];
+
 	for (i = (5 * 1024); i < (16 * 1024); i += 4)
-		ioread32(p + i);
+		if (ioread32(p + i) != v32(i))
+			++errs[2];
+
+	mmiotrace_printk("Read errors: 8-bit %d, 16-bit %d, 32-bit %d.\n",
+		errs[0], errs[1], errs[2]);
 }
 
-static void do_test(void)
+static void do_read_far_test(void __iomem *p)
 {
-	void __iomem *p = ioremap_nocache(mmio_address, 0x4000);
+	pr_info(MODULE_NAME ": read far test.\n");
+	mmiotrace_printk("Read far test.\n");
+
+	ioread32(p + read_far);
+}
+
+static void do_test(unsigned long size)
+{
+	void __iomem *p = ioremap_nocache(mmio_address, size);
 	if (!p) {
 		pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
 		return;
@@ -45,11 +84,15 @@ static void do_test(void)
 	mmiotrace_printk("ioremap returned %p.\n", p);
 	do_write_test(p);
 	do_read_test(p);
+	if (read_far && read_far < size - 4)
+		do_read_far_test(p);
 	iounmap(p);
 }
 
 static int __init init(void)
 {
+	unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
+
 	if (mmio_address == 0) {
 		pr_err(MODULE_NAME ": you have to use the module argument "
 							"mmio_address.\n");
@@ -58,10 +101,11 @@ static int __init init(void)
 		return -ENXIO;
 	}
 
-	pr_warning(MODULE_NAME ": WARNING: mapping 16 kB @ 0x%08lx "
-					"in PCI address space, and writing "
-					"rubbish in there.\n", mmio_address);
-	do_test();
+	pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI "
+		"address space, and writing 16 kB of rubbish in there.\n",
+		size >> 10, mmio_address);
+	do_test(size);
 	pr_info(MODULE_NAME ": All done.\n");
 	return 0;
 }
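Note: the testmmiotrace change turns fire-and-forget reads into verified reads, so the module can report how many read-back values disagree with the written pattern. A minimal userspace C sketch of that write-then-verify pattern over an ordinary buffer (the buffer stands in for the ioremapped region; names are illustrative):

#include <stdio.h>
#include <string.h>

/* Same value generator as the patch's v16() helper. */
static unsigned v16(unsigned i) { return i * 12 + 7; }

int main(void)
{
	static unsigned char buf[16 * 1024];
	unsigned i, errs = 0;
	unsigned short val, got;

	/* Write phase, as iowrite16(v16(i), p + i) does over 1k..5k. */
	for (i = 1024; i < 5 * 1024; i += 2) {
		val = (unsigned short)v16(i);
		memcpy(buf + i, &val, sizeof(val));
	}

	/* Read phase: verify instead of discarding the value,
	 * as the new do_read_test() does, counting mismatches. */
	for (i = 1024; i < 5 * 1024; i += 2) {
		memcpy(&got, buf + i, sizeof(got));
		if (got != (unsigned short)v16(i))
			++errs;
	}
	printf("16-bit read errors: %u\n", errs);
	return 0;
}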
crypto/api.c (15 changes)

@@ -215,8 +215,19 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
 	mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
 	type &= mask;
 
-	alg = try_then_request_module(crypto_alg_lookup(name, type, mask),
-				      name);
+	alg = crypto_alg_lookup(name, type, mask);
+	if (!alg) {
+		char tmp[CRYPTO_MAX_ALG_NAME];
+
+		request_module(name);
+
+		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
+		    snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
+			request_module(tmp);
+
+		alg = crypto_alg_lookup(name, type, mask);
+	}
 
 	if (alg)
 		return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
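Note: this hunk, together with the MODULE_ALIAS("aes-all") / "sha1-all" changes elsewhere in the commit, makes a failed algorithm lookup retry with a "name-all" module alias. A hedged userspace C sketch of that two-step fallback; request_module_stub() is a hypothetical stand-in for the kernel's request_module():

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in; the real call loads a kernel module by alias. */
static void request_module_stub(const char *name)
{
	printf("request_module(\"%s\")\n", name);
}

/* Models the fallback in crypto_larval_lookup(): try "name", then "name-all". */
static void load_with_fallback(const char *name, int need_fallback)
{
	char tmp[64];	/* stand-in for CRYPTO_MAX_ALG_NAME */

	request_module_stub(name);
	if (need_fallback &&
	    snprintf(tmp, sizeof(tmp), "%s-all", name) < (int)sizeof(tmp))
		request_module_stub(tmp);
}

int main(void)
{
	load_with_fallback("aes", 1);	/* matches the new MODULE_ALIAS("aes-all") */
	return 0;
}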
@@ -457,10 +457,12 @@ static int init_ixp_crypto(void)
 	if (!ctx_pool) {
 		goto err;
 	}
-	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
+	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
+				 "ixp_crypto:out", NULL);
 	if (ret)
 		goto err;
-	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
+	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
+				 "ixp_crypto:in", NULL);
 	if (ret) {
 		qmgr_release_queue(SEND_QID);
 		goto err;
@@ -489,4 +489,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS("aes-all");
@@ -304,7 +304,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("sha1");
-MODULE_ALIAS("sha256");
+MODULE_ALIAS("sha1-all");
+MODULE_ALIAS("sha256-all");
 MODULE_ALIAS("sha1-padlock");
 MODULE_ALIAS("sha256-padlock");
@@ -1401,7 +1401,7 @@ MODULE_ALIAS("platform:iop-adma");
 
 static struct platform_driver iop_adma_driver = {
 	.probe		= iop_adma_probe,
-	.remove		= iop_adma_remove,
+	.remove		= __devexit_p(iop_adma_remove),
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "iop-adma",
@@ -1287,7 +1287,7 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
 
 static struct platform_driver mv_xor_driver = {
 	.probe		= mv_xor_probe,
-	.remove		= mv_xor_remove,
+	.remove		= __devexit_p(mv_xor_remove),
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= MV_XOR_NAME,
@@ -168,7 +168,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 	    file_priv->minor->master != file_priv->master) {
 		mutex_lock(&dev->struct_mutex);
 		file_priv->minor->master = drm_master_get(file_priv->master);
-		mutex_lock(&dev->struct_mutex);
+		mutex_unlock(&dev->struct_mutex);
 	}
 
 	return 0;
@@ -482,7 +482,7 @@ mv64xxx_i2c_map_regs(struct platform_device *pd,
 	return 0;
 }
 
-static void __devexit
+static void
 mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
 {
 	if (drv_data->reg_base) {
@@ -577,7 +577,7 @@ mv64xxx_i2c_remove(struct platform_device *dev)
 
 static struct platform_driver mv64xxx_i2c_driver = {
 	.probe	= mv64xxx_i2c_probe,
-	.remove	= mv64xxx_i2c_remove,
+	.remove	= __devexit_p(mv64xxx_i2c_remove),
 	.driver	= {
 		.owner	= THIS_MODULE,
 		.name	= MV64XXX_I2C_CTLR_NAME,
@@ -149,7 +149,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
 
 static struct platform_driver orion_nand_driver = {
 	.probe		= orion_nand_probe,
-	.remove		= orion_nand_remove,
+	.remove		= __devexit_p(orion_nand_remove),
 	.driver		= {
 		.name	= "orion_nand",
 		.owner	= THIS_MODULE,
@@ -4,7 +4,7 @@
 #
 
 obj-$(CONFIG_ARM_AM79C961A)	+= am79c961a.o
-obj-$(CONFIG_ARM_ETHERH)	+= etherh.o ../8390.o
+obj-$(CONFIG_ARM_ETHERH)	+= etherh.o
 obj-$(CONFIG_ARM_ETHER3)	+= ether3.o
 obj-$(CONFIG_ARM_ETHER1)	+= ether1.o
 obj-$(CONFIG_ARM_AT91_ETHER)	+= at91_ether.o
@@ -641,15 +641,15 @@ static const struct net_device_ops etherh_netdev_ops = {
 	.ndo_open		= etherh_open,
 	.ndo_stop		= etherh_close,
 	.ndo_set_config		= etherh_set_config,
-	.ndo_start_xmit		= ei_start_xmit,
-	.ndo_tx_timeout		= ei_tx_timeout,
-	.ndo_get_stats		= ei_get_stats,
-	.ndo_set_multicast_list = ei_set_multicast_list,
+	.ndo_start_xmit		= __ei_start_xmit,
+	.ndo_tx_timeout		= __ei_tx_timeout,
+	.ndo_get_stats		= __ei_get_stats,
+	.ndo_set_multicast_list = __ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ei_poll,
+	.ndo_poll_controller	= __ei_poll,
 #endif
 };
 
@@ -2230,7 +2230,7 @@ static int __devexit pxafb_remove(struct platform_device *dev)
 
 static struct platform_driver pxafb_driver = {
 	.probe		= pxafb_probe,
-	.remove		= pxafb_remove,
+	.remove		= __devexit_p(pxafb_remove),
 	.suspend	= pxafb_suspend,
 	.resume		= pxafb_resume,
 	.driver		= {
@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
 #define rcu_enter_nohz()	do { } while (0)
 #define rcu_exit_nohz()		do { } while (0)
 
+/* A context switch is a grace period for rcuclassic. */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUCLASSIC_H */
@@ -52,6 +52,9 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };
 
+/* Internal to kernel, but needed by rcupreempt.h. */
+extern int rcu_scheduler_active;
+
 #if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
 #elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
 extern int rcu_needs_cpu(int cpu);
 
 #endif /* __LINUX_RCUPDATE_H */
@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
 #define rcu_exit_nohz()		do { } while (0)
 #endif /* CONFIG_NO_HZ */
 
+/*
+ * A context switch is a grace period for rcupreempt synchronize_rcu()
+ * only during early boot, before the scheduler has been initialized.
+ * So, how the heck do we get a context switch?  Well, if the caller
+ * invokes synchronize_rcu(), they are willing to accept a context
+ * switch, so we simply pretend that one happened.
+ *
+ * After boot, there might be a blocked or preempted task in an RCU
+ * read-side critical section, so we cannot then take the fastpath.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1 && !rcu_scheduler_active;
+}
+
 #endif /* __LINUX_RCUPREEMPT_H */
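Note: the rcu_blocking_is_gp() additions across rcuclassic.h, rcupreempt.h, and rcutree.h let synchronize_rcu() return immediately during early boot, when there is one CPU and (for preemptible RCU) the scheduler has not started. A minimal userspace C sketch of that fastpath guard; the two globals are stand-ins for num_online_cpus() and rcu_scheduler_active:

#include <stdio.h>

static int online_cpus = 1;	/* stand-in for num_online_cpus() */
static int scheduler_active;	/* 0 until rcu_scheduler_starting() runs */

/* Single CPU and no scheduler yet: any code that could block is a grace period. */
static int rcu_blocking_is_gp(void)
{
	return online_cpus == 1 && !scheduler_active;
}

static void synchronize_rcu_model(void)
{
	if (rcu_blocking_is_gp())
		return;	/* early boot: pretend the context switch happened */
	printf("would queue a callback and wait for a real grace period\n");
}

int main(void)
{
	synchronize_rcu_model();	/* boot: takes the fastpath */
	scheduler_active = 1;		/* models rcu_scheduler_starting() */
	synchronize_rcu_model();	/* after boot: takes the slowpath */
	return 0;
}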
@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
 }
 #endif /* CONFIG_NO_HZ */
 
+/* A context switch is a grace period for rcutree. */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUTREE_H */
@@ -2303,9 +2303,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
 				      long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+					struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
@@ -98,7 +98,7 @@ static inline void mark_rodata_ro(void) { }
 extern void tc_init(void);
 #endif
 
-enum system_states system_state;
+enum system_states system_state __read_mostly;
 EXPORT_SYMBOL(system_state);
 
 /*
@@ -464,6 +464,7 @@ static noinline void __init_refok rest_init(void)
 	 * at least once to get things moving:
 	 */
 	init_idle_bootup_task(current);
+	rcu_scheduler_starting();
 	preempt_enable_no_resched();
 	schedule();
 	preempt_disable();
@@ -679,8 +679,8 @@ int rcu_needs_cpu(int cpu)
 void rcu_check_callbacks(int cpu, int user)
 {
 	if (user ||
-	    (idle_cpu(cpu) && !in_softirq() &&
-				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+	    (idle_cpu(cpu) && rcu_scheduler_active &&
+	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 
 		/*
 		 * Get here if this CPU took its interrupt from user
@@ -44,6 +44,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/kernel_stat.h>
 
 enum rcu_barrier {
 	RCU_BARRIER_STD,
@@ -55,6 +56,7 @@ static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
+int rcu_scheduler_active __read_mostly;
 
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
@@ -80,6 +82,10 @@ void wakeme_after_rcu(struct rcu_head *head)
 void synchronize_rcu(void)
 {
 	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu(&rcu.head, wakeme_after_rcu);
@@ -175,3 +181,9 @@ void __init rcu_init(void)
 	__rcu_init();
 }
 
+void rcu_scheduler_starting(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+	WARN_ON(nr_context_switches() > 0);
+	rcu_scheduler_active = 1;
+}
@@ -1181,6 +1181,9 @@ void __synchronize_sched(void)
 {
 	struct rcu_synchronize rcu;
 
+	if (num_online_cpus() == 1)
+		return;  /* blocking is gp if only one CPU! */
+
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu_sched(&rcu.head, wakeme_after_rcu);
@@ -948,8 +948,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
 void rcu_check_callbacks(int cpu, int user)
 {
 	if (user ||
-	    (idle_cpu(cpu) && !in_softirq() &&
-				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+	    (idle_cpu(cpu) && rcu_scheduler_active &&
+	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 
 		/*
 		 * Get here if this CPU took its interrupt from user
@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
 	ktime_t now;
 
-	if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return;
 
 	if (hrtimer_active(&rt_b->rt_period_timer))
@@ -9219,6 +9219,16 @@ static int sched_rt_global_constraints(void)
 
 	return ret;
 }
+
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+	/* Don't accept realtime tasks when there is no way for them to run */
+	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+		return 0;
+
+	return 1;
+}
+
 #else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
@@ -9312,8 +9322,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		      struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
-	/* Don't accept realtime tasks when there is no way for them to run */
-	if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
 		return -EINVAL;
 #else
 	/* We don't support RT-tasks being in separate groups */
kernel/sys.c (31 changes)

@@ -559,7 +559,7 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
 	abort_creds(new);
 	return retval;
 }
-
+
 /*
  * change the user struct in a credentials set to match the new UID
 */
@@ -571,6 +571,11 @@ static int set_user(struct cred *new)
 	if (!new_user)
 		return -EAGAIN;
 
+	if (!task_can_switch_user(new_user, current)) {
+		free_uid(new_user);
+		return -EINVAL;
+	}
+
 	if (atomic_read(&new_user->processes) >=
 				current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
 			new_user != INIT_USER) {
@@ -631,10 +636,11 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 			goto error;
 	}
 
-	retval = -EAGAIN;
-	if (new->uid != old->uid && set_user(new) < 0)
-		goto error;
-
+	if (new->uid != old->uid) {
+		retval = set_user(new);
+		if (retval < 0)
+			goto error;
+	}
 	if (ruid != (uid_t) -1 ||
 	    (euid != (uid_t) -1 && euid != old->uid))
 		new->suid = new->euid;
@@ -680,9 +686,10 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
 	retval = -EPERM;
 	if (capable(CAP_SETUID)) {
 		new->suid = new->uid = uid;
-		if (uid != old->uid && set_user(new) < 0) {
-			retval = -EAGAIN;
-			goto error;
+		if (uid != old->uid) {
+			retval = set_user(new);
+			if (retval < 0)
+				goto error;
 		}
 	} else if (uid != old->uid && uid != new->suid) {
 		goto error;
@@ -734,11 +741,13 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 			goto error;
 	}
 
-	retval = -EAGAIN;
 	if (ruid != (uid_t) -1) {
 		new->uid = ruid;
-		if (ruid != old->uid && set_user(new) < 0)
-			goto error;
+		if (ruid != old->uid) {
+			retval = set_user(new);
+			if (retval < 0)
+				goto error;
+		}
 	}
 	if (euid != (uid_t) -1)
 		new->euid = euid;
@@ -362,6 +362,24 @@ static void free_user(struct user_struct *up, unsigned long flags)
 
 #endif
 
+#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
+/*
+ * We need to check if a setuid can take place. This function should be called
+ * before successfully completing the setuid.
+ */
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+
+	return sched_rt_can_attach(up->tg, tsk);
+
+}
+#else
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+	return 1;
+}
+#endif
+
 /*
  * Locate the user_struct for the passed UID. If found, take a ref on it. The
  * caller must undo that ref with free_uid().