Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
  sparc64: Use a TS_RESTORE_SIGMASK
  lmb: Make lmb debugging more useful.
  lmb: Fix inconsistent alignment of size argument.
  sparc: Fix mremap address range validation.

commit 8978a31883

10 changed files with 89 additions and 151 deletions
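The sparc64 part of this merge moves the restore-sigmask bookkeeping out of thread_info->flags (the atomic TIF_RESTORE_SIGMASK bit) and into the thread-synchronous status byte (TS_RESTORE_SIGMASK), which only the owning thread ever touches and can therefore be updated with plain stores; callers switch to set_restore_sigmask(), which also raises TIF_SIGPENDING so the signal-delivery path runs on return to user space. The sketch below is an editorial, user-space illustration of that pattern, not kernel code; the struct and function names are stand-ins.

/* Editorial sketch (user-space, stand-in names): the restore-sigmask bit
 * lives in a per-thread status byte instead of the shared flags word. */
#include <stdio.h>

#define TS_RESTORE_SIGMASK 0x0001      /* thread-synchronous status bit */
#define TIF_SIGPENDING     (1UL << 2)  /* shared flags bit */

struct fake_thread_info {
        unsigned long flags;           /* may be touched by other CPUs */
        unsigned char status;          /* touched only by this thread  */
};

/* Analogue of set_restore_sigmask(): mark the private status byte and
 * raise the shared "signal pending" flag so do_signal() gets invoked. */
static void sketch_set_restore_sigmask(struct fake_thread_info *ti)
{
        ti->status |= TS_RESTORE_SIGMASK;   /* plain store, no atomics   */
        ti->flags  |= TIF_SIGPENDING;       /* the kernel uses set_bit() */
}

/* Analogue of what do_signal() now does instead of
 * test_thread_flag()/clear_thread_flag(TIF_RESTORE_SIGMASK). */
static void sketch_clear_restore_sigmask(struct fake_thread_info *ti)
{
        ti->status &= ~TS_RESTORE_SIGMASK;
}

int main(void)
{
        struct fake_thread_info ti = { 0, 0 };

        sketch_set_restore_sigmask(&ti);
        printf("status=0x%x flags=0x%lx\n", (unsigned)ti.status, ti.flags);
        sketch_clear_restore_sigmask(&ti);
        printf("status=0x%x\n", (unsigned)ti.status);
        return 0;
}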
@@ -219,7 +219,7 @@ asmlinkage int sys_ipc (uint call, int first, int second, int third, void __user
         return err;
 }
 
-int sparc_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
+int sparc_mmap_check(unsigned long addr, unsigned long len)
 {
         if (ARCH_SUN4C_SUN4 &&
             (len > 0x20000000 ||
@@ -295,52 +295,14 @@ asmlinkage unsigned long sparc_mremap(unsigned long addr,
         unsigned long old_len, unsigned long new_len,
         unsigned long flags, unsigned long new_addr)
 {
-        struct vm_area_struct *vma;
         unsigned long ret = -EINVAL;
-        if (ARCH_SUN4C_SUN4) {
-                if (old_len > 0x20000000 || new_len > 0x20000000)
-                        goto out;
-                if (addr < 0xe0000000 && addr + old_len > 0x20000000)
-                        goto out;
-        }
-        if (old_len > TASK_SIZE - PAGE_SIZE ||
-            new_len > TASK_SIZE - PAGE_SIZE)
+
+        if (unlikely(sparc_mmap_check(addr, old_len)))
+                goto out;
+        if (unlikely(sparc_mmap_check(new_addr, new_len)))
                 goto out;
         down_write(&current->mm->mmap_sem);
-        if (flags & MREMAP_FIXED) {
-                if (ARCH_SUN4C_SUN4 &&
-                    new_addr < 0xe0000000 &&
-                    new_addr + new_len > 0x20000000)
-                        goto out_sem;
-                if (new_addr + new_len > TASK_SIZE - PAGE_SIZE)
-                        goto out_sem;
-        } else if ((ARCH_SUN4C_SUN4 && addr < 0xe0000000 &&
-                    addr + new_len > 0x20000000) ||
-                   addr + new_len > TASK_SIZE - PAGE_SIZE) {
-                unsigned long map_flags = 0;
-                struct file *file = NULL;
-
-                ret = -ENOMEM;
-                if (!(flags & MREMAP_MAYMOVE))
-                        goto out_sem;
-
-                vma = find_vma(current->mm, addr);
-                if (vma) {
-                        if (vma->vm_flags & VM_SHARED)
-                                map_flags |= MAP_SHARED;
-                        file = vma->vm_file;
-                }
-
-                new_addr = get_unmapped_area(file, addr, new_len,
-                                             vma ? vma->vm_pgoff : 0,
-                                             map_flags);
-                ret = new_addr;
-                if (new_addr & ~PAGE_MASK)
-                        goto out_sem;
-                flags |= MREMAP_FIXED;
-        }
         ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-out_sem:
         up_write(&current->mm->mmap_sem);
 out:
         return ret;

@@ -46,7 +46,7 @@ __handle_user_windows:
                 wrpr            %g0, RTRAP_PSTATE_IRQOFF, %pstate
                 ldx             [%g6 + TI_FLAGS], %l0
 
-1:              andcc           %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
+1:              andcc           %l0, _TIF_SIGPENDING, %g0
                 be,pt           %xcc, __handle_user_windows_continue
                  nop
                 mov             %l5, %o1
@@ -86,7 +86,7 @@ __handle_perfctrs:
                 wrpr            %g0, RTRAP_PSTATE, %pstate
                 wrpr            %g0, RTRAP_PSTATE_IRQOFF, %pstate
                 ldx             [%g6 + TI_FLAGS], %l0
-1:              andcc           %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
+1:              andcc           %l0, _TIF_SIGPENDING, %g0
 
                 be,pt           %xcc, __handle_perfctrs_continue
                  sethi          %hi(TSTATE_PEF), %o0
@@ -195,7 +195,7 @@ __handle_preemption_continue:
                 andcc           %l1, %o0, %g0
                 andcc           %l0, _TIF_NEED_RESCHED, %g0
                 bne,pn          %xcc, __handle_preemption
-                 andcc          %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
+                 andcc          %l0, _TIF_SIGPENDING, %g0
                 bne,pn          %xcc, __handle_signal
 __handle_signal_continue:
                 ldub            [%g6 + TI_WSAVED], %o2

@@ -247,7 +247,9 @@ static long _sigpause_common(old_sigset_t set)
 
         current->state = TASK_INTERRUPTIBLE;
         schedule();
-        set_thread_flag(TIF_RESTORE_SIGMASK);
+
+        set_restore_sigmask();
+
         return -ERESTARTNOHAND;
 }
 
@@ -537,7 +539,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
         } else
                 restart_syscall = 0;
 
-        if (test_thread_flag(TIF_RESTORE_SIGMASK))
+        if (current_thread_info()->status & TS_RESTORE_SIGMASK)
                 oldset = &current->saved_sigmask;
         else
                 oldset = &current->blocked;
@@ -566,13 +568,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                         syscall_restart(orig_i0, regs, &ka.sa);
                 handle_signal(signr, &ka, &info, oldset, regs);
 
-                /* a signal was successfully delivered; the saved
+                /* A signal was successfully delivered; the saved
                  * sigmask will have been stored in the signal frame,
                  * and will be restored by sigreturn, so we can simply
-                 * clear the TIF_RESTORE_SIGMASK flag.
+                 * clear the TS_RESTORE_SIGMASK flag.
                  */
-                if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                        clear_thread_flag(TIF_RESTORE_SIGMASK);
+                current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                 return;
         }
         if (restart_syscall &&
@@ -591,17 +592,17 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                         regs->tnpc -= 4;
         }
 
-        /* if there's no signal to deliver, we just put the saved sigmask
+        /* If there's no signal to deliver, we just put the saved sigmask
          * back
          */
-        if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-                clear_thread_flag(TIF_RESTORE_SIGMASK);
+        if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+                current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
         }
 }
 
 void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
 {
-        if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+        if (thread_info_flags & _TIF_SIGPENDING)
                 do_signal(regs, orig_i0);
 }

@@ -788,13 +788,12 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
                         syscall_restart32(orig_i0, regs, &ka.sa);
                 handle_signal32(signr, &ka, &info, oldset, regs);
 
-                /* a signal was successfully delivered; the saved
+                /* A signal was successfully delivered; the saved
                  * sigmask will have been stored in the signal frame,
                  * and will be restored by sigreturn, so we can simply
-                 * clear the TIF_RESTORE_SIGMASK flag.
+                 * clear the TS_RESTORE_SIGMASK flag.
                  */
-                if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                        clear_thread_flag(TIF_RESTORE_SIGMASK);
+                current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                 return;
         }
         if (restart_syscall &&
@@ -813,11 +812,11 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
                         regs->tnpc -= 4;
         }
 
-        /* if there's no signal to deliver, we just put the saved sigmask
+        /* If there's no signal to deliver, we just put the saved sigmask
          * back
          */
-        if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-                clear_thread_flag(TIF_RESTORE_SIGMASK);
+        if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+                current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
         }
 }

@@ -542,8 +542,7 @@ asmlinkage long sparc64_personality(unsigned long personality)
         return ret;
 }
 
-int sparc64_mmap_check(unsigned long addr, unsigned long len,
-                       unsigned long flags)
+int sparc64_mmap_check(unsigned long addr, unsigned long len)
 {
         if (test_thread_flag(TIF_32BIT)) {
                 if (len >= STACK_TOP32)
@@ -609,46 +608,19 @@ asmlinkage unsigned long sys64_mremap(unsigned long addr,
         unsigned long old_len, unsigned long new_len,
         unsigned long flags, unsigned long new_addr)
 {
-        struct vm_area_struct *vma;
         unsigned long ret = -EINVAL;
 
         if (test_thread_flag(TIF_32BIT))
                 goto out;
         if (unlikely(new_len >= VA_EXCLUDE_START))
                 goto out;
-        if (unlikely(invalid_64bit_range(addr, old_len)))
+        if (unlikely(sparc64_mmap_check(addr, old_len)))
+                goto out;
+        if (unlikely(sparc64_mmap_check(new_addr, new_len)))
                 goto out;
 
         down_write(&current->mm->mmap_sem);
-        if (flags & MREMAP_FIXED) {
-                if (invalid_64bit_range(new_addr, new_len))
-                        goto out_sem;
-        } else if (invalid_64bit_range(addr, new_len)) {
-                unsigned long map_flags = 0;
-                struct file *file = NULL;
-
-                ret = -ENOMEM;
-                if (!(flags & MREMAP_MAYMOVE))
-                        goto out_sem;
-
-                vma = find_vma(current->mm, addr);
-                if (vma) {
-                        if (vma->vm_flags & VM_SHARED)
-                                map_flags |= MAP_SHARED;
-                        file = vma->vm_file;
-                }
-
-                /* MREMAP_FIXED checked above. */
-                new_addr = get_unmapped_area(file, addr, new_len,
-                                             vma ? vma->vm_pgoff : 0,
-                                             map_flags);
-                ret = new_addr;
-                if (new_addr & ~PAGE_MASK)
-                        goto out_sem;
-                flags |= MREMAP_FIXED;
-        }
         ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-out_sem:
         up_write(&current->mm->mmap_sem);
 out:
         return ret;

@@ -867,44 +867,15 @@ asmlinkage unsigned long sys32_mremap(unsigned long addr,
         unsigned long old_len, unsigned long new_len,
         unsigned long flags, u32 __new_addr)
 {
-        struct vm_area_struct *vma;
         unsigned long ret = -EINVAL;
         unsigned long new_addr = __new_addr;
 
-        if (old_len > STACK_TOP32 || new_len > STACK_TOP32)
+        if (unlikely(sparc64_mmap_check(addr, old_len)))
                 goto out;
-        if (addr > STACK_TOP32 - old_len)
+        if (unlikely(sparc64_mmap_check(new_addr, new_len)))
                 goto out;
         down_write(&current->mm->mmap_sem);
-        if (flags & MREMAP_FIXED) {
-                if (new_addr > STACK_TOP32 - new_len)
-                        goto out_sem;
-        } else if (addr > STACK_TOP32 - new_len) {
-                unsigned long map_flags = 0;
-                struct file *file = NULL;
-
-                ret = -ENOMEM;
-                if (!(flags & MREMAP_MAYMOVE))
-                        goto out_sem;
-
-                vma = find_vma(current->mm, addr);
-                if (vma) {
-                        if (vma->vm_flags & VM_SHARED)
-                                map_flags |= MAP_SHARED;
-                        file = vma->vm_file;
-                }
-
-                /* MREMAP_FIXED checked above. */
-                new_addr = get_unmapped_area(file, addr, new_len,
-                                             vma ? vma->vm_pgoff : 0,
-                                             map_flags);
-                ret = new_addr;
-                if (new_addr & ~PAGE_MASK)
-                        goto out_sem;
-                flags |= MREMAP_FIXED;
-        }
         ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-out_sem:
         up_write(&current->mm->mmap_sem);
 out:
         return ret;

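The three mremap wrappers above (sparc_mremap, sys64_mremap, sys32_mremap) stop open-coding their address-space limits and instead run both the old and the new range through the per-arch mmap_check helper. Below is a rough, stand-alone illustration of that style of check; the STACK_TOP32 value and the function name are stand-ins rather than the kernel's exact definitions.

/* Editorial sketch of a 32-bit range check in the spirit of
 * sparc64_mmap_check(); the constant and the names are stand-ins. */
#include <errno.h>
#include <stdio.h>

#define STACK_TOP32 0xf0000000UL   /* assumed value, for illustration only */

static int sketch_mmap_check(unsigned long addr, unsigned long len)
{
        if (len >= STACK_TOP32)
                return -EINVAL;
        if (addr > STACK_TOP32 - len)   /* overflow-safe "addr + len > top" */
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("%d\n", sketch_mmap_check(0x10000000UL, 0x1000UL));  /* 0   */
        printf("%d\n", sketch_mmap_check(0xefffffffUL, 0x10000UL)); /* -22 */
        return 0;
}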
@@ -24,9 +24,8 @@
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
-#define arch_mmap_check        sparc_mmap_check
-int sparc_mmap_check(unsigned long addr, unsigned long len,
-                     unsigned long flags);
+#define arch_mmap_check(addr,len,flags)        sparc_mmap_check(addr,len)
+int sparc_mmap_check(unsigned long addr, unsigned long len);
 #endif
 #endif
 
@@ -24,9 +24,8 @@
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
-#define arch_mmap_check        sparc64_mmap_check
-int sparc64_mmap_check(unsigned long addr, unsigned long len,
-                       unsigned long flags);
+#define arch_mmap_check(addr,len,flags)        sparc64_mmap_check(addr,len)
+int sparc64_mmap_check(unsigned long addr, unsigned long len);
 #endif
 #endif
 
@@ -38,7 +38,7 @@ struct thread_info {
         struct task_struct      *task;
         unsigned long           flags;
         __u8                    fpsaved[7];
-        __u8                    pad;
+        __u8                    status;
         unsigned long           ksp;
 
         /* D$ line 2 */
@@ -217,7 +217,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
  *       nop
  */
 #define TIF_SYSCALL_TRACE      0       /* syscall trace active */
-#define TIF_RESTORE_SIGMASK    1       /* restore signal mask in do_signal() */
+/* flags bit 1 is available */
 #define TIF_SIGPENDING         2       /* signal pending */
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_PERFCTR            4       /* performance counters active */
@@ -244,14 +244,34 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_32BIT             (1<<TIF_32BIT)
 #define _TIF_SECCOMP           (1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT     (1<<TIF_SYSCALL_AUDIT)
-#define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
 #define _TIF_ABI_PENDING       (1<<TIF_ABI_PENDING)
 #define _TIF_POLLING_NRFLAG    (1<<TIF_POLLING_NRFLAG)
 
 #define _TIF_USER_WORK_MASK    ((0xff << TI_FLAG_WSAVED_SHIFT) | \
-                                (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | \
+                                (_TIF_SIGPENDING | \
                                  _TIF_NEED_RESCHED | _TIF_PERFCTR))
 
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ *
+ * Note that there are only 8 bits available.
+ */
+#define TS_RESTORE_SIGMASK     0x0001  /* restore signal mask in do_signal() */
+
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK       1
+static inline void set_restore_sigmask(void)
+{
+        struct thread_info *ti = current_thread_info();
+        ti->status |= TS_RESTORE_SIGMASK;
+        set_bit(TIF_SIGPENDING, &ti->flags);
+}
+#endif  /* !__ASSEMBLY__ */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_THREAD_INFO_H */

lib/lmb.c (45 changed lines)
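Two things happen in the lib/lmb.c hunks below: lmb_dump_all() becomes a runtime-selectable dump (an "lmb=debug" early parameter and pr_info instead of a compile-time DEBUG block), and the allocators round size up to the requested alignment once, at the top, so the free-space search and the lmb_add_region() reservation see the same value; previously only the reservation was aligned. A minimal sketch of the round-up arithmetic involved follows (same formula as the helper used by the lmb code; alignment must be a power of two; the function name here is a stand-in).

/* Editorial sketch of the power-of-two round-up used by the lmb code. */
#include <stdio.h>

typedef unsigned long long u64;

static u64 sketch_align_up(u64 size, u64 align)
{
        return (size + (align - 1)) & ~(align - 1);
}

int main(void)
{
        /* Aligning once up front means the overlap check and the
         * reservation both operate on the same size, e.g. 0x1234 -> 0x2000. */
        printf("0x%llx\n", sketch_align_up(0x1234ULL, 0x1000ULL));
        return 0;
}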
@@ -19,31 +19,42 @@
 
 struct lmb lmb;
 
+static int lmb_debug;
+
+static int __init early_lmb(char *p)
+{
+        if (p && strstr(p, "debug"))
+                lmb_debug = 1;
+        return 0;
+}
+early_param("lmb", early_lmb);
+
 void lmb_dump_all(void)
 {
-#ifdef DEBUG
         unsigned long i;
 
-        pr_debug("lmb_dump_all:\n");
-        pr_debug(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
-        pr_debug(" memory.size = 0x%llx\n",
+        if (!lmb_debug)
+                return;
+
+        pr_info("lmb_dump_all:\n");
+        pr_info(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
+        pr_info(" memory.size = 0x%llx\n",
             (unsigned long long)lmb.memory.size);
         for (i=0; i < lmb.memory.cnt ;i++) {
-                pr_debug(" memory.region[0x%x].base = 0x%llx\n",
+                pr_info(" memory.region[0x%lx].base = 0x%llx\n",
                     i, (unsigned long long)lmb.memory.region[i].base);
-                pr_debug(" .size = 0x%llx\n",
+                pr_info(" .size = 0x%llx\n",
                     (unsigned long long)lmb.memory.region[i].size);
         }
 
-        pr_debug(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
-        pr_debug(" reserved.size = 0x%lx\n", lmb.reserved.size);
+        pr_info(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
+        pr_info(" reserved.size = 0x%lx\n", lmb.reserved.size);
         for (i=0; i < lmb.reserved.cnt ;i++) {
-                pr_debug(" reserved.region[0x%x].base = 0x%llx\n",
+                pr_info(" reserved.region[0x%lx].base = 0x%llx\n",
                     i, (unsigned long long)lmb.reserved.region[i].base);
-                pr_debug(" .size = 0x%llx\n",
+                pr_info(" .size = 0x%llx\n",
                     (unsigned long long)lmb.reserved.region[i].size);
         }
-#endif /* DEBUG */
 }
 
 static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
@@ -286,8 +297,7 @@ static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
                 j = lmb_overlaps_region(&lmb.reserved, base, size);
                 if (j < 0) {
                         /* this area isn't reserved, take it */
-                        if (lmb_add_region(&lmb.reserved, base,
-                                           lmb_align_up(size, align)) < 0)
+                        if (lmb_add_region(&lmb.reserved, base, size) < 0)
                                 base = ~(u64)0;
                         return base;
                 }
@@ -333,6 +343,10 @@ u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
         struct lmb_region *mem = &lmb.memory;
         int i;
 
+        BUG_ON(0 == size);
+
+        size = lmb_align_up(size, align);
+
         for (i = 0; i < mem->cnt; i++) {
                 u64 ret = lmb_alloc_nid_region(&mem->region[i],
                                                nid_range,
@@ -370,6 +384,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
 
         BUG_ON(0 == size);
 
+        size = lmb_align_up(size, align);
+
         /* On some platforms, make sure we allocate lowmem */
         /* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
         if (max_addr == LMB_ALLOC_ANYWHERE)
@@ -393,8 +409,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
                 j = lmb_overlaps_region(&lmb.reserved, base, size);
                 if (j < 0) {
                         /* this area isn't reserved, take it */
-                        if (lmb_add_region(&lmb.reserved, base,
-                                           lmb_align_up(size, align)) < 0)
+                        if (lmb_add_region(&lmb.reserved, base, size) < 0)
                                 return 0;
                         return base;
                 }