Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Drop duplicated "config IOMMU_HELPER"
  [IA64] invoke oom-killer from page fault
  [IA64] use __ratelimit
  [IA64] Use set_cpus_allowed_ptr
  [IA64] Use set_cpus_allowed_ptr
  [IA64] arch/ia64/hp/common/sba_iommu.c: Rename dev_info to adi
  [IA64] removing redundant ifdef
commit cf77e988dd
10 changed files with 26 additions and 58 deletions
arch/ia64
@@ -59,9 +59,6 @@ config NEED_DMA_MAP_STATE
 config SWIOTLB
 	bool
 
-config IOMMU_HELPER
-	bool
-
 config GENERIC_LOCKBREAK
 	def_bool n
 
@@ -2046,13 +2046,13 @@ acpi_sba_ioc_add(struct acpi_device *device)
 	struct ioc *ioc;
 	acpi_status status;
 	u64 hpa, length;
-	struct acpi_device_info *dev_info;
+	struct acpi_device_info *adi;
 
 	status = hp_acpi_csr_space(device->handle, &hpa, &length);
 	if (ACPI_FAILURE(status))
 		return 1;
 
-	status = acpi_get_object_info(device->handle, &dev_info);
+	status = acpi_get_object_info(device->handle, &adi);
 	if (ACPI_FAILURE(status))
 		return 1;
 
@@ -2060,13 +2060,13 @@ acpi_sba_ioc_add(struct acpi_device *device)
 	 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
 	 * root bridges, and its CSR space includes the IOC function.
 	 */
-	if (strncmp("HWP0001", dev_info->hardware_id.string, 7) == 0) {
+	if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
 		hpa += ZX1_IOC_OFFSET;
 		/* zx1 based systems default to kernel page size iommu pages */
 		if (!iovp_shift)
 			iovp_shift = min(PAGE_SHIFT, 16);
 	}
-	kfree(dev_info);
+	kfree(adi);
 
 	/*
 	 * default anything not caught above or specified on cmdline to 4k
@@ -19,16 +19,12 @@
 
 static inline int pfn_to_nid(unsigned long pfn)
 {
-#ifdef CONFIG_NUMA
 	extern int paddr_to_nid(unsigned long);
 	int nid = paddr_to_nid(pfn << PAGE_SHIFT);
 	if (nid < 0)
 		return 0;
 	else
 		return nid;
-#else
-	return 0;
-#endif
 }
 
 #ifdef CONFIG_IA64_DIG /* DIG systems are small */
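The hunk above ("removing redundant ifdef") drops a CONFIG_NUMA test from inside pfn_to_nid(). The merge summary does not say why the guard was redundant; the usual reason is that the surrounding code is already compiled only when the same symbol is defined, so the #else branch can never be reached. A small standalone C sketch of that general pattern (names and the node lookup are illustrative, not the real ia64 header):

/* Illustrative only: when an inner #ifdef tests the same symbol as an
 * enclosing guard, the inner test is always true and its #else branch
 * is dead code, so the inner guard can simply be removed. */
#include <stdio.h>

#define CONFIG_NUMA 1

#ifdef CONFIG_NUMA
static int pfn_to_nid_demo(unsigned long pfn)
{
#ifdef CONFIG_NUMA              /* always true inside the outer guard: redundant */
	return (int)(pfn % 4);  /* stand-in for a real node lookup */
#else
	return 0;               /* unreachable */
#endif
}
#endif /* CONFIG_NUMA */

int main(void)
{
	printf("nid = %d\n", pfn_to_nid_demo(12345));
	return 0;
}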
@@ -113,7 +113,7 @@ processor_get_freq (
 	dprintk("processor_get_freq\n");
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	if (smp_processor_id() != cpu)
 		goto migrate_end;
 
@@ -121,7 +121,7 @@ processor_get_freq (
 	ret = processor_get_pstate(&value);
 
 	if (ret) {
-		set_cpus_allowed(current, saved_mask);
+		set_cpus_allowed_ptr(current, &saved_mask);
 		printk(KERN_WARNING "get performance failed with error %d\n",
 		       ret);
 		ret = 0;
@@ -131,7 +131,7 @@ processor_get_freq (
 	ret = (clock_freq*1000);
 
 migrate_end:
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return ret;
 }
 
@@ -151,7 +151,7 @@ processor_set_freq (
 	dprintk("processor_set_freq\n");
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	if (smp_processor_id() != cpu) {
 		retval = -EAGAIN;
 		goto migrate_end;
@@ -208,7 +208,7 @@ processor_set_freq (
 	retval = 0;
 
 migrate_end:
-	set_cpus_allowed(current, saved_mask);
+	set_cpus_allowed_ptr(current, &saved_mask);
 	return (retval);
 }
 
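The set_cpus_allowed_ptr hunks in this merge all make the same substitution: set_cpus_allowed(current, cpumask_of_cpu(cpu)), which passes a cpumask_t by value, becomes set_cpus_allowed_ptr(current, cpumask_of(cpu)), which passes a pointer to a pre-built per-CPU mask. The shape of the code is unchanged: save the current mask, pin to one CPU, do the work, restore the mask. A rough userspace analogue of that migrate-run-restore pattern, using pthread affinity calls in place of the kernel API (a sketch only, not the kernel implementation):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Save the caller's affinity mask, pin to one CPU, run fn, restore the mask.
 * pthread_{get,set}affinity_np stand in for the kernel's set_cpus_allowed_ptr(). */
static int run_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
	cpu_set_t saved, target;
	pthread_t self = pthread_self();

	if (pthread_getaffinity_np(self, sizeof(saved), &saved))
		return -1;              /* like saving current->cpus_allowed */

	CPU_ZERO(&target);
	CPU_SET(cpu, &target);
	if (pthread_setaffinity_np(self, sizeof(target), &target))
		return -1;              /* like set_cpus_allowed_ptr(current, cpumask_of(cpu)) */

	fn(arg);                        /* the work that must run on that CPU */

	/* like set_cpus_allowed_ptr(current, &saved_mask) */
	pthread_setaffinity_np(self, sizeof(saved), &saved);
	return 0;
}

static void report(void *arg)
{
	(void)arg;
	printf("running on CPU %d\n", sched_getcpu());
}

int main(void)
{
	return run_on_cpu(0, report, NULL) ? 1 : 0;
}

The kernel-side change is meant to be behaviour-preserving; only the calling convention (mask by value vs. by pointer) differs.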
@@ -29,6 +29,7 @@
 #include <linux/threads.h>
 #include <linux/bitops.h>
 #include <linux/irq.h>
+#include <linux/ratelimit.h>
 
 #include <asm/delay.h>
 #include <asm/intrinsics.h>
@@ -467,13 +468,9 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 		sp = ia64_getreg(_IA64_REG_SP);
 
 		if ((sp - bsp) < 1024) {
-			static unsigned char count;
-			static long last_time;
+			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
 
-			if (time_after(jiffies, last_time + 5 * HZ))
-				count = 0;
-			if (++count < 5) {
-				last_time = jiffies;
+			if (__ratelimit(&ratelimit)) {
 				printk("ia64_handle_irq: DANGER: less than "
 				       "1KB of free stack space!!\n"
 				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
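In the hunk above, the hand-rolled count/last_time throttling is replaced by the generic ratelimit helper: DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5) keeps the same policy of at most 5 messages per 5-second window, and __ratelimit() returns nonzero while a message is still allowed. A minimal userspace approximation of that window logic (a sketch of the policy only; the kernel helper additionally handles locking and "suppressed" accounting):

#include <stdio.h>
#include <time.h>

/* Allow at most `burst` events per `interval` seconds, mirroring
 * DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 5) followed by __ratelimit(&rs). */
struct ratelimit {
	time_t window_start;    /* start of the current interval */
	int    count;           /* events seen in the current interval */
	int    interval;        /* window length in seconds */
	int    burst;           /* events allowed per window */
};

static int ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (now - rs->window_start >= rs->interval) {
		rs->window_start = now;     /* new window: reset the counter */
		rs->count = 0;
	}
	if (rs->count < rs->burst) {
		rs->count++;
		return 1;                   /* caller may emit its message */
	}
	return 0;                           /* suppressed */
}

int main(void)
{
	struct ratelimit rs = { .window_start = 0, .count = 0, .interval = 5, .burst = 5 };

	for (int i = 0; i < 8; i++)
		printf("event %d: %s\n", i, ratelimit_ok(&rs) ? "logged" : "suppressed");
	return 0;
}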
@@ -404,10 +404,9 @@ static void
 call_on_cpu(int cpu, void (*fn)(void *), void *arg)
 {
 	cpumask_t save_cpus_allowed = current->cpus_allowed;
-	cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
-	set_cpus_allowed(current, new_cpus_allowed);
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	(*fn)(arg);
-	set_cpus_allowed(current, save_cpus_allowed);
+	set_cpus_allowed_ptr(current, &save_cpus_allowed);
 }
 
 static void
@@ -361,12 +361,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		return 0;
 
 	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	if (unlikely(retval))
 		return retval;
 
 	retval = cpu_cache_sysfs_init(cpu);
-	set_cpus_allowed(current, oldmask);
+	set_cpus_allowed_ptr(current, &oldmask);
 	if (unlikely(retval < 0))
 		return retval;
 
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/tty.h>
+#include <linux/ratelimit.h>
 
 #include <asm/intrinsics.h>
 #include <asm/processor.h>
@@ -1283,24 +1284,9 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 /*
  * Make sure we log the unaligned access, so that user/sysadmin can notice it and
  * eventually fix the program. However, we don't want to do that for every access so we
- * pace it with jiffies. This isn't really MP-safe, but it doesn't really have to be
- * either...
+ * pace it with jiffies.
  */
-static int
-within_logging_rate_limit (void)
-{
-	static unsigned long count, last_time;
-
-	if (time_after(jiffies, last_time + 5 * HZ))
-		count = 0;
-	if (count < 5) {
-		last_time = jiffies;
-		count++;
-		return 1;
-	}
-	return 0;
-
-}
+static DEFINE_RATELIMIT_STATE(logging_rate_limit, 5 * HZ, 5);
 
 void
 ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
@@ -1337,7 +1323,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 
 		if (!no_unaligned_warning &&
 		    !(current->thread.flags & IA64_THREAD_UAC_NOPRINT) &&
-		    within_logging_rate_limit())
+		    __ratelimit(&logging_rate_limit))
 		{
 			char buf[200];	/* comm[] is at most 16 bytes... */
 			size_t len;
@@ -1370,7 +1356,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 			}
 		}
 	} else {
-		if (within_logging_rate_limit()) {
+		if (__ratelimit(&logging_rate_limit)) {
 			printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
 			       ifa, regs->cr_iip + ipsr->ri);
 			if (unaligned_dump_stack)
@@ -148,7 +148,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if ((vma->vm_flags & mask) != mask)
 		goto bad_area;
 
-  survive:
 	/*
 	 * If for any reason at all we couldn't handle the fault, make
 	 * sure we exit gracefully rather than endlessly redo the
@@ -276,13 +275,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 
   out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (is_global_init(current)) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
-	if (user_mode(regs))
-		do_group_exit(SIGKILL);
-	goto no_context;
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
 }
@@ -629,9 +629,9 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
 		else {
 			/* migrate the task before calling SAL */
 			save_allowed = current->cpus_allowed;
-			set_cpus_allowed(current, cpumask_of_cpu(cpu));
+			set_cpus_allowed_ptr(current, cpumask_of(cpu));
 			sn_hwperf_call_sal(op_info);
-			set_cpus_allowed(current, save_allowed);
+			set_cpus_allowed_ptr(current, &save_allowed);
 		}
 	}
 	r = op_info->ret;