Merge branch 'akpm' (incoming from Andrew Morton)
Merge misc fixes from Andrew Morton: "21 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (21 commits)
  mm/balloon_compaction: fix deflation when compaction is disabled
  sh: fix sh770x SCIF memory regions
  zram: avoid NULL pointer access in concurrent situation
  mm/slab_common: don't check for duplicate cache names
  ocfs2: fix d_splice_alias() return code checking
  mm: rmap: split out page_remove_file_rmap()
  mm: memcontrol: fix missed end-writeback page accounting
  mm: page-writeback: inline account_page_dirtied() into single caller
  lib/bitmap.c: fix undefined shift in __bitmap_shift_{left|right}()
  drivers/rtc/rtc-bq32k.c: fix register value
  memory-hotplug: clear pgdat which is allocated by bootmem in try_offline_node()
  drivers/rtc/rtc-s3c.c: fix initialization failure without rtc source clock
  kernel/kmod: fix use-after-free of the sub_info structure
  drivers/rtc/rtc-pm8xxx.c: rework to support pm8941 rtc
  mm, thp: fix collapsing of hugepages on madvise
  drivers: of: add return value to of_reserved_mem_device_init()
  mm: free compound page with correct order
  gcov: add ARM64 to GCOV_PROFILE_ALL
  fsnotify: next_i is freed during fsnotify_unmount_inodes.
  mm/compaction.c: avoid premature range skip in isolate_migratepages_range
  ...
commit a7ca10f263
27 changed files with 393 additions and 348 deletions
@@ -118,7 +118,7 @@ static struct plat_sci_port scif0_platform_data = {
 };
 
 static struct resource scif0_resources[] = {
-        DEFINE_RES_MEM(0xfffffe80, 0x100),
+        DEFINE_RES_MEM(0xfffffe80, 0x10),
         DEFINE_RES_IRQ(evt2irq(0x4e0)),
 };
 
@@ -143,7 +143,7 @@ static struct plat_sci_port scif1_platform_data = {
 };
 
 static struct resource scif1_resources[] = {
-        DEFINE_RES_MEM(0xa4000150, 0x100),
+        DEFINE_RES_MEM(0xa4000150, 0x10),
         DEFINE_RES_IRQ(evt2irq(0x900)),
 };
 
@@ -169,7 +169,7 @@ static struct plat_sci_port scif2_platform_data = {
 };
 
 static struct resource scif2_resources[] = {
-        DEFINE_RES_MEM(0xa4000140, 0x100),
+        DEFINE_RES_MEM(0xa4000140, 0x10),
         DEFINE_RES_IRQ(evt2irq(0x880)),
 };
 
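Note: the three resource fixes above work because sh770x SCIF register blocks sit only 0x10 bytes apart, so a 0x100-byte window claims the neighbouring port's registers. A rough standalone sketch of the overlap arithmetic, with the helper name invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: does [a, a+alen) overlap [b, b+blen)? */
static bool regions_overlap(uint32_t a, uint32_t alen, uint32_t b, uint32_t blen)
{
        return a < b + blen && b < a + alen;
}

int main(void)
{
        /* scif1/scif2 bases from the hunks above are 0x10 apart */
        printf("0x100 windows overlap: %d\n",
               regions_overlap(0xa4000140, 0x100, 0xa4000150, 0x100));
        printf("0x10 windows overlap:  %d\n",
               regions_overlap(0xa4000140, 0x10, 0xa4000150, 0x10));
        return 0;
}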
@@ -223,9 +223,10 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 #undef pr_fmt
 #define pr_fmt(fmt) fmt
 
-static void rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
+static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
 {
         dev_set_cma_area(dev, rmem->priv);
+        return 0;
 }
 
 static void rmem_cma_device_release(struct reserved_mem *rmem,
@@ -99,11 +99,12 @@ static ssize_t mem_used_total_show(struct device *dev,
 {
         u64 val = 0;
         struct zram *zram = dev_to_zram(dev);
-        struct zram_meta *meta = zram->meta;
 
         down_read(&zram->init_lock);
-        if (init_done(zram))
+        if (init_done(zram)) {
+                struct zram_meta *meta = zram->meta;
                 val = zs_get_total_pages(meta->mem_pool);
+        }
         up_read(&zram->init_lock);
 
         return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
@@ -173,16 +174,17 @@ static ssize_t mem_used_max_store(struct device *dev,
         int err;
         unsigned long val;
         struct zram *zram = dev_to_zram(dev);
-        struct zram_meta *meta = zram->meta;
 
         err = kstrtoul(buf, 10, &val);
         if (err || val != 0)
                 return -EINVAL;
 
         down_read(&zram->init_lock);
-        if (init_done(zram))
+        if (init_done(zram)) {
+                struct zram_meta *meta = zram->meta;
                 atomic_long_set(&zram->stats.max_used_pages,
                                 zs_get_total_pages(meta->mem_pool));
+        }
         up_read(&zram->init_lock);
 
         return len;
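Note: both zram hunks move the zram->meta load inside the init_lock/init_done() check, because a concurrent reset can tear meta down and leave the early copy dangling. A minimal userspace sketch of the same discipline, assuming a pthreads rwlock in place of init_lock (all names invented):

#include <pthread.h>
#include <stddef.h>

struct meta { long pages; };

struct dev {
        pthread_rwlock_t init_lock;
        int initialized;
        struct meta *meta;   /* torn down only while holding init_lock as writer */
};

long used_pages(struct dev *d)
{
        long val = 0;

        pthread_rwlock_rdlock(&d->init_lock);
        if (d->initialized) {
                /* safe snapshot: any reset holds the write lock */
                struct meta *m = d->meta;
                val = m->pages;
        }
        pthread_rwlock_unlock(&d->init_lock);
        return val;
}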
@@ -243,23 +243,27 @@ static inline struct reserved_mem *__find_rmem(struct device_node *node)
  * This function assign memory region pointed by "memory-region" device tree
  * property to the given device.
  */
-void of_reserved_mem_device_init(struct device *dev)
+int of_reserved_mem_device_init(struct device *dev)
 {
         struct reserved_mem *rmem;
         struct device_node *np;
+        int ret;
 
         np = of_parse_phandle(dev->of_node, "memory-region", 0);
         if (!np)
-                return;
+                return -ENODEV;
 
         rmem = __find_rmem(np);
         of_node_put(np);
 
         if (!rmem || !rmem->ops || !rmem->ops->device_init)
-                return;
+                return -EINVAL;
 
-        rmem->ops->device_init(rmem, dev);
-        dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
+        ret = rmem->ops->device_init(rmem, dev);
+        if (ret == 0)
+                dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
+
+        return ret;
 }
 
 /**
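Note: the change turns a silently-failing void API into one that reports -ENODEV/-EINVAL or the init op's own status to the caller. A compact sketch of that error-propagation shape (types and names invented here, not the kernel's):

#include <errno.h>
#include <stdio.h>

struct rmem;

/* ops vtable in the style of reserved_mem_ops, with a fallible init */
struct rmem_ops {
        int (*device_init)(struct rmem *rmem, void *dev);
};

struct rmem {
        const char *name;
        const struct rmem_ops *ops;
};

int rmem_device_init(struct rmem *rmem, void *dev)
{
        int ret;

        if (!rmem)
                return -ENODEV;            /* nothing matched */
        if (!rmem->ops || !rmem->ops->device_init)
                return -EINVAL;            /* matched but unusable */

        ret = rmem->ops->device_init(rmem, dev);
        if (ret == 0)
                printf("assigned reserved memory node %s\n", rmem->name);
        return ret;                        /* callers can now react */
}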
@@ -1320,7 +1320,7 @@ config RTC_DRV_LPC32XX
 
 config RTC_DRV_PM8XXX
         tristate "Qualcomm PMIC8XXX RTC"
-        depends on MFD_PM8XXX
+        depends on MFD_PM8XXX || MFD_SPMI_PMIC
         help
           If you say yes here you get support for the
           Qualcomm PMIC8XXX RTC.
@@ -160,7 +160,7 @@ static int trickle_charger_of_init(struct device *dev, struct device_node *node)
                         dev_err(dev, "bq32k: diode and resistor mismatch\n");
                         return -EINVAL;
                 }
-                reg = 0x25;
+                reg = 0x45;
                 break;
 
         default:
@@ -27,21 +27,36 @@
 
 /* RTC_CTRL register bit fields */
 #define PM8xxx_RTC_ENABLE       BIT(7)
-#define PM8xxx_RTC_ALARM_ENABLE BIT(1)
 #define PM8xxx_RTC_ALARM_CLEAR  BIT(0)
 
 #define NUM_8_BIT_RTC_REGS      0x4
 
+/**
+ * struct pm8xxx_rtc_regs - describe RTC registers per PMIC versions
+ * @ctrl: base address of control register
+ * @write: base address of write register
+ * @read: base address of read register
+ * @alarm_ctrl: base address of alarm control register
+ * @alarm_ctrl2: base address of alarm control2 register
+ * @alarm_rw: base address of alarm read-write register
+ * @alarm_en: alarm enable mask
+ */
+struct pm8xxx_rtc_regs {
+        unsigned int ctrl;
+        unsigned int write;
+        unsigned int read;
+        unsigned int alarm_ctrl;
+        unsigned int alarm_ctrl2;
+        unsigned int alarm_rw;
+        unsigned int alarm_en;
+};
+
 /**
  * struct pm8xxx_rtc - rtc driver internal structure
  * @rtc: rtc device for this driver.
  * @regmap: regmap used to access RTC registers
  * @allow_set_time: indicates whether writing to the RTC is allowed
  * @rtc_alarm_irq: rtc alarm irq number.
- * @rtc_base: address of rtc control register.
- * @rtc_read_base: base address of read registers.
- * @rtc_write_base: base address of write registers.
- * @alarm_rw_base: base address of alarm registers.
  * @ctrl_reg: rtc control register.
  * @rtc_dev: device structure.
  * @ctrl_reg_lock: spinlock protecting access to ctrl_reg.
@@ -51,11 +66,7 @@ struct pm8xxx_rtc {
         struct regmap *regmap;
         bool allow_set_time;
         int rtc_alarm_irq;
-        int rtc_base;
-        int rtc_read_base;
-        int rtc_write_base;
-        int alarm_rw_base;
-        u8 ctrl_reg;
+        const struct pm8xxx_rtc_regs *regs;
         struct device *rtc_dev;
         spinlock_t ctrl_reg_lock;
 };
@@ -71,8 +82,10 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
         int rc, i;
         unsigned long secs, irq_flags;
-        u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0, ctrl_reg;
+        u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0;
+        unsigned int ctrl_reg;
         struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+        const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
         if (!rtc_dd->allow_set_time)
                 return -EACCES;
@@ -87,30 +100,30 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
         dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
 
         spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
-        ctrl_reg = rtc_dd->ctrl_reg;
 
-        if (ctrl_reg & PM8xxx_RTC_ALARM_ENABLE) {
+        rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
+        if (rc)
+                goto rtc_rw_fail;
+
+        if (ctrl_reg & regs->alarm_en) {
                 alarm_enabled = 1;
-                ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
-                rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+                ctrl_reg &= ~regs->alarm_en;
+                rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
                 if (rc) {
                         dev_err(dev, "Write to RTC control register failed\n");
                         goto rtc_rw_fail;
                 }
-                rtc_dd->ctrl_reg = ctrl_reg;
-        } else {
-                spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
         }
 
         /* Write 0 to Byte[0] */
-        rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, 0);
+        rc = regmap_write(rtc_dd->regmap, regs->write, 0);
         if (rc) {
                 dev_err(dev, "Write to RTC write data register failed\n");
                 goto rtc_rw_fail;
         }
 
         /* Write Byte[1], Byte[2], Byte[3] */
-        rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->rtc_write_base + 1,
+        rc = regmap_bulk_write(rtc_dd->regmap, regs->write + 1,
                                &value[1], sizeof(value) - 1);
         if (rc) {
                 dev_err(dev, "Write to RTC write data register failed\n");
@@ -118,24 +131,22 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
         }
 
         /* Write Byte[0] */
-        rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, value[0]);
+        rc = regmap_write(rtc_dd->regmap, regs->write, value[0]);
         if (rc) {
                 dev_err(dev, "Write to RTC write data register failed\n");
                 goto rtc_rw_fail;
         }
 
         if (alarm_enabled) {
-                ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
-                rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+                ctrl_reg |= regs->alarm_en;
+                rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
                 if (rc) {
                         dev_err(dev, "Write to RTC control register failed\n");
                         goto rtc_rw_fail;
                 }
-                rtc_dd->ctrl_reg = ctrl_reg;
         }
 
 rtc_rw_fail:
-        if (alarm_enabled)
-                spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+        spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
 
         return rc;
@@ -148,9 +159,9 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
         unsigned long secs;
         unsigned int reg;
         struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+        const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
-        rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base,
-                              value, sizeof(value));
+        rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value, sizeof(value));
         if (rc) {
                 dev_err(dev, "RTC read data register failed\n");
                 return rc;
@@ -160,14 +171,14 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
          * Read the LSB again and check if there has been a carry over.
          * If there is, redo the read operation.
          */
-        rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_read_base, &reg);
+        rc = regmap_read(rtc_dd->regmap, regs->read, &reg);
         if (rc < 0) {
                 dev_err(dev, "RTC read data register failed\n");
                 return rc;
         }
 
         if (unlikely(reg < value[0])) {
-                rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base,
+                rc = regmap_bulk_read(rtc_dd->regmap, regs->read,
                                       value, sizeof(value));
                 if (rc) {
                         dev_err(dev, "RTC read data register failed\n");
@@ -195,9 +206,11 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
 static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
         int rc, i;
-        u8 value[NUM_8_BIT_RTC_REGS], ctrl_reg;
+        u8 value[NUM_8_BIT_RTC_REGS];
+        unsigned int ctrl_reg;
         unsigned long secs, irq_flags;
         struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+        const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
         rtc_tm_to_time(&alarm->time, &secs);
 
@@ -208,28 +221,28 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 
         spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
 
-        rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->alarm_rw_base, value,
+        rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
                                sizeof(value));
         if (rc) {
                 dev_err(dev, "Write to RTC ALARM register failed\n");
                 goto rtc_rw_fail;
         }
 
-        ctrl_reg = rtc_dd->ctrl_reg;
+        rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+        if (rc)
+                goto rtc_rw_fail;
 
         if (alarm->enabled)
-                ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
+                ctrl_reg |= regs->alarm_en;
         else
-                ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+                ctrl_reg &= ~regs->alarm_en;
 
-        rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+        rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
         if (rc) {
-                dev_err(dev, "Write to RTC control register failed\n");
+                dev_err(dev, "Write to RTC alarm control register failed\n");
                 goto rtc_rw_fail;
         }
 
-        rtc_dd->ctrl_reg = ctrl_reg;
-
         dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
                 alarm->time.tm_hour, alarm->time.tm_min,
                 alarm->time.tm_sec, alarm->time.tm_mday,
@@ -245,8 +258,9 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
         u8 value[NUM_8_BIT_RTC_REGS];
         unsigned long secs;
         struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+        const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
 
-        rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->alarm_rw_base, value,
+        rc = regmap_bulk_read(rtc_dd->regmap, regs->alarm_rw, value,
                               sizeof(value));
         if (rc) {
                 dev_err(dev, "RTC alarm time read failed\n");
@@ -276,25 +290,26 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
         int rc;
         unsigned long irq_flags;
         struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
-        u8 ctrl_reg;
+        const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+        unsigned int ctrl_reg;
 
         spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
 
-        ctrl_reg = rtc_dd->ctrl_reg;
+        rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+        if (rc)
+                goto rtc_rw_fail;
 
         if (enable)
-                ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
+                ctrl_reg |= regs->alarm_en;
         else
-                ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
 
-        rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+                ctrl_reg &= ~regs->alarm_en;
+
+        rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
         if (rc) {
                 dev_err(dev, "Write to RTC control register failed\n");
                 goto rtc_rw_fail;
         }
 
-        rtc_dd->ctrl_reg = ctrl_reg;
-
 rtc_rw_fail:
         spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
         return rc;
@@ -311,6 +326,7 @@ static const struct rtc_class_ops pm8xxx_rtc_ops = {
 static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
 {
         struct pm8xxx_rtc *rtc_dd = dev_id;
+        const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
         unsigned int ctrl_reg;
         int rc;
         unsigned long irq_flags;
@@ -320,48 +336,100 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
         spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
 
         /* Clear the alarm enable bit */
-        ctrl_reg = rtc_dd->ctrl_reg;
-        ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
-
-        rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
+        rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
         if (rc) {
                 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
-                dev_err(rtc_dd->rtc_dev,
-                        "Write to RTC control register failed\n");
                 goto rtc_alarm_handled;
         }
 
-        rtc_dd->ctrl_reg = ctrl_reg;
+        ctrl_reg &= ~regs->alarm_en;
+
+        rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
+        if (rc) {
+                spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+                dev_err(rtc_dd->rtc_dev,
+                        "Write to alarm control register failed\n");
+                goto rtc_alarm_handled;
+        }
+
         spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
 
         /* Clear RTC alarm register */
-        rc = regmap_read(rtc_dd->regmap,
-                         rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
-                         &ctrl_reg);
+        rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl2, &ctrl_reg);
         if (rc) {
                 dev_err(rtc_dd->rtc_dev,
-                        "RTC Alarm control register read failed\n");
+                        "RTC Alarm control2 register read failed\n");
                 goto rtc_alarm_handled;
         }
 
-        ctrl_reg &= ~PM8xxx_RTC_ALARM_CLEAR;
-        rc = regmap_write(rtc_dd->regmap,
-                          rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
-                          ctrl_reg);
+        ctrl_reg |= PM8xxx_RTC_ALARM_CLEAR;
+        rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl2, ctrl_reg);
         if (rc)
                 dev_err(rtc_dd->rtc_dev,
-                        "Write to RTC Alarm control register failed\n");
+                        "Write to RTC Alarm control2 register failed\n");
 
 rtc_alarm_handled:
         return IRQ_HANDLED;
 }
 
+static int pm8xxx_rtc_enable(struct pm8xxx_rtc *rtc_dd)
+{
+        const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+        unsigned int ctrl_reg;
+        int rc;
+
+        /* Check if the RTC is on, else turn it on */
+        rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
+        if (rc)
+                return rc;
+
+        if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
+                ctrl_reg |= PM8xxx_RTC_ENABLE;
+                rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
+                if (rc)
+                        return rc;
+        }
+
+        return 0;
+}
+
+static const struct pm8xxx_rtc_regs pm8921_regs = {
+        .ctrl        = 0x11d,
+        .write       = 0x11f,
+        .read        = 0x123,
+        .alarm_rw    = 0x127,
+        .alarm_ctrl  = 0x11d,
+        .alarm_ctrl2 = 0x11e,
+        .alarm_en    = BIT(1),
+};
+
+static const struct pm8xxx_rtc_regs pm8058_regs = {
+        .ctrl        = 0x1e8,
+        .write       = 0x1ea,
+        .read        = 0x1ee,
+        .alarm_rw    = 0x1f2,
+        .alarm_ctrl  = 0x1e8,
+        .alarm_ctrl2 = 0x1e9,
+        .alarm_en    = BIT(1),
+};
+
+static const struct pm8xxx_rtc_regs pm8941_regs = {
+        .ctrl        = 0x6046,
+        .write       = 0x6040,
+        .read        = 0x6048,
+        .alarm_rw    = 0x6140,
+        .alarm_ctrl  = 0x6146,
+        .alarm_ctrl2 = 0x6148,
+        .alarm_en    = BIT(7),
+};
+
 /*
  * Hardcoded RTC bases until IORESOURCE_REG mapping is figured out
  */
 static const struct of_device_id pm8xxx_id_table[] = {
-        { .compatible = "qcom,pm8921-rtc", .data = (void *) 0x11D },
-        { .compatible = "qcom,pm8058-rtc", .data = (void *) 0x1E8 },
+        { .compatible = "qcom,pm8921-rtc", .data = &pm8921_regs },
+        { .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs },
+        { .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs },
         { },
 };
 MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
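Note: the core of the pm8941 rework is replacing one hardcoded base address with a per-variant table of register locations that the of_device_id match data points at. A standalone sketch of the table-driven idea, with invented names and the offsets taken from the hunk above:

#include <stdint.h>

/* Per-variant register map, in the spirit of struct pm8xxx_rtc_regs */
struct rtc_regs {
        uint16_t ctrl, write, read, alarm_rw, alarm_ctrl, alarm_ctrl2;
        uint8_t  alarm_en;  /* bit mask; differs across PMIC generations */
};

static const struct rtc_regs pm8921_layout = {
        .ctrl = 0x11d, .write = 0x11f, .read = 0x123,
        .alarm_rw = 0x127, .alarm_ctrl = 0x11d, .alarm_ctrl2 = 0x11e,
        .alarm_en = 1u << 1,
};

static const struct rtc_regs pm8941_layout = {
        .ctrl = 0x6046, .write = 0x6040, .read = 0x6048,
        .alarm_rw = 0x6140, .alarm_ctrl = 0x6146, .alarm_ctrl2 = 0x6148,
        .alarm_en = 1u << 7,
};

/* Bind one table at probe time; code never branches on chip type again */
const struct rtc_regs *pick_layout(int is_pm8941)
{
        return is_pm8941 ? &pm8941_layout : &pm8921_layout;
}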
@@ -369,7 +437,6 @@ MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
 static int pm8xxx_rtc_probe(struct platform_device *pdev)
 {
         int rc;
-        unsigned int ctrl_reg;
         struct pm8xxx_rtc *rtc_dd;
         const struct of_device_id *match;
 
@@ -399,33 +466,12 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
         rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node,
                                                        "allow-set-time");
 
-        rtc_dd->rtc_base = (long) match->data;
-
-        /* Setup RTC register addresses */
-        rtc_dd->rtc_write_base = rtc_dd->rtc_base + PM8XXX_RTC_WRITE_OFFSET;
-        rtc_dd->rtc_read_base = rtc_dd->rtc_base + PM8XXX_RTC_READ_OFFSET;
-        rtc_dd->alarm_rw_base = rtc_dd->rtc_base + PM8XXX_ALARM_RW_OFFSET;
-
+        rtc_dd->regs = match->data;
         rtc_dd->rtc_dev = &pdev->dev;
 
-        /* Check if the RTC is on, else turn it on */
-        rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_base, &ctrl_reg);
-        if (rc) {
-                dev_err(&pdev->dev, "RTC control register read failed!\n");
+        rc = pm8xxx_rtc_enable(rtc_dd);
+        if (rc)
                 return rc;
-        }
-
-        if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
-                ctrl_reg |= PM8xxx_RTC_ENABLE;
-                rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
-                if (rc) {
-                        dev_err(&pdev->dev,
-                                "Write to RTC control register failed\n");
-                        return rc;
-                }
-        }
-
-        rtc_dd->ctrl_reg = ctrl_reg;
-
         platform_set_drvdata(pdev, rtc_dd);
 
@@ -535,13 +535,15 @@ static int s3c_rtc_probe(struct platform_device *pdev)
         }
         clk_prepare_enable(info->rtc_clk);
 
-        info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
-        if (IS_ERR(info->rtc_src_clk)) {
-                dev_err(&pdev->dev, "failed to find rtc source clock\n");
-                return PTR_ERR(info->rtc_src_clk);
+        if (info->data->needs_src_clk) {
+                info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
+                if (IS_ERR(info->rtc_src_clk)) {
+                        dev_err(&pdev->dev,
+                                "failed to find rtc source clock\n");
+                        return PTR_ERR(info->rtc_src_clk);
+                }
+                clk_prepare_enable(info->rtc_src_clk);
         }
-        clk_prepare_enable(info->rtc_src_clk);
 
         /* check to see if everything is setup correctly */
         if (info->data->enable)
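Note: the s3c fix gates the optional source clock behind a per-variant capability flag, so SoCs without one no longer fail probe. A sketch of that shape; the two hypothetical_* helpers below are stand-ins, not a real API:

#include <errno.h>
#include <stddef.h>

struct clk;

struct variant_data {
        int needs_src_clk;  /* per-SoC capability flag, as in the driver data */
};

struct clk *hypothetical_clk_get(const char *name);  /* assumed helper */
int hypothetical_clk_enable(struct clk *c);          /* assumed helper */

int probe_clocks(const struct variant_data *data)
{
        /* only variants that declare the clock go looking for it */
        if (data->needs_src_clk) {
                struct clk *src = hypothetical_clk_get("rtc_src");
                if (!src)
                        return -ENOENT;
                return hypothetical_clk_enable(src);
        }
        return 0;
}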
@@ -288,20 +288,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
                 spin_unlock(&inode->i_lock);
 
                 /* In case the dropping of a reference would nuke next_i. */
-                if ((&next_i->i_sb_list != list) &&
-                    atomic_read(&next_i->i_count)) {
+                while (&next_i->i_sb_list != list) {
                         spin_lock(&next_i->i_lock);
-                        if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
+                        if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
+                            atomic_read(&next_i->i_count)) {
                                 __iget(next_i);
                                 need_iput = next_i;
+                                spin_unlock(&next_i->i_lock);
+                                break;
                         }
                         spin_unlock(&next_i->i_lock);
+                        next_i = list_entry(next_i->i_sb_list.next,
+                                            struct inode, i_sb_list);
                 }
 
                 /*
-                 * We can safely drop inode_sb_list_lock here because we hold
-                 * references on both inode and next_i.  Also no new inodes
-                 * will be added since the umount has begun.
+                 * We can safely drop inode_sb_list_lock here because either
+                 * we actually hold references on both inode and next_i or
+                 * end of list.  Also no new inodes will be added since the
+                 * umount has begun.
                  */
                 spin_unlock(&inode_sb_list_lock);
 
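Note: the old code pinned next_i blindly and could still end up referencing a freed inode; the new loop walks forward until it finds an entry that is actually safe to pin. A toy list version of that walk, with locking reduced to a refcount for brevity:

#include <stddef.h>

struct node {
        struct node *next;
        int refcount;
        int freeing;
};

/* Advance past entries that are being torn down instead of grabbing
 * the immediate neighbour unconditionally. */
struct node *pin_next(struct node *head)
{
        struct node *n;

        for (n = head; n != NULL; n = n->next) {
                if (!n->freeing && n->refcount > 0) {
                        n->refcount++;  /* "pin"; real code holds a spinlock */
                        return n;
                }
        }
        return NULL;  /* hit end of list without a pinnable entry */
}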
@@ -158,7 +158,7 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
          * NOTE: This dentry already has ->d_op set from
          * ocfs2_get_parent() and ocfs2_get_dentry()
          */
-        if (ret)
+        if (!IS_ERR_OR_NULL(ret))
                 dentry = ret;
 
         status = ocfs2_dentry_attach_lock(dentry, inode,
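Note: d_splice_alias() can return a valid dentry, NULL, or an ERR_PTR()-encoded errno, and a bare `if (ret)` wrongly treats the error encoding as a usable dentry. A userspace re-creation of the encoding that IS_ERR_OR_NULL() checks:

#include <stdio.h>

#define MAX_ERRNO 4095

/* The kernel convention: the top 4095 addresses encode negative errnos */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR_OR_NULL(const void *p)
{
        return !p || (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *ok = &(int){0}, *bad = ERR_PTR(-2 /* e.g. -ENOENT */);

        /* `if (ret)` is true for bad as well - that was the ocfs2 bug */
        printf("ok rejected: %d, bad rejected: %d\n",
               IS_ERR_OR_NULL(ok), IS_ERR_OR_NULL(bad));
        return 0;
}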
@@ -6,7 +6,8 @@
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern int __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
+extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+                                      unsigned long vm_flags);
 
 #define khugepaged_enabled() \
         (transparent_hugepage_flags & \
@@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
                 __khugepaged_exit(mm);
 }
 
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+                                   unsigned long vm_flags)
 {
         if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
                 if ((khugepaged_always() ||
-                     (khugepaged_req_madv() &&
-                      vma->vm_flags & VM_HUGEPAGE)) &&
-                    !(vma->vm_flags & VM_NOHUGEPAGE))
+                     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
+                    !(vm_flags & VM_NOHUGEPAGE))
                         if (__khugepaged_enter(vma->vm_mm))
                                 return -ENOMEM;
         return 0;
@@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 static inline void khugepaged_exit(struct mm_struct *mm)
 {
 }
-static inline int khugepaged_enter(struct vm_area_struct *vma)
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+                                   unsigned long vm_flags)
 {
         return 0;
 }
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+                                             unsigned long vm_flags)
 {
         return 0;
 }
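Note: all three khugepaged prototypes gain an explicit vm_flags argument so madvise can pass the value it is about to install, rather than the stale vma->vm_flags. The shape of that fix, reduced to a toy:

struct toy_vma { unsigned long vm_flags; };

#define TOY_HUGEPAGE   (1ul << 0)
#define TOY_NOHUGEPAGE (1ul << 1)

static int should_enter(const struct toy_vma *vma, unsigned long vm_flags)
{
        (void)vma;  /* the decision deliberately ignores the stale copy */
        return (vm_flags & TOY_HUGEPAGE) && !(vm_flags & TOY_NOHUGEPAGE);
}

int madvise_path(struct toy_vma *vma, unsigned long new_flags)
{
        /* decide using new_flags *before* publishing them */
        int enter = should_enter(vma, new_flags);

        vma->vm_flags = new_flags;
        return enter;
}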
@@ -139,48 +139,23 @@ static inline bool mem_cgroup_disabled(void)
         return false;
 }
 
-void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
-                                         unsigned long *flags);
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
+                                              unsigned long *flags);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
+                              unsigned long flags);
+void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+                                 enum mem_cgroup_stat_index idx, int val);
 
-extern atomic_t memcg_moving;
-
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
-                                        bool *locked, unsigned long *flags)
-{
-        if (mem_cgroup_disabled())
-                return;
-        rcu_read_lock();
-        *locked = false;
-        if (atomic_read(&memcg_moving))
-                __mem_cgroup_begin_update_page_stat(page, locked, flags);
-}
-
-void __mem_cgroup_end_update_page_stat(struct page *page,
-                                       unsigned long *flags);
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
-                                        bool *locked, unsigned long *flags)
-{
-        if (mem_cgroup_disabled())
-                return;
-        if (*locked)
-                __mem_cgroup_end_update_page_stat(page, flags);
-        rcu_read_unlock();
-}
-
-void mem_cgroup_update_page_stat(struct page *page,
-                                 enum mem_cgroup_stat_index idx,
-                                 int val);
-
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
                                             enum mem_cgroup_stat_index idx)
 {
-        mem_cgroup_update_page_stat(page, idx, 1);
+        mem_cgroup_update_page_stat(memcg, idx, 1);
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
                                             enum mem_cgroup_stat_index idx)
 {
-        mem_cgroup_update_page_stat(page, idx, -1);
+        mem_cgroup_update_page_stat(memcg, idx, -1);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
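Note: the replaced helpers form a begin/update/end transaction in which begin hands back the memcg that update and end must later receive, instead of each side re-deriving it from the page. A small sketch of such a transaction API, with a pthread mutex standing in for move_lock and the RCU pairing elided:

#include <pthread.h>
#include <stddef.h>

struct memcg_toy { pthread_mutex_t move_lock; int moving; long stat; };

/* begin returns the object the caller must thread through to end() */
struct memcg_toy *stat_begin(struct memcg_toy *cg, int *locked)
{
        *locked = 0;
        if (cg->moving) {            /* slow path only while pages move */
                pthread_mutex_lock(&cg->move_lock);
                *locked = 1;
        }
        return cg;
}

void stat_update(struct memcg_toy *cg, long val)
{
        if (cg)
                cg->stat += val;
}

void stat_end(struct memcg_toy *cg, int locked)
{
        if (cg && locked)
                pthread_mutex_unlock(&cg->move_lock);
}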
@@ -315,13 +290,14 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void mem_cgroup_begin_update_page_stat(struct page *page,
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
                                         bool *locked, unsigned long *flags)
 {
+        return NULL;
 }
 
-static inline void mem_cgroup_end_update_page_stat(struct page *page,
-                                        bool *locked, unsigned long *flags)
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
+                                        bool locked, unsigned long flags)
 {
 }
 
@@ -343,12 +319,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
         return false;
 }
 
-static inline void mem_cgroup_inc_page_stat(struct page *page,
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
                                             enum mem_cgroup_stat_index idx)
 {
 }
 
-static inline void mem_cgroup_dec_page_stat(struct page *page,
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
                                             enum mem_cgroup_stat_index idx)
 {
 }
@@ -1235,7 +1235,6 @@ int __set_page_dirty_no_writeback(struct page *page);
 int redirty_page_for_writepage(struct writeback_control *wbc,
                                 struct page *page);
 void account_page_dirtied(struct page *page, struct address_space *mapping);
-void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
@@ -16,7 +16,7 @@ struct reserved_mem {
 };
 
 struct reserved_mem_ops {
-        void (*device_init)(struct reserved_mem *rmem,
+        int (*device_init)(struct reserved_mem *rmem,
                             struct device *dev);
         void (*device_release)(struct reserved_mem *rmem,
                                struct device *dev);
@@ -28,14 +28,17 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
         _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
 
 #ifdef CONFIG_OF_RESERVED_MEM
-void of_reserved_mem_device_init(struct device *dev);
+int of_reserved_mem_device_init(struct device *dev);
 void of_reserved_mem_device_release(struct device *dev);
 
 void fdt_init_reserved_mem(void);
 void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
                                 phys_addr_t base, phys_addr_t size);
 #else
-static inline void of_reserved_mem_device_init(struct device *dev) { }
+static inline int of_reserved_mem_device_init(struct device *dev)
+{
+        return -ENOSYS;
+}
 static inline void of_reserved_mem_device_release(struct device *pdev) { }
 
 static inline void fdt_init_reserved_mem(void) { }
@@ -35,7 +35,7 @@ config GCOV_KERNEL
 config GCOV_PROFILE_ALL
         bool "Profile entire Kernel"
         depends on GCOV_KERNEL
-        depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM
+        depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM || ARM64
         default n
         ---help---
         This options activates profiling for the entire kernel.
@@ -196,12 +196,34 @@ int __request_module(bool wait, const char *fmt, ...)
 EXPORT_SYMBOL(__request_module);
 #endif /* CONFIG_MODULES */
 
+static void call_usermodehelper_freeinfo(struct subprocess_info *info)
+{
+        if (info->cleanup)
+                (*info->cleanup)(info);
+        kfree(info);
+}
+
+static void umh_complete(struct subprocess_info *sub_info)
+{
+        struct completion *comp = xchg(&sub_info->complete, NULL);
+        /*
+         * See call_usermodehelper_exec(). If xchg() returns NULL
+         * we own sub_info, the UMH_KILLABLE caller has gone away
+         * or the caller used UMH_NO_WAIT.
+         */
+        if (comp)
+                complete(comp);
+        else
+                call_usermodehelper_freeinfo(sub_info);
+}
+
 /*
  * This is the task which runs the usermode application
  */
 static int ____call_usermodehelper(void *data)
 {
         struct subprocess_info *sub_info = data;
+        int wait = sub_info->wait & ~UMH_KILLABLE;
         struct cred *new;
         int retval;
 
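Note: umh_complete() resolves the use-after-free by making ownership transfer explicit: exactly one side wins the xchg(), and the loser's duty (complete vs free) falls to the winner. The same handoff in portable C11 atomics, with invented names:

#include <stdatomic.h>
#include <stdlib.h>

struct sub_info_toy {
        _Atomic(int *) complete;  /* NULL once one side has claimed the object */
        /* ... payload ... */
};

void complete_or_free(struct sub_info_toy *info)
{
        int *waiter = atomic_exchange(&info->complete, NULL);

        if (waiter)
                *waiter = 1;   /* stand-in for complete(comp) */
        else
                free(info);    /* no waiter (gone, or UMH_NO_WAIT): we own it */
}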
@@ -221,7 +243,7 @@ static int ____call_usermodehelper(void *data)
         retval = -ENOMEM;
         new = prepare_kernel_cred(current);
         if (!new)
-                goto fail;
+                goto out;
 
         spin_lock(&umh_sysctl_lock);
         new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
@@ -233,7 +255,7 @@ static int ____call_usermodehelper(void *data)
                 retval = sub_info->init(sub_info, new);
                 if (retval) {
                         abort_creds(new);
-                        goto fail;
+                        goto out;
                 }
         }
 
@@ -242,12 +264,13 @@ static int ____call_usermodehelper(void *data)
         retval = do_execve(getname_kernel(sub_info->path),
                            (const char __user *const __user *)sub_info->argv,
                            (const char __user *const __user *)sub_info->envp);
+out:
+        sub_info->retval = retval;
+        /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
+        if (wait != UMH_WAIT_PROC)
+                umh_complete(sub_info);
         if (!retval)
                 return 0;
 
-        /* Exec failed? */
-fail:
-        sub_info->retval = retval;
         do_exit(0);
 }
|
||||||
|
@ -258,26 +281,6 @@ static int call_helper(void *data)
|
||||||
return ____call_usermodehelper(data);
|
return ____call_usermodehelper(data);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void call_usermodehelper_freeinfo(struct subprocess_info *info)
|
|
||||||
{
|
|
||||||
if (info->cleanup)
|
|
||||||
(*info->cleanup)(info);
|
|
||||||
kfree(info);
|
|
||||||
}
|
|
||||||
|
|
||||||
static void umh_complete(struct subprocess_info *sub_info)
|
|
||||||
{
|
|
||||||
struct completion *comp = xchg(&sub_info->complete, NULL);
|
|
||||||
/*
|
|
||||||
* See call_usermodehelper_exec(). If xchg() returns NULL
|
|
||||||
* we own sub_info, the UMH_KILLABLE caller has gone away.
|
|
||||||
*/
|
|
||||||
if (comp)
|
|
||||||
complete(comp);
|
|
||||||
else
|
|
||||||
call_usermodehelper_freeinfo(sub_info);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Keventd can't block, but this (a child) can. */
|
/* Keventd can't block, but this (a child) can. */
|
||||||
static int wait_for_helper(void *data)
|
static int wait_for_helper(void *data)
|
||||||
{
|
{
|
||||||
|
@@ -336,17 +339,7 @@ static void __call_usermodehelper(struct work_struct *work)
                 kmod_thread_locker = NULL;
         }
 
-        switch (wait) {
-        case UMH_NO_WAIT:
-                call_usermodehelper_freeinfo(sub_info);
-                break;
-
-        case UMH_WAIT_PROC:
-                if (pid > 0)
-                        break;
-                /* FALLTHROUGH */
-        case UMH_WAIT_EXEC:
-                if (pid < 0)
-                        sub_info->retval = pid;
+        if (pid < 0) {
+                sub_info->retval = pid;
                 umh_complete(sub_info);
         }
@@ -588,7 +581,12 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
                 goto out;
         }
 
-        sub_info->complete = &done;
+        /*
+         * Set the completion pointer only if there is a waiter.
+         * This makes it possible to use umh_complete to free
+         * the data structure in case of UMH_NO_WAIT.
+         */
+        sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
         sub_info->wait = wait;
 
         queue_work(khelper_wq, &sub_info->work);
@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
                 lower = src[off + k];
                 if (left && off + k == lim - 1)
                         lower &= mask;
-                dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
+                dst[k] = lower >> rem;
+                if (rem)
+                        dst[k] |= upper << (BITS_PER_LONG - rem);
                 if (left && k == lim - 1)
                         dst[k] &= mask;
         }
@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
                 upper = src[k];
                 if (left && k == lim - 1)
                         upper &= (1UL << left) - 1;
-                dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
+                dst[k + off] = upper << rem;
+                if (rem)
+                        dst[k + off] |= lower >> (BITS_PER_LONG - rem);
                 if (left && k + off == lim - 1)
                         dst[k + off] &= (1UL << left) - 1;
         }
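Note: both bitmap hunks exist because `x << (BITS_PER_LONG - rem)` is a full-width shift when rem == 0, which is undefined behaviour in C, so the no-shift case must be skipped explicitly rather than relied on to produce 0. A self-contained illustration of the guarded form:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

static unsigned long
merge_words(unsigned long upper, unsigned long lower, unsigned rem)
{
        unsigned long dst = lower >> rem;

        if (rem)  /* guard: BITS_PER_LONG - 0 would be an undefined shift */
                dst |= upper << (BITS_PER_LONG - rem);
        return dst;
}

int main(void)
{
        /* rem == 0 is now well-defined and simply passes lower through */
        printf("%lx\n", merge_words(0xffUL, 0x1UL, 0));
        return 0;
}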
@@ -68,11 +68,13 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
                  * to be released by the balloon driver.
                  */
                 if (trylock_page(page)) {
+#ifdef CONFIG_BALLOON_COMPACTION
                         if (!PagePrivate(page)) {
                                 /* raced with isolation */
                                 unlock_page(page);
                                 continue;
                         }
+#endif
                         spin_lock_irqsave(&b_dev_info->pages_lock, flags);
                         balloon_page_delete(page);
                         __count_vm_event(BALLOON_DEFLATE);
@@ -784,6 +784,9 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                         cc->nr_migratepages = 0;
                         break;
                 }
+
+                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+                        break;
         }
         acct_isolated(cc->zone, cc);
 
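Note: the added check stops the scan as soon as a full cluster has been isolated instead of running on through the rest of the range. The batching cut-off in miniature, with invented constants:

#include <stddef.h>

#define CLUSTER_MAX 32  /* stand-in for COMPACT_CLUSTER_MAX */

size_t isolate_batch(const int *pages, size_t n, int *out)
{
        size_t taken = 0;

        for (size_t i = 0; i < n; i++) {
                if (pages[i])           /* "isolatable" in this toy model */
                        out[taken++] = pages[i];
                if (taken == CLUSTER_MAX)
                        break;          /* full batch: stop scanning early */
        }
        return taken;
}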
@@ -200,7 +200,7 @@ static struct page *get_huge_zero_page(void)
         preempt_disable();
         if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
                 preempt_enable();
-                __free_page(zero_page);
+                __free_pages(zero_page, compound_order(zero_page));
                 goto retry;
         }
 
@@ -232,7 +232,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
                 struct page *zero_page = xchg(&huge_zero_page, NULL);
                 BUG_ON(zero_page == NULL);
-                __free_page(zero_page);
+                __free_pages(zero_page, compound_order(zero_page));
                 return HPAGE_PMD_NR;
         }
 
@@ -803,7 +803,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 return VM_FAULT_FALLBACK;
         if (unlikely(anon_vma_prepare(vma)))
                 return VM_FAULT_OOM;
-        if (unlikely(khugepaged_enter(vma)))
+        if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
                 return VM_FAULT_OOM;
         if (!(flags & FAULT_FLAG_WRITE) &&
                         transparent_hugepage_use_zero_page()) {
@@ -1970,7 +1970,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                  * register it here without waiting a page fault that
                  * may not happen any time soon.
                  */
-                if (unlikely(khugepaged_enter_vma_merge(vma)))
+                if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
                         return -ENOMEM;
                 break;
         case MADV_NOHUGEPAGE:
@@ -2071,7 +2071,8 @@ int __khugepaged_enter(struct mm_struct *mm)
         return 0;
 }
 
-int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+                               unsigned long vm_flags)
 {
         unsigned long hstart, hend;
         if (!vma->anon_vma)
@@ -2083,11 +2084,11 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
         if (vma->vm_ops)
                 /* khugepaged not yet working on file or special mappings */
                 return 0;
-        VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
+        VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
         hend = vma->vm_end & HPAGE_PMD_MASK;
         if (hstart < hend)
-                return khugepaged_enter(vma);
+                return khugepaged_enter(vma, vm_flags);
         return 0;
 }
 
mm/memcontrol.c
@@ -1536,12 +1536,8 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg)
  * start move here.
  */
 
-/* for quick checking without looking up memcg */
-atomic_t memcg_moving __read_mostly;
-
 static void mem_cgroup_start_move(struct mem_cgroup *memcg)
 {
-        atomic_inc(&memcg_moving);
         atomic_inc(&memcg->moving_account);
         synchronize_rcu();
 }
@@ -1552,11 +1548,9 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
          * Now, mem_cgroup_clear_mc() may call this function with NULL.
          * We check NULL in callee rather than caller.
          */
-        if (memcg) {
-                atomic_dec(&memcg_moving);
+        if (memcg)
                 atomic_dec(&memcg->moving_account);
-        }
 }
 
 /*
  * A routine for checking "mem" is under move_account() or not.
@ -2204,41 +2198,52 @@ bool mem_cgroup_oom_synchronize(bool handle)
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/**
|
||||||
* Used to update mapped file or writeback or other statistics.
|
* mem_cgroup_begin_page_stat - begin a page state statistics transaction
|
||||||
|
* @page: page that is going to change accounted state
|
||||||
|
* @locked: &memcg->move_lock slowpath was taken
|
||||||
|
* @flags: IRQ-state flags for &memcg->move_lock
|
||||||
*
|
*
|
||||||
* Notes: Race condition
|
* This function must mark the beginning of an accounted page state
|
||||||
|
* change to prevent double accounting when the page is concurrently
|
||||||
|
* being moved to another memcg:
|
||||||
*
|
*
|
||||||
* Charging occurs during page instantiation, while the page is
|
* memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
|
||||||
* unmapped and locked in page migration, or while the page table is
|
* if (TestClearPageState(page))
|
||||||
* locked in THP migration. No race is possible.
|
* mem_cgroup_update_page_stat(memcg, state, -1);
|
||||||
|
* mem_cgroup_end_page_stat(memcg, locked, flags);
|
||||||
*
|
*
|
||||||
* Uncharge happens to pages with zero references, no race possible.
|
* The RCU lock is held throughout the transaction. The fast path can
|
||||||
|
* get away without acquiring the memcg->move_lock (@locked is false)
|
||||||
|
* because page moving starts with an RCU grace period.
|
||||||
*
|
*
|
||||||
* Charge moving between groups is protected by checking mm->moving
|
* The RCU lock also protects the memcg from being freed when the page
|
||||||
* account and taking the move_lock in the slowpath.
|
* state that is going to change is the only thing preventing the page
|
||||||
|
* from being uncharged. E.g. end-writeback clearing PageWriteback(),
|
||||||
|
* which allows migration to go ahead and uncharge the page before the
|
||||||
|
* account transaction might be complete.
|
||||||
*/
|
*/
|
||||||
|
struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
|
||||||
void __mem_cgroup_begin_update_page_stat(struct page *page,
|
bool *locked,
|
||||||
bool *locked, unsigned long *flags)
|
unsigned long *flags)
|
||||||
{
|
{
|
||||||
struct mem_cgroup *memcg;
|
struct mem_cgroup *memcg;
|
||||||
struct page_cgroup *pc;
|
struct page_cgroup *pc;
|
||||||
|
|
||||||
|
rcu_read_lock();
|
||||||
|
|
||||||
|
if (mem_cgroup_disabled())
|
||||||
|
return NULL;
|
||||||
|
|
||||||
pc = lookup_page_cgroup(page);
|
pc = lookup_page_cgroup(page);
|
||||||
again:
|
again:
|
||||||
memcg = pc->mem_cgroup;
|
memcg = pc->mem_cgroup;
|
||||||
if (unlikely(!memcg || !PageCgroupUsed(pc)))
|
if (unlikely(!memcg || !PageCgroupUsed(pc)))
|
||||||
return;
|
return NULL;
|
||||||
/*
|
|
||||||
* If this memory cgroup is not under account moving, we don't
|
*locked = false;
|
||||||
* need to take move_lock_mem_cgroup(). Because we already hold
|
|
||||||
* rcu_read_lock(), any calls to move_account will be delayed until
|
|
||||||
* rcu_read_unlock().
|
|
||||||
*/
|
|
||||||
VM_BUG_ON(!rcu_read_lock_held());
|
|
||||||
if (atomic_read(&memcg->moving_account) <= 0)
|
if (atomic_read(&memcg->moving_account) <= 0)
|
||||||
return;
|
return memcg;
|
||||||
|
|
||||||
move_lock_mem_cgroup(memcg, flags);
|
move_lock_mem_cgroup(memcg, flags);
|
||||||
if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
|
if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
|
||||||
|
@ -2246,35 +2251,39 @@ void __mem_cgroup_begin_update_page_stat(struct page *page,
|
||||||
goto again;
|
goto again;
|
||||||
}
|
}
|
||||||
*locked = true;
|
*locked = true;
|
||||||
|
|
||||||
|
return memcg;
|
||||||
}
|
}
|
||||||
|
|
||||||
void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
|
/**
|
||||||
{
|
* mem_cgroup_end_page_stat - finish a page state statistics transaction
|
||||||
struct page_cgroup *pc = lookup_page_cgroup(page);
|
* @memcg: the memcg that was accounted against
|
||||||
|
* @locked: value received from mem_cgroup_begin_page_stat()
|
||||||
/*
|
* @flags: value received from mem_cgroup_begin_page_stat()
|
||||||
* It's guaranteed that pc->mem_cgroup never changes while
|
|
||||||
* lock is held because a routine modifies pc->mem_cgroup
|
|
||||||
* should take move_lock_mem_cgroup().
|
|
||||||
*/
|
*/
|
||||||
move_unlock_mem_cgroup(pc->mem_cgroup, flags);
|
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
|
||||||
|
unsigned long flags)
|
||||||
|
{
|
||||||
|
if (memcg && locked)
|
||||||
|
move_unlock_mem_cgroup(memcg, &flags);
|
||||||
|
|
||||||
|
rcu_read_unlock();
|
||||||
}
|
}
|
||||||
|
|
||||||
void mem_cgroup_update_page_stat(struct page *page,
|
/**
|
||||||
|
* mem_cgroup_update_page_stat - update page state statistics
|
||||||
|
* @memcg: memcg to account against
|
||||||
|
* @idx: page state item to account
|
||||||
|
* @val: number of pages (positive or negative)
|
||||||
|
*
|
||||||
|
* See mem_cgroup_begin_page_stat() for locking requirements.
|
||||||
|
*/
|
||||||
|
void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
|
||||||
enum mem_cgroup_stat_index idx, int val)
|
enum mem_cgroup_stat_index idx, int val)
|
||||||
{
|
{
|
||||||
struct mem_cgroup *memcg;
|
|
||||||
struct page_cgroup *pc = lookup_page_cgroup(page);
|
|
||||||
unsigned long uninitialized_var(flags);
|
|
||||||
|
|
||||||
if (mem_cgroup_disabled())
|
|
||||||
return;
|
|
||||||
|
|
||||||
VM_BUG_ON(!rcu_read_lock_held());
|
VM_BUG_ON(!rcu_read_lock_held());
|
||||||
memcg = pc->mem_cgroup;
|
|
||||||
if (unlikely(!memcg || !PageCgroupUsed(pc)))
|
|
||||||
return;
|
|
||||||
|
|
||||||
|
if (memcg)
|
||||||
this_cpu_add(memcg->stat->count[idx], val);
|
this_cpu_add(memcg->stat->count[idx], val);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1912,7 +1912,6 @@ void try_offline_node(int nid)
 	unsigned long start_pfn = pgdat->node_start_pfn;
 	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
 	unsigned long pfn;
-	struct page *pgdat_page = virt_to_page(pgdat);
 	int i;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
@@ -1941,10 +1940,6 @@ void try_offline_node(int nid)
 	node_set_offline(nid);
 	unregister_one_node(nid);
 
-	if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page))
-		/* node data is allocated from boot memory */
-		return;
-
 	/* free waittable in each zone */
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		struct zone *zone = pgdat->node_zones + i;
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1080,7 +1080,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 					end, prev->vm_pgoff, NULL);
 		if (err)
 			return NULL;
-		khugepaged_enter_vma_merge(prev);
+		khugepaged_enter_vma_merge(prev, vm_flags);
 		return prev;
 	}
 
@@ -1099,7 +1099,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 					next->vm_pgoff - pglen, NULL);
 		if (err)
 			return NULL;
-		khugepaged_enter_vma_merge(area);
+		khugepaged_enter_vma_merge(area, vm_flags);
 		return area;
 	}
 
@@ -2208,7 +2208,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		}
 	}
 	vma_unlock_anon_vma(vma);
-	khugepaged_enter_vma_merge(vma);
+	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(vma->vm_mm);
 	return error;
 }
@@ -2277,7 +2277,7 @@ int expand_downwards(struct vm_area_struct *vma,
 		}
 	}
 	vma_unlock_anon_vma(vma);
-	khugepaged_enter_vma_merge(vma);
+	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(vma->vm_mm);
 	return error;
 }
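All four call sites now pass the flags the merged or expanded VMA will end up with, because vma->vm_flags is not necessarily final at vma_merge() time; this is what lets madvise(MADV_HUGEPAGE) reliably register the region with khugepaged. The matching header change is not part of this excerpt; a sketch of the assumed shape of the updated prototype:

	/* include/linux/khugepaged.h: assumed shape of the new prototype,
	 * inferred from the call sites above */
	extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
					      unsigned long vm_flags);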
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2115,23 +2115,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 }
 EXPORT_SYMBOL(account_page_dirtied);
 
-/*
- * Helper function for set_page_writeback family.
- *
- * The caller must hold mem_cgroup_begin/end_update_page_stat() lock
- * while calling this function.
- * See test_set_page_writeback for example.
- *
- * NOTE: Unlike account_page_dirtied this does not rely on being atomic
- * wrt interrupts.
- */
-void account_page_writeback(struct page *page)
-{
-	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
-	inc_zone_page_state(page, NR_WRITEBACK);
-}
-EXPORT_SYMBOL(account_page_writeback);
-
 /*
  * For address_spaces which do not use buffers.  Just tag the page as dirty in
  * its radix tree.
@@ -2344,11 +2327,12 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
 int test_clear_page_writeback(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
-	int ret;
-	bool locked;
 	unsigned long memcg_flags;
+	struct mem_cgroup *memcg;
+	bool locked;
+	int ret;
 
-	mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
 	if (mapping) {
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
@@ -2369,22 +2353,23 @@ int test_clear_page_writeback(struct page *page)
 		ret = TestClearPageWriteback(page);
 	}
 	if (ret) {
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
+		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
 		dec_zone_page_state(page, NR_WRITEBACK);
 		inc_zone_page_state(page, NR_WRITTEN);
 	}
-	mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
+	mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
 	return ret;
 }
 
 int __test_set_page_writeback(struct page *page, bool keep_write)
 {
 	struct address_space *mapping = page_mapping(page);
-	int ret;
-	bool locked;
 	unsigned long memcg_flags;
+	struct mem_cgroup *memcg;
+	bool locked;
+	int ret;
 
-	mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
 	if (mapping) {
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
@@ -2410,9 +2395,11 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 	} else {
 		ret = TestSetPageWriteback(page);
 	}
-	if (!ret)
-		account_page_writeback(page);
-	mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
+	if (!ret) {
+		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
+		inc_zone_page_state(page, NR_WRITEBACK);
+	}
+	mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
 	return ret;
 
 }
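With account_page_writeback() inlined into its single caller, the lock nesting becomes visible in one place: the memcg page-stat transaction is outermost, and the mapping's irq-safe tree_lock nests inside it. Condensed to a skeleton (a sketch of __test_set_page_writeback() above; radix-tree tag handling elided):

	struct mem_cgroup *memcg;
	unsigned long memcg_flags, flags;
	bool locked;
	int ret;

	memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
	spin_lock_irqsave(&mapping->tree_lock, flags);	/* nests inside */
	ret = TestSetPageWriteback(page);
	/* ... writeback tags updated in the radix tree ... */
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	if (!ret) {
		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
		inc_zone_page_state(page, NR_WRITEBACK);
	}
	mem_cgroup_end_page_stat(memcg, locked, memcg_flags);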
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -171,6 +171,7 @@ static void free_page_cgroup(void *addr)
 			sizeof(struct page_cgroup) * PAGES_PER_SECTION;
 
 		BUG_ON(PageReserved(page));
+		kmemleak_free(addr);
 		free_pages_exact(addr, table_size);
 	}
 }
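The kmemleak_free() pairs with the kmemleak registration on the allocation side; without it, kmemleak keeps scanning a region that has already been handed back to the page allocator. For reference, a sketch of how the allocation side is assumed to pair up (condensed from alloc_page_cgroup(), which is not part of this excerpt):

	void *addr;

	addr = alloc_pages_exact_nid(nid, table_size, flags | __GFP_NOWARN);
	if (addr) {
		/* registered here, so it must be unregistered before freeing */
		kmemleak_alloc(addr, table_size, 1, flags);
		return addr;
	}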
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1042,15 +1042,46 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
-	bool locked;
+	struct mem_cgroup *memcg;
 	unsigned long flags;
+	bool locked;
 
-	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 	}
-	mem_cgroup_end_update_page_stat(page, &locked, &flags);
+	mem_cgroup_end_page_stat(memcg, locked, flags);
+}
+
+static void page_remove_file_rmap(struct page *page)
+{
+	struct mem_cgroup *memcg;
+	unsigned long flags;
+	bool locked;
+
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		goto out;
+
+	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
+	if (unlikely(PageHuge(page)))
+		goto out;
+
+	/*
+	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * these counters are not modified in interrupt context, and
+	 * pte lock(a spinlock) is held, which implies preemption disabled.
+	 */
+	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+
+	if (unlikely(PageMlocked(page)))
+		clear_page_mlock(page);
+out:
+	mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /**
@@ -1061,46 +1092,33 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
-	bool anon = PageAnon(page);
-	bool locked;
-	unsigned long flags;
-
-	/*
-	 * The anon case has no mem_cgroup page_stat to update; but may
-	 * uncharge_page() below, where the lock ordering can deadlock if
-	 * we hold the lock against page_stat move: so avoid it on anon.
-	 */
-	if (!anon)
-		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+	if (!PageAnon(page)) {
+		page_remove_file_rmap(page);
+		return;
+	}
 
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
-		goto out;
+		return;
+
+	/* Hugepages are not counted in NR_ANON_PAGES for now. */
+	if (unlikely(PageHuge(page)))
+		return;
 
 	/*
-	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
-	 * and not charged by memcg for now.
-	 *
 	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	if (unlikely(PageHuge(page)))
-		goto out;
-	if (anon) {
-		if (PageTransHuge(page))
-			__dec_zone_page_state(page,
-					      NR_ANON_TRANSPARENT_HUGEPAGES);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-				-hpage_nr_pages(page));
-	} else {
-		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
-		mem_cgroup_end_update_page_stat(page, &locked, &flags);
-	}
+	if (PageTransHuge(page))
+		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+
+	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+			      -hpage_nr_pages(page));
+
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
 
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap
@@ -1110,10 +1128,6 @@ void page_remove_rmap(struct page *page)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-	return;
-out:
-	if (!anon)
-		mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /*
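Both the file path and the anon path rely on the caller already holding the pte lock, which is what makes the irq-unsafe __{inc|mod}_zone_page_state() calls above safe: a held spinlock implies preemption is disabled. A condensed sketch of a typical calling context (assumed; pte clearing and TLB details elided):

	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	/* ... pte cleared, TLB flush queued ... */
	page_remove_rmap(page);		/* pte lock held: no preemption */
	pte_unmap_unlock(pte, ptl);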
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -93,16 +93,6 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
 				   s->object_size);
 			continue;
 		}
-
-#if !defined(CONFIG_SLUB)
-		if (!strcmp(s->name, name)) {
-			pr_err("%s (%s): Cache name already exists.\n",
-			       __func__, name);
-			dump_stack();
-			s = NULL;
-			return -EINVAL;
-		}
-#endif
 	}
 
 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
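With the #if !defined(CONFIG_SLUB) block gone, a duplicate cache name is no longer rejected; SLUB never enforced uniqueness anyway, since it may merge similar caches under one name. A sketch of what the sanity check now permits (hypothetical cache name, error handling elided):

	struct kmem_cache *a, *b;

	a = kmem_cache_create("demo_cache", 64, 0, 0, NULL);
	b = kmem_cache_create("demo_cache", 128, 0, 0, NULL);	/* no longer -EINVAL */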