hugetlbfs: correct handling of negative input to /proc/sys/vm/nr_hugepages
When the user writes a negative value into /proc/sys/vm/nr_hugepages, the kernel allocates as many hugepages as possible and then updates /proc/meminfo to reflect this. This change makes a negative input leave the nr_hugepages value unchanged instead.

Signed-off-by: Petr Holasek <pholasek@redhat.com>
Signed-off-by: Anton Arapov <anton@redhat.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
8afdcece49
commit
c033a93c0d
1 changed file with 2 additions and 4 deletions
|
@@ -1872,8 +1872,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 	unsigned long tmp;
 	int ret;
 
-	if (!write)
-		tmp = h->max_huge_pages;
+	tmp = h->max_huge_pages;
 
 	if (write && h->order >= MAX_ORDER)
 		return -EINVAL;
 
@@ -1938,8 +1937,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 	unsigned long tmp;
 	int ret;
 
-	if (!write)
-		tmp = h->nr_overcommit_huge_pages;
+	tmp = h->nr_overcommit_huge_pages;
 
 	if (write && h->order >= MAX_ORDER)
 		return -EINVAL;
 
|
Loading…
Reference in a new issue