FROMLIST: scs: add accounting
This change adds accounting for the memory allocated for shadow stacks.

Bug: 145210207
Change-Id: I51157fe0b23b4cb28bb33c86a5dfe3ac911296a4
(am from https://lore.kernel.org/patchwork/patch/1149055/)
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
parent 16e13c60ff
commit 9c265248fa
6 changed files with 42 additions and 0 deletions
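With this change applied, /proc/meminfo (and the per-node meminfo shown below) reports a "ShadowCallStack:" line on kernels built with CONFIG_SHADOW_CALL_STACK, and /proc/vmstat gains "nr_shadow_call_stack_bytes". As a quick way to observe the new counter from userspace, here is a minimal sketch (illustrative only, not part of the patch) that prints that line from /proc/meminfo:

/*
 * Print the ShadowCallStack line that this patch adds to /proc/meminfo.
 * The line only exists when the kernel is built with
 * CONFIG_SHADOW_CALL_STACK; otherwise nothing is printed.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("/proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "ShadowCallStack:", 16) == 0)
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}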
drivers/base/node.c
@@ -114,6 +114,9 @@ static ssize_t node_read_meminfo(struct device *dev,
 		"Node %d AnonPages: %8lu kB\n"
 		"Node %d Shmem: %8lu kB\n"
 		"Node %d KernelStack: %8lu kB\n"
+#ifdef CONFIG_SHADOW_CALL_STACK
+		"Node %d ShadowCallStack:%8lu kB\n"
+#endif
 		"Node %d PageTables: %8lu kB\n"
 		"Node %d NFS_Unstable: %8lu kB\n"
 		"Node %d Bounce: %8lu kB\n"
@@ -134,6 +137,9 @@ static ssize_t node_read_meminfo(struct device *dev,
 		nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
 		nid, K(i.sharedram),
 		nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+		nid, sum_zone_node_page_state(nid, NR_KERNEL_SCS_BYTES) / 1024,
+#endif
 		nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
 		nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
 		nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
fs/proc/meminfo.c
@@ -104,6 +104,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		global_node_page_state(NR_SLAB_UNRECLAIMABLE));
 	seq_printf(m, "KernelStack: %8lu kB\n",
 		global_zone_page_state(NR_KERNEL_STACK_KB));
+#ifdef CONFIG_SHADOW_CALL_STACK
+	seq_printf(m, "ShadowCallStack:%8lu kB\n",
+		global_zone_page_state(NR_KERNEL_SCS_BYTES) / 1024);
+#endif
 	show_val_kb(m, "PageTables: ",
 		global_zone_page_state(NR_PAGETABLE));
 #ifdef CONFIG_QUICKLIST
include/linux/mmzone.h
@@ -142,6 +142,9 @@ enum zone_stat_item {
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
 	NR_PAGETABLE,		/* used for pagetables */
 	NR_KERNEL_STACK_KB,	/* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+	NR_KERNEL_SCS_BYTES,	/* measured in bytes */
+#endif
 	/* Second 128 byte cacheline */
 	NR_BOUNCE,
 #if IS_ENABLED(CONFIG_ZSMALLOC)
kernel/scs.c
@@ -12,6 +12,7 @@
 #include <linux/scs.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/vmstat.h>
 #include <asm/scs.h>
 
 static inline void *__scs_base(struct task_struct *tsk)
@@ -89,6 +90,11 @@ static void scs_free(void *s)
 	vfree_atomic(s);
 }
 
+static struct page *__scs_page(struct task_struct *tsk)
+{
+	return vmalloc_to_page(__scs_base(tsk));
+}
+
 static int scs_cleanup(unsigned int cpu)
 {
 	int i;
@@ -135,6 +141,11 @@ static inline void scs_free(void *s)
 	kmem_cache_free(scs_cache, s);
 }
 
+static struct page *__scs_page(struct task_struct *tsk)
+{
+	return virt_to_page(__scs_base(tsk));
+}
+
 void __init scs_init(void)
 {
 	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, SCS_SIZE,
@@ -153,6 +164,12 @@ void scs_task_reset(struct task_struct *tsk)
 	task_set_scs(tsk, __scs_base(tsk));
 }
 
+static void scs_account(struct task_struct *tsk, int account)
+{
+	mod_zone_page_state(page_zone(__scs_page(tsk)), NR_KERNEL_SCS_BYTES,
+		account * SCS_SIZE);
+}
+
 int scs_prepare(struct task_struct *tsk, int node)
 {
 	void *s;
@@ -162,6 +179,8 @@ int scs_prepare(struct task_struct *tsk, int node)
 		return -ENOMEM;
 
 	task_set_scs(tsk, s);
+	scs_account(tsk, 1);
+
 	return 0;
 }
 
@@ -182,6 +201,7 @@ void scs_release(struct task_struct *tsk)
 
 	WARN_ON(scs_corrupted(tsk));
 
+	scs_account(tsk, -1);
 	task_set_scs(tsk, NULL);
 	scs_free(s);
 }
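The accounting itself lives in scs_account() above: one signed helper adjusts NR_KERNEL_SCS_BYTES by SCS_SIZE in the zone of the shadow stack's backing page, called with +1 from scs_prepare() and -1 from scs_release(), so the counter returns to zero as long as the calls stay paired. A small self-contained sketch of that pairing follows (userspace C with illustrative names and an assumed SCS_SIZE value; the kernel code uses mod_zone_page_state() as shown above):

/*
 * Simplified model of the scs_account() pairing. scs_bytes stands in
 * for the NR_KERNEL_SCS_BYTES zone counter, and SCS_SIZE_BYTES is an
 * assumed value used only for this demonstration.
 */
#include <assert.h>
#include <stdio.h>

#define SCS_SIZE_BYTES 1024

static long scs_bytes;

static void account_scs(int account)
{
	/* +1 on scs_prepare(), -1 on scs_release() */
	scs_bytes += (long)account * SCS_SIZE_BYTES;
}

int main(void)
{
	account_scs(1);			/* task A gets a shadow stack */
	account_scs(1);			/* task B gets a shadow stack */
	printf("ShadowCallStack: %ld kB\n", scs_bytes / 1024);

	account_scs(-1);		/* task A exits */
	account_scs(-1);		/* task B exits */
	assert(scs_bytes == 0);		/* paired calls leave nothing behind */
	return 0;
}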
mm/page_alloc.c
@@ -5073,6 +5073,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 		" managed:%lukB"
 		" mlocked:%lukB"
 		" kernel_stack:%lukB"
+#ifdef CONFIG_SHADOW_CALL_STACK
+		" shadow_call_stack:%lukB"
+#endif
 		" pagetables:%lukB"
 		" bounce:%lukB"
 		" free_pcp:%lukB"
@@ -5094,6 +5097,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 		K(zone->managed_pages),
 		K(zone_page_state(zone, NR_MLOCK)),
 		zone_page_state(zone, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+		zone_page_state(zone, NR_KERNEL_SCS_BYTES) / 1024,
+#endif
 		K(zone_page_state(zone, NR_PAGETABLE)),
 		K(zone_page_state(zone, NR_BOUNCE)),
 		K(free_pcp),
mm/vmstat.c
@@ -1117,6 +1117,9 @@ const char * const vmstat_text[] = {
 	"nr_mlock",
 	"nr_page_table_pages",
 	"nr_kernel_stack",
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+	"nr_shadow_call_stack_bytes",
+#endif
 	"nr_bounce",
 #if IS_ENABLED(CONFIG_ZSMALLOC)
 	"nr_zspages",