x86/intel_rdt: Prevent pseudo-locking from using stale pointers
[ Upstream commit b61b8bba18fe2b63d38fdaf9b83de25e2d787dfe ]

When the last CPU in an rdt_domain goes offline, its rdt_domain struct
gets freed. Current pseudo-locking code is unaware of this scenario and
tries to dereference the freed structure in a few places.

Add checks to prevent pseudo-locking code from doing this. While further
work is needed to seamlessly restore resource groups (not just
pseudo-locking) to their configuration when the domain is brought back
online, the immediate issue of invalid pointers is addressed here.

Fixes: f4e80d67a5 ("x86/intel_rdt: Resctrl files reflect pseudo-locked information")
Fixes: 443810fe61 ("x86/intel_rdt: Create debugfs files for pseudo-locking testing")
Fixes: 746e08590b ("x86/intel_rdt: Create character device exposing pseudo-locked region")
Fixes: 33dc3e410a ("x86/intel_rdt: Make CPU information accessible for pseudo-locked regions")
Signed-off-by: Jithu Joseph <jithu.joseph@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: fenghua.yu@intel.com
Cc: tony.luck@intel.com
Cc: gavin.hindman@intel.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/231f742dbb7b00a31cc104416860e27dba6b072d.1539384145.git.reinette.chatre@intel.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 3d02e3bb3c (parent b6e44f7439)
4 changed files with 55 additions and 12 deletions
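For illustration, here is a minimal userspace sketch of the pattern this commit applies. The struct and function names below are hypothetical stand-ins for the kernel's rdt_domain and pseudo_lock_region, not kernel API: one object can be freed while another still holds a pointer to it, so the free path clears the back-pointer and every reader checks it for NULL before dereferencing.

/*
 * Minimal sketch of the fix, in plain userspace C.  "struct domain" and
 * "struct lock_region" are hypothetical stand-ins; only the pointer
 * lifecycle matters here.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct domain {
	int id;
};

struct lock_region {
	struct domain *d;	/* NULL once the domain has been freed */
};

/* Free path, mirroring domain_remove_cpu(): clear the back-pointer
 * before freeing so no stale pointer survives. */
static void domain_free(struct domain *d, struct lock_region *plr)
{
	if (plr && plr->d == d)
		plr->d = NULL;
	free(d);
}

/* Reader path, mirroring the guarded resctrl show handlers: bail out
 * with -ENODEV instead of dereferencing a freed domain. */
static int region_show(const struct lock_region *plr)
{
	if (!plr->d)
		return -ENODEV;	/* "Cache domain offline" */
	printf("domain %d\n", plr->d->id);
	return 0;
}

int main(void)
{
	struct domain *d = malloc(sizeof(*d));
	struct lock_region plr = { .d = d };

	if (!d)
		return 1;
	d->id = 0;
	region_show(&plr);	/* domain online: prints "domain 0" */
	domain_free(d, &plr);	/* last CPU went offline: domain freed */
	return region_show(&plr) == -ENODEV ? 0 : 1;	/* now refuses */
}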
arch/x86/kernel/cpu/intel_rdt.c
@@ -610,6 +610,13 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 			cancel_delayed_work(&d->cqm_limbo);
 		}
 
+		/*
+		 * rdt_domain "d" is going to be freed below, so clear
+		 * its pointer from pseudo_lock_region struct.
+		 */
+		if (d->plr)
+			d->plr->d = NULL;
+
 		kfree(d->ctrl_val);
 		kfree(d->mbps_val);
 		kfree(d->rmid_busy_llc);
arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -408,8 +408,16 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
 		for_each_alloc_enabled_rdt_resource(r)
 			seq_printf(s, "%s:uninitialized\n", r->name);
 	} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
-		seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name,
-			   rdtgrp->plr->d->id, rdtgrp->plr->cbm);
+		if (!rdtgrp->plr->d) {
+			rdt_last_cmd_clear();
+			rdt_last_cmd_puts("Cache domain offline\n");
+			ret = -ENODEV;
+		} else {
+			seq_printf(s, "%s:%d=%x\n",
+				   rdtgrp->plr->r->name,
+				   rdtgrp->plr->d->id,
+				   rdtgrp->plr->cbm);
+		}
 	} else {
 		closid = rdtgrp->closid;
 		for_each_alloc_enabled_rdt_resource(r) {
arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -1116,6 +1116,11 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
 		goto out;
 	}
 
+	if (!plr->d) {
+		ret = -ENODEV;
+		goto out;
+	}
+
 	plr->thread_done = 0;
 	cpu = cpumask_first(&plr->d->cpu_mask);
 	if (!cpu_online(cpu)) {
@@ -1429,6 +1434,11 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
 
 	plr = rdtgrp->plr;
 
+	if (!plr->d) {
+		mutex_unlock(&rdtgroup_mutex);
+		return -ENODEV;
+	}
+
 	/*
 	 * Task is required to run with affinity to the cpus associated
 	 * with the pseudo-locked region. If this is not the case the task
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -268,17 +268,27 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 			      struct seq_file *s, void *v)
 {
 	struct rdtgroup *rdtgrp;
+	struct cpumask *mask;
 	int ret = 0;
 
 	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 
 	if (rdtgrp) {
-		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
-			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
-				   cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
-		else
+		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+			if (!rdtgrp->plr->d) {
+				rdt_last_cmd_clear();
+				rdt_last_cmd_puts("Cache domain offline\n");
+				ret = -ENODEV;
+			} else {
+				mask = &rdtgrp->plr->d->cpu_mask;
+				seq_printf(s, is_cpu_list(of) ?
+					   "%*pbl\n" : "%*pb\n",
+					   cpumask_pr_args(mask));
+			}
+		} else {
 			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
 				   cpumask_pr_args(&rdtgrp->cpu_mask));
+		}
 	} else {
 		ret = -ENOENT;
 	}
@@ -1286,6 +1296,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 	struct rdt_resource *r;
 	struct rdt_domain *d;
 	unsigned int size;
+	int ret = 0;
 	bool sep;
 	u32 ctrl;
 
@@ -1296,11 +1307,18 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 	}
 
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
-		seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
-		size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
-					    rdtgrp->plr->d,
-					    rdtgrp->plr->cbm);
-		seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+		if (!rdtgrp->plr->d) {
+			rdt_last_cmd_clear();
+			rdt_last_cmd_puts("Cache domain offline\n");
+			ret = -ENODEV;
+		} else {
+			seq_printf(s, "%*s:", max_name_width,
+				   rdtgrp->plr->r->name);
+			size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+						    rdtgrp->plr->d,
+						    rdtgrp->plr->cbm);
+			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+		}
 		goto out;
 	}
 
@@ -1330,7 +1348,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 out:
 	rdtgroup_kn_unlock(of->kn);
 
-	return 0;
+	return ret;
 }
 
 /* rdtgroup information files for one cache resource. */
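With these guards in place, reads of a pseudo-locked group's resctrl files fail cleanly while the cache domain is offline instead of dereferencing freed memory. A hypothetical userspace probe could observe this; the group name "p0" and the /sys/fs/resctrl mount point below are assumptions for illustration, not taken from this commit:

#include <stdio.h>
#include <errno.h>

int main(void)
{
	/* Hypothetical pseudo-locked group "p0"; resctrl assumed mounted
	 * at /sys/fs/resctrl. */
	FILE *f = fopen("/sys/fs/resctrl/p0/schemata", "r");
	char buf[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* domain online: e.g. "L2:0=f" */
	else if (errno == ENODEV)
		/* Domain offline: the show handler returned -ENODEV and
		 * left "Cache domain offline" in info/last_cmd_status. */
		fprintf(stderr, "cache domain offline\n");
	fclose(f);
	return 0;
}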