x86/events, drivers/amd/iommu: Prepare for multiple IOMMUs support

Currently, amd_iommu_pc_get_set_reg_val() cannot support multiple IOMMUs:
it looks the IOMMU up from a device ID and folds register reads and writes
into a single helper. Replace it with amd_iommu_pc_get_reg() and
amd_iommu_pc_set_reg(), which let callers specify the IOMMU to operate on.
This is in preparation for supporting multiple IOMMUs.
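
As a rough sketch of the caller-side change (using only interfaces touched
by this patch; the perf driver resolves IOMMU index 0 via get_amd_iommu(0)
for now):

    /* Before: IOMMU implied by the device ID, get and set folded into one call. */
    reg = csource;
    amd_iommu_pc_get_set_reg_val(devid, bank, cntr,
                                 IOMMU_PC_COUNTER_SRC_REG, &reg, true);

    /* After: the caller names the IOMMU instance explicitly. */
    struct amd_iommu *iommu = get_amd_iommu(0);

    reg = csource;
    amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, &reg);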

Signed-off-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Jörg Rödel <joro@8bytes.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/1487926102-13073-8-git-send-email-Suravee.Suthikulpanit@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 4 files changed, 49 insertions(+), 48 deletions(-)

diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c

@@ -248,49 +248,45 @@ static int perf_iommu_event_init(struct perf_event *event)
 
 static void perf_iommu_enable_event(struct perf_event *ev)
 {
+	struct amd_iommu *iommu = get_amd_iommu(0);
 	u8 csource = _GET_CSOURCE(ev);
 	u16 devid = _GET_DEVID(ev);
+	u8 bank = _GET_BANK(ev);
+	u8 cntr = _GET_CNTR(ev);
 	u64 reg = 0ULL;
 
 	reg = csource;
-	amd_iommu_pc_get_set_reg_val(devid,
-			_GET_BANK(ev), _GET_CNTR(ev) ,
-			 IOMMU_PC_COUNTER_SRC_REG, &reg, true);
+	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, &reg);
 
 	reg = devid | (_GET_DEVID_MASK(ev) << 32);
 	if (reg)
 		reg |= BIT(31);
-	amd_iommu_pc_get_set_reg_val(devid,
-			_GET_BANK(ev), _GET_CNTR(ev) ,
-			 IOMMU_PC_DEVID_MATCH_REG, &reg, true);
+	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, &reg);
 
 	reg = _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32);
 	if (reg)
 		reg |= BIT(31);
-	amd_iommu_pc_get_set_reg_val(devid,
-			_GET_BANK(ev), _GET_CNTR(ev) ,
-			 IOMMU_PC_PASID_MATCH_REG, &reg, true);
+	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, &reg);
 
 	reg = _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32);
 	if (reg)
 		reg |= BIT(31);
-	amd_iommu_pc_get_set_reg_val(devid,
-			_GET_BANK(ev), _GET_CNTR(ev) ,
-			 IOMMU_PC_DOMID_MATCH_REG, &reg, true);
+	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, &reg);
 }
 
 static void perf_iommu_disable_event(struct perf_event *event)
 {
+	struct amd_iommu *iommu = get_amd_iommu(0);
 	u64 reg = 0ULL;
 
-	amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
-			_GET_BANK(event), _GET_CNTR(event),
-			IOMMU_PC_COUNTER_SRC_REG, &reg, true);
+	amd_iommu_pc_set_reg(iommu, _GET_BANK(event), _GET_CNTR(event),
+			     IOMMU_PC_COUNTER_SRC_REG, &reg);
 }
 
 static void perf_iommu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	struct amd_iommu *iommu = get_amd_iommu(0);
 
 	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
 		return;
@@ -300,9 +296,8 @@ static void perf_iommu_start(struct perf_event *event, int flags)
 
 	if (flags & PERF_EF_RELOAD) {
 		u64 prev_raw_count = local64_read(&hwc->prev_count);
-		amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
-				_GET_BANK(event), _GET_CNTR(event),
-				IOMMU_PC_COUNTER_REG, &prev_raw_count, true);
+		amd_iommu_pc_set_reg(iommu, _GET_BANK(event), _GET_CNTR(event),
+				     IOMMU_PC_COUNTER_REG, &prev_raw_count);
 	}
 
 	perf_iommu_enable_event(event);
@@ -314,10 +309,11 @@ static void perf_iommu_read(struct perf_event *event)
 {
 	u64 count, prev, delta;
 	struct hw_perf_event *hwc = &event->hw;
+	struct amd_iommu *iommu = get_amd_iommu(0);
 
-	amd_iommu_pc_get_set_reg_val(_GET_DEVID(event),
-				_GET_BANK(event), _GET_CNTR(event),
-				IOMMU_PC_COUNTER_REG, &count, false);
+	if (amd_iommu_pc_get_reg(iommu, _GET_BANK(event), _GET_CNTR(event),
+				 IOMMU_PC_COUNTER_REG, &count))
+		return;
 
 	/* IOMMU pc counter register is only 48 bits */
 	count &= GENMASK_ULL(47, 0);

diff --git a/arch/x86/events/amd/iommu.h b/arch/x86/events/amd/iommu.h
--- a/arch/x86/events/amd/iommu.h
+++ b/arch/x86/events/amd/iommu.h

@@ -24,6 +24,8 @@
 #define PC_MAX_SPEC_BNKS			64
 #define PC_MAX_SPEC_CNTRS			16
 
+struct amd_iommu;
+
 /* amd_iommu_init.c external support functions */
 extern int amd_iommu_get_num_iommus(void);
@@ -33,8 +35,11 @@ extern u8 amd_iommu_pc_get_max_banks(unsigned int idx);
 
 extern u8 amd_iommu_pc_get_max_counters(unsigned int idx);
 
-extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr,
-			u8 fxn, u64 *value, bool is_write);
+extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+				u8 fxn, u64 *value);
+
+extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+				u8 fxn, u64 *value);
 
 extern struct amd_iommu *get_amd_iommu(int idx);

diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c

@@ -256,10 +256,6 @@ static int amd_iommu_enable_interrupts(void);
 static int __init iommu_go_to_state(enum iommu_init_state state);
 static void init_device_table_dma(void);
 
-static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
-				    u8 bank, u8 cntr, u8 fxn,
-				    u64 *value, bool is_write);
-
 static inline void update_last_devid(u16 devid)
 {
 	if (devid > amd_iommu_last_bdf)
@@ -1484,6 +1480,8 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	return 0;
 }
 
+static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+				u8 fxn, u64 *value, bool is_write);
+
 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
 {
@@ -1495,8 +1493,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
 	amd_iommu_pc_present = true;
 
 	/* Check if the performance counters can be written to */
-	if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
-	    (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
+	if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
+	    (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
 	    (val != val2)) {
 		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
 		amd_iommu_pc_present = false;
@@ -2765,15 +2763,18 @@ u8 amd_iommu_pc_get_max_counters(unsigned int idx)
 }
 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
 
-static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
-				    u8 bank, u8 cntr, u8 fxn,
-				    u64 *value, bool is_write)
+static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
+				u8 fxn, u64 *value, bool is_write)
 {
 	u32 offset;
 	u32 max_offset_lim;
 
+	/* Make sure the IOMMU PC resource is available */
+	if (!amd_iommu_pc_present)
+		return -ENODEV;
+
 	/* Check for valid iommu and pc register indexing */
-	if (WARN_ON((fxn > 0x28) || (fxn & 7)))
+	if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
 		return -ENODEV;
 
 	offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
@@ -2799,17 +2800,21 @@ static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
 
 	return 0;
 }
-EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
 
-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
-				    u64 *value, bool is_write)
+int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
 {
-	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+	if (!iommu)
+		return -EINVAL;
 
-	/* Make sure the IOMMU PC resource is available */
-	if (!amd_iommu_pc_present || iommu == NULL)
-		return -ENODEV;
-
-	return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
-					value, is_write);
+	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
 }
+EXPORT_SYMBOL(amd_iommu_pc_get_reg);
+
+int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
+{
+	if (!iommu)
+		return -EINVAL;
+
+	return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
+}
+EXPORT_SYMBOL(amd_iommu_pc_set_reg);
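
For reference, a worked example of the counter register offset computation
that iommu_pc_get_set_reg() keeps unchanged (the bank/cntr/fxn values below
are purely illustrative):

    offset = ((0x40 | bank) << 12) | (cntr << 8) | fxn
    /* e.g. bank = 1, cntr = 2, fxn = 0x08: */
    offset = (0x41 << 12) | (2 << 8) | 0x08
           = 0x41000 | 0x200 | 0x08
           = 0x41208	/* offset into the IOMMU's MMIO counter region */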

diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h

@@ -57,11 +57,6 @@ extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
 extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
 extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
 
-/* IOMMU Performance Counter functions */
-extern bool amd_iommu_pc_supported(void);
-extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
-			u64 *value, bool is_write);
-
 #ifdef CONFIG_IRQ_REMAP
 extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
 #else