af391b15f7
This patch renames __cpu_suspend to cpu_suspend so that it is aligned with ARM32. It also removes the redundant wrapper that was created.

This is in preparation for implementing generic PSCI system suspend using cpu_{suspend,resume}, which now have the same interface on both ARM and ARM64.

Cc: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Reviewed-by: Ashwin Chaugule <ashwin.chaugule@linaro.org>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
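For context, a minimal sketch of the kind of generic PSCI system-suspend backend this rename enables; it is not part of this patch. It reuses the finisher pattern that cpu_psci_cpu_suspend() below already uses for CPU idle, builds on helpers defined later in this file (invoke_psci_fn, psci_to_linux_errno), and relies on cpu_suspend(arg, fn) now having the same prototype on ARM and ARM64. The names psci_system_suspend(), psci_system_suspend_enter() and the PSCI_FN_SYSTEM_SUSPEND_ID placeholder are illustrative assumptions, not existing symbols.

#include <linux/suspend.h>	/* suspend_state_t for the platform enter hook */
#include <asm/suspend.h>	/* cpu_suspend()/cpu_resume entry points */

/* Placeholder firmware function ID, used only for this sketch. */
#define PSCI_FN_SYSTEM_SUSPEND_ID	0xc400000e

static int psci_system_suspend(unsigned long unused)
{
	/*
	 * Ask the firmware to suspend the whole system, resuming at
	 * cpu_resume -- the same entry point psci_suspend_finisher()
	 * below passes for CPU idle.
	 */
	return psci_to_linux_errno(
			invoke_psci_fn(PSCI_FN_SYSTEM_SUSPEND_ID,
				       virt_to_phys(cpu_resume), 0, 0));
}

static int psci_system_suspend_enter(suspend_state_t state)
{
	/* Identical call on ARM and ARM64 now that cpu_suspend() is aligned. */
	return cpu_suspend(0, psci_system_suspend);
}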
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt) "psci: " fmt

#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/pm.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <uapi/linux/psci.h>

#include <asm/compiler.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/psci.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>

#define PSCI_POWER_STATE_TYPE_STANDBY		0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1

static bool psci_power_state_loses_context(u32 state)
{
	return state & PSCI_0_2_POWER_STATE_TYPE_MASK;
}

static bool psci_power_state_is_valid(u32 state)
{
	const u32 valid_mask = PSCI_0_2_POWER_STATE_ID_MASK |
			       PSCI_0_2_POWER_STATE_TYPE_MASK |
			       PSCI_0_2_POWER_STATE_AFFL_MASK;

	return !(state & ~valid_mask);
}

/*
 * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
 * calls to its resident CPU, so we must avoid issuing those. We never migrate
 * a Trusted OS even if it claims to be capable of migration -- doing so will
 * require cooperation with a Trusted OS driver.
 */
static int resident_cpu = -1;

struct psci_operations {
	int (*cpu_suspend)(u32 state, unsigned long entry_point);
	int (*cpu_off)(u32 state);
	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
	int (*migrate)(unsigned long cpuid);
	int (*affinity_info)(unsigned long target_affinity,
			unsigned long lowest_affinity_level);
	int (*migrate_info_type)(void);
};

static struct psci_operations psci_ops;

typedef unsigned long (psci_fn)(unsigned long, unsigned long,
				unsigned long, unsigned long);
asmlinkage psci_fn __invoke_psci_fn_hvc;
asmlinkage psci_fn __invoke_psci_fn_smc;
static psci_fn *invoke_psci_fn;

enum psci_function {
	PSCI_FN_CPU_SUSPEND,
	PSCI_FN_CPU_ON,
	PSCI_FN_CPU_OFF,
	PSCI_FN_MIGRATE,
	PSCI_FN_MAX,
};

static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);

static u32 psci_function_id[PSCI_FN_MAX];

static int psci_to_linux_errno(int errno)
{
	switch (errno) {
	case PSCI_RET_SUCCESS:
		return 0;
	case PSCI_RET_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case PSCI_RET_INVALID_PARAMS:
		return -EINVAL;
	case PSCI_RET_DENIED:
		return -EPERM;
	};

	return -EINVAL;
}

static u32 psci_get_version(void)
{
	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}

static int psci_cpu_suspend(u32 state, unsigned long entry_point)
{
	int err;
	u32 fn;

	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
	err = invoke_psci_fn(fn, state, entry_point, 0);
	return psci_to_linux_errno(err);
}

static int psci_cpu_off(u32 state)
{
	int err;
	u32 fn;

	fn = psci_function_id[PSCI_FN_CPU_OFF];
	err = invoke_psci_fn(fn, state, 0, 0);
	return psci_to_linux_errno(err);
}

static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
{
	int err;
	u32 fn;

	fn = psci_function_id[PSCI_FN_CPU_ON];
	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
	return psci_to_linux_errno(err);
}

static int psci_migrate(unsigned long cpuid)
{
	int err;
	u32 fn;

	fn = psci_function_id[PSCI_FN_MIGRATE];
	err = invoke_psci_fn(fn, cpuid, 0, 0);
	return psci_to_linux_errno(err);
}

static int psci_affinity_info(unsigned long target_affinity,
		unsigned long lowest_affinity_level)
{
	return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
			      lowest_affinity_level, 0);
}

static int psci_migrate_info_type(void)
{
	return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
}

static unsigned long psci_migrate_info_up_cpu(void)
{
	return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
}

static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
{
	int i, ret, count = 0;
	u32 *psci_states;
	struct device_node *state_node, *cpu_node;

	cpu_node = of_get_cpu_node(cpu, NULL);
	if (!cpu_node)
		return -ENODEV;

	/*
	 * If the PSCI cpu_suspend function hook has not been initialized
	 * idle states must not be enabled, so bail out
	 */
	if (!psci_ops.cpu_suspend)
		return -EOPNOTSUPP;

	/* Count idle states */
	while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
					      count))) {
		count++;
		of_node_put(state_node);
	}

	if (!count)
		return -ENODEV;

	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
	if (!psci_states)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		u32 state;

		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);

		ret = of_property_read_u32(state_node,
					   "arm,psci-suspend-param",
					   &state);
		if (ret) {
			pr_warn(" * %s missing arm,psci-suspend-param property\n",
				state_node->full_name);
			of_node_put(state_node);
			goto free_mem;
		}

		of_node_put(state_node);
		pr_debug("psci-power-state %#x index %d\n", state, i);
		if (!psci_power_state_is_valid(state)) {
			pr_warn("Invalid PSCI power state %#x\n", state);
			ret = -EINVAL;
			goto free_mem;
		}
		psci_states[i] = state;
	}
	/* Idle states parsed correctly, initialize per-cpu pointer */
	per_cpu(psci_power_state, cpu) = psci_states;
	return 0;

free_mem:
	kfree(psci_states);
	return ret;
}

static int get_set_conduit_method(struct device_node *np)
{
	const char *method;

	pr_info("probing for conduit method from DT.\n");

	if (of_property_read_string(np, "method", &method)) {
		pr_warn("missing \"method\" property\n");
		return -ENXIO;
	}

	if (!strcmp("hvc", method)) {
		invoke_psci_fn = __invoke_psci_fn_hvc;
	} else if (!strcmp("smc", method)) {
		invoke_psci_fn = __invoke_psci_fn_smc;
	} else {
		pr_warn("invalid \"method\" property: %s\n", method);
		return -EINVAL;
	}
	return 0;
}

static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
{
	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
}

static void psci_sys_poweroff(void)
{
	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}

/*
 * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
 * return DENIED (which would be fatal).
 */
static void __init psci_init_migrate(void)
{
	unsigned long cpuid;
	int type, cpu;

	type = psci_ops.migrate_info_type();

	if (type == PSCI_0_2_TOS_MP) {
		pr_info("Trusted OS migration not required\n");
		return;
	}

	if (type == PSCI_RET_NOT_SUPPORTED) {
		pr_info("MIGRATE_INFO_TYPE not supported.\n");
		return;
	}

	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
		return;
	}

	cpuid = psci_migrate_info_up_cpu();
	if (cpuid & ~MPIDR_HWID_BITMASK) {
		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
			cpuid);
		return;
	}

	cpu = get_logical_index(cpuid);
	resident_cpu = cpu >= 0 ? cpu : -1;

	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
}

static void __init psci_0_2_set_functions(void)
{
	pr_info("Using standard PSCI v0.2 function IDs\n");
	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
	psci_ops.cpu_suspend = psci_cpu_suspend;

	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
	psci_ops.cpu_off = psci_cpu_off;

	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
	psci_ops.cpu_on = psci_cpu_on;

	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
	psci_ops.migrate = psci_migrate;

	psci_ops.affinity_info = psci_affinity_info;

	psci_ops.migrate_info_type = psci_migrate_info_type;

	arm_pm_restart = psci_sys_reset;

	pm_power_off = psci_sys_poweroff;
}

/*
 * Probe function for PSCI firmware versions >= 0.2
 */
static int __init psci_probe(void)
{
	u32 ver = psci_get_version();

	pr_info("PSCIv%d.%d detected in firmware.\n",
			PSCI_VERSION_MAJOR(ver),
			PSCI_VERSION_MINOR(ver));

	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
		pr_err("Conflicting PSCI version detected.\n");
		return -EINVAL;
	}

	psci_0_2_set_functions();

	psci_init_migrate();

	return 0;
}

typedef int (*psci_initcall_t)(const struct device_node *);

/*
 * PSCI init function for PSCI versions >=0.2
 *
 * Probe based on PSCI PSCI_VERSION function
 */
static int __init psci_0_2_init(struct device_node *np)
{
	int err;

	err = get_set_conduit_method(np);

	if (err)
		goto out_put_node;
	/*
	 * Starting with v0.2, the PSCI specification introduced a call
	 * (PSCI_VERSION) that allows probing the firmware version, so
	 * that PSCI function IDs and version specific initialization
	 * can be carried out according to the specific version reported
	 * by firmware
	 */
	err = psci_probe();

out_put_node:
	of_node_put(np);
	return err;
}

/*
 * PSCI < v0.2 get PSCI Function IDs via DT.
 */
static int __init psci_0_1_init(struct device_node *np)
{
	u32 id;
	int err;

	err = get_set_conduit_method(np);

	if (err)
		goto out_put_node;

	pr_info("Using PSCI v0.1 Function IDs from DT\n");

	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
		psci_ops.cpu_suspend = psci_cpu_suspend;
	}

	if (!of_property_read_u32(np, "cpu_off", &id)) {
		psci_function_id[PSCI_FN_CPU_OFF] = id;
		psci_ops.cpu_off = psci_cpu_off;
	}

	if (!of_property_read_u32(np, "cpu_on", &id)) {
		psci_function_id[PSCI_FN_CPU_ON] = id;
		psci_ops.cpu_on = psci_cpu_on;
	}

	if (!of_property_read_u32(np, "migrate", &id)) {
		psci_function_id[PSCI_FN_MIGRATE] = id;
		psci_ops.migrate = psci_migrate;
	}

out_put_node:
	of_node_put(np);
	return err;
}

static const struct of_device_id psci_of_match[] __initconst = {
	{ .compatible = "arm,psci",	.data = psci_0_1_init},
	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
	{},
};

int __init psci_dt_init(void)
{
	struct device_node *np;
	const struct of_device_id *matched_np;
	psci_initcall_t init_fn;

	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);

	if (!np)
		return -ENODEV;

	init_fn = (psci_initcall_t)matched_np->data;
	return init_fn(np);
}

#ifdef CONFIG_ACPI
/*
 * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
 * explicitly clarified in SBBR
 */
int __init psci_acpi_init(void)
{
	if (!acpi_psci_present()) {
		pr_info("is not implemented in ACPI.\n");
		return -EOPNOTSUPP;
	}

	pr_info("probing for conduit method from ACPI.\n");

	if (acpi_psci_use_hvc())
		invoke_psci_fn = __invoke_psci_fn_hvc;
	else
		invoke_psci_fn = __invoke_psci_fn_smc;

	return psci_probe();
}
#endif

#ifdef CONFIG_SMP

static int __init cpu_psci_cpu_init(unsigned int cpu)
{
	return 0;
}

static int __init cpu_psci_cpu_prepare(unsigned int cpu)
{
	if (!psci_ops.cpu_on) {
		pr_err("no cpu_on method, not booting CPU%d\n", cpu);
		return -ENODEV;
	}

	return 0;
}

static int cpu_psci_cpu_boot(unsigned int cpu)
{
	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
	if (err)
		pr_err("failed to boot CPU%d (%d)\n", cpu, err);

	return err;
}

#ifdef CONFIG_HOTPLUG_CPU
static bool psci_tos_resident_on(int cpu)
{
	return cpu == resident_cpu;
}

static int cpu_psci_cpu_disable(unsigned int cpu)
{
	/* Fail early if we don't have CPU_OFF support */
	if (!psci_ops.cpu_off)
		return -EOPNOTSUPP;

	/* Trusted OS will deny CPU_OFF */
	if (psci_tos_resident_on(cpu))
		return -EPERM;

	return 0;
}

static void cpu_psci_cpu_die(unsigned int cpu)
{
	int ret;
	/*
	 * There are no known implementations of PSCI actually using the
	 * power state field, pass a sensible default for now.
	 */
	u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
		    PSCI_0_2_POWER_STATE_TYPE_SHIFT;

	ret = psci_ops.cpu_off(state);

	pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
}

static int cpu_psci_cpu_kill(unsigned int cpu)
{
	int err, i;

	if (!psci_ops.affinity_info)
		return 0;
	/*
	 * cpu_kill could race with cpu_die and we can
	 * potentially end up declaring this cpu undead
	 * while it is dying. So, try again a few times.
	 */

	for (i = 0; i < 10; i++) {
		err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
		if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
			pr_info("CPU%d killed.\n", cpu);
			return 0;
		}

		msleep(10);
		pr_info("Retrying again to check for CPU kill\n");
	}

	pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
			cpu, err);
	return -ETIMEDOUT;
}
#endif
#endif

static int psci_suspend_finisher(unsigned long index)
{
	u32 *state = __this_cpu_read(psci_power_state);

	return psci_ops.cpu_suspend(state[index - 1],
				    virt_to_phys(cpu_resume));
}

static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
{
	int ret;
	u32 *state = __this_cpu_read(psci_power_state);
	/*
	 * idle state index 0 corresponds to wfi, should never be called
	 * from the cpu_suspend operations
	 */
	if (WARN_ON_ONCE(!index))
		return -EINVAL;

	if (!psci_power_state_loses_context(state[index - 1]))
		ret = psci_ops.cpu_suspend(state[index - 1], 0);
	else
		ret = cpu_suspend(index, psci_suspend_finisher);

	return ret;
}

const struct cpu_operations cpu_psci_ops = {
	.name		= "psci",
#ifdef CONFIG_CPU_IDLE
	.cpu_init_idle	= cpu_psci_cpu_init_idle,
	.cpu_suspend	= cpu_psci_cpu_suspend,
#endif
#ifdef CONFIG_SMP
	.cpu_init	= cpu_psci_cpu_init,
	.cpu_prepare	= cpu_psci_cpu_prepare,
	.cpu_boot	= cpu_psci_cpu_boot,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= cpu_psci_cpu_disable,
	.cpu_die	= cpu_psci_cpu_die,
	.cpu_kill	= cpu_psci_cpu_kill,
#endif
#endif
};