Merge branches 'perf-urgent-for-linus', 'smp-urgent-for-linus' and 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf, cpu hotplug and timer fixes from Ingo Molnar:

 "perf:
   - A single tooling fix for a user-triggerable segfault.

  CPU hotplug:
   - Fix a CPU hotplug corner case regression, introduced by the recent
     hotplug rework.

  timers:
   - Fix a boot hang in the ARM based Tango SoC clocksource driver."

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf intel-pt: Fix segfault tracing transactions

* 'smp-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpu/hotplug: Fix rollback during error-out in __cpu_disable()

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  clocksource/drivers/tango-xtal: Fix boot hang due to incorrect test
commit 82b23cb94b
3 changed files with 28 additions and 9 deletions
drivers/clocksource/tango_xtal.c
@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
 
        ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
                                    32, clocksource_mmio_readl_up);
-       if (!ret) {
+       if (ret) {
                pr_err("%s: registration failed\n", np->full_name);
                return;
        }
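clocksource_mmio_init() follows the usual kernel convention of returning 0 on success and a negative errno on failure, so the error path must trigger on a non-zero return; the old "if (!ret)" had the test inverted, which is the "incorrect test" the commit title refers to. A minimal, self-contained sketch of the corrected check is below; register_clocksource() is a made-up stand-in for illustration, not a kernel API.

#include <stdio.h>

/* Stand-in for clocksource_mmio_init(): 0 on success, negative errno on error. */
static int register_clocksource(void)
{
        return 0;       /* pretend registration succeeded */
}

int main(void)
{
        int ret = register_clocksource();

        if (ret) {      /* non-zero means failure -- only then bail out */
                fprintf(stderr, "registration failed (%d)\n", ret);
                return 1;
        }
        printf("clocksource registered, continuing init\n");
        return 0;
}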
kernel/cpu.c
@@ -36,6 +36,7 @@
  * @target:     The target state
  * @thread:     Pointer to the hotplug thread
  * @should_run: Thread should execute
+ * @rollback:   Perform a rollback
  * @cb_stat:    The state for a single callback (install/uninstall)
  * @cb:         Single callback function (install/uninstall)
  * @result:     Result of the operation
@@ -47,6 +48,7 @@ struct cpuhp_cpu_state
 #ifdef CONFIG_SMP
        struct task_struct      *thread;
        bool                    should_run;
+       bool                    rollback;
        enum cpuhp_state        cb_state;
        int                     (*cb)(unsigned int cpu);
        int                     result;
@@ -301,6 +303,11 @@ static int cpu_notify(unsigned long val, unsigned int cpu)
        return __cpu_notify(val, cpu, -1, NULL);
 }
 
+static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
+{
+       BUG_ON(cpu_notify(val, cpu));
+}
+
 /* Notifier wrappers for transitioning to state machine */
 static int notify_prepare(unsigned int cpu)
 {
@@ -477,6 +484,16 @@ static void cpuhp_thread_fun(unsigned int cpu)
                } else {
                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                }
+       } else if (st->rollback) {
+               BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
+
+               undo_cpu_down(cpu, st, cpuhp_ap_states);
+               /*
+                * This is a momentary workaround to keep the notifier users
+                * happy. Will go away once we got rid of the notifiers.
+                */
+               cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+               st->rollback = false;
        } else {
                /* Cannot happen .... */
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
@@ -636,11 +653,6 @@ static inline void check_for_tasks(int dead_cpu)
        read_unlock(&tasklist_lock);
 }
 
-static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
-{
-       BUG_ON(cpu_notify(val, cpu));
-}
-
 static int notify_down_prepare(unsigned int cpu)
 {
        int err, nr_calls = 0;
@@ -721,9 +733,10 @@ static int takedown_cpu(unsigned int cpu)
         */
        err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
        if (err) {
-               /* CPU didn't die: tell everyone. Can't complain. */
-               cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+               /* CPU refused to die */
                irq_unlock_sparse();
+               /* Unpark the hotplug thread so we can rollback there */
+               kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
                return err;
        }
        BUG_ON(cpu_online(cpu));
@@ -832,6 +845,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
+       if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+               st->target = prev_state;
+               st->rollback = true;
+               cpuhp_kick_ap_work(cpu);
+       }
 
        hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 out:
@@ -1249,6 +1267,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
                .name           = "notify:online",
                .startup        = notify_online,
                .teardown       = notify_down_prepare,
+               .skip_onerr     = true,
        },
 #endif
        /*
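Taken together, the kernel/cpu.c hunks make a failed __cpu_disable() recoverable: takedown_cpu() no longer issues CPU_DOWN_FAILED itself but unparks the per-CPU hotplug thread, _cpu_down() marks the state for rollback when the teardown failed above CPUHP_TEARDOWN_CPU, and cpuhp_thread_fun() then replays the already-executed teardown steps via undo_cpu_down() before notifying CPU_DOWN_FAILED. The userspace sketch below illustrates only the rollback-on-error idea, not the kernel implementation; the step names and callbacks are invented.

#include <stdio.h>

struct step {
        const char *name;
        int (*teardown)(void);  /* 0 on success, negative on failure */
        void (*startup)(void);  /* used to undo a partial teardown */
};

static int ok(void)    { return 0; }
static int fail(void)  { return -1; }
static void noop(void) { }

/* Walk the steps from highest to lowest; if one fails, bring the
 * already-torn-down steps back up and report the failure. */
static int bring_down(struct step *steps, int nsteps)
{
        int i, ret;

        for (i = nsteps - 1; i >= 0; i--) {
                ret = steps[i].teardown();
                if (ret) {
                        printf("teardown of %s failed, rolling back\n",
                               steps[i].name);
                        for (i++; i < nsteps; i++)
                                steps[i].startup();     /* undo */
                        return ret;
                }
        }
        return 0;
}

int main(void)
{
        struct step steps[] = {
                { "prepare",  ok,   noop },
                { "disable",  fail, noop }, /* this step errors out */
                { "teardown", ok,   noop },
        };

        return bring_down(steps, 3) ? 1 : 0;
}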
tools/perf/util/intel-pt.c
@@ -1130,7 +1130,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
                pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
                       ret);
 
-       if (pt->synth_opts.callchain)
+       if (pt->synth_opts.last_branch)
                intel_pt_reset_last_branch_rb(ptq);
 
        return ret;
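The perf fix addresses the user-triggerable segfault: the queue's last-branch buffer is only allocated when the last_branch synthesis option is requested, so resetting it whenever callchain synthesis was enabled dereferenced a NULL pointer. The standalone sketch below reproduces that class of bug with made-up struct and field names (it is not perf's actual layout); the point is that the guard must match the option that controls the allocation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct opts { int callchain; int last_branch; };

struct queue {
        struct opts opts;
        int *last_branch_buf;   /* allocated only if opts.last_branch is set */
};

static void reset_last_branch(struct queue *q)
{
        memset(q->last_branch_buf, 0, 16 * sizeof(int));  /* crashes if NULL */
}

int main(void)
{
        struct queue q = { .opts = { .callchain = 1, .last_branch = 0 } };

        if (q.opts.last_branch)         /* allocation gated on last_branch */
                q.last_branch_buf = calloc(16, sizeof(int));

        if (q.opts.last_branch)         /* was: if (q.opts.callchain) -> NULL deref */
                reset_last_branch(&q);

        free(q.last_branch_buf);
        puts("no segfault");
        return 0;
}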