Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer updates from Thomas Gleixner:
 "A rather smalish set of updates for timers and timekeeping:

   - Two core fixes to prevent potential undefined behaviour which gcc
     rightfully complains about.

   - A fix to prevent stopping the tick on a soon-to-be offline CPU, so
     that it can complete the shutdown procedure.

   - Wait for clocks to stabilize before making decisions, so a
     not-yet-validated clock is not rejected.

   - The usual pile of fixes to the various clocksource drivers.

   - Core code typo and include fixlets"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  timekeeping: Include the correct header for errno definitions
  clocksource/drivers/ti-32k: Prevent ftrace recursion
  clocksource/mips-gic-timer: Stop checking cpu_has_counter
  clocksource/mips-gic-timer: Print an error if IRQ setup fails
  tick/nohz: Prevent stopping the tick on an offline CPU
  clocksource/drivers/oxnas: Add OX820 compatible
  clocksource/drivers/timer-atmel-pit: Simplify IRQ handler
  clocksource/drivers/timer-atmel-pit: Remove useless WARN_ON_ONCE
  clocksource/drivers/timer-atmel-pit: Drop at91sam926x_pit_common_init
  clocksource/drivers/moxart: Replace panic by pr_err
  clocksource/drivers/moxart: Replace setup_irq by request_irq
  clocksource/drivers/moxart: Add Aspeed support
  clocksource/drivers/moxart: Use struct to hold state
  clocksource/drivers/moxart: Refactor enable/disable
  time: Avoid undefined behaviour in ktime_add_safe()
  time: Avoid undefined behaviour in timespec64_add_safe()
  timekeeping: Prints the amounts of time spent during suspend
  clocksource: Defer override invalidation unless clock is unstable
  hrtimer: Spelling fixes
Linus Torvalds 2016-10-03 18:09:13 -07:00
commit 5e1b834b27
15 changed files with 208 additions and 136 deletions

@@ -2,7 +2,9 @@ MOXA ART timer
Required properties:
- compatible : Must be "moxa,moxart-timer"
- compatible : Must be one of:
- "moxa,moxart-timer"
- "aspeed,ast2400-timer"
- reg : Should contain registers location and length
- interrupts : Should contain the timer interrupt number
- clocks : Should contain phandle for the clock that drives the counter

@@ -2,7 +2,7 @@ Oxford Semiconductor OXNAS SoCs Family RPS Timer
================================================
Required properties:
- compatible: Should be "oxsemi,ox810se-rps-timer"
- compatible: Should be "oxsemi,ox810se-rps-timer" or "oxsemi,ox820-rps-timer"
- reg : Specifies base physical address and size of the registers.
- interrupts : The interrupts of the two timers
- clocks : The phandle of the timer clock source

@@ -109,12 +109,15 @@ static int gic_clockevent_init(void)
{
int ret;
if (!cpu_has_counter || !gic_frequency)
if (!gic_frequency)
return -ENXIO;
ret = setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
if (ret < 0)
if (ret < 0) {
pr_err("GIC timer IRQ %d setup failed: %d\n",
gic_timer_irq, ret);
return ret;
}
cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
"AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,

@@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/clocksource.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#define TIMER1_BASE 0x00
#define TIMER2_BASE 0x10
@@ -36,75 +37,109 @@
#define TIMER_INTR_MASK 0x38
/*
* TIMER_CR flags:
* Moxart TIMER_CR flags:
*
* TIMEREG_CR_*_CLOCK 0: PCLK, 1: EXT1CLK
* TIMEREG_CR_*_INT overflow interrupt enable bit
* MOXART_CR_*_CLOCK 0: PCLK, 1: EXT1CLK
* MOXART_CR_*_INT overflow interrupt enable bit
*/
#define TIMEREG_CR_1_ENABLE BIT(0)
#define TIMEREG_CR_1_CLOCK BIT(1)
#define TIMEREG_CR_1_INT BIT(2)
#define TIMEREG_CR_2_ENABLE BIT(3)
#define TIMEREG_CR_2_CLOCK BIT(4)
#define TIMEREG_CR_2_INT BIT(5)
#define TIMEREG_CR_3_ENABLE BIT(6)
#define TIMEREG_CR_3_CLOCK BIT(7)
#define TIMEREG_CR_3_INT BIT(8)
#define TIMEREG_CR_COUNT_UP BIT(9)
#define MOXART_CR_1_ENABLE BIT(0)
#define MOXART_CR_1_CLOCK BIT(1)
#define MOXART_CR_1_INT BIT(2)
#define MOXART_CR_2_ENABLE BIT(3)
#define MOXART_CR_2_CLOCK BIT(4)
#define MOXART_CR_2_INT BIT(5)
#define MOXART_CR_3_ENABLE BIT(6)
#define MOXART_CR_3_CLOCK BIT(7)
#define MOXART_CR_3_INT BIT(8)
#define MOXART_CR_COUNT_UP BIT(9)
#define TIMER1_ENABLE (TIMEREG_CR_2_ENABLE | TIMEREG_CR_1_ENABLE)
#define TIMER1_DISABLE (TIMEREG_CR_2_ENABLE)
#define MOXART_TIMER1_ENABLE (MOXART_CR_2_ENABLE | MOXART_CR_1_ENABLE)
#define MOXART_TIMER1_DISABLE (MOXART_CR_2_ENABLE)
static void __iomem *base;
static unsigned int clock_count_per_tick;
/*
* The ASpeed variant of the IP block has a different layout
* for the control register
*/
#define ASPEED_CR_1_ENABLE BIT(0)
#define ASPEED_CR_1_CLOCK BIT(1)
#define ASPEED_CR_1_INT BIT(2)
#define ASPEED_CR_2_ENABLE BIT(4)
#define ASPEED_CR_2_CLOCK BIT(5)
#define ASPEED_CR_2_INT BIT(6)
#define ASPEED_CR_3_ENABLE BIT(8)
#define ASPEED_CR_3_CLOCK BIT(9)
#define ASPEED_CR_3_INT BIT(10)
#define ASPEED_TIMER1_ENABLE (ASPEED_CR_2_ENABLE | ASPEED_CR_1_ENABLE)
#define ASPEED_TIMER1_DISABLE (ASPEED_CR_2_ENABLE)
struct moxart_timer {
void __iomem *base;
unsigned int t1_disable_val;
unsigned int t1_enable_val;
unsigned int count_per_tick;
struct clock_event_device clkevt;
};
static inline struct moxart_timer *to_moxart(struct clock_event_device *evt)
{
return container_of(evt, struct moxart_timer, clkevt);
}
static inline void moxart_disable(struct clock_event_device *evt)
{
struct moxart_timer *timer = to_moxart(evt);
writel(timer->t1_disable_val, timer->base + TIMER_CR);
}
static inline void moxart_enable(struct clock_event_device *evt)
{
struct moxart_timer *timer = to_moxart(evt);
writel(timer->t1_enable_val, timer->base + TIMER_CR);
}
static int moxart_shutdown(struct clock_event_device *evt)
{
writel(TIMER1_DISABLE, base + TIMER_CR);
moxart_disable(evt);
return 0;
}
static int moxart_set_oneshot(struct clock_event_device *evt)
{
writel(TIMER1_DISABLE, base + TIMER_CR);
writel(~0, base + TIMER1_BASE + REG_LOAD);
moxart_disable(evt);
writel(~0, to_moxart(evt)->base + TIMER1_BASE + REG_LOAD);
return 0;
}
static int moxart_set_periodic(struct clock_event_device *evt)
{
writel(clock_count_per_tick, base + TIMER1_BASE + REG_LOAD);
writel(TIMER1_ENABLE, base + TIMER_CR);
struct moxart_timer *timer = to_moxart(evt);
moxart_disable(evt);
writel(timer->count_per_tick, timer->base + TIMER1_BASE + REG_LOAD);
writel(0, timer->base + TIMER1_BASE + REG_MATCH1);
moxart_enable(evt);
return 0;
}
static int moxart_clkevt_next_event(unsigned long cycles,
struct clock_event_device *unused)
struct clock_event_device *evt)
{
struct moxart_timer *timer = to_moxart(evt);
u32 u;
writel(TIMER1_DISABLE, base + TIMER_CR);
moxart_disable(evt);
u = readl(base + TIMER1_BASE + REG_COUNT) - cycles;
writel(u, base + TIMER1_BASE + REG_MATCH1);
u = readl(timer->base + TIMER1_BASE + REG_COUNT) - cycles;
writel(u, timer->base + TIMER1_BASE + REG_MATCH1);
writel(TIMER1_ENABLE, base + TIMER_CR);
moxart_enable(evt);
return 0;
}
static struct clock_event_device moxart_clockevent = {
.name = "moxart_timer",
.rating = 200,
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = moxart_shutdown,
.set_state_periodic = moxart_set_periodic,
.set_state_oneshot = moxart_set_oneshot,
.tick_resume = moxart_set_oneshot,
.set_next_event = moxart_clkevt_next_event,
};
static irqreturn_t moxart_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
@@ -112,21 +147,19 @@ static irqreturn_t moxart_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
static struct irqaction moxart_timer_irq = {
.name = "moxart-timer",
.flags = IRQF_TIMER,
.handler = moxart_timer_interrupt,
.dev_id = &moxart_clockevent,
};
static int __init moxart_timer_init(struct device_node *node)
{
int ret, irq;
unsigned long pclk;
struct clk *clk;
struct moxart_timer *timer;
base = of_iomap(node, 0);
if (!base) {
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer)
return -ENOMEM;
timer->base = of_iomap(node, 0);
if (!timer->base) {
pr_err("%s: of_iomap failed\n", node->full_name);
return -ENXIO;
}
@@ -137,12 +170,6 @@ static int __init moxart_timer_init(struct device_node *node)
return -EINVAL;
}
ret = setup_irq(irq, &moxart_timer_irq);
if (ret) {
pr_err("%s: setup_irq failed\n", node->full_name);
return ret;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("%s: of_clk_get failed\n", node->full_name);
@@ -151,7 +178,32 @@ static int __init moxart_timer_init(struct device_node *node)
pclk = clk_get_rate(clk);
ret = clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
if (of_device_is_compatible(node, "moxa,moxart-timer")) {
timer->t1_enable_val = MOXART_TIMER1_ENABLE;
timer->t1_disable_val = MOXART_TIMER1_DISABLE;
} else if (of_device_is_compatible(node, "aspeed,ast2400-timer")) {
timer->t1_enable_val = ASPEED_TIMER1_ENABLE;
timer->t1_disable_val = ASPEED_TIMER1_DISABLE;
} else {
pr_err("%s: unknown platform\n", node->full_name);
return -EINVAL;
}
timer->count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
timer->clkevt.name = node->name;
timer->clkevt.rating = 200;
timer->clkevt.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT;
timer->clkevt.set_state_shutdown = moxart_shutdown;
timer->clkevt.set_state_periodic = moxart_set_periodic;
timer->clkevt.set_state_oneshot = moxart_set_oneshot;
timer->clkevt.tick_resume = moxart_set_oneshot;
timer->clkevt.set_next_event = moxart_clkevt_next_event;
timer->clkevt.cpumask = cpumask_of(0);
timer->clkevt.irq = irq;
ret = clocksource_mmio_init(timer->base + TIMER2_BASE + REG_COUNT,
"moxart_timer", pclk, 200, 32,
clocksource_mmio_readl_down);
if (ret) {
@@ -159,13 +211,26 @@ static int __init moxart_timer_init(struct device_node *node)
return ret;
}
clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
ret = request_irq(irq, moxart_timer_interrupt, IRQF_TIMER,
node->name, &timer->clkevt);
if (ret) {
pr_err("%s: setup_irq failed\n", node->full_name);
return ret;
}
writel(~0, base + TIMER2_BASE + REG_LOAD);
writel(TIMEREG_CR_2_ENABLE, base + TIMER_CR);
/* Clear match registers */
writel(0, timer->base + TIMER1_BASE + REG_MATCH1);
writel(0, timer->base + TIMER1_BASE + REG_MATCH2);
writel(0, timer->base + TIMER2_BASE + REG_MATCH1);
writel(0, timer->base + TIMER2_BASE + REG_MATCH2);
moxart_clockevent.cpumask = cpumask_of(0);
moxart_clockevent.irq = irq;
/*
* Start timer 2 rolling as our main wall clock source, keep timer 1
* disabled
*/
writel(0, timer->base + TIMER_CR);
writel(~0, timer->base + TIMER2_BASE + REG_LOAD);
writel(timer->t1_disable_val, timer->base + TIMER_CR);
/*
* documentation is not publicly available:
@@ -173,9 +238,9 @@ static int __init moxart_timer_init(struct device_node *node)
* max_delta 0xfffffffe should be ok because count
* register size is u32
*/
clockevents_config_and_register(&moxart_clockevent, pclk,
0x4, 0xfffffffe);
clockevents_config_and_register(&timer->clkevt, pclk, 0x4, 0xfffffffe);
return 0;
}
CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
CLOCKSOURCE_OF_DECLARE(aspeed, "aspeed,ast2400-timer", moxart_timer_init);
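
The heart of the moxart rework above is the move from file-scope globals
(base, clock_count_per_tick) to a per-instance struct moxart_timer with the
clock_event_device embedded in it, so every callback recovers its state via
container_of(). Below is a minimal user-space sketch of that pattern; the
trimmed-down clock_event_device and all other names are illustrative, not
kernel API.

#include <stdio.h>
#include <stddef.h>

/* Same idea as the kernel's container_of(): recover the enclosing
 * structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clock_event_device {			/* trimmed-down stand-in */
	int (*set_state_shutdown)(struct clock_event_device *evt);
};

struct my_timer {
	unsigned int t1_disable_val;		/* per-instance state */
	struct clock_event_device clkevt;	/* embedded framework object */
};

static int my_shutdown(struct clock_event_device *evt)
{
	struct my_timer *timer = container_of(evt, struct my_timer, clkevt);

	printf("disable value 0x%x\n", timer->t1_disable_val);
	return 0;
}

int main(void)
{
	struct my_timer timer = {
		.t1_disable_val = 0x8,
		.clkevt = { .set_state_shutdown = my_shutdown },
	};

	timer.clkevt.set_state_shutdown(&timer.clkevt);
	return 0;
}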

@@ -149,24 +149,13 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
{
struct pit_data *data = dev_id;
/*
* irqs should be disabled here, but as the irq is shared they are only
* guaranteed to be off if the timer irq is registered first.
*/
WARN_ON_ONCE(!irqs_disabled());
/* The PIT interrupt may be disabled, and is shared */
if (clockevent_state_periodic(&data->clkevt) &&
(pit_read(data->base, AT91_PIT_SR) & AT91_PIT_PITS)) {
unsigned nr_ticks;
/* Get number of ticks performed before irq, and ack it */
nr_ticks = PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
do {
data->cnt += data->cycle;
data->clkevt.event_handler(&data->clkevt);
nr_ticks--;
} while (nr_ticks);
data->cnt += data->cycle * PIT_PICNT(pit_read(data->base,
AT91_PIT_PIVR));
data->clkevt.event_handler(&data->clkevt);
return IRQ_HANDLED;
}
@@ -177,11 +166,41 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
/*
* Set up both clocksource and clockevent support.
*/
static int __init at91sam926x_pit_common_init(struct pit_data *data)
static int __init at91sam926x_pit_dt_init(struct device_node *node)
{
unsigned long pit_rate;
unsigned bits;
int ret;
unsigned long pit_rate;
unsigned bits;
int ret;
struct pit_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->base = of_iomap(node, 0);
if (!data->base) {
pr_err("Could not map PIT address\n");
return -ENXIO;
}
data->mck = of_clk_get(node, 0);
if (IS_ERR(data->mck)) {
pr_err("Unable to get mck clk\n");
return PTR_ERR(data->mck);
}
ret = clk_prepare_enable(data->mck);
if (ret) {
pr_err("Unable to enable mck\n");
return ret;
}
/* Get the interrupts property */
data->irq = irq_of_parse_and_map(node, 0);
if (!data->irq) {
pr_err("Unable to get IRQ from DT\n");
return -EINVAL;
}
/*
* Use our actual MCK to figure out how many MCK/16 ticks per
@@ -236,46 +255,5 @@ static int __init at91sam926x_pit_common_init(struct pit_data *data)
return 0;
}
static int __init at91sam926x_pit_dt_init(struct device_node *node)
{
struct pit_data *data;
int ret;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->base = of_iomap(node, 0);
if (!data->base) {
pr_err("Could not map PIT address\n");
return -ENXIO;
}
data->mck = of_clk_get(node, 0);
if (IS_ERR(data->mck))
/* Fallback on clkdev for !CCF-based boards */
data->mck = clk_get(NULL, "mck");
if (IS_ERR(data->mck)) {
pr_err("Unable to get mck clk\n");
return PTR_ERR(data->mck);
}
ret = clk_prepare_enable(data->mck);
if (ret) {
pr_err("Unable to enable mck\n");
return ret;
}
/* Get the interrupts property */
data->irq = irq_of_parse_and_map(node, 0);
if (!data->irq) {
pr_err("Unable to get IRQ from DT\n");
return -EINVAL;
}
return at91sam926x_pit_common_init(data);
}
CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
at91sam926x_pit_dt_init);

@@ -295,3 +295,5 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
CLOCKSOURCE_OF_DECLARE(ox810se_rps,
"oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
CLOCKSOURCE_OF_DECLARE(ox820_rps,
"oxsemi,ox820se-rps-timer", oxnas_rps_timer_init);

@@ -65,7 +65,7 @@ static inline struct ti_32k *to_ti_32k(struct clocksource *cs)
return container_of(cs, struct ti_32k, cs);
}
static cycle_t ti_32k_read_cycles(struct clocksource *cs)
static cycle_t notrace ti_32k_read_cycles(struct clocksource *cs)
{
struct ti_32k *ti = to_ti_32k(cs);
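
The only functional change in this hunk is the notrace annotation on the
clocksource read callback. Function tracing timestamps every traced call
using the current clocksource, so if the read function itself were traced
the tracer would call straight back into it; notrace (which the kernel
defines roughly as __attribute__((no_instrument_function))) keeps it out of
the instrumented set. A small user-space sketch of the same recursion
hazard, using GCC's -finstrument-functions hooks as a stand-in for ftrace:

/* Build with:  gcc -finstrument-functions notrace_demo.c -o notrace_demo */
#include <stdio.h>

#define notrace __attribute__((no_instrument_function))

/* Stand-in for ti_32k_read_cycles(): it is called from the trace hook
 * below, so it must not be instrumented itself. */
static notrace unsigned int read_cycles(void)
{
	static unsigned int fake_counter;

	return ++fake_counter;
}

/* The "tracer": if this hook, or anything it calls, were instrumented,
 * it would recurse -- exactly the ftrace problem the patch avoids. */
notrace void __cyg_profile_func_enter(void *fn, void *call_site)
{
	fprintf(stderr, "enter %p at cycle %u\n", fn, read_cycles());
}

notrace void __cyg_profile_func_exit(void *fn, void *call_site)
{
}

static int do_work(int x)
{
	return x * 2;
}

int main(void)
{
	printf("%d\n", do_work(21));
	return 0;
}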

@@ -63,6 +63,13 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
#define ktime_add(lhs, rhs) \
({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
/*
* Same as ktime_add(), but avoids undefined behaviour on overflow; however,
* this means that you must check the result for overflow yourself.
*/
#define ktime_add_unsafe(lhs, rhs) \
({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })
/*
* Add a ktime_t variable and a scalar nanosecond value.
* res = kt + nsval:
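
The (u64) cast is the whole point of ktime_add_unsafe(): signed 64-bit
overflow is undefined behaviour in C, while unsigned addition is defined
to wrap (converting the result back into the signed tv64 field is
implementation-defined rather than undefined, which is what the kernel
relies on with gcc). ktime_add_safe(), visible in a later hunk, performs
the addition this way and then clamps the result. A rough user-space
sketch of the pattern follows; the clamp condition is illustrative, not
the exact kernel check.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Addition with defined wraparound, mirroring ktime_add_unsafe(). */
static int64_t add_unsafe(int64_t lhs, int64_t rhs)
{
	return (int64_t)((uint64_t)lhs + (uint64_t)rhs);
}

/* Saturating variant in the spirit of ktime_add_safe(): callers of the
 * unsafe form must check for overflow themselves, for example like this. */
static int64_t add_safe(int64_t lhs, int64_t rhs)
{
	int64_t res = add_unsafe(lhs, rhs);

	if (lhs > 0 && rhs > 0 && res < lhs)
		res = INT64_MAX;	/* stand-in for the KTIME_SEC_MAX clamp */
	return res;
}

int main(void)
{
	printf("%" PRId64 "\n", add_safe(INT64_MAX - 1, 10));
	return 0;
}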

@@ -5,6 +5,7 @@
#include <linux/math64.h>
typedef __s64 time64_t;
typedef __u64 timeu64_t;
/*
* This wants to go into uapi/linux/time.h once we agreed about the

@@ -1,7 +1,7 @@
#ifndef _LINUX_TIMEKEEPING_H
#define _LINUX_TIMEKEEPING_H
#include <asm-generic/errno-base.h>
#include <linux/errno.h>
/* Included from linux/ktime.h */

@@ -600,9 +600,18 @@ static void __clocksource_select(bool skipcur)
*/
if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
/* Override clocksource cannot be used. */
pr_warn("Override clocksource %s is not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
cs->name);
override_name[0] = 0;
if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
cs->name);
override_name[0] = 0;
} else {
/*
* The override cannot be currently verified.
* Deferring to let the watchdog check.
*/
pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
cs->name);
}
} else
/* Override clocksource can be used. */
best = cs;
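
The behavioural change here is a three-way outcome when an override
clocksource lacks CLOCK_SOURCE_VALID_FOR_HRES while the system runs in
oneshot/NOHZ mode: an override already marked unstable is discarded, an
unverified one is merely deferred until the watchdog has had a chance to
validate it, and an HRT-capable one is selected as before. A compact
user-space restatement of that decision; the flag values and the enum are
illustrative, not kernel definitions.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_VALID_FOR_HRES	0x01	/* illustrative stand-ins for the */
#define FLAG_UNSTABLE		0x02	/* CLOCK_SOURCE_* flag bits       */

enum override_action { OVERRIDE_USE, OVERRIDE_REJECT, OVERRIDE_DEFER };

static enum override_action classify_override(unsigned int flags, bool oneshot)
{
	if ((flags & FLAG_VALID_FOR_HRES) || !oneshot)
		return OVERRIDE_USE;	/* usable right away */
	if (flags & FLAG_UNSTABLE)
		return OVERRIDE_REJECT;	/* will never become HRT capable */
	return OVERRIDE_DEFER;		/* let the watchdog verify it first */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_override(FLAG_VALID_FOR_HRES, true),
	       classify_override(FLAG_UNSTABLE, true),
	       classify_override(0, true));
	return 0;
}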

@@ -307,7 +307,7 @@ EXPORT_SYMBOL_GPL(__ktime_divns);
*/
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
ktime_t res = ktime_add(lhs, rhs);
ktime_t res = ktime_add_unsafe(lhs, rhs);
/*
* We use KTIME_SEC_MAX here, the maximum timeout which we can
@@ -703,7 +703,7 @@ static void clock_was_set_work(struct work_struct *work)
static DECLARE_WORK(hrtimer_work, clock_was_set_work);
/*
* Called from timekeeping and resume code to reprogramm the hrtimer
* Called from timekeeping and resume code to reprogram the hrtimer
* interrupt device on all cpus.
*/
void clock_was_set_delayed(void)
@@ -1241,7 +1241,7 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
/*
* Note: We clear the running state after enqueue_hrtimer and
* we do not reprogramm the event hardware. Happens either in
* we do not reprogram the event hardware. Happens either in
* hrtimer_start_range_ns() or in hrtimer_interrupt()
*
* Note: Because we dropped the cpu_base->lock above,

@@ -186,10 +186,13 @@ static bool check_tick_dependency(atomic_t *dep)
return false;
}
static bool can_stop_full_tick(struct tick_sched *ts)
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
WARN_ON_ONCE(!irqs_disabled());
if (unlikely(!cpu_online(cpu)))
return false;
if (check_tick_dependency(&tick_dep_mask))
return false;
@@ -843,7 +846,7 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts)
if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
return;
if (can_stop_full_tick(ts))
if (can_stop_full_tick(cpu, ts))
tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
else if (ts->tick_stopped)
tick_nohz_restart_sched_tick(ts, ktime_get());

@@ -780,7 +780,7 @@ struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
{
struct timespec64 res;
set_normalized_timespec64(&res, lhs.tv_sec + rhs.tv_sec,
set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,
lhs.tv_nsec + rhs.tv_nsec);
if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {

@@ -75,5 +75,7 @@ void tk_debug_account_sleep_time(struct timespec64 *t)
int bin = min(fls(t->tv_sec), NUM_BINS-1);
sleep_time_bin[bin]++;
pr_info("Suspended for %lld.%03lu seconds\n", (s64)t->tv_sec,
t->tv_nsec / NSEC_PER_MSEC);
}
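
For context, tk_debug_account_sleep_time() keeps a histogram of suspend
durations: fls() of the sleep time in seconds selects a power-of-two
bucket, capped at NUM_BINS - 1, and this patch adds the pr_info() on top
of that. A user-space sketch of the binning follows; NUM_BINS is assumed
to be 32 here, and fls()/min() are reimplemented locally.

#include <stdio.h>

#define NUM_BINS 32	/* assumption; see kernel/time/timekeeping_debug.c */

static int sleep_time_bin[NUM_BINS];

/* Kernel-style fls(): position of the most significant set bit, 1-based,
 * with fls(0) == 0. */
static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int min(int a, int b)
{
	return a < b ? a : b;
}

/* Mirrors tk_debug_account_sleep_time(): bucket by power of two and
 * print the duration, as the patch now does. */
static void account_sleep_time(unsigned int sec, unsigned int msec)
{
	int bin = min(fls(sec), NUM_BINS - 1);

	sleep_time_bin[bin]++;
	printf("Suspended for %u.%03u seconds (bin %d)\n", sec, msec, bin);
}

int main(void)
{
	account_sleep_time(0, 500);	/* bin 0: under a second */
	account_sleep_time(3, 0);	/* bin 2: 2-3 seconds    */
	account_sleep_time(90, 250);	/* bin 7: 64-127 seconds */
	return 0;
}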