sched/preempt: Fix cond_resched_lock() and cond_resched_softirq()
These functions check should_resched() before unlocking their spinlock
or re-enabling bottom halves, i.e. while preempt_count is still non-zero,
so should_resched() always returns false. As a result cond_resched_lock()
rescheduled only when spin_needbreak() was set.
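To make the failure mode concrete, here is a minimal userspace model of
the old check (all names here are stand-ins for this sketch, not kernel
code):

  #include <stdbool.h>
  #include <stdio.h>

  static int preempt_count;          /* models the per-task/per-CPU counter */
  static bool need_resched = true;   /* pretend the scheduler wants the CPU back */

  static void fake_spin_lock(void)   { preempt_count += 1; }  /* spin_lock() implies preempt_disable() */
  static void fake_spin_unlock(void) { preempt_count -= 1; }

  /* Old check: only true when preempt_count is zero. */
  static bool should_resched_old(void)
  {
          return preempt_count == 0 && need_resched;
  }

  int main(void)
  {
          fake_spin_lock();
          /* cond_resched_lock() tested this before unlocking, so it prints 0: */
          printf("under lock: %d\n", should_resched_old());
          fake_spin_unlock();
          return 0;
  }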
This patch adds a "preempt_offset" argument to should_resched() and
introduces preempt_count offset constants for each case:

  PREEMPT_DISABLE_OFFSET  - offset after preempt_disable()
  PREEMPT_LOCK_OFFSET     - offset after spin_lock()
  SOFTIRQ_DISABLE_OFFSET  - offset after local_bh_disable()
  SOFTIRQ_LOCK_OFFSET     - offset after spin_lock_bh()
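Continuing the sketch above, the fixed check compares against the offset
the caller knows it holds; the constant values below mirror the kernel's
(PREEMPT_OFFSET is 1, local_bh_disable() adds 0x200), but the code is
illustrative only:

  #include <stdbool.h>
  #include <stdio.h>

  #define PREEMPT_DISABLE_OFFSET   1
  #define PREEMPT_LOCK_OFFSET      PREEMPT_DISABLE_OFFSET
  #define SOFTIRQ_DISABLE_OFFSET   0x200
  #define SOFTIRQ_LOCK_OFFSET      (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)

  static int preempt_count;
  static bool need_resched = true;

  /* New check: true only when nothing beyond the expected
   * sections holds preemption off. */
  static bool should_resched_new(int preempt_offset)
  {
          return preempt_count == preempt_offset && need_resched;
  }

  int main(void)
  {
          preempt_count += PREEMPT_LOCK_OFFSET;   /* model spin_lock() */
          printf("%d\n", should_resched_new(PREEMPT_LOCK_OFFSET));  /* 1: can resched */

          preempt_count += 1;                     /* a nested preempt_disable() */
          printf("%d\n", should_resched_new(PREEMPT_LOCK_OFFSET));  /* 0: must not */
          return 0;
  }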
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Graf <agraf@suse.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: bdb4380658 ("sched: Extract the basic add/sub preempt_count modifiers")
Link: http://lkml.kernel.org/r/20150715095204.12246.98268.stgit@buzz
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c56dadf397
commit fe32d3cd5e
5 changed files with 22 additions and 18 deletions
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
 {
-	return unlikely(!raw_cpu_read_4(__preempt_count));
+	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
 }
 
 #ifdef CONFIG_PREEMPT
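On x86, the per-CPU __preempt_count also folds in need_resched as an
inverted high bit (PREEMPT_NEED_RESCHED, bit 31, is set while no
reschedule is wanted), so the single equality above checks both "only
the expected offset is held" and "a reschedule is pending". A userspace
model of that encoding (the constant matches the kernel's; the rest is
a sketch):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define PREEMPT_NEED_RESCHED  0x80000000u  /* inverted: set means no resched wanted */

  static uint32_t count = PREEMPT_NEED_RESCHED;  /* preemption enabled, no resched */

  static bool should_resched(uint32_t preempt_offset)
  {
          return count == preempt_offset;  /* one compare covers both conditions */
  }

  int main(void)
  {
          count += 1;                          /* spin_lock() */
          printf("%d\n", should_resched(1));   /* 0: bit 31 still set */

          count &= ~PREEMPT_NEED_RESCHED;      /* scheduler requests a resched */
          printf("%d\n", should_resched(1));   /* 1 */
          return 0;
  }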
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
 {
-	return unlikely(!preempt_count() && tif_need_resched());
+	return unlikely(preempt_count() == preempt_offset &&
+			tif_need_resched());
 }
 
 #ifdef CONFIG_PREEMPT
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -84,12 +84,20 @@
  */
 #define in_nmi()	(preempt_count() & NMI_MASK)
 
+/*
+ * The preempt_count offset after preempt_disable();
+ */
 #if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_DISABLE_OFFSET 1
+# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
 #else
-# define PREEMPT_DISABLE_OFFSET 0
+# define PREEMPT_DISABLE_OFFSET	0
 #endif
 
+/*
+ * The preempt_count offset after spin_lock()
+ */
+#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
+
 /*
  * The preempt_count offset needed for things like:
  *
@@ -103,7 +111,7 @@
  *
  * Work as expected.
  */
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
 
 /*
  * Are we running in atomic context? WARNING: this macro cannot
@@ -124,7 +132,8 @@
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
+#define preempt_count_dec_and_test() \
+	({ preempt_count_sub(1); should_resched(0); })
 #else
 #define preempt_count_add(val)	__preempt_count_add(val)
 #define preempt_count_sub(val)	__preempt_count_sub(val)
@@ -184,7 +193,7 @@ do { \
 
 #define preempt_check_resched() \
 do { \
-	if (should_resched()) \
+	if (should_resched(0)) \
 		__preempt_schedule(); \
 } while (0)
 
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2891,12 +2891,6 @@ extern int _cond_resched(void);
 
 extern int __cond_resched_lock(spinlock_t *lock);
 
-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET	0
-#endif
-
 #define cond_resched_lock(lock) ({				\
 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
 	__cond_resched_lock(lock);				\
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4496,7 +4496,7 @@ SYSCALL_DEFINE0(sched_yield)
 
 int __sched _cond_resched(void)
 {
-	if (should_resched()) {
+	if (should_resched(0)) {
 		preempt_schedule_common();
 		return 1;
 	}
@@ -4514,7 +4514,7 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
-	int resched = should_resched();
+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
 	int ret = 0;
 
 	lockdep_assert_held(lock);
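The effect is easiest to see at a typical call site. A hypothetical
kernel-style caller (struct table and process_entry() are invented for
this sketch; they are not part of the patch):

  static void scan_table(struct table *t)
  {
          int i;

          spin_lock(&t->lock);
          for (i = 0; i < t->nr_entries; i++) {
                  process_entry(t, i);
                  /*
                   * May drop t->lock, reschedule, and retake it.
                   * Before this fix, should_resched() saw the lock's own
                   * +1 in preempt_count and bailed, so this yielded only
                   * when spin_needbreak() reported contention.
                   */
                  cond_resched_lock(&t->lock);
          }
          spin_unlock(&t->lock);
  }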
@@ -4536,7 +4536,7 @@ int __sched __cond_resched_softirq(void)
 {
 	BUG_ON(!in_softirq());
 
-	if (should_resched()) {
+	if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
 		local_bh_enable();
 		preempt_schedule_common();
 		local_bh_disable();
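The softirq path follows the same arithmetic: local_bh_disable() adds
SOFTIRQ_DISABLE_OFFSET (0x200) to preempt_count, so the old zero test
could never succeed inside __cond_resched_softirq(). A last userspace
check of that arithmetic (same illustrative constant as above):

  #include <stdio.h>

  #define SOFTIRQ_DISABLE_OFFSET  0x200  /* what local_bh_disable() adds */

  int main(void)
  {
          int preempt_count = SOFTIRQ_DISABLE_OFFSET;  /* inside a BH-disabled section */
          printf("old test (== 0):      %d\n", preempt_count == 0);                       /* 0 */
          printf("new test (== offset): %d\n", preempt_count == SOFTIRQ_DISABLE_OFFSET);  /* 1 */
          return 0;
  }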