tasklets: execute tasklets in the same order they were queued
I noticed this when looking at an openswan issue. Openswan (ab?)uses the tasklet API to defer processing of packets in some situations, with one packet per tasklet_action(). I started noticing sequences of backwards-ordered sequence numbers coming over the wire, since new tasklets are always queued at the head of the list but processed sequentially. Convert it to instead append new entries to the tail of the list. As an extra bonus, the splicing code in takeover_tasklets() no longer has to iterate over the list. Signed-off-by: Olof Johansson <olof@lixom.net> Cc: "David S. Miller" <davem@davemloft.net> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
ac086bc229
commit
48f20a9a94
1 changed file with 41 additions and 22 deletions
|
@@ -356,7 +356,8 @@ void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
|
||||||
/* Tasklets */
|
/* Tasklets */
|
||||||
struct tasklet_head
|
struct tasklet_head
|
||||||
{
|
{
|
||||||
struct tasklet_struct *list;
|
struct tasklet_struct *head;
|
||||||
|
struct tasklet_struct **tail;
|
||||||
};
|
};
|
||||||
|
|
||||||
/* Some compilers disobey section attribute on statics when not
|
/* Some compilers disobey section attribute on statics when not
|
||||||
|
@@ -369,8 +370,9 @@ void __tasklet_schedule(struct tasklet_struct *t)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
t->next = __get_cpu_var(tasklet_vec).list;
|
t->next = NULL;
|
||||||
__get_cpu_var(tasklet_vec).list = t;
|
*__get_cpu_var(tasklet_vec).tail = t;
|
||||||
|
__get_cpu_var(tasklet_vec).tail = &(t->next);
|
||||||
raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
|
@@ -382,8 +384,9 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
local_irq_save(flags);
|
local_irq_save(flags);
|
||||||
t->next = __get_cpu_var(tasklet_hi_vec).list;
|
t->next = NULL;
|
||||||
__get_cpu_var(tasklet_hi_vec).list = t;
|
*__get_cpu_var(tasklet_hi_vec).tail = t;
|
||||||
|
__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
|
||||||
raise_softirq_irqoff(HI_SOFTIRQ);
|
raise_softirq_irqoff(HI_SOFTIRQ);
|
||||||
local_irq_restore(flags);
|
local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
|
@@ -395,8 +398,9 @@ static void tasklet_action(struct softirq_action *a)
|
||||||
struct tasklet_struct *list;
|
struct tasklet_struct *list;
|
||||||
|
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
list = __get_cpu_var(tasklet_vec).list;
|
list = __get_cpu_var(tasklet_vec).head;
|
||||||
__get_cpu_var(tasklet_vec).list = NULL;
|
__get_cpu_var(tasklet_vec).head = NULL;
|
||||||
|
__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
|
||||||
local_irq_enable();
|
local_irq_enable();
|
||||||
|
|
||||||
while (list) {
|
while (list) {
|
||||||
|
@@ -416,8 +420,9 @@ static void tasklet_action(struct softirq_action *a)
|
||||||
}
|
}
|
||||||
|
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
t->next = __get_cpu_var(tasklet_vec).list;
|
t->next = NULL;
|
||||||
__get_cpu_var(tasklet_vec).list = t;
|
*__get_cpu_var(tasklet_vec).tail = t;
|
||||||
|
__get_cpu_var(tasklet_vec).tail = &(t->next);
|
||||||
__raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
__raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
||||||
local_irq_enable();
|
local_irq_enable();
|
||||||
}
|
}
|
||||||
|
@@ -428,8 +433,9 @@ static void tasklet_hi_action(struct softirq_action *a)
|
||||||
struct tasklet_struct *list;
|
struct tasklet_struct *list;
|
||||||
|
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
list = __get_cpu_var(tasklet_hi_vec).list;
|
list = __get_cpu_var(tasklet_hi_vec).head;
|
||||||
__get_cpu_var(tasklet_hi_vec).list = NULL;
|
__get_cpu_var(tasklet_hi_vec).head = NULL;
|
||||||
|
__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
|
||||||
local_irq_enable();
|
local_irq_enable();
|
||||||
|
|
||||||
while (list) {
|
while (list) {
|
||||||
|
@@ -449,8 +455,9 @@ static void tasklet_hi_action(struct softirq_action *a)
|
||||||
}
|
}
|
||||||
|
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
t->next = __get_cpu_var(tasklet_hi_vec).list;
|
t->next = NULL;
|
||||||
__get_cpu_var(tasklet_hi_vec).list = t;
|
*__get_cpu_var(tasklet_hi_vec).tail = t;
|
||||||
|
__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
|
||||||
__raise_softirq_irqoff(HI_SOFTIRQ);
|
__raise_softirq_irqoff(HI_SOFTIRQ);
|
||||||
local_irq_enable();
|
local_irq_enable();
|
||||||
}
|
}
|
||||||
|
@@ -487,6 +494,15 @@ EXPORT_SYMBOL(tasklet_kill);
|
||||||
|
|
||||||
void __init softirq_init(void)
|
void __init softirq_init(void)
|
||||||
{
|
{
|
||||||
|
int cpu;
|
||||||
|
|
||||||
|
for_each_possible_cpu(cpu) {
|
||||||
|
per_cpu(tasklet_vec, cpu).tail =
|
||||||
|
&per_cpu(tasklet_vec, cpu).head;
|
||||||
|
per_cpu(tasklet_hi_vec, cpu).tail =
|
||||||
|
&per_cpu(tasklet_hi_vec, cpu).head;
|
||||||
|
}
|
||||||
|
|
||||||
open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
|
open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
|
||||||
open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
|
open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
|
||||||
}
|
}
|
||||||
|
@@ -555,9 +571,12 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* CPU is dead, so no lock needed. */
|
/* CPU is dead, so no lock needed. */
|
||||||
for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
|
for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
|
||||||
if (*i == t) {
|
if (*i == t) {
|
||||||
*i = t->next;
|
*i = t->next;
|
||||||
|
/* If this was the tail element, move the tail ptr */
|
||||||
|
if (*i == NULL)
|
||||||
|
per_cpu(tasklet_vec, cpu).tail = i;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -566,20 +585,20 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
|
||||||
|
|
||||||
static void takeover_tasklets(unsigned int cpu)
|
static void takeover_tasklets(unsigned int cpu)
|
||||||
{
|
{
|
||||||
struct tasklet_struct **i;
|
|
||||||
|
|
||||||
/* CPU is dead, so no lock needed. */
|
/* CPU is dead, so no lock needed. */
|
||||||
local_irq_disable();
|
local_irq_disable();
|
||||||
|
|
||||||
/* Find end, append list for that CPU. */
|
/* Find end, append list for that CPU. */
|
||||||
for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
|
*__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).head;
|
||||||
*i = per_cpu(tasklet_vec, cpu).list;
|
__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
|
||||||
per_cpu(tasklet_vec, cpu).list = NULL;
|
per_cpu(tasklet_vec, cpu).head = NULL;
|
||||||
|
per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
|
||||||
raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
raise_softirq_irqoff(TASKLET_SOFTIRQ);
|
||||||
|
|
||||||
for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
|
*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
|
||||||
*i = per_cpu(tasklet_hi_vec, cpu).list;
|
__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
|
||||||
per_cpu(tasklet_hi_vec, cpu).list = NULL;
|
per_cpu(tasklet_hi_vec, cpu).head = NULL;
|
||||||
|
per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
|
||||||
raise_softirq_irqoff(HI_SOFTIRQ);
|
raise_softirq_irqoff(HI_SOFTIRQ);
|
||||||
|
|
||||||
local_irq_enable();
|
local_irq_enable();
|
||||||
|
|
Loading…
Reference in a new issue