[PKT_SCHED]: GRED: Support ECN marking

Adds a new u8 flags field in an unused padding area of the netlink
message. Adds ECN marking support to be used instead of dropping
packets immediately.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
commit b38c7eef7e (parent d8f64e1960)
Author: Thomas Graf
Date:   2005-11-05 21:14:27 +01:00 (committed by Thomas Graf)

2 changed files with 23 additions and 6 deletions

include/linux/pkt_sched.h

@@ -220,8 +220,8 @@ struct tc_gred_sopt
 	__u32		DPs;
 	__u32		def_DP;
 	__u8		grio;
-	__u8		pad1;
-	__u16		pad2;
+	__u8		flags;
+	__u16		pad1;
 };

 /* HTB section */
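
With flags carved out of the old padding, existing userspace that zeroed the
pad bytes keeps working unchanged, while new userspace can request ECN. A
minimal sketch (not part of this commit) of how a configurator might fill the
reworked struct, reusing the TC_RED_ECN bit that the qdisc checks below:

    #include <linux/pkt_sched.h>
    #include <stdio.h>

    int main(void)
    {
            /* Fill the table-setup options as a GRED configurator might;
             * .flags is the field this commit carves out of the padding. */
            struct tc_gred_sopt sopt = {
                    .DPs    = 3,            /* three virtual queues */
                    .def_DP = 1,            /* default virtual queue */
                    .grio   = 0,            /* no priority (RIO) mode */
                    .flags  = TC_RED_ECN,   /* mark instead of dropping */
            };

            printf("GRED flags: 0x%x\n", sopt.flags);
            return 0;
    }

Borrowing the TC_RED_* flag namespace means GRED can pick up further RED
flags later without another layout change.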

net/sched/sch_gred.c

@@ -55,6 +55,7 @@ struct gred_sched
 {
 	struct gred_sched_data *tab[MAX_DPs];
 	unsigned long	flags;
+	u32		red_flags;
 	u32		DPs;
 	u32		def;
 	struct red_parms wred_set;
@@ -140,6 +141,11 @@ static inline void gred_store_wred_set(struct gred_sched *table,
 	table->wred_set.qavg = q->parms.qavg;
 }

+static inline int gred_use_ecn(struct gred_sched *t)
+{
+	return t->red_flags & TC_RED_ECN;
+}
+
 static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct gred_sched_data *q=NULL;
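
Note that gred_use_ecn() only reports whether the queue is configured to
mark; whether a particular packet may be marked is up to INET_ECN_set_ce(),
which refuses packets whose sender never advertised ECN. A simplified
userspace illustration of that per-packet check on the IPv4 ToS byte (an
assumption for illustration only; the real helper also fixes up the IP
checksum and handles IPv6):

    #include <stdint.h>

    #define ECN_MASK        0x03    /* low two ToS bits (RFC 3168) */
    #define ECN_NOT_ECT     0x00    /* sender is not ECN-capable */
    #define ECN_CE          0x03    /* Congestion Experienced */

    /* Returns 1 if the packet could be CE-marked, 0 if the caller must
     * fall back to dropping (mirrors INET_ECN_set_ce()'s contract, not
     * its implementation). */
    static int set_ce(uint8_t *tos)
    {
            if ((*tos & ECN_MASK) == ECN_NOT_ECT)
                    return 0;
            *tos |= ECN_CE;
            return 1;
    }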
@@ -198,13 +204,22 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)

 		case RED_PROB_MARK:
 			sch->qstats.overlimits++;
-			q->stats.prob_drop++;
-			goto congestion_drop;
+			if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+				q->stats.prob_drop++;
+				goto congestion_drop;
+			}
+			q->stats.prob_mark++;
+			break;

 		case RED_HARD_MARK:
 			sch->qstats.overlimits++;
-			q->stats.forced_drop++;
-			goto congestion_drop;
+			if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+				q->stats.forced_drop++;
+				goto congestion_drop;
+			}
+
+			q->stats.forced_mark++;
+			break;
 	}

 	if (q->backlog + skb->len <= q->limit) {
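
Both branches now follow the same pattern: try to mark first and keep the
drop path as the fallback, so the drop counters only grow when marking was
impossible. Checking gred_use_ecn() before INET_ECN_set_ce() also means
packets on non-ECN queues are never modified. Condensed, each branch reduces
to (a restatement for clarity, not code beyond the patch):

    /* Shape of the RED_PROB_MARK branch after this change
     * (RED_HARD_MARK is identical with the forced_* counters): */
    if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
            q->stats.prob_drop++;   /* cannot mark: drop as before */
            goto congestion_drop;
    }
    q->stats.prob_mark++;           /* marked: fall through to enqueue */
    break;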
@@ -348,6 +363,7 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
 	sch_tree_lock(sch);
 	table->DPs = sopt->DPs;
 	table->def = sopt->def_DP;
+	table->red_flags = sopt->flags;

 	/*
 	 * Every entry point to GRED is synchronized with the above code
@@ -489,6 +505,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		.DPs	= table->DPs,
 		.def_DP	= table->def,
 		.grio	= gred_rio_mode(table),
+		.flags	= table->red_flags,
 	};

 	opts = RTA_NEST(skb, TCA_OPTIONS);
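
On the dump side the flags are echoed back in the same tc_gred_sopt blob, so
tooling can confirm the setting round-trips. A hypothetical reader, assuming
the sopt is carried in a TCA_GRED_DPS attribute as in the kernel's gred dump
path:

    #include <linux/rtnetlink.h>
    #include <linux/pkt_sched.h>

    /* Hypothetical helper: returns 1 if ECN marking is on, 0 if off,
     * -1 if the attribute is too short to carry a tc_gred_sopt. */
    static int gred_ecn_enabled(struct rtattr *rta)
    {
            const struct tc_gred_sopt *sopt = RTA_DATA(rta);

            if (RTA_PAYLOAD(rta) < (int)sizeof(*sopt))
                    return -1;
            return !!(sopt->flags & TC_RED_ECN);
    }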