[TCP] H-TCP: Account for delayed-ACKs

Account for delayed-ACKs in H-TCP.

Delayed ACKs cause H-TCP to be less aggressive than its design calls
for. This is especially true when the receiver is a Linux machine,
where the average delayed ACK covers more than 3 packets, with values
of 7 not unheard of.
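
As a rough illustration (a standalone userspace sketch, not part of this
patch; the window size and ACK batch size below are assumed numbers):
when the receiver acknowledges several segments per ACK, a counter
bumped once per ACK earns far less credit per window than one credited
with the number of packets each ACK actually covers, which is what the
pkts_acked accounting in this patch restores.

    #include <stdio.h>

    int main(void)
    {
            unsigned int cwnd = 32;        /* segments sent in one window (assumed) */
            unsigned int acks_cover = 3;   /* avg segments per delayed ACK (assumed) */
            unsigned int per_ack = 0, per_pkt = 0;
            unsigned int sent;

            for (sent = 0; sent < cwnd; sent += acks_cover) {
                    per_ack += 1;           /* old behaviour: one credit per ACK */
                    per_pkt += acks_cover;  /* new behaviour: credit pkts_acked */
            }

            printf("credits per window, counting ACKs:    %u\n", per_ack);
            printf("credits per window, counting packets: %u\n", per_pkt);
            return 0;
    }

With these assumed numbers the per-ACK counter earns roughly a third of
the credit the design expects, which is why cwnd grows more slowly than
H-TCP's alpha calls for.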

Signed-off-by: Baruch Even <baruch@ev-en.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0bc6d90b82
parent c33ad6e476
Baruch Even, 2006-03-20 22:22:47 -08:00, committed by David S. Miller

--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -29,7 +29,8 @@ struct htcp {
         u8 modeswitch;  /* Delay modeswitch until we had at least one congestion event */
         u8 ccount;      /* Number of RTTs since last congestion event */
         u8 undo_ccount;
-        u16 packetcount;
+        u16 pkts_acked;
+        u32 packetcount;
         u32 minRTT;
         u32 maxRTT;
         u32 snd_cwnd_cnt2;
@@ -92,6 +93,12 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked)
         struct htcp *ca = inet_csk_ca(sk);
         u32 now = tcp_time_stamp;
 
+        if (icsk->icsk_ca_state == TCP_CA_Open)
+                ca->pkts_acked = pkts_acked;
+
+        if (!use_bandwidth_switch)
+                return;
+
         /* achieved throughput calculations */
         if (icsk->icsk_ca_state != TCP_CA_Open &&
             icsk->icsk_ca_state != TCP_CA_Disorder) {
@@ -217,20 +224,24 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
                 measure_rtt(sk);
 
                 /* keep track of number of round-trip times since last backoff event */
-                if (ca->snd_cwnd_cnt2++ > tp->snd_cwnd) {
+                if (ca->snd_cwnd_cnt2 >= tp->snd_cwnd) {
                         ca->ccount++;
-                        ca->snd_cwnd_cnt2 = 0;
+                        ca->snd_cwnd_cnt2 -= tp->snd_cwnd;
                         htcp_alpha_update(ca);
-                }
+                } else
+                        ca->snd_cwnd_cnt2 += ca->pkts_acked;
 
                 /* In dangerous area, increase slowly.
                  * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
                  */
-                if ((tp->snd_cwnd_cnt++ * ca->alpha)>>7 >= tp->snd_cwnd) {
+                if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tp->snd_cwnd) {
                         if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                                 tp->snd_cwnd++;
                         tp->snd_cwnd_cnt = 0;
-                }
+                } else
+                        tp->snd_cwnd_cnt += ca->pkts_acked;
+
+                ca->pkts_acked = 1;
         }
 }
 
@@ -249,6 +260,7 @@ static void htcp_init(struct sock *sk)
         memset(ca, 0, sizeof(struct htcp));
         ca->alpha = ALPHA_BASE;
         ca->beta = BETA_MIN;
+        ca->pkts_acked = 1;
 }
 
 static void htcp_state(struct sock *sk, u8 new_state)
@@ -278,8 +290,6 @@ static int __init htcp_register(void)
 {
         BUG_ON(sizeof(struct htcp) > ICSK_CA_PRIV_SIZE);
         BUILD_BUG_ON(BETA_MIN >= BETA_MAX);
-        if (!use_bandwidth_switch)
-                htcp.pkts_acked = NULL;
         return tcp_register_congestion_control(&htcp);
 }
 
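
For illustration only (a self-contained userspace sketch, not kernel
code; the window size, ACK count, and batch size are assumed numbers):
the rewritten counters above credit ca->pkts_acked per ACK and, on
overflow, subtract snd_cwnd instead of resetting to zero, so leftover
credit carries into the next round and RTTs since the last backoff are
counted close to once per window of data even under delayed ACKs.

    #include <stdio.h>

    int main(void)
    {
            const unsigned int snd_cwnd = 10;   /* assumed congestion window */
            const unsigned int pkts_acked = 3;  /* assumed delayed-ACK batch size */
            unsigned int old_cnt = 0, old_rtts = 0;
            unsigned int new_cnt = 0, new_rtts = 0;
            int ack;

            for (ack = 0; ack < 40; ack++) {
                    /* pre-patch: one credit per ACK, counter reset on overflow */
                    if (old_cnt++ > snd_cwnd) {
                            old_rtts++;
                            old_cnt = 0;
                    }

                    /* post-patch: credit pkts_acked per ACK, carry the remainder */
                    if (new_cnt >= snd_cwnd) {
                            new_rtts++;
                            new_cnt -= snd_cwnd;
                    } else
                            new_cnt += pkts_acked;
            }

            /* 40 ACKs cover ~120 segments, i.e. about 12 windows of data */
            printf("RTT rounds seen, per-ACK counting:    %u\n", old_rtts);
            printf("RTT rounds seen, per-packet counting: %u\n", new_rtts);
            return 0;
    }

With these assumed numbers the old counter registers 3 RTT rounds and
the new one 9, against roughly 12 windows of data actually acknowledged,
so the round count tracks real round trips far more closely.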