[SK_BUFF]: Introduce tcp_hdr(), remove skb->h.th
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ab6a5bb6b2
commit aa8223c7bb
23 changed files with 134 additions and 122 deletions
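The conversion throughout the diff below is mechanical: every open-coded skb->h.th dereference becomes a call to the new tcp_hdr() accessor. A minimal sketch of the pattern — the helper body is the one added in the tcp header hunk of this diff, while the caller lines are illustrative only and not lifted from any particular file in the commit:

    /* New accessor introduced by this commit: the TCP header still lives at
     * skb->h.raw; callers simply stop poking at the union member directly. */
    static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
    {
    	return (struct tcphdr *)skb->h.raw;
    }

    /* Typical call-site conversion (illustrative):      */
    /* before: */ th = skb->h.th;      if (skb->h.th->fin)    ...
    /* after:  */ th = tcp_hdr(skb);   if (tcp_hdr(skb)->fin) ...

With callers no longer touching the union member, the skb->h.th field itself can be deleted from struct sk_buff, and presumably any later change to how sk_buff stores the transport header only has to touch the accessor rather than every call site.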
@@ -1298,9 +1298,10 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,

 iph->tot_len = 0;
 iph->check = 0;
-skb->h.th->check = ~csum_tcpudp_magic(iph->saddr,
+tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
 iph->daddr, 0,
-IPPROTO_TCP, 0);
+IPPROTO_TCP,
+0);
 ipofst = skb_network_offset(skb);
 if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
 tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;

@@ -4524,7 +4524,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

 tcp_opt_len = 0;
-if (skb->h.th->doff > 5)
+if (tcp_hdr(skb)->doff > 5)
 tcp_opt_len = tcp_optlen(skb);

 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

@@ -4532,9 +4532,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 iph = ip_hdr(skb);
 iph->check = 0;
 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
-skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-0, IPPROTO_TCP, 0);
+tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+iph->daddr, 0,
+IPPROTO_TCP, 0);
 if (tcp_opt_len || (iph->ihl > 5)) {
 vlan_tag_flags |= ((iph->ihl - 5) +
 (tcp_opt_len >> 2)) << 8;

@@ -1872,7 +1872,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 hdr->opcode = CPL_TX_PKT_LSO;
 hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
 hdr->ip_hdr_words = ip_hdr(skb)->ihl;
-hdr->tcp_hdr_words = skb->h.th->doff;
+hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
 skb_shinfo(skb)->gso_size));
 hdr->len = htonl(skb->len - sizeof(*hdr));

@@ -901,7 +901,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 CPL_ETH_II : CPL_ETH_II_VLAN;
 tso_info |= V_LSO_ETH_TYPE(eth_type) |
 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
-V_LSO_TCPHDR_WORDS(skb->h.th->doff);
+V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
 hdr->lso_info = htonl(tso_info);
 flits = 3;
 } else {
@@ -2893,14 +2893,15 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 struct iphdr *iph = ip_hdr(skb);
 iph->tot_len = 0;
 iph->check = 0;
-skb->h.th->check = ~csum_tcpudp_magic(iph->saddr,
+tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
 iph->daddr, 0,
-IPPROTO_TCP, 0);
+IPPROTO_TCP,
+0);
 cmd_length = E1000_TXD_CMD_IP;
 ipcse = skb_transport_offset(skb) - 1;
 } else if (skb->protocol == htons(ETH_P_IPV6)) {
 ipv6_hdr(skb)->payload_len = 0;
-skb->h.th->check =
+tcp_hdr(skb)->check =
 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 &ipv6_hdr(skb)->daddr,
 0, IPPROTO_TCP, 0);

@@ -2909,7 +2910,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 ipcss = skb_network_offset(skb);
 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
 tucss = skb_transport_offset(skb);
-tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
+tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
 tucse = 0;

 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |

@@ -1426,7 +1426,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 if (proto == IPPROTO_TCP) {
 csoff += offsetof(struct tcphdr, check);
-skb->h.th->check = csum;
+tcp_hdr(skb)->check = csum;
 }

 w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT);

@@ -1195,13 +1195,14 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 iph = ip_hdr(skb);
 iph->tot_len = 0;
 iph->check = 0;
-skb->h.th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-0, IPPROTO_TCP, 0);
+tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+iph->daddr, 0,
+IPPROTO_TCP, 0);
 ipcss = skb_network_offset(skb);
 ipcso = (void *)&(iph->check) - (void *)skb->data;
 ipcse = skb_transport_offset(skb) - 1;
 tucss = skb_transport_offset(skb);
-tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
+tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
 tucse = 0;

 i = adapter->tx_ring.next_to_use;
@@ -1169,7 +1169,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
 desc->l4i_chk = udp_hdr(skb)->check;
 break;
 case IPPROTO_TCP:
-desc->l4i_chk = skb->h.th->check;
+desc->l4i_chk = tcp_hdr(skb)->check;
 break;
 default:
 BUG();

@@ -3922,7 +3922,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
 TXD_FLAG_CPU_POST_DMA);

-skb->h.th->check = 0;
+tcp_hdr(skb)->check = 0;

 }
 else if (skb->ip_summed == CHECKSUM_PARTIAL)

@@ -4080,14 +4080,13 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 iph->check = 0;
 iph->tot_len = htons(mss + hdr_len);
 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
-skb->h.th->check = 0;
+tcp_hdr(skb)->check = 0;
 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
-}
-else {
-skb->h.th->check = ~csum_tcpudp_magic(iph->saddr,
-iph->daddr, 0,
-IPPROTO_TCP, 0);
-}
+} else
+tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+iph->daddr, 0,
+IPPROTO_TCP,
+0);

 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {

@@ -416,7 +416,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
 eddp->skb_offset += VLAN_HLEN;
 #endif /* CONFIG_QETH_VLAN */
 }
-tcph = eddp->skb->h.th;
+tcph = tcp_hdr(eddp->skb);
 while (eddp->skb_offset < eddp->skb->len) {
 data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
 (int)(eddp->skb->len - eddp->skb_offset));

@@ -41,7 +41,7 @@ qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)

 hdr = (struct qeth_hdr_tso *) skb->data;
 iph = ip_hdr(skb);
-tcph = skb->h.th;
+tcph = tcp_hdr(skb);
 /*fix header to TSO values ...*/
 hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
 /*set values which are fix for the first approach ...*/

@@ -65,7 +65,7 @@ qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
 {
 struct iphdr *iph = ip_hdr(skb);
 struct ipv6hdr *ip6h = ipv6_hdr(skb);
-struct tcphdr *tcph = skb->h.th;
+struct tcphdr *tcph = tcp_hdr(skb);

 tcph->check = 0;
 if (skb->protocol == ETH_P_IPV6) {

@@ -237,7 +237,6 @@ struct sk_buff {
 /* 4 byte hole on 64 bit*/

 union {
-struct tcphdr *th;
 struct iphdr *ipiph;
 struct ipv6hdr *ipv6h;
 unsigned char *raw;
@@ -178,14 +178,19 @@ struct tcp_md5sig {
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>

+static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+{
+return (struct tcphdr *)skb->h.raw;
+}
+
 static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
 {
-return skb->h.th->doff * 4;
+return tcp_hdr(skb)->doff * 4;
 }

 static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 {
-return (skb->h.th->doff - 5) * 4;
+return (tcp_hdr(skb)->doff - 5) * 4;
 }

 /* This defines a selective acknowledgement block. */

@@ -984,7 +984,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
 ireq->wscale_ok = rx_opt->wscale_ok;
 ireq->acked = 0;
 ireq->ecn_ok = 0;
-ireq->rmt_port = skb->h.th->source;
+ireq->rmt_port = tcp_hdr(skb)->source;
 }

 extern void tcp_enter_memory_pressure(void);

@@ -54,7 +54,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
 INET_ECN_xmit(sk);
 if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
-skb->h.th->cwr = 1;
+tcp_hdr(skb)->cwr = 1;
 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 }
 } else {

@@ -62,7 +62,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
 INET_ECN_dontxmit(sk);
 }
 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
-skb->h.th->ece = 1;
+tcp_hdr(skb)->ece = 1;
 }
 }

@@ -70,7 +70,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,

 static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
 {
-if (skb->h.th->cwr)
+if (tcp_hdr(skb)->cwr)
 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }

@@ -1352,8 +1352,8 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
 .tos = RT_TOS(ip_hdr(skb)->tos) } },
 /* Not quite clean, but right. */
 .uli_u = { .ports =
-{ .sport = skb->h.th->dest,
-.dport = skb->h.th->source } },
+{ .sport = tcp_hdr(skb)->dest,
+.dport = tcp_hdr(skb)->source } },
 .proto = sk->sk_protocol };
 security_skb_classify_flow(skb, &fl);
 if (ip_route_output_key(&rt, &fl))
@@ -125,10 +125,11 @@ static __u16 const msstab[] = {
 __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 {
 struct tcp_sock *tp = tcp_sk(sk);
+const struct iphdr *iph = ip_hdr(skb);
+const struct tcphdr *th = tcp_hdr(skb);
 int mssind;
 const __u16 mss = *mssp;

 tp->last_synq_overflow = jiffies;

 /* XXX sort msstab[] by probability? Binary search? */

@@ -138,9 +139,8 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)

 NET_INC_STATS_BH(LINUX_MIB_SYNCOOKIESSENT);

-return secure_tcp_syn_cookie(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-skb->h.th->source, skb->h.th->dest,
-ntohl(skb->h.th->seq),
+return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
+th->source, th->dest, ntohl(th->seq),
 jiffies / (HZ * 60), mssind);
 }

@@ -157,14 +157,13 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 */
 static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
 {
-__u32 seq;
-__u32 mssind;
-
-seq = ntohl(skb->h.th->seq)-1;
-mssind = check_tcp_syn_cookie(cookie,
-ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-skb->h.th->source, skb->h.th->dest,
-seq, jiffies / (HZ * 60), COUNTER_TRIES);
+const struct iphdr *iph = ip_hdr(skb);
+const struct tcphdr *th = tcp_hdr(skb);
+__u32 seq = ntohl(th->seq) - 1;
+__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
+th->source, th->dest, seq,
+jiffies / (HZ * 60),
+COUNTER_TRIES);

 return mssind < NUM_MSS ? msstab[mssind] + 1 : 0;
 }

@@ -191,14 +190,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 struct inet_request_sock *ireq;
 struct tcp_request_sock *treq;
 struct tcp_sock *tp = tcp_sk(sk);
-__u32 cookie = ntohl(skb->h.th->ack_seq) - 1;
+const struct tcphdr *th = tcp_hdr(skb);
+__u32 cookie = ntohl(th->ack_seq) - 1;
 struct sock *ret = sk;
 struct request_sock *req;
 int mss;
 struct rtable *rt;
 __u8 rcv_wscale;

-if (!sysctl_tcp_syncookies || !skb->h.th->ack)
+if (!sysctl_tcp_syncookies || !th->ack)
 goto out;

 if (time_after(jiffies, tp->last_synq_overflow + TCP_TIMEOUT_INIT) ||

@@ -220,10 +220,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 }
 ireq = inet_rsk(req);
 treq = tcp_rsk(req);
-treq->rcv_isn = ntohl(skb->h.th->seq) - 1;
+treq->rcv_isn = ntohl(th->seq) - 1;
 treq->snt_isn = cookie;
 req->mss = mss;
-ireq->rmt_port = skb->h.th->source;
+ireq->rmt_port = th->source;
 ireq->loc_addr = ip_hdr(skb)->daddr;
 ireq->rmt_addr = ip_hdr(skb)->saddr;
 ireq->opt = NULL;

@@ -261,8 +261,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 .tos = RT_CONN_FLAGS(sk) } },
 .proto = IPPROTO_TCP,
 .uli_u = { .ports =
-{ .sport = skb->h.th->dest,
-.dport = skb->h.th->source } } };
+{ .sport = th->dest,
+.dport = th->source } } };
 security_req_classify_flow(req, &fl);
 if (ip_route_output_key(&rt, &fl)) {
 reqsk_free(req);
@@ -425,7 +425,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 /* Subtract 1, if FIN is in queue. */
 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
 answ -=
-((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
+tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
 } else
 answ = tp->urg_seq - tp->copied_seq;
 release_sock(sk);

@@ -1016,9 +1016,9 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)

 skb_queue_walk(&sk->sk_receive_queue, skb) {
 offset = seq - TCP_SKB_CB(skb)->seq;
-if (skb->h.th->syn)
+if (tcp_hdr(skb)->syn)
 offset--;
-if (offset < skb->len || skb->h.th->fin) {
+if (offset < skb->len || tcp_hdr(skb)->fin) {
 *off = offset;
 return skb;
 }

@@ -1070,7 +1070,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 if (offset != skb->len)
 break;
 }
-if (skb->h.th->fin) {
+if (tcp_hdr(skb)->fin) {
 sk_eat_skb(sk, skb, 0);
 ++seq;
 break;

@@ -1174,11 +1174,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 break;
 }
 offset = *seq - TCP_SKB_CB(skb)->seq;
-if (skb->h.th->syn)
+if (tcp_hdr(skb)->syn)
 offset--;
 if (offset < skb->len)
 goto found_ok_skb;
-if (skb->h.th->fin)
+if (tcp_hdr(skb)->fin)
 goto found_fin_ok;
 BUG_TRAP(flags & MSG_PEEK);
 skb = skb->next;

@@ -1394,7 +1394,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 if (used + offset < skb->len)
 continue;

-if (skb->h.th->fin)
+if (tcp_hdr(skb)->fin)
 goto found_fin_ok;
 if (!(flags & MSG_PEEK)) {
 sk_eat_skb(sk, skb, copied_early);

@@ -1563,7 +1563,7 @@ void tcp_close(struct sock *sk, long timeout)
 */
 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
-skb->h.th->fin;
+tcp_hdr(skb)->fin;
 data_was_unread += len;
 __kfree_skb(skb);
 }

@@ -2170,7 +2170,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 if (!pskb_may_pull(skb, sizeof(*th)))
 goto out;

-th = skb->h.th;
+th = tcp_hdr(skb);
 thlen = th->doff * 4;
 if (thlen < sizeof(*th))
 goto out;

@@ -2210,7 +2210,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 delta = htonl(oldlen + (thlen + len));

 skb = segs;
-th = skb->h.th;
+th = tcp_hdr(skb);
 seq = ntohl(th->seq);

 do {

@@ -2224,7 +2224,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)

 seq += len;
 skb = skb->next;
-th = skb->h.th;
+th = tcp_hdr(skb);

 th->seq = htonl(seq);
 th->cwr = 0;
@@ -148,7 +148,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
 * to handle super-low mtu links fairly.
 */
 (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
-!(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
+!(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
 /* Subtract also invariant (if peer is RFC compliant),
 * tcp header plus fixed timestamp option length.
 * Resulting "len" is MSS free of SACK jitter.

@@ -2559,9 +2559,9 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
 struct sk_buff *skb, u32 ack, u32 ack_seq)
 {
 int flag = 0;
-u32 nwin = ntohs(skb->h.th->window);
+u32 nwin = ntohs(tcp_hdr(skb)->window);

-if (likely(!skb->h.th->syn))
+if (likely(!tcp_hdr(skb)->syn))
 nwin <<= tp->rx_opt.snd_wscale;

 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {

@@ -2766,7 +2766,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 if (TCP_SKB_CB(skb)->sacked)
 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);

-if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
+if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
 flag |= FLAG_ECE;

 tcp_ca_event(sk, CA_EVENT_SLOW_ACK);

@@ -2833,7 +2833,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
 {
 unsigned char *ptr;
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);
 int length=(th->doff*4)-sizeof(struct tcphdr);

 ptr = (unsigned char *)(th + 1);

@@ -2995,7 +2995,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
 struct tcp_sock *tp = tcp_sk(sk);
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);
 u32 seq = TCP_SKB_CB(skb)->seq;
 u32 ack = TCP_SKB_CB(skb)->ack_seq;

@@ -3357,8 +3357,8 @@ static void tcp_ofo_queue(struct sock *sk)
 __skb_unlink(skb, &tp->out_of_order_queue);
 __skb_queue_tail(&sk->sk_receive_queue, skb);
 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-if (skb->h.th->fin)
-tcp_fin(skb, sk, skb->h.th);
+if (tcp_hdr(skb)->fin)
+tcp_fin(skb, sk, tcp_hdr(skb));
 }
 }

@@ -3366,7 +3366,7 @@ static int tcp_prune_queue(struct sock *sk);

 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);
 struct tcp_sock *tp = tcp_sk(sk);
 int eaten = -1;

@@ -3605,7 +3605,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 * - bloated or contains data before "start" or
 * overlaps to the next one.
 */
-if (!skb->h.th->syn && !skb->h.th->fin &&
+if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
 (tcp_win_from_space(skb->truesize) > skb->len ||
 before(TCP_SKB_CB(skb)->seq, start) ||
 (skb->next != tail &&

@@ -3616,7 +3616,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 start = TCP_SKB_CB(skb)->end_seq;
 skb = skb->next;
 }
-if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
 return;

 while (before(start, end)) {

@@ -3665,7 +3665,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 __kfree_skb(skb);
 NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
 skb = next;
-if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+if (skb == tail ||
+tcp_hdr(skb)->syn ||
+tcp_hdr(skb)->fin)
 return;
 }
 }

@@ -4072,7 +4074,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
 tcp_rcv_space_adjust(sk);

 if ((tp->ucopy.len == 0) ||
-(tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
+(tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
 tp->ucopy.wakeup = 1;
 sk->sk_data_ready(sk, 0);
@@ -127,8 +127,8 @@ static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
 {
 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
 ip_hdr(skb)->saddr,
-skb->h.th->dest,
-skb->h.th->source);
+tcp_hdr(skb)->dest,
+tcp_hdr(skb)->source);
 }

 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)

@@ -499,7 +499,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
 struct inet_sock *inet = inet_sk(sk);
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);

 if (skb->ip_summed == CHECKSUM_PARTIAL) {
 th->check = ~tcp_v4_check(len, inet->saddr,

@@ -522,7 +522,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 return -EINVAL;

 iph = ip_hdr(skb);
-th = skb->h.th;
+th = tcp_hdr(skb);

 th->check = 0;
 th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);

@@ -546,7 +546,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)

 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);
 struct {
 struct tcphdr th;
 #ifdef CONFIG_TCP_MD5SIG

@@ -622,7 +622,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
 struct sk_buff *skb, u32 seq, u32 ack,
 u32 win, u32 ts)
 {
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);
 struct {
 struct tcphdr th;
 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)

@@ -745,7 +745,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
 skb = tcp_make_synack(sk, dst, req);

 if (skb) {
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);

 th->check = tcp_v4_check(skb->len,
 ireq->loc_addr,

@@ -781,7 +781,7 @@ static void syn_flood_warning(struct sk_buff *skb)
 warntime = jiffies;
 printk(KERN_INFO
 "possible SYN flooding on port %d. Sending cookies.\n",
-ntohs(skb->h.th->dest));
+ntohs(tcp_hdr(skb)->dest));
 }
 }
 #endif

@@ -1134,7 +1134,7 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 __u8 *hash_location = NULL;
 struct tcp_md5sig_key *hash_expected;
 const struct iphdr *iph = ip_hdr(skb);
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);
 int length = (th->doff << 2) - sizeof(struct tcphdr);
 int genhash;
 unsigned char *ptr;

@@ -1327,7 +1327,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 ireq->rmt_addr = saddr;
 ireq->opt = tcp_v4_save_options(sk, skb);
 if (!want_cookie)
-TCP_ECN_create_request(req, skb->h.th);
+TCP_ECN_create_request(req, tcp_hdr(skb));

 if (want_cookie) {
 #ifdef CONFIG_SYN_COOKIES

@@ -1375,7 +1375,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
 "request from %u.%u.%u.%u/%u\n",
 NIPQUAD(saddr),
-ntohs(skb->h.th->source));
+ntohs(tcp_hdr(skb)->source));
 dst_release(dst);
 goto drop_and_free;
 }

@@ -1481,7 +1481,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,

 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);
 const struct iphdr *iph = ip_hdr(skb);
 struct sock *nsk;
 struct request_sock **prev;

@@ -1556,7 +1556,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)

 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 TCP_CHECK_TIMER(sk);
-if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 rsk = sk;
 goto reset;
 }

@@ -1582,7 +1582,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 }

 TCP_CHECK_TIMER(sk);
-if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
 rsk = sk;
 goto reset;
 }

@@ -1625,7 +1625,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 goto discard_it;

-th = skb->h.th;
+th = tcp_hdr(skb);

 if (th->doff < sizeof(struct tcphdr) / 4)
 goto bad_packet;

@@ -1640,7 +1640,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 tcp_v4_checksum_init(skb)))
 goto bad_packet;

-th = skb->h.th;
+th = tcp_hdr(skb);
 iph = ip_hdr(skb);
 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
@@ -453,7 +453,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
 newtp->window_clamp = min(newtp->window_clamp, 65535U);
 }
-newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
+newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
+newtp->rx_opt.snd_wscale);
 newtp->max_window = newtp->snd_wnd;

 if (newtp->rx_opt.tstamp_ok) {

@@ -488,7 +489,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 struct request_sock *req,
 struct request_sock **prev)
 {
-struct tcphdr *th = skb->h.th;
+const struct tcphdr *th = tcp_hdr(skb);
 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
 int paws_reject = 0;
 struct tcp_options_received tmp_opt;

@@ -710,8 +711,8 @@ int tcp_child_process(struct sock *parent, struct sock *child,
 int state = child->sk_state;

 if (!sock_owned_by_user(child)) {
-ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);
+ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
+skb->len);
 /* Wakeup parent, send SIGIO */
 if (state == TCP_SYN_RECV && child->sk_state != state)
 parent->sk_data_ready(parent, 0);
@@ -465,11 +465,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
 #endif

-th = (struct tcphdr *) skb_push(skb, tcp_header_size);
-skb->h.th = th;
+skb_push(skb, tcp_header_size);
+skb_reset_transport_header(skb);
 skb_set_owner_w(skb, sk);

 /* Build TCP header and checksum it. */
+th = tcp_hdr(skb);
 th->source = inet->sport;
 th->dest = inet->dport;
 th->seq = htonl(tcb->seq);

@@ -524,7 +525,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 tp->af_specific->calc_md5_hash(md5_hash_location,
 md5,
 sk, NULL, NULL,
-skb->h.th,
+tcp_hdr(skb),
 sk->sk_protocol,
 skb->len);
 }

@@ -2128,8 +2129,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 if (md5)
 tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
 #endif
-skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
+skb_push(skb, tcp_header_size);
+skb_reset_transport_header(skb);
+
+th = tcp_hdr(skb);
 memset(th, 0, sizeof(struct tcphdr));
 th->syn = 1;
 th->ack = 1;

@@ -2183,7 +2186,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 tp->af_specific->calc_md5_hash(md5_hash_location,
 md5,
 NULL, dst, req,
-skb->h.th, sk->sk_protocol,
+tcp_hdr(skb), sk->sk_protocol,
 skb->len);
 }
 #endif
@@ -117,8 +117,8 @@ static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
 {
 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 ipv6_hdr(skb)->saddr.s6_addr32,
-skb->h.th->dest,
-skb->h.th->source);
+tcp_hdr(skb)->dest,
+tcp_hdr(skb)->source);
 }

 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,

@@ -509,7 +509,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,

 skb = tcp_make_synack(sk, dst, req);
 if (skb) {
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);

 th->check = tcp_v6_check(th, skb->len,
 &treq->loc_addr, &treq->rmt_addr,

@@ -838,7 +838,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
 __u8 *hash_location = NULL;
 struct tcp_md5sig_key *hash_expected;
 struct ipv6hdr *ip6h = ipv6_hdr(skb);
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);
 int length = (th->doff << 2) - sizeof (*th);
 int genhash;
 u8 *ptr;

@@ -946,7 +946,7 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
 struct ipv6_pinfo *np = inet6_sk(sk);
-struct tcphdr *th = skb->h.th;
+struct tcphdr *th = tcp_hdr(skb);

 if (skb->ip_summed == CHECKSUM_PARTIAL) {
 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);

@@ -967,7 +967,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 return -EINVAL;

 ipv6h = ipv6_hdr(skb);
-th = skb->h.th;
+th = tcp_hdr(skb);

 th->check = 0;
 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,

@@ -979,7 +979,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)

 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-struct tcphdr *th = skb->h.th, *t1;
+struct tcphdr *th = tcp_hdr(skb), *t1;
 struct sk_buff *buff;
 struct flowi fl;
 int tot_len = sizeof(*th);

@@ -1079,7 +1079,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
 {
-struct tcphdr *th = skb->h.th, *t1;
+struct tcphdr *th = tcp_hdr(skb), *t1;
 struct sk_buff *buff;
 struct flowi fl;
 int tot_len = sizeof(struct tcphdr);

@@ -1195,7 +1195,7 @@ static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 {
 struct request_sock *req, **prev;
-const struct tcphdr *th = skb->h.th;
+const struct tcphdr *th = tcp_hdr(skb);
 struct sock *nsk;

 /* Find possible connection requests. */

@@ -1275,7 +1275,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 treq = inet6_rsk(req);
 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
-TCP_ECN_create_request(req, skb->h.th);
+TCP_ECN_create_request(req, tcp_hdr(skb));
 treq->pktopts = NULL;
 if (ipv6_opt_accepted(sk, skb) ||
 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||

@@ -1528,14 +1528,14 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
 if (skb->ip_summed == CHECKSUM_COMPLETE) {
-if (!tcp_v6_check(skb->h.th, skb->len, &ipv6_hdr(skb)->saddr,
+if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
 &ipv6_hdr(skb)->daddr, skb->csum)) {
 skb->ip_summed = CHECKSUM_UNNECESSARY;
 return 0;
 }
 }

-skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th, skb->len,
+skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
 &ipv6_hdr(skb)->saddr,
 &ipv6_hdr(skb)->daddr, 0));

@@ -1601,7 +1601,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)

 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 TCP_CHECK_TIMER(sk);
-if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
+if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 goto reset;
 TCP_CHECK_TIMER(sk);
 if (opt_skb)

@@ -1632,7 +1632,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 }

 TCP_CHECK_TIMER(sk);
-if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
+if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
 goto reset;
 TCP_CHECK_TIMER(sk);
 if (opt_skb)

@@ -1698,7 +1698,7 @@ static int tcp_v6_rcv(struct sk_buff **pskb)
 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 goto discard_it;

-th = skb->h.th;
+th = tcp_hdr(skb);

 if (th->doff < sizeof(struct tcphdr)/4)
 goto bad_packet;

@@ -1709,7 +1709,7 @@ static int tcp_v6_rcv(struct sk_buff **pskb)
 tcp_v6_checksum_init(skb)))
 goto bad_packet;

-th = skb->h.th;
+th = tcp_hdr(skb);
 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
 skb->len - th->doff*4);