tcp: fix MD5 (RFC2385) support
TCP MD5 support uses percpu data for temporary storage. It currently disables preemption so that the same storage cannot be reclaimed by another thread on the same cpu. We also have to make sure a softirq handler won't try to use the same context. Various bug reports demonstrated corruptions. The fix is to disable both preemption and BH. Reported-by: Bhaskar Dutta <bhaskie@gmail.com> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
d77f873fdd
commit
35790c0421
2 changed files with 27 additions and 28 deletions
|
@ -1197,30 +1197,15 @@ extern int tcp_v4_md5_do_del(struct sock *sk,
|
|||
extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
|
||||
extern void tcp_free_md5sig_pool(void);
|
||||
|
||||
extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
|
||||
extern void __tcp_put_md5sig_pool(void);
|
||||
extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
|
||||
extern void tcp_put_md5sig_pool(void);
|
||||
|
||||
extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
|
||||
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
|
||||
unsigned header_len);
|
||||
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
|
||||
struct tcp_md5sig_key *key);
|
||||
|
||||
static inline
|
||||
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
|
||||
{
|
||||
int cpu = get_cpu();
|
||||
struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
|
||||
if (!ret)
|
||||
put_cpu();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void tcp_put_md5sig_pool(void)
|
||||
{
|
||||
__tcp_put_md5sig_pool();
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
/* write queue abstraction */
|
||||
static inline void tcp_write_queue_purge(struct sock *sk)
|
||||
{
|
||||
|
|
|
@ -2839,7 +2839,6 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
|
|||
if (p->md5_desc.tfm)
|
||||
crypto_free_hash(p->md5_desc.tfm);
|
||||
kfree(p);
|
||||
p = NULL;
|
||||
}
|
||||
}
|
||||
free_percpu(pool);
|
||||
|
@ -2937,25 +2936,40 @@ struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
|
|||
|
||||
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
|
||||
|
||||
struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
|
||||
|
||||
/**
|
||||
* tcp_get_md5sig_pool - get md5sig_pool for this user
|
||||
*
|
||||
* We use percpu structure, so if we succeed, we exit with preemption
|
||||
* and BH disabled, to make sure another thread or softirq handling
|
||||
* won't try to get the same context.
|
||||
*/
|
||||
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
|
||||
{
|
||||
struct tcp_md5sig_pool * __percpu *p;
|
||||
spin_lock_bh(&tcp_md5sig_pool_lock);
|
||||
|
||||
local_bh_disable();
|
||||
|
||||
spin_lock(&tcp_md5sig_pool_lock);
|
||||
p = tcp_md5sig_pool;
|
||||
if (p)
|
||||
tcp_md5sig_users++;
|
||||
spin_unlock_bh(&tcp_md5sig_pool_lock);
|
||||
return (p ? *per_cpu_ptr(p, cpu) : NULL);
|
||||
spin_unlock(&tcp_md5sig_pool_lock);
|
||||
|
||||
if (p)
|
||||
return *per_cpu_ptr(p, smp_processor_id());
|
||||
|
||||
local_bh_enable();
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(tcp_get_md5sig_pool);
|
||||
|
||||
EXPORT_SYMBOL(__tcp_get_md5sig_pool);
|
||||
|
||||
void __tcp_put_md5sig_pool(void)
|
||||
void tcp_put_md5sig_pool(void)
|
||||
{
|
||||
local_bh_enable();
|
||||
tcp_free_md5sig_pool();
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(__tcp_put_md5sig_pool);
|
||||
EXPORT_SYMBOL(tcp_put_md5sig_pool);
|
||||
|
||||
int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
|
||||
struct tcphdr *th)
|
||||
|
|
Loading…
Reference in a new issue