[SCTP]: Handle address add/delete events in a more efficient way.
Currently, SCTP maintains its local address list by rebuilding the whole list from the device list whenever an address add/delete event is received. This patch fixes that by adding or deleting only the address for which the event was received. It also removes sctp_local_addr_lock, which is no longer needed now that list_for_each_safe() is used to traverse this list. This fixes the bugs in the sctp_copy_laddrs_xxx() routines, which did copy_to_user() while holding that lock.

Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6931ba7cef
commit 29c7cf9618
4 changed files with 81 additions and 66 deletions
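As a minimal illustration of the approach the commit message describes, the user-space sketch below keeps a simple address list and updates it one entry at a time on add/delete events, unlinking the matching entry safely mid-traversal. The names here (addr_event, addr_entry, local_addr_list, handle_addr_event) are hypothetical; this is not the kernel's SCTP code, which operates on struct sctp_sockaddr_entry as the hunks further down show.

/*
 * Minimal user-space sketch of the pattern above: react to each address
 * add/delete event individually instead of rebuilding the whole list.
 * All names are illustrative only -- this is not the kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct addr_entry {
        char addr[64];
        struct addr_entry *next;
};

static struct addr_entry *local_addr_list;      /* head of the address list */

enum addr_event { ADDR_UP, ADDR_DOWN };

/* Incremental update: one malloc on ADDR_UP, one unlink/free on ADDR_DOWN. */
static void handle_addr_event(enum addr_event ev, const char *addr)
{
        struct addr_entry **pp, *e;

        switch (ev) {
        case ADDR_UP:
                e = malloc(sizeof(*e));
                if (!e)
                        return;
                snprintf(e->addr, sizeof(e->addr), "%s", addr);
                e->next = local_addr_list;
                local_addr_list = e;
                break;
        case ADDR_DOWN:
                /* Walk with a pointer-to-pointer so the matching entry can be
                 * unlinked safely mid-traversal, in the same spirit as
                 * list_for_each_safe() in the kernel patch. */
                for (pp = &local_addr_list; (e = *pp) != NULL; pp = &e->next) {
                        if (strcmp(e->addr, addr) == 0) {
                                *pp = e->next;
                                free(e);
                                break;
                        }
                }
                break;
        }
}

int main(void)
{
        handle_addr_event(ADDR_UP, "192.0.2.1");
        handle_addr_event(ADDR_UP, "2001:db8::1");
        handle_addr_event(ADDR_DOWN, "192.0.2.1");

        for (struct addr_entry *e = local_addr_list; e; e = e->next)
                printf("%s\n", e->addr);
        return 0;
}

The kernel hunks below apply this same pattern to sctp_local_addr_list using list_add_tail() and list_for_each_safe().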
@@ -201,13 +201,12 @@ extern struct sctp_globals {
         struct sctp_bind_hashbucket *port_hashtable;
 
         /* This is the global local address list.
-         * We actively maintain this complete list of interfaces on
-         * the system by catching routing events.
+         * We actively maintain this complete list of addresses on
+         * the system by catching address add/delete events.
          *
          * It is a list of sctp_sockaddr_entry.
          */
         struct list_head local_addr_list;
-        spinlock_t local_addr_lock;
 
         /* Flag to indicate if addip is enabled. */
         int addip_enable;
@@ -243,7 +242,6 @@ extern struct sctp_globals {
 #define sctp_port_alloc_lock (sctp_globals.port_alloc_lock)
 #define sctp_port_hashtable (sctp_globals.port_hashtable)
 #define sctp_local_addr_list (sctp_globals.local_addr_list)
-#define sctp_local_addr_lock (sctp_globals.local_addr_lock)
 #define sctp_addip_enable (sctp_globals.addip_enable)
 #define sctp_prsctp_enable (sctp_globals.prsctp_enable)
 
@@ -78,8 +78,44 @@
 
 #include <asm/uaccess.h>
 
+/* Event handler for inet6 address addition/deletion events. */
+int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
+                         void *ptr)
+{
+        struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+        struct sctp_sockaddr_entry *addr;
+        struct list_head *pos, *temp;
+
+        switch (ev) {
+        case NETDEV_UP:
+                addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+                if (addr) {
+                        addr->a.v6.sin6_family = AF_INET6;
+                        addr->a.v6.sin6_port = 0;
+                        memcpy(&addr->a.v6.sin6_addr, &ifa->addr,
+                               sizeof(struct in6_addr));
+                        addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
+                        list_add_tail(&addr->list, &sctp_local_addr_list);
+                }
+                break;
+        case NETDEV_DOWN:
+                list_for_each_safe(pos, temp, &sctp_local_addr_list) {
+                        addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+                        if (ipv6_addr_equal(&addr->a.v6.sin6_addr, &ifa->addr)) {
+                                list_del(pos);
+                                kfree(addr);
+                                break;
+                        }
+                }
+
+                break;
+        }
+
+        return NOTIFY_DONE;
+}
+
 static struct notifier_block sctp_inet6addr_notifier = {
-        .notifier_call = sctp_inetaddr_event,
+        .notifier_call = sctp_inet6addr_event,
 };
 
 /* ICMP error handler. */
@@ -163,7 +163,7 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
 /* Extract our IP addresses from the system and stash them in the
  * protocol structure.
  */
-static void __sctp_get_local_addr_list(void)
+static void sctp_get_local_addr_list(void)
 {
         struct net_device *dev;
         struct list_head *pos;
@@ -179,17 +179,8 @@ static void __sctp_get_local_addr_list(void)
         read_unlock(&dev_base_lock);
 }
 
-static void sctp_get_local_addr_list(void)
-{
-        unsigned long flags;
-
-        sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
-        __sctp_get_local_addr_list();
-        sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
-}
-
 /* Free the existing local addresses. */
-static void __sctp_free_local_addr_list(void)
+static void sctp_free_local_addr_list(void)
 {
         struct sctp_sockaddr_entry *addr;
         struct list_head *pos, *temp;
@@ -201,27 +192,15 @@ static void __sctp_free_local_addr_list(void)
         }
 }
 
-/* Free the existing local addresses. */
-static void sctp_free_local_addr_list(void)
-{
-        unsigned long flags;
-
-        sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
-        __sctp_free_local_addr_list();
-        sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
-}
-
 /* Copy the local addresses which are valid for 'scope' into 'bp'. */
 int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
                               gfp_t gfp, int copy_flags)
 {
         struct sctp_sockaddr_entry *addr;
         int error = 0;
-        struct list_head *pos;
-        unsigned long flags;
+        struct list_head *pos, *temp;
 
-        sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
-        list_for_each(pos, &sctp_local_addr_list) {
+        list_for_each_safe(pos, temp, &sctp_local_addr_list) {
                 addr = list_entry(pos, struct sctp_sockaddr_entry, list);
                 if (sctp_in_scope(&addr->a, scope)) {
                         /* Now that the address is in scope, check to see if
@@ -242,7 +221,6 @@ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
         }
 
 end_copy:
-        sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
         return error;
 }
 
@@ -622,18 +600,36 @@ static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
         seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr));
 }
 
-/* Event handler for inet address addition/deletion events.
- * Basically, whenever there is an event, we re-build our local address list.
- */
+/* Event handler for inet address addition/deletion events. */
 int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
                         void *ptr)
 {
-        unsigned long flags;
+        struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+        struct sctp_sockaddr_entry *addr;
+        struct list_head *pos, *temp;
 
-        sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
-        __sctp_free_local_addr_list();
-        __sctp_get_local_addr_list();
-        sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
+        switch (ev) {
+        case NETDEV_UP:
+                addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+                if (addr) {
+                        addr->a.v4.sin_family = AF_INET;
+                        addr->a.v4.sin_port = 0;
+                        addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+                        list_add_tail(&addr->list, &sctp_local_addr_list);
+                }
+                break;
+        case NETDEV_DOWN:
+                list_for_each_safe(pos, temp, &sctp_local_addr_list) {
+                        addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+                        if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) {
+                                list_del(pos);
+                                kfree(addr);
+                                break;
+                        }
+                }
+
+                break;
+        }
 
         return NOTIFY_DONE;
 }
@@ -1172,13 +1168,12 @@ SCTP_STATIC __init int sctp_init(void)
 
         /* Initialize the local address list. */
         INIT_LIST_HEAD(&sctp_local_addr_list);
-        spin_lock_init(&sctp_local_addr_lock);
+
+        sctp_get_local_addr_list();
 
         /* Register notifier for inet address additions/deletions. */
         register_inetaddr_notifier(&sctp_inetaddr_notifier);
 
-        sctp_get_local_addr_list();
-
         __unsafe(THIS_MODULE);
         status = 0;
 out:
@@ -3821,10 +3821,9 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
         sctp_assoc_t id;
         struct sctp_bind_addr *bp;
         struct sctp_association *asoc;
-        struct list_head *pos;
+        struct list_head *pos, *temp;
         struct sctp_sockaddr_entry *addr;
         rwlock_t *addr_lock;
-        unsigned long flags;
         int cnt = 0;
 
         if (len != sizeof(sctp_assoc_t))
@@ -3859,8 +3858,7 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
         addr = list_entry(bp->address_list.next,
                           struct sctp_sockaddr_entry, list);
         if (sctp_is_any(&addr->a)) {
-                sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
-                list_for_each(pos, &sctp_local_addr_list) {
+                list_for_each_safe(pos, temp, &sctp_local_addr_list) {
                         addr = list_entry(pos,
                                           struct sctp_sockaddr_entry,
                                           list);
@@ -3869,8 +3867,6 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
                                 continue;
                         cnt++;
                 }
-                sctp_spin_unlock_irqrestore(&sctp_local_addr_lock,
-                                            flags);
         } else {
                 cnt = 1;
         }
@@ -3892,15 +3888,13 @@ static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
 static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_addrs,
                                         void __user *to)
 {
-        struct list_head *pos;
+        struct list_head *pos, *next;
         struct sctp_sockaddr_entry *addr;
-        unsigned long flags;
         union sctp_addr temp;
         int cnt = 0;
         int addrlen;
 
-        sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
-        list_for_each(pos, &sctp_local_addr_list) {
+        list_for_each_safe(pos, next, &sctp_local_addr_list) {
                 addr = list_entry(pos, struct sctp_sockaddr_entry, list);
                 if ((PF_INET == sk->sk_family) &&
                     (AF_INET6 == addr->a.sa.sa_family))
@@ -3909,16 +3903,13 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add
                 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
                                                                 &temp);
                 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
-                if (copy_to_user(to, &temp, addrlen)) {
-                        sctp_spin_unlock_irqrestore(&sctp_local_addr_lock,
-                                                    flags);
+                if (copy_to_user(to, &temp, addrlen))
                         return -EFAULT;
-                }
+
                 to += addrlen;
                 cnt ++;
                 if (cnt >= max_addrs) break;
         }
-        sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
 
         return cnt;
 }
@@ -3926,15 +3917,13 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add
 static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port,
                                     void __user **to, size_t space_left)
 {
-        struct list_head *pos;
+        struct list_head *pos, *next;
         struct sctp_sockaddr_entry *addr;
-        unsigned long flags;
         union sctp_addr temp;
         int cnt = 0;
         int addrlen;
 
-        sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
-        list_for_each(pos, &sctp_local_addr_list) {
+        list_for_each_safe(pos, next, &sctp_local_addr_list) {
                 addr = list_entry(pos, struct sctp_sockaddr_entry, list);
                 if ((PF_INET == sk->sk_family) &&
                     (AF_INET6 == addr->a.sa.sa_family))
@@ -3945,16 +3934,13 @@ static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port,
                 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
                 if(space_left<addrlen)
                         return -ENOMEM;
-                if (copy_to_user(*to, &temp, addrlen)) {
-                        sctp_spin_unlock_irqrestore(&sctp_local_addr_lock,
-                                                    flags);
+                if (copy_to_user(*to, &temp, addrlen))
                         return -EFAULT;
-                }
+
                 *to += addrlen;
                 cnt ++;
                 space_left -= addrlen;
         }
-        sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
 
         return cnt;
 }