[RTNETLINK]: Remove unnecessary locking in dump callbacks

Since we're now holding the rtnl during the entire dump operation, we can
remove additional locking for rtnl-protected data. This patch does that
for all simple cases (dev_base_lock for dev_base walking, RCU protection
for FIB rule dumping).

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Patrick McHardy, 2007-04-16 17:00:53 -07:00; committed by David S. Miller
commit 6313c1e099 (parent 1c2d670f36)
6 changed files with 3 additions and 22 deletions
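
The change repeated in every hunk below is mechanical: netlink dump callbacks now run with the rtnl held by the caller, so walking an rtnl-protected list such as dev_base needs no extra dev_base_lock read lock. A minimal sketch of the resulting shape of such a callback, assuming the netlink dump machinery takes the rtnl around the call (dump_example() and fill_one() are hypothetical names, not functions from this patch):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical dump callback: the dev_base walk relies only on the rtnl,
 * which the netlink dump code is assumed to hold for the whole call. */
static int dump_example(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	int idx;

	ASSERT_RTNL();	/* caller holds the rtnl; no dev_base_lock needed */

	for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
		if (idx < cb->args[0])
			continue;
		if (fill_one(skb, dev, cb) <= 0)	/* hypothetical fill helper */
			break;
	}

	cb->args[0] = idx;
	return skb->len;
}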

@@ -109,7 +109,6 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	struct net_device *dev;
 	int idx;
-	read_lock(&dev_base_lock);
 	for (dev = dev_base, idx = 0; dev; dev = dev->next) {
 		/* not a bridge port */
 		if (dev->br_port == NULL || idx < cb->args[0])
@@ -122,7 +121,6 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 skip:
 		++idx;
 	}
-	read_unlock(&dev_base_lock);
 	cb->args[0] = idx;

@@ -495,8 +495,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
 	int idx = 0;
 	struct fib_rule *rule;
-	rcu_read_lock();
-	list_for_each_entry_rcu(rule, ops->rules_list, list) {
+	list_for_each_entry(rule, ops->rules_list, list) {
 		if (idx < cb->args[1])
 			goto skip;
@@ -507,7 +506,6 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
 skip:
 		idx++;
 	}
-	rcu_read_unlock();
 	cb->args[1] = idx;
 	rules_ops_put(ops);
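
The FIB rules hunk above covers the second case named in the commit message: rules are only added or removed by rtnetlink handlers, which run under the rtnl, so a dump that itself holds the rtnl sees a stable list and needs neither rcu_read_lock() nor the _rcu list iterator. A rough sketch of that invariant (walk_rules() is a hypothetical helper, not part of the patch):

#include <linux/rtnetlink.h>
#include <net/fib_rules.h>

/* Hypothetical: with the rtnl held there is no concurrent writer to the
 * rules list, so the plain list iterator is sufficient. */
static void walk_rules(struct list_head *rules)
{
	struct fib_rule *rule;

	ASSERT_RTNL();
	list_for_each_entry(rule, rules, list)
		pr_debug("fib rule pref %u\n", rule->pref);
}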

@@ -543,7 +543,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	int s_idx = cb->args[0];
 	struct net_device *dev;
-	read_lock(&dev_base_lock);
 	for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
 		if (idx < s_idx)
 			continue;
@@ -552,7 +551,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 			    cb->nlh->nlmsg_seq, 0, NLM_F_MULTI) <= 0)
 			break;
 	}
-	read_unlock(&dev_base_lock);
 	cb->args[0] = idx;
 	return skb->len;

@@ -799,7 +799,6 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 	skip_ndevs = cb->args[0];
 	skip_naddr = cb->args[1];
-	read_lock(&dev_base_lock);
 	for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
 		if (idx < skip_ndevs)
 			continue;
@@ -824,8 +823,6 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 		}
 	}
 done:
-	read_unlock(&dev_base_lock);
 	cb->args[0] = idx;
 	cb->args[1] = dn_idx;

@@ -1182,17 +1182,13 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 	int s_ip_idx, s_idx = cb->args[0];
 	s_ip_idx = ip_idx = cb->args[1];
-	read_lock(&dev_base_lock);
 	for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
 		if (idx < s_idx)
 			continue;
 		if (idx > s_idx)
 			s_ip_idx = 0;
-		rcu_read_lock();
-		if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
-			rcu_read_unlock();
+		if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
 			continue;
-		}
 		for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
 		     ifa = ifa->ifa_next, ip_idx++) {
@@ -1200,16 +1196,12 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 				continue;
 			if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
 					     cb->nlh->nlmsg_seq,
-					     RTM_NEWADDR, NLM_F_MULTI) <= 0) {
-				rcu_read_unlock();
+					     RTM_NEWADDR, NLM_F_MULTI) <= 0)
 				goto done;
-			}
 		}
-		rcu_read_unlock();
 	}
 done:
-	read_unlock(&dev_base_lock);
 	cb->args[0] = idx;
 	cb->args[1] = ip_idx;
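
The IPv4 address dump above goes one step further: rather than opening an RCU read-side section for __in_dev_get_rcu(), it switches to the rtnl-protected accessor __in_dev_get_rtnl(), which is safe here precisely because the dump runs under the rtnl. A small sketch of that usage (dev_has_ipv4_addr() is a hypothetical helper):

#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical: under the rtnl the device's IPv4 configuration may be read
 * through the rtnl-protected accessor, with no rcu_read_lock() required. */
static int dev_has_ipv4_addr(struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();
	in_dev = __in_dev_get_rtnl(dev);
	return in_dev != NULL && in_dev->ifa_list != NULL;
}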

@@ -3224,7 +3224,6 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 	s_idx = cb->args[0];
 	s_ip_idx = ip_idx = cb->args[1];
-	read_lock(&dev_base_lock);
 	for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
 		if (idx < s_idx)
@@ -3286,7 +3285,6 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
 		read_unlock_bh(&idev->lock);
 		in6_dev_put(idev);
 	}
-	read_unlock(&dev_base_lock);
 	cb->args[0] = idx;
 	cb->args[1] = ip_idx;
 	return skb->len;