netfilter: nf_nat: don't check if the tuple is unique when there isn't any other choice
The tuple obtained from unique_tuple() does not have to be truly unique, so when there is no other candidate left, checking the tuple for uniqueness is unnecessary. Eliminating the needless nf_nat_used_tuple() call also saves some CPU cycles.

Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
parent f43dc98b3b
commit 2452a99dc0
3 changed files with 6 additions and 6 deletions
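All three hunks below make the same change to the candidate-search loop in nf_nat_proto_unique_tuple(), gre_unique_tuple() and icmp_unique_tuple(): instead of bounding the loop by range_size and probing every candidate with nf_nat_used_tuple(), the loop now accepts the final candidate unconditionally, since at that point there is no other choice. A minimal userspace sketch of the new loop shape, with invented stand-ins (pick_candidate(), candidate_in_use()) in place of the kernel helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for nf_nat_used_tuple(): pretend every third value is taken. */
static bool candidate_in_use(uint16_t value)
{
	return value % 3 == 0;
}

/*
 * Walk the range [min, min + range_size) starting at offset 'off' and return
 * the first free value.  The last candidate is returned without probing it,
 * mirroring the new "++i == range_size || !nf_nat_used_tuple()" test.
 */
static uint16_t pick_candidate(uint16_t min, uint16_t range_size, uint16_t off)
{
	unsigned int i;

	for (i = 0; ; ++off) {
		uint16_t value = min + off % range_size;

		if (++i == range_size || !candidate_in_use(value))
			return value;
	}
}

int main(void)
{
	/* 1023 is "in use" in this toy model, so the sketch settles on 1024. */
	printf("picked %u\n", pick_candidate(1023, 8, 0));
	return 0;
}

The hunks only differ in where the chosen value is written back (*portptr, *keyptr or tuple->src.u.icmp.id) and, for the common helper, whether the rover is updated afterwards.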
@@ -81,9 +81,9 @@ void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
 	else
 		off = *rover;
 
-	for (i = 0; i < range_size; i++, off++) {
+	for (i = 0; ; ++off) {
 		*portptr = htons(min + off % range_size);
-		if (nf_nat_used_tuple(tuple, ct))
+		if (++i != range_size && nf_nat_used_tuple(tuple, ct))
 			continue;
 		if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
 			*rover = off;
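This hunk keeps the continue-based control flow, so the new condition is written as a skip test rather than an accept test; it is the logical complement (by De Morgan) of the "++i == range_size || !nf_nat_used_tuple()" form used in the GRE and ICMP hunks below. Because ++i is the left operand, the counter still advances on the last iteration while the nf_nat_used_tuple() call is short-circuited away. A tiny self-check of the equivalence, using hypothetical helper names:

#include <assert.h>
#include <stdbool.h>

/* Hypothetical helpers, not kernel code: the skip test used with continue
 * above, and the accept test used in the GRE/ICMP allocators below. */
static bool skip_candidate(bool last, bool in_use)
{
	return !last && in_use;		/* ++i != range_size && nf_nat_used_tuple() */
}

static bool accept_candidate(bool last, bool in_use)
{
	return last || !in_use;		/* ++i == range_size || !nf_nat_used_tuple() */
}

int main(void)
{
	for (int last = 0; last <= 1; last++)
		for (int in_use = 0; in_use <= 1; in_use++)
			assert(skip_candidate(last, in_use) ==
			       !accept_candidate(last, in_use));
	return 0;
}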
@@ -68,9 +68,9 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
 
 	pr_debug("min = %u, range_size = %u\n", min, range_size);
 
-	for (i = 0; i < range_size; i++, key++) {
+	for (i = 0; ; ++key) {
 		*keyptr = htons(min + key % range_size);
-		if (!nf_nat_used_tuple(tuple, ct))
+		if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
 			return;
 	}
 
@@ -42,10 +42,10 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
 	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
 		range_size = 0xFFFF;
 
-	for (i = 0; i < range_size; i++, id++) {
+	for (i = 0; ; ++id) {
 		tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
 					     (id % range_size));
-		if (!nf_nat_used_tuple(tuple, ct))
+		if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
 			return;
 	}
 	return;
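The cycle saving mentioned in the changelog is the nf_nat_used_tuple() probe on the final candidate, which the old loops always issued even though its answer could not change the outcome. A rough userspace comparison (assumed behaviour, invented in_use() counter, not a kernel benchmark) showing that a fully occupied range of 100 candidates is probed 99 times by the new loop instead of 100:

#include <stdbool.h>
#include <stdio.h>

static unsigned long probes;

/* Hypothetical stand-in for nf_nat_used_tuple(): every candidate is taken. */
static bool in_use(unsigned int value)
{
	(void)value;
	probes++;
	return true;
}

/* Old shape: probe every candidate, fall out of the loop with the last one. */
static unsigned int old_style(unsigned int range_size)
{
	unsigned int i, off, last = 0;

	for (i = 0, off = 0; i < range_size; i++, off++) {
		last = off % range_size;
		if (in_use(last))
			continue;
		break;
	}
	return last;
}

/* New shape: same loop as the sketch above, no probe for the last candidate. */
static unsigned int new_style(unsigned int range_size)
{
	unsigned int i, off = 0;

	for (i = 0; ; ++off) {
		unsigned int value = off % range_size;

		if (++i == range_size || !in_use(value))
			return value;
	}
}

int main(void)
{
	probes = 0;
	old_style(100);
	printf("old loop: %lu probes\n", probes);	/* 100 */

	probes = 0;
	new_style(100);
	printf("new loop: %lu probes\n", probes);	/* 99 */
	return 0;
}

The fully occupied range is exactly the "no other choice" case the changelog describes: both loops end up returning an in-use candidate, but only the old one paid for a probe whose answer it was going to ignore.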