net sched: use xps information for qdisc NUMA affinity
Allocate qdisc memory according to the NUMA properties of the cpus included in the xps map.

To be effective, the qdisc should be (re)set up after changes to /sys/class/net/eth<n>/queues/tx-<n>/xps_cpus.

I added a numa_node field to struct netdev_queue, containing the NUMA node if all cpus included in xps_cpus share the same node, else -1.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Ben Hutchings <bhutchings@solarflare.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0793f83f0e
commit f2cd2d3e9b

4 changed files with 36 additions and 5 deletions
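The node-selection rule applied when a new xps_cpus mask is written boils down to: if every CPU in the map sits on the same NUMA node, use that node, otherwise (or for an empty map) use -1. A minimal userspace sketch of that rule follows; xps_numa_node(), cpu_node[] and the flat bitmask are hypothetical stand-ins for cpu_to_node() and the kernel cpumask handling in store_xps_map(), an illustration only, not code from the patch.

#include <stdio.h>

/*
 * Sketch of the rule: remember the node of the first CPU in the map,
 * drop back to -1 as soon as a CPU on a different node is seen.
 */
static int xps_numa_node(const int *cpu_node, unsigned long mask, int ncpus)
{
	int node = -2;	/* -2: no CPU seen yet, mirroring store_xps_map() */
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (!(mask & (1UL << cpu)))
			continue;
		if (node == -2)
			node = cpu_node[cpu];	/* first CPU fixes the candidate node */
		else if (node != cpu_node[cpu])
			node = -1;		/* CPUs span several nodes */
	}
	return node >= 0 ? node : -1;		/* -1 also covers an empty mask */
}

int main(void)
{
	/* hypothetical topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1 */
	int cpu_node[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };

	printf("%d\n", xps_numa_node(cpu_node, 0x0f, 8));	/* CPUs 0-3 -> 0  */
	printf("%d\n", xps_numa_node(cpu_node, 0xf0, 8));	/* CPUs 4-7 -> 1  */
	printf("%d\n", xps_numa_node(cpu_node, 0x18, 8));	/* CPUs 3,4 -> -1 */
	return 0;
}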
include/linux/netdevice.h

@@ -508,7 +508,9 @@ struct netdev_queue {
 #ifdef CONFIG_RPS
 	struct kobject		kobj;
 #endif
-
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	int			numa_node;
+#endif
 /*
  * write mostly part
  */
@@ -523,6 +525,22 @@ struct netdev_queue {
 	u64			tx_dropped;
 } ____cacheline_aligned_in_smp;
 
+static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	return q->numa_node;
+#else
+	return -1;
+#endif
+}
+
+static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	q->numa_node = node;
+#endif
+}
+
 #ifdef CONFIG_RPS
 /*
  * This structure holds an RPS map which can be of variable length.  The
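These helpers let callers read and set the queue's NUMA node without sprinkling #ifdefs of their own: when CONFIG_XPS or CONFIG_NUMA is disabled, the read side simply returns -1 (no node preference) and the write side compiles away.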
net/core/dev.c

@@ -5125,9 +5125,10 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
 	}
 	dev->_tx = tx;
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
+		netdev_queue_numa_node_write(&tx[i], -1);
 		tx[i].dev = dev;
-
+	}
 	return 0;
 }
net/core/net-sysfs.c

@@ -913,6 +913,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 	struct xps_map *map, *new_map;
 	struct xps_dev_maps *dev_maps, *new_dev_maps;
 	int nonempty = 0;
+	int numa_node = -2;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -953,7 +954,14 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 		pos = map_len = alloc_len = 0;
 
 		need_set = cpu_isset(cpu, *mask) && cpu_online(cpu);
-
+#ifdef CONFIG_NUMA
+		if (need_set) {
+			if (numa_node == -2)
+				numa_node = cpu_to_node(cpu);
+			else if (numa_node != cpu_to_node(cpu))
+				numa_node = -1;
+		}
+#endif
 		if (need_set && pos >= map_len) {
 			/* Need to add queue to this CPU's map */
 			if (map_len >= alloc_len) {
@@ -1001,6 +1009,8 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 	if (dev_maps)
 		call_rcu(&dev_maps->rcu, xps_dev_maps_release);
 
+	netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node : -1);
+
 	mutex_unlock(&xps_map_mutex);
 
 	free_cpumask_var(mask);
net/sched/sch_generic.c

@@ -553,7 +553,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	size = QDISC_ALIGN(sizeof(*sch));
 	size += ops->priv_size + (QDISC_ALIGNTO - 1);
 
-	p = kzalloc(size, GFP_KERNEL);
+	p = kzalloc_node(size, GFP_KERNEL,
+			 netdev_queue_numa_node_read(dev_queue));
+
 	if (!p)
 		goto errout;
 	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
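Since qdisc_alloc() passes the value straight to kzalloc_node(), a numa_node of -1 (CPUs in xps_cpus spread across nodes, XPS/NUMA disabled, or no map written yet) means no node preference, so the allocation effectively behaves like the previous plain kzalloc() call. As the commit message notes, the affinity only applies to qdiscs allocated after xps_cpus has been configured; an existing qdisc has to be torn down and set up again for the new placement to take effect.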