/*
 * include/linux/topology.h
 *
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _LINUX_TOPOLOGY_H
#define _LINUX_TOPOLOGY_H

#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/topology.h>

#ifndef nr_cpus_node
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif

#define for_each_node_with_cpus(node)                   \
        for_each_online_node(node)                      \
                if (nr_cpus_node(node))
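
/*
 * Example (illustrative only): count the online nodes that have at least one
 * CPU.  "node" and "nodes_with_cpus" are hypothetical locals.
 *
 *        int node, nodes_with_cpus = 0;
 *
 *        for_each_node_with_cpus(node)
 *                nodes_with_cpus++;
 */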

int arch_update_cpu_topology(void);

/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE 10
#define REMOTE_DISTANCE 20
#ifndef node_distance
#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
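
/*
 * With the default node_distance() above, node_distance(x, x) evaluates to
 * LOCAL_DISTANCE (10) and node_distance(x, y) for x != y to REMOTE_DISTANCE
 * (20); architectures that override node_distance() typically return the
 * firmware (SLIT) values instead.
 */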
#ifndef RECLAIM_DISTANCE
/*
 * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
 * (in whatever arch specific measurement units returned by node_distance())
 * and node_reclaim_mode is enabled then the VM will only call node_reclaim()
 * on nodes within this distance.
 */
#define RECLAIM_DISTANCE 30
#endif
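
/*
 * The default of 30 (rather than the older 20) is meant to keep ordinary
 * one-hop QPI/HyperTransport 2-4 socket machines, whose firmware may report
 * remote distances such as 21, from having node reclaim switched on by
 * default; ia64 and powerpc carry their own arch-specific RECLAIM_DISTANCE
 * definitions.
 */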

#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS (1)
#endif

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DECLARE_PER_CPU(int, numa_node);

#ifndef numa_node_id
/* Returns the number of the current Node. */
static inline int numa_node_id(void)
{
        return raw_cpu_read(numa_node);
}
#endif

#ifndef cpu_to_node
static inline int cpu_to_node(int cpu)
{
        return per_cpu(numa_node, cpu);
}
#endif

#ifndef set_numa_node
static inline void set_numa_node(int node)
{
        this_cpu_write(numa_node, node);
}
#endif

#ifndef set_cpu_numa_node
static inline void set_cpu_numa_node(int cpu, int node)
{
        per_cpu(numa_node, cpu) = node;
}
#endif

#else /* !CONFIG_USE_PERCPU_NUMA_NODE_ID */

/* Returns the number of the current Node. */
#ifndef numa_node_id
static inline int numa_node_id(void)
{
        return cpu_to_node(raw_smp_processor_id());
}
#endif

#endif /* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
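
/*
 * Example (illustrative only): architecture setup code records each CPU's
 * node once, after which generic code can ask for node numbers cheaply.
 * "cpu" and "nid" are hypothetical variables.
 *
 *        set_cpu_numa_node(cpu, nid);      // e.g. from arch boot code
 *        ...
 *        int node  = numa_node_id();       // node of the executing CPU
 *        int other = cpu_to_node(cpu);     // node of an arbitrary CPU
 */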

#ifdef CONFIG_HAVE_MEMORYLESS_NODES

/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
 */
DECLARE_PER_CPU(int, _numa_mem_);
extern int _node_numa_mem_[MAX_NUMNODES];

#ifndef set_numa_mem
static inline void set_numa_mem(int node)
{
        this_cpu_write(_numa_mem_, node);
        _node_numa_mem_[numa_node_id()] = node;
}
#endif

#ifndef node_to_mem_node
static inline int node_to_mem_node(int node)
{
        return _node_numa_mem_[node];
}
#endif

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
        return raw_cpu_read(_numa_mem_);
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
        return per_cpu(_numa_mem_, cpu);
}
#endif

#ifndef set_cpu_numa_mem
static inline void set_cpu_numa_mem(int cpu, int node)
{
        per_cpu(_numa_mem_, cpu) = node;
        _node_numa_mem_[cpu_to_node(cpu)] = node;
}
#endif

#else /* !CONFIG_HAVE_MEMORYLESS_NODES */

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
        return numa_node_id();
}
#endif

#ifndef node_to_mem_node
static inline int node_to_mem_node(int node)
{
        return node;
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
        return cpu_to_node(cpu);
}
#endif

#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */
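
/*
 * Example (illustrative only): allocate from the nearest node that actually
 * has memory, which matters on configurations with memoryless nodes.
 *
 *        void *p = kmalloc_node(size, GFP_KERNEL, numa_mem_id());
 *
 * On !CONFIG_HAVE_MEMORYLESS_NODES kernels numa_mem_id() is simply
 * numa_node_id().
 */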

#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu) ((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
#ifndef topology_sibling_cpumask
#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_possible_sibling_cpumask
#define topology_possible_sibling_cpumask(cpu) cpumask_of(cpu)
#endif

#ifdef CONFIG_SCHED_SMT
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
        return topology_sibling_cpumask(cpu);
}
#endif

static inline const struct cpumask *cpu_cpu_mask(int cpu)
{
        return cpumask_of_node(cpu_to_node(cpu));
}
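
/*
 * Example (illustrative only): walk the hardware threads sharing a core with
 * "cpu"; "sibling" is a hypothetical iterator and do_something() a
 * hypothetical helper.
 *
 *        int sibling;
 *
 *        for_each_cpu(sibling, topology_sibling_cpumask(cpu))
 *                do_something(sibling);
 *
 * cpu_cpu_mask(cpu) similarly yields every CPU in the same NUMA node as "cpu".
 */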
#endif /* _LINUX_TOPOLOGY_H */