[PATCH] mm: optimize numa policy handling in slab allocator
Move the interrupt check from slab_node() into ___cache_alloc() and add an
unlikely() annotation to avoid pipeline stalls on some architectures.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 71430d4..7379018 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -982,9 +982,6 @@
  */
 unsigned slab_node(struct mempolicy *policy)
 {
-	if (in_interrupt())
-		return numa_node_id();
-
 	switch (policy->policy) {
 	case MPOL_INTERLEAVE:
 		return interleave_nodes(policy);
diff --git a/mm/slab.c b/mm/slab.c
index 9025608..6f8495e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2574,7 +2574,7 @@
 	struct array_cache *ac;
 #ifdef CONFIG_NUMA
-	if (current->mempolicy) {
+	if (unlikely(current->mempolicy && !in_interrupt())) {
 		int nid = slab_node(current->mempolicy);
 		if (nid != numa_node_id())