mm, slub: do initial checks in ___slab_alloc() with irqs enabled
As another step of shortening irq disabled sections in ___slab_alloc(),
delay disabling irqs until we pass the initial checks of whether there is a
cached percpu slab and whether it is suitable for our allocation.

Now we have to recheck c->page after actually disabling irqs, as an
allocation in an irq handler might have replaced it.
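Condensed, and only for illustration (the actual change is in the diff
below), the resulting pattern in the ___slab_alloc() slow path is roughly:

reread_page:
	page = READ_ONCE(c->page);	/* read and check with irqs still enabled */

	/* ... initial checks: slab present, node match, pfmemalloc ... */

	local_irq_save(flags);
	if (unlikely(page != c->page)) {
		/* an allocation from an irq handler replaced the slab */
		local_irq_restore(flags);
		goto reread_page;
	}
	/* c->page is now stable under disabled irqs; c->freelist can be used */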

Because we call pfmemalloc_match() as one of the checks, we might hit
VM_BUG_ON_PAGE(!PageSlab(page)) in PageSlabPfmemalloc() if we get
interrupted and the page is freed.  Thus introduce a
pfmemalloc_match_unsafe() variant that lacks the PageSlab check.
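A minimal sketch of what such a variant could look like, assuming a
PageSlab-assertion-free flag test (named __PageSlabPfmemalloc() here; the
helper's actual name and definition are not part of the excerpt below):

/*
 * Variant of pfmemalloc_match() for opportunistic checks done with irqs
 * enabled, where the page might have been freed and reused under us and
 * thus may no longer be PageSlab.
 */
static inline bool pfmemalloc_match_unsafe(struct page *page, gfp_t gfpflags)
{
	if (unlikely(__PageSlabPfmemalloc(page)))
		return gfp_pfmemalloc_allowed(gfpflags);

	return true;
}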

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Vlastimil Babka <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Jann Horn <[email protected]>
Cc: Jesper Dangaard Brouer <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: Sebastian Andrzej Siewior <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Stephen Rothwell <[email protected]>
tehcaster authored and sfrothwell committed Aug 23, 2021
commit a4914a1 (parent 9a0b7f6)
 mm/slub.c | 41 ++++++++++++++++++++++++++++++++---------
 1 file changed, 32 insertions(+), 9 deletions(-)
@@ -2668,8 +2668,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	stat(s, ALLOC_SLOWPATH);
 
-	local_irq_save(flags);
-	page = c->page;
+reread_page:
+
+	page = READ_ONCE(c->page);
 	if (!page) {
 		/*
 		 * if the node is not online or has no normal memory, just
@@ -2678,6 +2679,11 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		if (unlikely(node != NUMA_NO_NODE &&
 			     !node_isset(node, slab_nodes)))
 			node = NUMA_NO_NODE;
+		local_irq_save(flags);
+		if (unlikely(c->page)) {
+			local_irq_restore(flags);
+			goto reread_page;
+		}
 		goto new_slab;
 	}
 redo:
@@ -2692,8 +2698,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			goto redo;
 		} else {
 			stat(s, ALLOC_NODE_MISMATCH);
-			deactivate_slab(s, page, c->freelist, c);
-			goto new_slab;
+			goto deactivate_slab;
 		}
 	}
 
@@ -2702,12 +2707,15 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 * PFMEMALLOC but right now, we are losing the pfmemalloc
 	 * information when the page leaves the per-cpu allocator
 	 */
-	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
-		deactivate_slab(s, page, c->freelist, c);
-		goto new_slab;
-	}
+	if (unlikely(!pfmemalloc_match(page, gfpflags)))
+		goto deactivate_slab;
 
-	/* must check again c->freelist in case of cpu migration or IRQ */
+	/* must check again c->page in case IRQ handler changed it */
+	local_irq_save(flags);
+	if (unlikely(page != c->page)) {
+		local_irq_restore(flags);
+		goto reread_page;
+	}
 	freelist = c->freelist;
 	if (freelist)
 		goto load_freelist;
@@ -2723,6 +2731,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
+
+	lockdep_assert_irqs_disabled();
+
 	/*
 	 * freelist is pointing to the list of objects to be used.
 	 * page is pointing to the page from which the objects are obtained.
@@ -2734,11 +2745,23 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	local_irq_restore(flags);
 	return freelist;
 
+deactivate_slab:
+
+	local_irq_save(flags);
+	if (page != c->page) {
+		local_irq_restore(flags);
+		goto reread_page;
+	}
+	deactivate_slab(s, page, c->freelist, c);
+
 new_slab:
 
+	lockdep_assert_irqs_disabled();
+
 	if (slub_percpu_partial(c)) {
 		page = c->page = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, page);
+		local_irq_restore(flags);
 		stat(s, CPU_PARTIAL_ALLOC);
 		goto redo;
 	}
