Merge branch 'bpf-fix-warning-in-check_obj_size'
Hou Tao says:

====================
bpf: Fix warning in check_obj_size()

From: Hou Tao <[email protected]>

Hi,

The patch set fixes the warning in check_obj_size() reported by lkp [1].
Patch #1 fixes the warning by selecting the target cache for a free
request through c->unit_size, so the unnecessary adjustment of size_index
and the check in check_obj_size() can be removed. Patch #2 fixes the test
failure in test_bpf_ma after applying patch #1.

Please see the individual patches for more details. Comments are always
welcome.

[1]: https://lore.kernel.org/bpf/[email protected]/
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
Alexei Starovoitov committed Dec 20, 2023
2 parents 32f2493 + 69ff403 commit 9299924
Showing 2 changed files with 60 additions and 145 deletions.
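
To make the core idea of patch #1 concrete before the hunks below: the new
bpf_mem_free()/bpf_mem_free_rcu() read the owning bpf_mem_cache pointer from
the hidden llist_node-sized header in front of each object and derive the
cache index from c->unit_size, instead of asking ksize() or pcpu_alloc_size()
for the object size. The following is a small, self-contained user-space
model of that lookup, an illustration only and not kernel code; model_cache,
model_alloc, model_free_target and HDR_SZ are names invented for the sketch.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct bpf_mem_cache: only the field the new free path
 * relies on, namely unit_size. */
struct model_cache {
	unsigned int unit_size;
};

/* Stand-in for LLIST_NODE_SZ: the hidden per-object header. */
#define HDR_SZ sizeof(void *)

/* Hand out an object whose hidden header records the owning cache,
 * mirroring how the allocator stores the bpf_mem_cache pointer in
 * front of each object it returns. */
static void *model_alloc(struct model_cache *c)
{
	void **hdr = malloc(HDR_SZ + c->unit_size);

	if (!hdr)
		return NULL;
	hdr[0] = c;
	return (char *)hdr + HDR_SZ;
}

/* The point of the fix: recover the owning cache from the header and use
 * its unit_size to pick the target cache, rather than calling ksize() on
 * the raw pointer. */
static struct model_cache *model_free_target(void *ptr)
{
	return *(struct model_cache **)((char *)ptr - HDR_SZ);
}

int main(void)
{
	struct model_cache cache64 = { .unit_size = 64 };
	void *obj = model_alloc(&cache64);

	if (!obj)
		return 1;
	printf("object frees back to the %u-byte cache\n",
	       model_free_target(obj)->unit_size);
	free((char *)obj - HDR_SZ);
	return 0;
}

Because the owning cache is recorded at allocation time, the free path no
longer depends on the slab allocator reporting exactly unit_size bytes, which
is why the cover letter says the size_index adjustment and the check in
check_obj_size() can be removed.
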
105 changes: 11 additions & 94 deletions kernel/bpf/memalloc.c
@@ -490,27 +490,6 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
}

static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
{
struct llist_node *first;
unsigned int obj_size;

first = c->free_llist.first;
if (!first)
return 0;

if (c->percpu_size)
obj_size = pcpu_alloc_size(((void **)first)[1]);
else
obj_size = ksize(first);
if (obj_size != c->unit_size) {
WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n",
idx, c->percpu_size, obj_size, c->unit_size);
return -EINVAL;
}
return 0;
}

/* When size != 0 bpf_mem_cache for each cpu.
* This is typical bpf hash map use case when all elements have equal size.
*
@@ -521,10 +500,10 @@ static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
int cpu, i, err, unit_size, percpu_size = 0;
struct bpf_mem_caches *cc, __percpu *pcc;
struct bpf_mem_cache *c, __percpu *pc;
struct obj_cgroup *objcg = NULL;
int cpu, i, unit_size, percpu_size = 0;

/* room for llist_node and per-cpu pointer */
if (percpu)
@@ -560,7 +539,6 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
if (!pcc)
return -ENOMEM;
err = 0;
#ifdef CONFIG_MEMCG_KMEM
objcg = get_obj_cgroup_from_current();
#endif
@@ -574,28 +552,12 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
c->tgt = c;

init_refill_work(c);
/* Another bpf_mem_cache will be used when allocating
* c->unit_size in bpf_mem_alloc(), so doesn't prefill
* for the bpf_mem_cache because these free objects will
* never be used.
*/
if (i != bpf_mem_cache_idx(c->unit_size))
continue;
prefill_mem_cache(c, cpu);
err = check_obj_size(c, i);
if (err)
goto out;
}
}

out:
ma->caches = pcc;
/* refill_work is either zeroed or initialized, so it is safe to
* call irq_work_sync().
*/
if (err)
bpf_mem_alloc_destroy(ma);
return err;
return 0;
}

static void drain_mem_cache(struct bpf_mem_cache *c)
@@ -869,7 +831,7 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
void *ret;

if (!size)
return ZERO_SIZE_PTR;
return NULL;

idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
if (idx < 0)
@@ -879,40 +841,33 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
return !ret ? NULL : ret + LLIST_NODE_SZ;
}

static notrace int bpf_mem_free_idx(void *ptr, bool percpu)
{
size_t size;

if (percpu)
size = pcpu_alloc_size(*((void **)ptr));
else
size = ksize(ptr - LLIST_NODE_SZ);
return bpf_mem_cache_idx(size);
}

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
struct bpf_mem_cache *c;
int idx;

if (!ptr)
return;

idx = bpf_mem_free_idx(ptr, ma->percpu);
if (idx < 0)
c = *(void **)(ptr - LLIST_NODE_SZ);
idx = bpf_mem_cache_idx(c->unit_size);
if (WARN_ON_ONCE(idx < 0))
return;

unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
struct bpf_mem_cache *c;
int idx;

if (!ptr)
return;

idx = bpf_mem_free_idx(ptr, ma->percpu);
if (idx < 0)
c = *(void **)(ptr - LLIST_NODE_SZ);
idx = bpf_mem_cache_idx(c->unit_size);
if (WARN_ON_ONCE(idx < 0))
return;

unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
@@ -986,41 +941,3 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)

return !ret ? NULL : ret + LLIST_NODE_SZ;
}

/* The alignment of dynamic per-cpu area is 8, so c->unit_size and the
* actual size of dynamic per-cpu area will always be matched and there is
* no need to adjust size_index for per-cpu allocation. However for the
* simplicity of the implementation, use an unified size_index for both
* kmalloc and per-cpu allocation.
*/
static __init int bpf_mem_cache_adjust_size(void)
{
unsigned int size;

/* Adjusting the indexes in size_index() according to the object_size
* of underlying slab cache, so bpf_mem_alloc() will select a
* bpf_mem_cache with unit_size equal to the object_size of
* the underlying slab cache.
*
* The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
* 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
*/
for (size = 192; size >= 8; size -= 8) {
unsigned int kmalloc_size, index;

kmalloc_size = kmalloc_size_roundup(size);
if (kmalloc_size == size)
continue;

if (kmalloc_size <= 192)
index = size_index[(kmalloc_size - 1) / 8];
else
index = fls(kmalloc_size - 1) - 1;
/* Only overwrite if necessary */
if (size_index[(size - 1) / 8] != index)
size_index[(size - 1) / 8] = index;
}

return 0;
}
subsys_initcall(bpf_mem_cache_adjust_size);
100 changes: 49 additions & 51 deletions tools/testing/selftests/bpf/progs/test_bpf_ma.c
@@ -17,7 +17,7 @@ struct generic_map_value {

char _license[] SEC("license") = "GPL";

const unsigned int data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};

int err = 0;
@@ -166,7 +166,7 @@ static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int
batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
} while (0)

DEFINE_ARRAY_WITH_KPTR(8);
/* kptr doesn't support bin_data_8 which is a zero-sized array */
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
Expand Down Expand Up @@ -198,21 +198,20 @@ int test_batch_alloc_free(void *ctx)
if ((u32)bpf_get_current_pid_tgid() != pid)
return 0;

/* Alloc 128 8-bytes objects in batch to trigger refilling,
* then free 128 8-bytes objects in batch to trigger freeing.
/* Alloc 128 16-bytes objects in batch to trigger refilling,
* then free 128 16-bytes objects in batch to trigger freeing.
*/
CALL_BATCH_ALLOC_FREE(8, 128, 0);
CALL_BATCH_ALLOC_FREE(16, 128, 1);
CALL_BATCH_ALLOC_FREE(32, 128, 2);
CALL_BATCH_ALLOC_FREE(64, 128, 3);
CALL_BATCH_ALLOC_FREE(96, 128, 4);
CALL_BATCH_ALLOC_FREE(128, 128, 5);
CALL_BATCH_ALLOC_FREE(192, 128, 6);
CALL_BATCH_ALLOC_FREE(256, 128, 7);
CALL_BATCH_ALLOC_FREE(512, 64, 8);
CALL_BATCH_ALLOC_FREE(1024, 32, 9);
CALL_BATCH_ALLOC_FREE(2048, 16, 10);
CALL_BATCH_ALLOC_FREE(4096, 8, 11);
CALL_BATCH_ALLOC_FREE(16, 128, 0);
CALL_BATCH_ALLOC_FREE(32, 128, 1);
CALL_BATCH_ALLOC_FREE(64, 128, 2);
CALL_BATCH_ALLOC_FREE(96, 128, 3);
CALL_BATCH_ALLOC_FREE(128, 128, 4);
CALL_BATCH_ALLOC_FREE(192, 128, 5);
CALL_BATCH_ALLOC_FREE(256, 128, 6);
CALL_BATCH_ALLOC_FREE(512, 64, 7);
CALL_BATCH_ALLOC_FREE(1024, 32, 8);
CALL_BATCH_ALLOC_FREE(2048, 16, 9);
CALL_BATCH_ALLOC_FREE(4096, 8, 10);

return 0;
}
@@ -223,21 +222,20 @@ int test_free_through_map_free(void *ctx)
if ((u32)bpf_get_current_pid_tgid() != pid)
return 0;

/* Alloc 128 8-bytes objects in batch to trigger refilling,
/* Alloc 128 16-bytes objects in batch to trigger refilling,
* then free these objects through map free.
*/
CALL_BATCH_ALLOC(8, 128, 0);
CALL_BATCH_ALLOC(16, 128, 1);
CALL_BATCH_ALLOC(32, 128, 2);
CALL_BATCH_ALLOC(64, 128, 3);
CALL_BATCH_ALLOC(96, 128, 4);
CALL_BATCH_ALLOC(128, 128, 5);
CALL_BATCH_ALLOC(192, 128, 6);
CALL_BATCH_ALLOC(256, 128, 7);
CALL_BATCH_ALLOC(512, 64, 8);
CALL_BATCH_ALLOC(1024, 32, 9);
CALL_BATCH_ALLOC(2048, 16, 10);
CALL_BATCH_ALLOC(4096, 8, 11);
CALL_BATCH_ALLOC(16, 128, 0);
CALL_BATCH_ALLOC(32, 128, 1);
CALL_BATCH_ALLOC(64, 128, 2);
CALL_BATCH_ALLOC(96, 128, 3);
CALL_BATCH_ALLOC(128, 128, 4);
CALL_BATCH_ALLOC(192, 128, 5);
CALL_BATCH_ALLOC(256, 128, 6);
CALL_BATCH_ALLOC(512, 64, 7);
CALL_BATCH_ALLOC(1024, 32, 8);
CALL_BATCH_ALLOC(2048, 16, 9);
CALL_BATCH_ALLOC(4096, 8, 10);

return 0;
}
Expand All @@ -251,17 +249,17 @@ int test_batch_percpu_alloc_free(void *ctx)
/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
* then free 128 16-bytes per-cpu objects in batch to trigger freeing.
*/
CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9);
CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10);
CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11);
CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 0);
CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 1);
CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 2);
CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 3);
CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 4);
CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 5);
CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 6);
CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 7);
CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 8);
CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 9);
CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 10);

return 0;
}
@@ -275,17 +273,17 @@ int test_percpu_free_through_map_free(void *ctx)
/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
* then free these object through map free.
*/
CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
CALL_BATCH_PERCPU_ALLOC(1024, 32, 9);
CALL_BATCH_PERCPU_ALLOC(2048, 16, 10);
CALL_BATCH_PERCPU_ALLOC(4096, 8, 11);
CALL_BATCH_PERCPU_ALLOC(16, 128, 0);
CALL_BATCH_PERCPU_ALLOC(32, 128, 1);
CALL_BATCH_PERCPU_ALLOC(64, 128, 2);
CALL_BATCH_PERCPU_ALLOC(96, 128, 3);
CALL_BATCH_PERCPU_ALLOC(128, 128, 4);
CALL_BATCH_PERCPU_ALLOC(192, 128, 5);
CALL_BATCH_PERCPU_ALLOC(256, 128, 6);
CALL_BATCH_PERCPU_ALLOC(512, 64, 7);
CALL_BATCH_PERCPU_ALLOC(1024, 32, 8);
CALL_BATCH_PERCPU_ALLOC(2048, 16, 9);
CALL_BATCH_PERCPU_ALLOC(4096, 8, 10);

return 0;
}

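For reference, the idx arguments renumbered in the selftest above track
positions in the updated data_sizes[] array once the 8-byte entry is gone
(size 16 pairs with idx 0, size 32 with idx 1, and so on). A stand-alone
illustration of that mapping, mirroring only the array from the hunk above:

#include <stdio.h>

/* Copy of the updated data_sizes[] from test_bpf_ma.c: after dropping the
 * 8-byte entry, idx N in the CALL_BATCH_*() macros refers to data_sizes[N]. */
static const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256,
					  512, 1024, 2048, 4096};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(data_sizes) / sizeof(data_sizes[0]); i++)
		printf("idx %u -> %u bytes\n", i, data_sizes[i]);
	return 0;
}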