bpf: Introduce bpf_prog ID
This patch generates a unique ID for each BPF_PROG_LOAD-ed prog.
It is worth noting that each BPF_PROG_LOAD-ed prog will have
a different ID even if they have the same bpf instructions.

The ID is generated by the existing idr_alloc_cyclic().
The ID ranges over [1, INT_MAX).  It is allocated in a cyclic manner,
so an ID will only get reused after 2 billion BPF_PROG_LOADs.
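
For reference, the allocation boils down to one idr_alloc_cyclic()
call under a spinlock.  A minimal sketch of the pattern (the
example_* names are illustrative only, not part of this patch):

  #include <linux/idr.h>
  #include <linux/spinlock.h>

  static DEFINE_IDR(example_idr);
  static DEFINE_SPINLOCK(example_lock);

  /* Hand out IDs in [1, INT_MAX).  The search resumes after the
   * last ID handed out and wraps back to 1, so a freed ID is not
   * reused until ~2 billion further allocations have happened.
   */
  static int example_alloc_id(void *ptr)
  {
          int id;

          spin_lock_bh(&example_lock);
          id = idr_alloc_cyclic(&example_idr, ptr, 1, INT_MAX,
                                GFP_ATOMIC);
          spin_unlock_bh(&example_lock);

          return id; /* new ID, or -ENOMEM/-ENOSPC on failure */
  }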

The bpf_prog_alloc_id() is done after bpf_prog_select_runtime()
because the JIT process may have allocated a new prog.  Hence,
we need to ensure that the value of the 'prog' pointer will not
change any more before storing the prog in the prog_idr.

After bpf_prog_select_runtime(), the prog is read-only.  Hence,
the id is stored in 'struct bpf_prog_aux'.
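
Condensed, the resulting order in bpf_prog_load() looks as follows
(a sketch of the hunk further below, with unrelated steps and the
error unwinding elided):

  /* The JIT may allocate and return a *different* prog ... */
  prog = bpf_prog_select_runtime(prog, &err);
  if (err < 0)
          goto free_used_maps;

  /* ... so only now is the pointer stable and safe to publish
   * in prog_idr, keyed by the newly allocated ID.
   */
  err = bpf_prog_alloc_id(prog);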

Signed-off-by: Martin KaFai Lau <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
iamkafai authored and davem330 committed Jun 6, 2017
1 parent 8ea4fae commit dc4bb0e
Showing 2 changed files with 40 additions and 1 deletion.
include/linux/bpf.h (1 addition, 0 deletions)
@@ -172,6 +172,7 @@ struct bpf_prog_aux {
 	u32 used_map_cnt;
 	u32 max_ctx_offset;
 	u32 stack_depth;
+	u32 id;
 	struct latch_tree_node ksym_tnode;
 	struct list_head ksym_lnode;
 	const struct bpf_verifier_ops *ops;
kernel/bpf/syscall.c (39 additions, 1 deletion)
@@ -22,8 +22,11 @@
 #include <linux/filter.h>
 #include <linux/version.h>
 #include <linux/kernel.h>
+#include <linux/idr.h>
 
 DEFINE_PER_CPU(int, bpf_prog_active);
+static DEFINE_IDR(prog_idr);
+static DEFINE_SPINLOCK(prog_idr_lock);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly;

@@ -650,6 +653,34 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
 	free_uid(user);
 }
 
+static int bpf_prog_alloc_id(struct bpf_prog *prog)
+{
+	int id;
+
+	spin_lock_bh(&prog_idr_lock);
+	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
+	if (id > 0)
+		prog->aux->id = id;
+	spin_unlock_bh(&prog_idr_lock);
+
+	/* id is in [1, INT_MAX) */
+	if (WARN_ON_ONCE(!id))
+		return -ENOSPC;
+
+	return id > 0 ? 0 : id;
+}
+
+static void bpf_prog_free_id(struct bpf_prog *prog)
+{
+	/* cBPF to eBPF migrations are currently not in the idr store. */
+	if (!prog->aux->id)
+		return;
+
+	spin_lock_bh(&prog_idr_lock);
+	idr_remove(&prog_idr, prog->aux->id);
+	spin_unlock_bh(&prog_idr_lock);
+}
+
 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 {
 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
@@ -663,6 +694,7 @@ void bpf_prog_put(struct bpf_prog *prog)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
 		trace_bpf_prog_put_rcu(prog);
+		bpf_prog_free_id(prog);
 		bpf_prog_kallsyms_del(prog);
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 	}
@@ -857,15 +889,21 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (err < 0)
 		goto free_used_maps;
 
+	err = bpf_prog_alloc_id(prog);
+	if (err)
+		goto free_used_maps;
+
 	err = bpf_prog_new_fd(prog);
 	if (err < 0)
 		/* failed to allocate fd */
-		goto free_used_maps;
+		goto free_id;
 
 	bpf_prog_kallsyms_add(prog);
 	trace_bpf_prog_load(prog, err);
 	return err;
 
+free_id:
+	bpf_prog_free_id(prog);
 free_used_maps:
 	free_used_maps(prog->aux);
 free_prog:
