bpf: Add generic support for update and delete batch ops
This commit adds generic support for update and delete batch ops that
can be used by almost all bpf maps. These commands share the same
UAPI attr that the lookup and lookup_and_delete batch ops use, and the
new syscall commands are:

  BPF_MAP_UPDATE_BATCH
  BPF_MAP_DELETE_BATCH

The main difference between the update/delete and the lookup batch ops
is that for update/delete the keys (and, for update, the values) must
be supplied by userspace; because of that, neither in_batch nor
out_batch is used.
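For illustration, a minimal userspace sketch of driving the two new commands through the raw bpf(2) syscall. map_batch_op() is a hypothetical helper, not part of this commit; it assumes UAPI headers that already carry these enum values and the batch attr layout introduced by the lookup-batch commit.

        /*
         * Userspace sketch (not part of this commit): issue one of the new
         * batch commands via the raw bpf(2) syscall.
         */
        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/bpf.h>

        static int map_batch_op(int cmd, int map_fd, void *keys, void *values,
                                __u32 *count, __u64 elem_flags)
        {
                union bpf_attr attr;
                int err;

                memset(&attr, 0, sizeof(attr));
                attr.batch.map_fd = map_fd;
                attr.batch.keys = (__u64)(unsigned long)keys;
                attr.batch.values = (__u64)(unsigned long)values; /* NULL for delete */
                attr.batch.count = *count;
                attr.batch.elem_flags = elem_flags;
                /* Unlike lookup batch, in_batch/out_batch stay zero. */

                err = syscall(__NR_bpf, cmd, &attr, sizeof(attr));
                *count = attr.batch.count; /* kernel writes back elements processed */
                return err;
        }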

Suggested-by: Stanislav Fomichev <[email protected]>
Signed-off-by: Brian Vazquez <[email protected]>
Signed-off-by: Yonghong Song <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
Brian Vazquez authored and Alexei Starovoitov committed Jan 15, 2020
1 parent cb4d03a commit aa2e93b
Showing 3 changed files with 127 additions and 0 deletions.
10 changes: 10 additions & 0 deletions include/linux/bpf.h
@@ -46,6 +46,10 @@ struct bpf_map_ops {
        void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
        int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
                                union bpf_attr __user *uattr);
        int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
                                union bpf_attr __user *uattr);
        int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
                                union bpf_attr __user *uattr);

        /* funcs callable from userspace and from eBPF programs */
        void *(*map_lookup_elem)(struct bpf_map *map, void *key);
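For context, a map type opts in by pointing the new callbacks at the generic implementations; follow-up commits do this per map type. An illustrative sketch (example_map_ops is hypothetical, other callbacks omitted):

        /* Illustrative only: wiring the generic handlers into a map type. */
        static const struct bpf_map_ops example_map_ops = {
                /* ... the map's existing callbacks ... */
                .map_lookup_batch = generic_map_lookup_batch,
                .map_update_batch = generic_map_update_batch,
                .map_delete_batch = generic_map_delete_batch,
        };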
@@ -987,6 +991,12 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr);

extern int sysctl_unprivileged_bpf_disabled;

2 changes: 2 additions & 0 deletions include/uapi/linux/bpf.h
@@ -108,6 +108,8 @@ enum bpf_cmd {
        BPF_MAP_FREEZE,
        BPF_BTF_GET_NEXT_ID,
        BPF_MAP_LOOKUP_BATCH,
        BPF_MAP_UPDATE_BATCH,
        BPF_MAP_DELETE_BATCH,
};

enum bpf_map_type {
115 changes: 115 additions & 0 deletions kernel/bpf/syscall.c
@@ -1218,6 +1218,111 @@ static int map_get_next_key(union bpf_attr *attr)
        return err;
}

int generic_map_delete_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
{
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        u32 cp, max_count;
        int err = 0;
        void *key;

        if (attr->batch.elem_flags & ~BPF_F_LOCK)
                return -EINVAL;

        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                return -EINVAL;
        }

        max_count = attr->batch.count;
        if (!max_count)
                return 0;

        for (cp = 0; cp < max_count; cp++) {
                key = __bpf_copy_key(keys + cp * map->key_size, map->key_size);
                if (IS_ERR(key)) {
                        err = PTR_ERR(key);
                        break;
                }

                if (bpf_map_is_dev_bound(map)) {
                        err = bpf_map_offload_delete_elem(map, key);
                        break;
                }

                preempt_disable();
                __this_cpu_inc(bpf_prog_active);
                rcu_read_lock();
                err = map->ops->map_delete_elem(map, key);
                rcu_read_unlock();
                __this_cpu_dec(bpf_prog_active);
                preempt_enable();
                maybe_wait_bpf_programs(map);
                if (err)
                        break;
        }
        if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
                err = -EFAULT;
        return err;
}
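Because the processed-element count is copied back to userspace even when the loop stops early, a caller can tell how far a batch got and resume. A hedged sketch using the hypothetical map_batch_op() helper from above (needs <errno.h>; keys is a caller-supplied char buffer holding total keys of key_size bytes each):

        /* Sketch (assumption, not from this commit): resume a partially
         * completed batched delete.
         */
        static int delete_all_keys(int map_fd, char *keys, __u32 total,
                                   __u32 key_size)
        {
                __u32 done = 0;

                while (done < total) {
                        __u32 cnt = total - done;
                        int err = map_batch_op(BPF_MAP_DELETE_BATCH, map_fd,
                                               keys + (size_t)done * key_size,
                                               NULL, &cnt, 0);

                        done += cnt;            /* cnt = elements actually deleted */
                        if (!err)
                                continue;
                        if (errno == ENOENT)    /* key already gone: skip it */
                                done++;
                        else
                                return -1;      /* hard error: stop */
                }
                return 0;
        }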

int generic_map_update_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr)
{
        void __user *values = u64_to_user_ptr(attr->batch.values);
        void __user *keys = u64_to_user_ptr(attr->batch.keys);
        u32 value_size, cp, max_count;
        int ufd = attr->map_fd;
        void *key, *value;
        struct fd f;
        int err = 0;

        f = fdget(ufd);
        if (attr->batch.elem_flags & ~BPF_F_LOCK)
                return -EINVAL;

        if ((attr->batch.elem_flags & BPF_F_LOCK) &&
            !map_value_has_spin_lock(map)) {
                return -EINVAL;
        }

        value_size = bpf_map_value_size(map);

        max_count = attr->batch.count;
        if (!max_count)
                return 0;

        value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                return -ENOMEM;

        for (cp = 0; cp < max_count; cp++) {
                key = __bpf_copy_key(keys + cp * map->key_size, map->key_size);
                if (IS_ERR(key)) {
                        err = PTR_ERR(key);
                        break;
                }
                err = -EFAULT;
                if (copy_from_user(value, values + cp * value_size, value_size))
                        break;

                err = bpf_map_update_value(map, f, key, value,
                                           attr->batch.elem_flags);

                if (err)
                        break;
        }

        if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
                err = -EFAULT;

        kfree(value);
        kfree(key);
        return err;
}
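A usage sketch, again with the hypothetical map_batch_op() helper from the commit message above (map_fd, keys, values and n are assumed to be set up by the caller):

        /* Sketch: one batched update of n elements. BPF_F_LOCK is only
         * legal when the map value embeds a struct bpf_spin_lock, which
         * the kernel code above checks.
         */
        __u32 cnt = n;
        int err = map_batch_op(BPF_MAP_UPDATE_BATCH, map_fd, keys, values,
                               &cnt, BPF_F_LOCK);
        /* On error, cnt holds how many elements were updated before the stop. */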

#define MAP_LOOKUP_RETRIES 3

int generic_map_lookup_batch(struct bpf_map *map,
@@ -3219,6 +3324,10 @@ static int bpf_map_do_batch(const union bpf_attr *attr,

        if (cmd == BPF_MAP_LOOKUP_BATCH)
                BPF_DO_BATCH(map->ops->map_lookup_batch);
        else if (cmd == BPF_MAP_UPDATE_BATCH)
                BPF_DO_BATCH(map->ops->map_update_batch);
        else
                BPF_DO_BATCH(map->ops->map_delete_batch);

err_put:
        fdput(f);
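For context, BPF_DO_BATCH comes from the parent lookup-batch commit (cb4d03a) and looks roughly like the following; this is why maps that leave the new callbacks unset return -ENOTSUPP instead of dereferencing a NULL pointer:

        /* From the parent commit (shown for context): dispatch through the
         * per-map callback, failing gracefully when a map does not
         * implement the batch op.
         */
        #define BPF_DO_BATCH(fn)                        \
                do {                                    \
                        if (!fn) {                      \
                                err = -ENOTSUPP;        \
                                break;                  \
                        }                               \
                        err = fn(map, attr, uattr);     \
                } while (0)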
@@ -3325,6 +3434,12 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
        case BPF_MAP_LOOKUP_BATCH:
                err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
                break;
        case BPF_MAP_UPDATE_BATCH:
                err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
                break;
        case BPF_MAP_DELETE_BATCH:
                err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
                break;
        default:
                err = -EINVAL;
                break;