bpf: add a bpf_override_function helper
Error injection is sloppy and very ad-hoc.  BPF could fill this niche
perfectly with its kprobe functionality.  We could make sure errors are
only triggered in the specific call chains and situations that we care
about.  Accomplish this with the bpf_override_function helper.  It
modifies the probed function's return value to the specified value and
sets the PC to an override function that simply returns, bypassing the
originally probed function.  This gives us a nice, clean way to
implement systematic error injection for all of our code paths.

Acked-by: Alexei Starovoitov <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Signed-off-by: Josef Bacik <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
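
For context, a program using the new helper looks roughly like this.  This is an illustrative sketch in the samples/bpf style, not part of this commit: my_injectable_func is a made-up probe target (it must be on the error-injection opt-in list checked below), and the helper binding follows the samples idiom.

#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>

#define SEC(NAME) __attribute__((section(NAME), used))

/* Bind the new helper id to a callable (samples/bpf idiom). */
static int (*bpf_override_return)(struct pt_regs *regs, unsigned long rc) =
	(void *) BPF_FUNC_override_return;

/* Force my_injectable_func() to fail with -ENOMEM whenever the probe fires. */
SEC("kprobe/my_injectable_func")
int override_return_prog(struct pt_regs *ctx)
{
	unsigned long rc = -12; /* -ENOMEM */

	bpf_override_return(ctx, rc);
	return 0;
}

/* The helper is gpl_only, so the program must carry a GPL license. */
char _license[] SEC("license") = "GPL";
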
Josef Bacik authored and Alexei Starovoitov committed Dec 12, 2017
1 parent 8556e50 commit 9802d86
Showing 15 changed files with 154 additions and 9 deletions.
3 changes: 3 additions & 0 deletions arch/Kconfig
@@ -196,6 +196,9 @@ config HAVE_OPTPROBES
config HAVE_KPROBES_ON_FTRACE
bool

config HAVE_KPROBE_OVERRIDE
bool

config HAVE_NMI
bool

1 change: 1 addition & 0 deletions arch/x86/Kconfig
@@ -154,6 +154,7 @@ config X86
select HAVE_KERNEL_XZ
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KPROBE_OVERRIDE
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH if X86_64
4 changes: 4 additions & 0 deletions arch/x86/include/asm/kprobes.h
@@ -67,6 +67,10 @@ extern const int kretprobe_blacklist_size;
void arch_remove_kprobe(struct kprobe *p);
asmlinkage void kretprobe_trampoline(void);

#ifdef CONFIG_KPROBES_ON_FTRACE
extern void arch_ftrace_kprobe_override_function(struct pt_regs *regs);
#endif

/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
/* copy of the original instruction */
5 changes: 5 additions & 0 deletions arch/x86/include/asm/ptrace.h
@@ -109,6 +109,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->ax;
}

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
regs->ax = rc;
}

/*
* user_mode(regs) determines whether a register set came from user
* mode. On x86_32, this is true if V8086 mode was enabled OR if the
14 changes: 14 additions & 0 deletions arch/x86/kernel/kprobes/ftrace.c
@@ -97,3 +97,17 @@ int arch_prepare_kprobe_ftrace(struct kprobe *p)
p->ainsn.boostable = false;
return 0;
}

asmlinkage void override_func(void);
asm(
".type override_func, @function\n"
"override_func:\n"
" ret\n"
".size override_func, .-override_func\n"
);

void arch_ftrace_kprobe_override_function(struct pt_regs *regs)
{
regs->ip = (unsigned long)&override_func;
}
NOKPROBE_SYMBOL(arch_ftrace_kprobe_override_function);
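
Taken together, the two arch pieces above are all an override needs on x86: write the error code into the return-value register, then point the saved IP at the empty stub.  A minimal sketch of that composition outside of BPF (a hypothetical kprobe pre-handler, assuming a CONFIG_KPROBES_ON_FTRACE kernel; not part of this commit):

#include <linux/errno.h>
#include <linux/kprobes.h>

/* Make the probed function return -EINVAL without running its body. */
static int override_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	regs_set_return_value(regs, -EINVAL);       /* regs->ax = -EINVAL */
	arch_ftrace_kprobe_override_function(regs); /* regs->ip = override_func */
	reset_current_kprobe();                     /* we changed regs->ip... */
	return 1;  /* ...so tell kprobes to skip its single-step handling */
}
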
3 changes: 2 additions & 1 deletion include/linux/filter.h
@@ -458,7 +458,8 @@ struct bpf_prog {
locked:1, /* Program image locked? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1; /* Do we need dst entry? */
dst_needed:1, /* Do we need dst entry? */
kprobe_override:1; /* Do we override a kprobe? */
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
u32 jited_len; /* Size of jited insns in bytes */
1 change: 1 addition & 0 deletions include/linux/trace_events.h
@@ -528,6 +528,7 @@ do { \
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
DECLARE_PER_CPU(int, bpf_kprobe_override);

extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
7 changes: 6 additions & 1 deletion include/uapi/linux/bpf.h
@@ -677,6 +677,10 @@ union bpf_attr {
* @buf: buf to fill
* @buf_size: size of the buf
* Return : 0 on success or negative error code
*
* int bpf_override_return(pt_regs, rc)
* @pt_regs: pointer to struct pt_regs
* @rc: the return value to set
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -736,7 +740,8 @@ union bpf_attr {
FN(xdp_adjust_meta), \
FN(perf_event_read_value), \
FN(perf_prog_read_value), \
FN(getsockopt),
FN(getsockopt), \
FN(override_return),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
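
For orientation, the FN() list above expands into enum bpf_func_id (this expansion already exists in bpf.h and is unchanged by this patch), so appending FN(override_return) defines the BPF_FUNC_override_return id that programs place in the imm field of a BPF_CALL instruction:

#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN
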
3 changes: 3 additions & 0 deletions kernel/bpf/core.c
@@ -1320,6 +1320,9 @@ EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
{
if (fp->kprobe_override)
return false;

if (!array->owner_prog_type) {
/* There's no owner yet where we could check for
* compatibility.
2 changes: 2 additions & 0 deletions kernel/bpf/verifier.c
@@ -4413,6 +4413,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
prog->dst_needed = 1;
if (insn->imm == BPF_FUNC_get_prandom_u32)
bpf_user_rnd_init_once();
if (insn->imm == BPF_FUNC_override_return)
prog->kprobe_override = 1;
if (insn->imm == BPF_FUNC_tail_call) {
/* If we tail call into other programs, we
* cannot make any assumptions since they can
7 changes: 7 additions & 0 deletions kernel/events/core.c
@@ -8080,6 +8080,13 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
return -EINVAL;
}

/* Kprobe override only works for kprobes, not uprobes. */
if (prog->kprobe_override &&
!(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) {
bpf_prog_put(prog);
return -EINVAL;
}

if (is_tracepoint || is_syscall_tp) {
int off = trace_event_get_offsets(event->tp_event);

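
This check (and the attach-time check in bpf_trace.c below) sits on the PERF_EVENT_IOC_SET_BPF path.  A sketch of that path from userspace, assuming the flow of this era where the kprobe is created via the tracefs kprobe_events file and opened as a tracepoint-type event; prog_fd is a loaded BPF_PROG_TYPE_KPROBE program and error handling is abbreviated:

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

/* event_id comes from /sys/kernel/debug/tracing/events/kprobes/<probe>/id */
static int attach_override_prog(int prog_fd, uint64_t event_id)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_TRACEPOINT,
		.size = sizeof(attr),
		.config = event_id,
		.sample_period = 1,
	};
	int efd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);

	if (efd < 0)
		return -1;
	/* Rejected with -EINVAL if the program overrides a return but the
	 * event is a uprobe (check above) or a non-ftrace/non-injectable
	 * kprobe (check in perf_event_attach_bpf_prog below).
	 */
	if (ioctl(efd, PERF_EVENT_IOC_SET_BPF, prog_fd) ||
	    ioctl(efd, PERF_EVENT_IOC_ENABLE, 0)) {
		close(efd);
		return -1;
	}
	return efd;
}
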
11 changes: 11 additions & 0 deletions kernel/trace/Kconfig
@@ -529,6 +529,17 @@ config FUNCTION_PROFILER

If in doubt, say N.

config BPF_KPROBE_OVERRIDE
bool "Enable BPF programs to override a kprobed function"
depends on BPF_EVENTS
depends on KPROBES_ON_FTRACE
depends on HAVE_KPROBE_OVERRIDE
depends on DYNAMIC_FTRACE_WITH_REGS
default n
help
Allows BPF to override the execution of a probed function and
set a different return value. This is used for error injection.

config FTRACE_MCOUNT_RECORD
def_bool y
depends on DYNAMIC_FTRACE
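
On x86_64 the dependency chain above resolves to roughly this .config fragment (illustrative; the HAVE_* symbols are selected by the architecture rather than set by hand):

CONFIG_KPROBES=y
CONFIG_FUNCTION_TRACER=y
CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
CONFIG_KPROBES_ON_FTRACE=y
CONFIG_HAVE_KPROBE_OVERRIDE=y
CONFIG_BPF_EVENTS=y
CONFIG_BPF_KPROBE_OVERRIDE=y
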
35 changes: 35 additions & 0 deletions kernel/trace/bpf_trace.c
@@ -13,6 +13,10 @@
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <asm/kprobes.h>

#include "trace_probe.h"
#include "trace.h"

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
@@ -76,6 +80,24 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
__this_cpu_write(bpf_kprobe_override, 1);
regs_set_return_value(regs, rc);
arch_ftrace_kprobe_override_function(regs);
return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
.func = bpf_override_return,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
int ret;
@@ -551,6 +573,10 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
return &bpf_get_stackid_proto;
case BPF_FUNC_perf_event_read_value:
return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
case BPF_FUNC_override_return:
return &bpf_override_return_proto;
#endif
default:
return tracing_func_proto(func_id);
}
@@ -768,6 +794,15 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
struct bpf_prog_array *new_array;
int ret = -EEXIST;

/*
* Kprobe override only works for ftrace based kprobes, and only if they
* are on the opt-in list.
*/
if (prog->kprobe_override &&
(!trace_kprobe_ftrace(event->tp_event) ||
!trace_kprobe_error_injectable(event->tp_event)))
return -EINVAL;

mutex_lock(&bpf_event_mutex);

if (event->prog)
55 changes: 48 additions & 7 deletions kernel/trace/trace_kprobe.c
@@ -42,6 +42,7 @@ struct trace_kprobe {
(offsetof(struct trace_kprobe, tp.args) + \
(sizeof(struct probe_arg) * (n)))

DEFINE_PER_CPU(int, bpf_kprobe_override);

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
Expand Down Expand Up @@ -87,6 +88,27 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
return nhit;
}

int trace_kprobe_ftrace(struct trace_event_call *call)
{
struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
return kprobe_ftrace(&tk->rp.kp);
}

int trace_kprobe_error_injectable(struct trace_event_call *call)
{
struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
unsigned long addr;

if (tk->symbol) {
addr = (unsigned long)
kallsyms_lookup_name(trace_kprobe_symbol(tk));
addr += tk->rp.kp.offset;
} else {
addr = (unsigned long)tk->rp.kp.addr;
}
return within_kprobe_error_injection_list(addr);
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

@@ -1170,7 +1192,7 @@ static int kretprobe_event_define_fields(struct trace_event_call *event_call)
#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static void
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
struct trace_event_call *call = &tk->tp.call;
@@ -1179,12 +1201,29 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
int size, __size, dsize;
int rctx;

if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
return;
if (bpf_prog_array_valid(call)) {
int ret;

ret = trace_call_bpf(call, regs);

/*
* We need to check and see if we modified the pc of the
* pt_regs, and if so clear the kprobe and return 1 so that we
* don't do the instruction skipping. Also reset our state so
* we are clean the next pass through.
*/
if (__this_cpu_read(bpf_kprobe_override)) {
__this_cpu_write(bpf_kprobe_override, 0);
reset_current_kprobe();
return 1;
}
if (!ret)
return 0;
}

head = this_cpu_ptr(call->perf_events);
if (hlist_empty(head))
return;
return 0;

dsize = __get_data_size(&tk->tp, regs);
__size = sizeof(*entry) + tk->tp.size + dsize;
@@ -1193,13 +1232,14 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)

entry = perf_trace_buf_alloc(size, NULL, &rctx);
if (!entry)
return;
return 0;

entry->ip = (unsigned long)tk->rp.kp.addr;
memset(&entry[1], 0, dsize);
store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
head, NULL);
return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

@@ -1275,16 +1315,17 @@ static int kprobe_register(struct trace_event_call *event,
static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
int ret = 0;

raw_cpu_inc(*tk->nhit);

if (tk->tp.flags & TP_FLAG_TRACE)
kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
if (tk->tp.flags & TP_FLAG_PROFILE)
kprobe_perf_func(tk, regs);
ret = kprobe_perf_func(tk, regs);
#endif
return 0; /* We don't tweek kernel, so just return 0 */
return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

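
The opt-in list that trace_kprobe_error_injectable() consults is populated by a companion patch in this series, not shown in this diff.  Tagging a function as a valid override target looks roughly like this (sketch; BPF_ALLOW_ERROR_INJECTION() is that companion patch's macro, and my_injectable_func is hypothetical):

#include <linux/bpf.h>

noinline int my_injectable_func(void)
{
	/* real work here */
	return 0;
}
/* Record the function's address in the kprobe error-injection list so
 * that within_kprobe_error_injection_list() accepts overrides on it.
 */
BPF_ALLOW_ERROR_INJECTION(my_injectable_func);
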
12 changes: 12 additions & 0 deletions kernel/trace/trace_probe.h
@@ -252,6 +252,8 @@ struct symbol_cache;
unsigned long update_symbol_cache(struct symbol_cache *sc);
void free_symbol_cache(struct symbol_cache *sc);
struct symbol_cache *alloc_symbol_cache(const char *sym, long offset);
int trace_kprobe_ftrace(struct trace_event_call *call);
int trace_kprobe_error_injectable(struct trace_event_call *call);
#else
/* uprobes do not support symbol fetch methods */
#define fetch_symbol_u8 NULL
@@ -277,6 +279,16 @@ alloc_symbol_cache(const char *sym, long offset)
{
return NULL;
}

static inline int trace_kprobe_ftrace(struct trace_event_call *call)
{
return 0;
}

static inline int trace_kprobe_error_injectable(struct trace_event_call *call)
{
return 0;
}
#endif /* CONFIG_KPROBE_EVENTS */

struct probe_arg {
