diff --git a/libbpf-tools/.gitignore b/libbpf-tools/.gitignore
index 2a76ec265c34..d6d3a91461c4 100644
--- a/libbpf-tools/.gitignore
+++ b/libbpf-tools/.gitignore
@@ -14,6 +14,7 @@
 /capable
 /cpudist
 /cpufreq
+/doublefree
 /drsnoop
 /execsnoop
 /exitsnoop
diff --git a/libbpf-tools/Makefile b/libbpf-tools/Makefile
index 7ed7ca16ba2f..7d29ea438835 100644
--- a/libbpf-tools/Makefile
+++ b/libbpf-tools/Makefile
@@ -51,6 +51,7 @@ APPS = \
 	capable \
 	cpudist \
 	cpufreq \
+	doublefree \
 	drsnoop \
 	execsnoop \
 	exitsnoop \
diff --git a/libbpf-tools/doublefree.bpf.c b/libbpf-tools/doublefree.bpf.c
new file mode 100644
index 000000000000..c51eb1edf446
--- /dev/null
+++ b/libbpf-tools/doublefree.bpf.c
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2022 LG Electronics */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_tracing.h>
+#include "doublefree.h"
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+	__type(key, u32);
+	__type(value, u32);
+} events SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, MAX_ENTRIES);
+	__type(key, u64);
+	__type(value, struct doublefree_info_t);
+} allocs SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, MAX_ENTRIES);
+	__type(key, u64);
+	__type(value, u64);
+} memptrs SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
+	__uint(max_entries, MAX_ENTRIES);
+	__type(key, u32);
+} stack_traces SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, MAX_ENTRIES);
+	__type(key, u64);
+	__type(value, u32);
+} deallocs SEC(".maps");
+
+int gen_alloc_exit2(struct pt_regs *ctx, u64 address)
+{
+	struct doublefree_info_t info = {};
+
+	if (address == 0)
+		return 0;
+
+	info.stackid = bpf_get_stackid(ctx, &stack_traces, BPF_F_USER_STACK);
+	info.alloc_count = 1;
+	bpf_map_update_elem(&allocs, &address, &info, BPF_ANY);
+
+	return 0;
+}
+
+int gen_alloc_exit(struct pt_regs *ctx)
+{
+	return gen_alloc_exit2(ctx, PT_REGS_RC(ctx));
+}
+
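+/*
+ * Bookkeeping for each address recorded in 'allocs': alloc_count is set
+ * to 1 on allocation, drops to 0 on the first free (whose stack id is
+ * saved in 'deallocs'), and goes negative when the same address is freed
+ * again, which is reported to user space as a double free event. Any
+ * other value is reported with err = -1 as an unexpected state.
+ */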
+int gen_free_enter(struct pt_regs *ctx, void *address)
+{
+	int stackid = 0;
+	u64 addr = (u64)address;
+	struct event event = {};
+	struct doublefree_info_t *info = bpf_map_lookup_elem(&allocs, &addr);
+
+	if (!info)
+		return 0;
+
+	__sync_fetch_and_add(&info->alloc_count, -1);
+	stackid = bpf_get_stackid(ctx, &stack_traces, BPF_F_USER_STACK);
+	if (info->alloc_count == 0) {
+		bpf_map_update_elem(&deallocs, &addr, &stackid, BPF_ANY);
+	} else if (info->alloc_count < 0) {
+		event.stackid = stackid;
+		event.addr = addr;
+		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event,
+				      sizeof(event));
+	} else {
+		event.err = -1;
+		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event,
+				      sizeof(event));
+	}
+	return 0;
+}
+
+SEC("kretprobe/dummy_malloc")
+int BPF_KRETPROBE(malloc_return)
+{
+	return gen_alloc_exit(ctx);
+}
+
+SEC("kprobe/dummy_free")
+int BPF_KPROBE(free_entry, void *address)
+{
+	return gen_free_enter(ctx, address);
+}
+
+SEC("kretprobe/dummy_calloc")
+int BPF_KRETPROBE(calloc_return)
+{
+	return gen_alloc_exit(ctx);
+}
+
+SEC("kretprobe/dummy_realloc")
+int BPF_KRETPROBE(realloc_return)
+{
+	return gen_alloc_exit(ctx);
+}
+
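+/*
+ * posix_memalign() returns its allocation through the memptr argument
+ * rather than the return value, so the entry probe stashes the caller's
+ * pointer in 'memptrs' keyed by pid/tgid, and the return probe reads the
+ * allocated address back with bpf_probe_read_user().
+ */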
+SEC("kprobe/dummy_posix_memalign")
+int BPF_KPROBE(posix_memalign_entry, void **memptr, size_t alignment, size_t size)
+{
+	u64 memptr64 = (u64)(size_t)memptr;
+	u64 pid = bpf_get_current_pid_tgid();
+
+	bpf_map_update_elem(&memptrs, &pid, &memptr64, BPF_ANY);
+	return 0;
+}
+
+SEC("kretprobe/dummy_posix_memalign")
+int BPF_KRETPROBE(posix_memalign_return)
+{
+	void *addr = NULL;
+	u64 pid = bpf_get_current_pid_tgid();
+	u64 *memptr64 = bpf_map_lookup_elem(&memptrs, &pid);
+
+	if (memptr64 == NULL)
+		return 0;
+
+	bpf_map_delete_elem(&memptrs, &pid);
+	if (bpf_probe_read_user(&addr, sizeof(void *), (void *)(size_t)*memptr64))
+		return 0;
+
+	u64 addr64 = (u64)(size_t)addr;
+	return gen_alloc_exit2(ctx, addr64);
+}
+
+SEC("kretprobe/dummy_aligned_alloc")
+int BPF_KRETPROBE(aligned_alloc_return)
+{
+	return gen_alloc_exit(ctx);
+}
+
+SEC("kretprobe/dummy_valloc")
+int BPF_KRETPROBE(valloc_return)
+{
+	return gen_alloc_exit(ctx);
+}
+
+SEC("kretprobe/dummy_memalign")
+int BPF_KRETPROBE(memalign_return)
+{
+	return gen_alloc_exit(ctx);
+}
+
+SEC("kretprobe/dummy_pvalloc")
+int BPF_KRETPROBE(pvalloc_return)
+{
+	return gen_alloc_exit(ctx);
+}
+
+SEC("kretprobe/dummy_reallocarray")
+int BPF_KRETPROBE(reallocarray_return)
+{
+	return gen_alloc_exit(ctx);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/libbpf-tools/doublefree.c b/libbpf-tools/doublefree.c
new file mode 100644
index 000000000000..155852df1e9d
--- /dev/null
+++ b/libbpf-tools/doublefree.c
@@ -0,0 +1,473 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2022 LG Electronics */
+
+// 19-Oct-2022 Bojun Seo Created this.
+#include <argp.h>
+#include <errno.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+#include "doublefree.h"
+#include "doublefree.skel.h"
+#include "trace_helpers.h"
+#include "uprobe_helpers.h"
+
+#define STACK_MAX 127
+#define BUF_MAX (STACK_MAX * LINE_MAX * 2)
+#define PERF_BUFFER_PAGES 16
+#define PERF_POLL_TIMEOUT_MS 10000
+#define DATE_FORMAT "%2d-%s-%02d %02d:%02d:%02d "
+
+#define ON_MEM_FAILURE(buf) \
+	do { \
+		if (NULL == buf) { \
+			p_err("FATAL: Failed to allocate memory on %s", __func__); \
+			exit(-1); \
+		} \
+	} while (false)
+
+#define _CHECK_OFFSET(func_off) \
+	do { \
+		if (func_off < 0) { \
+			p_warn("Failed to get func_offset"); \
+			return func_off; \
+		} \
+	} while (false)
+
+#define _CHECK_PROGRAM(obj, func_name) \
+	do { \
+		int err = libbpf_get_error(obj->links.func_name); \
+		if (err) { \
+			p_warn("Failed to attach %s: %d", #func_name, err); \
+			return err; \
+		} \
+	} while (false)
+
+#define _ATTACH_UPROBE(obj, lib_path, func_name, is_uretprobe, func_off, pid) \
+	do { \
+		obj->links.func_name = bpf_program__attach_uprobe( \
+			obj->progs.func_name, is_uretprobe, pid, lib_path, func_off); \
+	} while (false)
+
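+/*
+ * The ATTACH_* macros below resolve the allocator's offset in libc and
+ * attach the func_name##_entry / func_name##_return programs. Those token
+ * names must match the BPF program names in doublefree.bpf.c (for example
+ * free_entry and malloc_return), since they are expanded into the
+ * skeleton's obj->progs.* and obj->links.* members.
+ */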
+#define ATTACH_UPROBE(obj, lib_path, func_name, pid) \
+	do { \
+		off_t func_off = get_elf_func_offset(lib_path, #func_name); \
+		_ATTACH_UPROBE(obj, lib_path, func_name##_entry, false, func_off, pid); \
+	} while (false)
+
+#define ATTACH_URETPROBE(obj, lib_path, func_name, pid) \
+	do { \
+		off_t func_off = get_elf_func_offset(lib_path, #func_name); \
+		_ATTACH_UPROBE(obj, lib_path, func_name##_return, true, func_off, pid); \
+	} while (false)
+
+#define ATTACH_UPROBES(obj, lib_path, func_name, pid) \
+	do { \
+		off_t func_off = get_elf_func_offset(lib_path, #func_name); \
+		_ATTACH_UPROBE(obj, lib_path, func_name##_entry, false, func_off, pid); \
+		_ATTACH_UPROBE(obj, lib_path, func_name##_return, true, func_off, pid); \
+	} while (false)
+
+#define ATTACH_UPROBE_CHECKED(obj, lib_path, func_name, pid) \
+	do { \
+		off_t func_off = get_elf_func_offset(lib_path, #func_name); \
+		_CHECK_OFFSET(func_off); \
+		_ATTACH_UPROBE(obj, lib_path, func_name##_entry, false, func_off, pid); \
+		_CHECK_PROGRAM(obj, func_name##_entry); \
+	} while (false)
+
+#define ATTACH_URETPROBE_CHECKED(obj, lib_path, func_name, pid) \
+	do { \
+		off_t func_off = get_elf_func_offset(lib_path, #func_name); \
+		_CHECK_OFFSET(func_off); \
+		_ATTACH_UPROBE(obj, lib_path, func_name##_return, true, func_off, pid); \
+		_CHECK_PROGRAM(obj, func_name##_return); \
+	} while (false)
+
+#define ATTACH_UPROBES_CHECKED(obj, lib_path, func_name, pid) \
+	do { \
+		off_t func_off = get_elf_func_offset(lib_path, #func_name); \
+		_CHECK_OFFSET(func_off); \
+		_ATTACH_UPROBE(obj, lib_path, func_name##_entry, false, func_off, pid); \
+		_CHECK_PROGRAM(obj, func_name##_entry); \
+		_ATTACH_UPROBE(obj, lib_path, func_name##_return, true, func_off, pid); \
+		_CHECK_PROGRAM(obj, func_name##_return); \
+	} while (false)
+
+enum log_level {
+	DEBUG,
+	INFO,
+	WARN,
+	ERROR,
+};
+
+void __p(FILE *outstream, enum log_level level, char *level_str, char *fmt, ...);
+void set_log_level(enum log_level level);
+pid_t execute_process(char *);
+
+#define p_debug(fmt, ...) __p(stderr, DEBUG, "DEBUG", fmt, ##__VA_ARGS__)
+#define p_info(fmt, ...) __p(stderr, INFO, "INFO", fmt, ##__VA_ARGS__)
+#define p_warn(fmt, ...) __p(stderr, WARN, "WARN", fmt, ##__VA_ARGS__)
+#define p_err(fmt, ...) __p(stderr, ERROR, "ERROR", fmt, ##__VA_ARGS__)
+
+/* default */
+enum log_level log_level = ERROR;
+
+void sig_handler(int signo)
+{
+	if (signo == SIGINT || signo == SIGTERM)
+		kill(0, SIGKILL);
+}
+
+void __p(FILE *outstream, enum log_level level, char *level_str, char *fmt, ...)
+{
+	va_list ap;
+	char mon[4];
+	int day, year, hour, minute, second;
+
+	if (level < log_level)
+		return;
+
+	sscanf(__DATE__, "%s %d %d", mon, &day, &year);
+	sscanf(__TIME__, "%d:%d:%d", &hour, &minute, &second);
+
+	va_start(ap, fmt);
+	fprintf(outstream, DATE_FORMAT, year, mon, day, hour, minute, second);
+	fprintf(outstream, "%s ", level_str);
+	vfprintf(outstream, fmt, ap);
+	fprintf(outstream, "\n");
+	va_end(ap);
+	fflush(outstream);
+}
+
+void set_log_level(enum log_level level)
+{
+	log_level = level;
+}
+
+pid_t execute_process(char *cmd)
+{
+	const char *delim = " ";
+	char **argv, *ptr, *filepath;
+	pid_t pid = 0;
+	int i;
+
+	if (cmd == NULL) {
+		p_warn("Invalid argument: command not found");
+		exit(-1);
+	} else
+		p_info("Execute child process: %s", cmd);
+
+	/* It is enough to alloc half length of cmd to save argv */
+	argv = calloc(sizeof(char *), strlen(cmd)/2);
+	if (argv == NULL) {
+		p_err("failed to alloc memory");
+		goto cleanup;
+	}
+
+	ptr = strtok(cmd, delim);
+	if (ptr != NULL) {
+		filepath = ptr;
+		ptr = strtok(NULL, delim);
+	} else {
+		p_err("failed to exec %s", cmd);
+		goto cleanup;
+	}
+
+	i = 0;
+	argv[i++] = filepath;
+	while (ptr != NULL) {
+		argv[i++] = ptr;
+		ptr = strtok(NULL, delim);
+	}
+
+	pid = fork();
+	if (pid == 0) {
+		execve(filepath, argv, NULL);
+	} else if (pid > 0) {
+		signal(SIGINT, sig_handler);
+		free(argv);
+		return pid;
+	} else {
+		p_err("failed to exec %s", cmd);
+		exit(-1);
+	}
+
+cleanup:
+	free(argv);
+	return -1;
+}
+
+static volatile sig_atomic_t exiting = 0;
+
+static struct env {
+	/* main process's tid */
+	pid_t pid;
+	int stack_storage_size;
+	int perf_max_stack_depth;
+	bool verbose;
+	char* command;
+} env = {
+	.pid = -1,
+	.stack_storage_size = MAX_ENTRIES,
+	.perf_max_stack_depth = STACK_MAX,
+	.verbose = false,
+	.command = NULL,
+};
+
+const char *argp_program_version = "doublefree 0.1";
+const char *argp_program_bug_address =
+	"https://github.com/iovisor/bcc/tree/master/libbpf-tools";
+const char argp_program_doc[] = "Detect and report double free error.\n"
+"\n"
+"-c or -p is a mandatory option\n"
+"EXAMPLES:\n"
+"    doublefree -p 1234          # Detect doublefree on process id 1234\n"
+"    doublefree -c a.out         # Detect doublefree on a.out\n"
+"    doublefree -c 'a.out arg'   # Detect doublefree on a.out with argument\n"
+"    doublefree -c \"a.out arg\"   # Detect doublefree on a.out with argument\n";
+
+static const struct argp_option opts[] = {
+	{ "verbose", 'v', NULL, 0, "Verbose debug output" },
+	{ "help", 'h', NULL, OPTION_HIDDEN, "Show the full help" },
+	{ "pid", 'p', "PID", 0, "Set pid" },
+	{ "command", 'c', "COMMAND", 0, "Execute and trace the specified command" },
+	{},
+};
+
+static struct doublefree_bpf *obj = NULL;
+struct syms_cache *syms_cache = NULL;
+
+static int libbpf_print_fn(enum libbpf_print_level level,
+			   const char *format, va_list args)
+{
+	if (level == LIBBPF_DEBUG && !env.verbose)
+		return 0;
+
+	return vfprintf(stderr, format, args);
+}
+
+static void sig_int(int signo)
+{
+	exiting = 1;
+}
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+	switch (key) {
+	case 'h':
+		argp_state_help(state, stderr, ARGP_HELP_STD_HELP);
+		break;
+	case 'v':
+		env.verbose = true;
+		break;
+	case 'p':
+		errno = 0;
+		env.pid = strtol(arg, NULL, 10);
+		if (errno || env.pid <= 0) {
+			p_err("Invalid PID: %s", arg);
+			argp_usage(state);
+		}
+		break;
+	case 'c':
+		env.command = strdup(arg);
+		ON_MEM_FAILURE(env.command);
+		break;
+	default:
+		return ARGP_ERR_UNKNOWN;
+	}
+	return 0;
+}
+
+static int attach_uprobes(struct doublefree_bpf *obj)
+{
+	int err = 0;
+	char libc_path[PATH_MAX] = {};
+
+	err = get_pid_lib_path(1, "c", libc_path, PATH_MAX);
+	if (err) {
+		p_err("Failed to find libc.so, %d", err);
+		return err;
+	}
+	ATTACH_URETPROBE_CHECKED(obj, libc_path, malloc, env.pid);
+	ATTACH_UPROBE_CHECKED(obj, libc_path, free, env.pid);
+	ATTACH_URETPROBE_CHECKED(obj, libc_path, calloc, env.pid);
+	ATTACH_URETPROBE_CHECKED(obj, libc_path, realloc, env.pid);
+	ATTACH_UPROBES_CHECKED(obj, libc_path, posix_memalign, env.pid);
+	ATTACH_URETPROBE_CHECKED(obj, libc_path, memalign, env.pid);
+
+	/* Accept attach failure for following functions */
+	ATTACH_URETPROBE(obj, libc_path, aligned_alloc, env.pid);
+	ATTACH_URETPROBE(obj, libc_path, valloc, env.pid);
+	ATTACH_URETPROBE(obj, libc_path, pvalloc, env.pid);
+	ATTACH_URETPROBE(obj, libc_path, reallocarray, env.pid);
+
+	return 0;
+}
+
+static void print_backtrace(const struct syms *syms, int stackid)
+{
+	const struct sym *sym = NULL;
+	size_t i = 0;
+	int err = 0;
+	unsigned long *ip = NULL;
+	int sfd = bpf_map__fd(obj->maps.stack_traces);
+
+	ip = calloc(env.perf_max_stack_depth, sizeof(*ip));
+	if (!ip) {
+		p_err("Failed to alloc ip");
+		return;
+	}
+	err = bpf_map_lookup_elem(sfd, &stackid, ip);
+	if (err < 0) {
+		p_err("Failed to lookup elem on sfd");
+		free(ip);
+		return;
+	}
+	for (i = 0; i < env.perf_max_stack_depth && ip[i]; ++i) {
+		printf("\t#%zu %#016lx", i + 1, ip[i]);
+		sym = syms__map_addr(syms, ip[i]);
+		if (sym)
+			printf(" %s+%#lx", sym->name, sym->offset);
+		printf("\n");
+	}
+	printf("\n");
+	free(ip);
+}
+
+static int get_stackid(int fd, unsigned long key)
+{
+	struct doublefree_info_t val = {};
+	int err = 0;
+
+	err = bpf_map_lookup_elem(fd, &key, &val);
+	if (err < 0) {
+		p_err("Failed to lookup elem on fd");
+		return err;
+	}
+	return val.stackid;
+}
+
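+/*
+ * Each perf event describes one double free: print the allocation stack
+ * from 'allocs', the first deallocation stack from 'deallocs', and the
+ * second deallocation stack carried in the event itself. get_stackid() is
+ * shared by both maps; for 'deallocs' the u32 stack id fills the first
+ * field of doublefree_info_t.
+ */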
+static void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
+{
+	const struct syms *syms = NULL;
+	struct event *e = data;
+	int stackid = 0;
+	int allocs_fd = bpf_map__fd(obj->maps.allocs);
+	int deallocs_fd = bpf_map__fd(obj->maps.deallocs);
+
+	if (e->err == -1) {
+		p_err("This message should not be printed..");
+		p_err("Please report this issue to the developer");
+		return;
+	}
+	syms = syms_cache__get_syms(syms_cache, env.pid);
+	if (syms != NULL) {
+		printf("\nAllocation:\n");
+		stackid = get_stackid(allocs_fd, e->addr);
+		print_backtrace(syms, stackid);
+
+		printf("First deallocation:\n");
+		stackid = get_stackid(deallocs_fd, e->addr);
+		print_backtrace(syms, stackid);
+
+		printf("Second deallocation:\n");
+		print_backtrace(syms, e->stackid);
+	} else {
+		p_warn("syms is null");
+	}
+}
+
+static void handle_lost_events(void *ctx, int cpu, __u64 lost_cnt)
+{
+	p_err("handle_lost_events");
+}
+
+int main(int argc, char **argv)
+{
+	int err = 0;
+	struct perf_buffer *pb = NULL;
+	static const struct argp argp = {
+		.options = opts,
+		.parser = parse_arg,
+		.doc = argp_program_doc,
+	};
+
+	set_log_level(INFO);
+	err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
+	if (err)
+		return err;
+
+	if (env.verbose)
+		set_log_level(DEBUG);
+
+	if (env.command != NULL) {
+		env.pid = execute_process(env.command);
+		if (env.pid > 0)
+			p_info("execute command: %s(pid %d)", env.command,
+			       env.pid);
+	}
+	if (env.pid == -1) {
+		p_err("-c or -p is a mandatory option");
+		return -1;
+	}
+	libbpf_set_print(libbpf_print_fn);
+	obj = doublefree_bpf__open();
+	if (!obj) {
+		p_err("Failed to open BPF object");
+		return -1;
+	}
+	bpf_map__set_value_size(obj->maps.stack_traces,
+				env.perf_max_stack_depth * sizeof(__u64));
+	bpf_map__set_max_entries(obj->maps.stack_traces,
+				 env.stack_storage_size);
+	err = doublefree_bpf__load(obj);
+	if (err) {
+		p_err("Failed to load BPF object: %d", err);
+		return -1;
+	}
+	err = attach_uprobes(obj);
+	if (err) {
+		p_err("Failed to attach BPF programs");
+		p_err("Is this process alive? pid: %d", env.pid);
+		return -1;
+	}
+	syms_cache = syms_cache__new(0);
+	if (!syms_cache) {
+		p_err("Failed to load syms");
+		return -1;
+	}
+	syms_cache__get_syms(syms_cache, env.pid);
+	pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES,
+			      handle_event, handle_lost_events, NULL, NULL);
+	if (!pb) {
+		p_err("failed to open perf buffer");
+		return -1;
+	}
+	if (signal(SIGINT, sig_int) == SIG_ERR) {
+		p_err("can't set signal handler: %s\n", strerror(errno));
+		return -1;
+	}
+	printf("Tracing doublefree... Hit Ctrl-C to stop\n");
+	while (!exiting) {
+		err = perf_buffer__poll(pb, PERF_POLL_TIMEOUT_MS);
+		if (err < 0) {
+			p_err("Failed to poll perf_buffer");
+			break;
+		}
+		if (getpgid(env.pid) < 0) {
+			p_warn("Is this process alive? pid: %d", env.pid);
+			break;
+		}
+	}
+
+	/* cleanup */
+	perf_buffer__free(pb);
+	syms_cache__free(syms_cache);
+	doublefree_bpf__destroy(obj);
+	free(env.command);
+	return 0;
+}
diff --git a/libbpf-tools/doublefree.h b/libbpf-tools/doublefree.h
new file mode 100644
index 000000000000..ef4215be31a1
--- /dev/null
+++ b/libbpf-tools/doublefree.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2022 LG Electronics */
+#ifndef __DOUBLEFREE_H
+#define __DOUBLEFREE_H
+
+#define MAX_ENTRIES 65536
+
+struct event {
+	/* success: 0, failure: -1 */
+	int err;
+	int stackid;
+	__u64 addr;
+};
+
+struct doublefree_info_t {
+	int stackid;
+	/* allocated: 1, deallocated: 0, doublefree: -1 */
+	int alloc_count;
+};
+
+#endif /* __DOUBLEFREE_H */
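
A minimal target for exercising the tool, not part of the patch itself (the file name double_free.c is only an example), could look like this:

	#include <stdlib.h>

	int main(void)
	{
		int *p = malloc(sizeof(*p));	/* reported as "Allocation" */

		free(p);			/* reported as "First deallocation" */
		free(p);			/* reported as "Second deallocation" */
		return 0;
	}

Built with "gcc -g double_free.c -o double_free", it can be traced with "doublefree -c ./double_free", or with "doublefree -p <pid>" for an already running process; the free uprobe fires on entry, so the event is emitted even if libc later aborts on the corrupted heap.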