
Commit

Merge branch 'main' into add_k8s_cluster_name_attr
marctc committed Jul 1, 2024
2 parents f3c90f0 + fd90b82 commit ac0994a
Showing 71 changed files with 368 additions and 102 deletions.
20 changes: 12 additions & 8 deletions bpf/bpf_dbg.h
@@ -26,15 +26,19 @@ struct {
__uint(pinning, LIBBPF_PIN_BY_NAME);
} debug_events SEC(".maps");

enum bpf_func_id___x { BPF_FUNC_snprintf___x = 42 /* avoid zero */ };

#define bpf_dbg_helper(fmt, args...) { \
{log_info_t *__trace__ = bpf_ringbuf_reserve(&debug_events, sizeof(log_info_t), 0); \
if (__trace__) { \
BPF_SNPRINTF(__trace__->log, sizeof(__trace__->log), fmt, ##args); \
u64 id = bpf_get_current_pid_tgid(); \
bpf_get_current_comm(&__trace__->comm, sizeof(__trace__->comm)); \
__trace__->pid = id >> 32; \
bpf_ringbuf_submit(__trace__, 0); \
}} \
if(bpf_core_enum_value_exists(enum bpf_func_id___x, BPF_FUNC_snprintf___x)) { \
log_info_t *__trace__ = bpf_ringbuf_reserve(&debug_events, sizeof(log_info_t), 0); \
if (__trace__) { \
BPF_SNPRINTF(__trace__->log, sizeof(__trace__->log), fmt, ##args); \
u64 id = bpf_get_current_pid_tgid(); \
bpf_get_current_comm(&__trace__->comm, sizeof(__trace__->comm)); \
__trace__->pid = id >> 32; \
bpf_ringbuf_submit(__trace__, 0); \
} \
} \
}

#define bpf_dbg_printk(fmt, args...) { \
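Note on the bpf_dbg.h change: the rewritten bpf_dbg_helper macro gates the ring-buffer logging path behind a CO-RE enum probe, so BPF_SNPRINTF is only used on kernels that actually ship the bpf_snprintf helper. A minimal sketch of that probing idiom, kept separate from the diff above; the ___probe names and the snprintf_supported() wrapper are illustrative, not part of this commit:

// requires <bpf/bpf_core_read.h> and the kernel's enum bpf_func_id via vmlinux.h
enum bpf_func_id___probe { BPF_FUNC_snprintf___probe = 42 /* any non-zero value */ };

static __always_inline int snprintf_supported(void) {
    // libbpf strips the ___probe "flavour" suffix and relocates the check against
    // the kernel's real enum bpf_func_id at load time; on kernels that predate
    // bpf_snprintf this evaluates to 0 and the guarded branch is never taken.
    return bpf_core_enum_value_exists(enum bpf_func_id___probe, BPF_FUNC_snprintf___probe);
}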
33 changes: 19 additions & 14 deletions bpf/http_sock.c
@@ -299,18 +299,17 @@ int BPF_KPROBE(kprobe_tcp_sendmsg, struct sock *sk, struct msghdr *msg, size_t s
void *ssl = is_ssl_connection(id, &s_args.p_conn);
if (size > 0) {
if (!ssl) {
void *iovec_ptr = find_msghdr_buf(msg);
if (iovec_ptr) {
u64 sock_p = (u64)sk;
bpf_map_update_elem(&active_send_args, &id, &s_args, BPF_ANY);
bpf_map_update_elem(&active_send_sock_args, &sock_p, &s_args, BPF_ANY);
handle_buf_with_connection(&s_args.p_conn, iovec_ptr, size, NO_SSL, TCP_SEND, orig_dport);
// if (size < KPROBES_LARGE_RESPONSE_LEN) {
// bpf_dbg_printk("Maybe we need to finish the request");
// finish_possible_delayed_http_request(&s_args.p_conn);
// }
} else {
bpf_dbg_printk("can't find iovec ptr in msghdr, not tracking sendmsg");
u8* buf = iovec_memory();
if (buf) {
size = read_msghdr_buf(msg, buf, size);
if (size) {
u64 sock_p = (u64)sk;
bpf_map_update_elem(&active_send_args, &id, &s_args, BPF_ANY);
bpf_map_update_elem(&active_send_sock_args, &sock_p, &s_args, BPF_ANY);
handle_buf_with_connection(&s_args.p_conn, buf, size, NO_SSL, TCP_SEND, orig_dport);
} else {
bpf_dbg_printk("can't find iovec ptr in msghdr, not tracking sendmsg");
}
}
} else {
bpf_dbg_printk("tcp_sendmsg for identified SSL connection, ignoring...");
@@ -426,7 +425,7 @@ int BPF_KPROBE(kprobe_tcp_recvmsg, struct sock *sk, struct msghdr *msg, size_t l
// can get modified in non-reversible way if the incoming packet is large and broken down in parts.
recv_args_t args = {
.sock_ptr = (u64)sk,
.iovec_ptr = (u64)find_msghdr_buf(msg)
.iovec_ptr = (u64)(msg)
};

bpf_map_update_elem(&active_recv_args, &id, &args, BPF_ANY);
@@ -465,7 +464,13 @@ int BPF_KRETPROBE(kretprobe_tcp_recvmsg, int copied_len) {
void *ssl = is_ssl_connection(id, &info);

if (!ssl) {
handle_buf_with_connection(&info, (void *)args->iovec_ptr, copied_len, NO_SSL, TCP_RECV, orig_dport);
u8* buf = iovec_memory();
if (buf) {
copied_len = read_msghdr_buf((void *)args->iovec_ptr, buf, copied_len);
if (copied_len) {
handle_buf_with_connection(&info, buf, copied_len, NO_SSL, TCP_RECV, orig_dport);
}
}
} else {
bpf_dbg_printk("tcp_recvmsg for an identified SSL connection, ignoring...");
}
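For context on the http_sock.c changes: kprobe_tcp_sendmsg and kretprobe_tcp_recvmsg no longer hand the parser a raw iovec/msghdr pointer; they borrow a per-CPU scratch buffer, copy the payload into it with read_msghdr_buf, and only then call handle_buf_with_connection. A condensed sketch of that pattern, with names taken from the diff and the surrounding probe context assumed:

u8 *buf = iovec_memory();                   // per-CPU scratch, see the iovec_mem map in http_sock.h
if (buf) {
    size = read_msghdr_buf(msg, buf, size); // bounded copy, at most IO_VEC_MAX_LEN bytes
    if (size) {
        handle_buf_with_connection(&s_args.p_conn, buf, size, NO_SSL, TCP_SEND, orig_dport);
    }
}

This also lets the recv path defer the work: the kprobe now stashes the msghdr pointer itself, and the kretprobe copies the iovecs once the actual copied length is known.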
81 changes: 57 additions & 24 deletions bpf/http_sock.h
Expand Up @@ -17,6 +17,8 @@
#define PACKET_TYPE_REQUEST 1
#define PACKET_TYPE_RESPONSE 2

#define IO_VEC_MAX_LEN 512

volatile const s32 capture_header_buffer = 0;

// Keeps track of the ongoing http connections we match for request/response
@@ -99,6 +101,13 @@ struct {
__uint(max_entries, 1);
} tcp_req_mem SEC(".maps");

struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__type(key, int);
__type(value, u8[(IO_VEC_MAX_LEN * 2)]);
__uint(max_entries, 1);
} iovec_mem SEC(".maps");

static __always_inline u8 is_http(unsigned char *p, u32 len, u8 *packet_type) {
if (len < MIN_HTTP_SIZE) {
return 0;
@@ -153,7 +162,7 @@ struct _iov_iter {
};
};

static __always_inline void *find_msghdr_buf(struct msghdr *msg) {
static __always_inline int read_msghdr_buf(struct msghdr *msg, u8* buf, int max_len) {
struct iov_iter msg_iter = BPF_CORE_READ(msg, msg_iter);
u8 msg_iter_type = 0;

@@ -166,6 +175,9 @@ static __always_inline void *find_msghdr_buf(struct msghdr *msg) {

struct iovec *iov = NULL;

u32 l = max_len;
bpf_clamp_umax(l, IO_VEC_MAX_LEN);

if (bpf_core_field_exists(msg_iter.iov)) {
bpf_probe_read(&iov, sizeof(struct iovec *), &(msg_iter.iov));
bpf_dbg_printk("iov exists, read value %llx", iov);
@@ -182,41 +194,56 @@ static __always_inline void *find_msghdr_buf(struct msghdr *msg) {
bpf_probe_read(&vec, sizeof(struct iovec), &(_msg_iter.__ubuf_iovec));
bpf_dbg_printk("ubuf base %llx, &ubuf base %llx", vec.iov_base, &vec.iov_base);

return vec.iov_base;
bpf_probe_read(buf, l, vec.iov_base);
return l;
} else {
bpf_probe_read(&iov, sizeof(struct iovec *), &(_msg_iter.__iov));
}
}

if (!iov) {
return NULL;
return 0;
}

if (msg_iter_type == 6) {// Direct char buffer
bpf_dbg_printk("direct char buffer type=6 iov %llx", iov);
return iov;
bpf_probe_read(buf, l, iov);

return l;
}

struct iovec vec;
bpf_probe_read(&vec, sizeof(struct iovec), iov);

bpf_dbg_printk("standard iov %llx base %llx len %d", iov, vec.iov_base, vec.iov_len);

if (!vec.iov_base) {
// We didn't find the base in the first vector, loop couple of times to find the base
for (int i = 1; i < 4; i++) {
void *p = &iov[i];
bpf_probe_read(&vec, sizeof(struct iovec), p);
// No prints in loops on 5.10
// bpf_dbg_printk("iov[%d]=%llx base %llx, len %d", i, p, vec.iov_base, vec.iov_len);
if (!vec.iov_base || !vec.iov_len) {
continue;
}
return vec.iov_base;
u32 tot_len = 0;

// Loop couple of times reading the various io_vecs
for (int i = 0; i < 4; i++) {
void *p = &iov[i];
bpf_probe_read(&vec, sizeof(struct iovec), p);
// No prints in loops on 5.10
// bpf_printk("iov[%d]=%llx base %llx, len %d", i, p, vec.iov_base, vec.iov_len);
if (!vec.iov_base || !vec.iov_len) {
continue;
}

u32 remaining = IO_VEC_MAX_LEN > tot_len ? (IO_VEC_MAX_LEN - tot_len) : 0;
u32 iov_size = vec.iov_len < remaining ? vec.iov_len : remaining;
bpf_clamp_umax(tot_len, IO_VEC_MAX_LEN);
bpf_clamp_umax(iov_size, IO_VEC_MAX_LEN);
// bpf_printk("tot_len=%d, remaining=%d", tot_len, remaining);
if (tot_len + iov_size > l) {
break;
}
bpf_probe_read(&buf[tot_len], iov_size, vec.iov_base);
// bpf_printk("iov_size=%d, buf=%s", iov_size, buf);

tot_len += iov_size;
}

return vec.iov_base;
return tot_len;
}

// empty_http_info zeroes and return the unique percpu copy in the map
@@ -254,6 +281,11 @@ static __always_inline http_connection_metadata_t* empty_connection_meta() {
return bpf_map_lookup_elem(&connection_meta_mem, &zero);
}

static __always_inline u8* iovec_memory() {
int zero = 0;
return bpf_map_lookup_elem(&iovec_mem, &zero);
}

static __always_inline u8 http_info_complete(http_info_t *info) {
return (info->start_monotime_ns != 0 && info->status != 0 && info->pid.host_pid != 0);
}
@@ -455,8 +487,8 @@ static __always_inline void http2_grpc_end(http2_conn_stream_t *stream, http2_gr

http2_grpc_request_t *trace = bpf_ringbuf_reserve(&events, sizeof(http2_grpc_request_t), 0);
if (trace) {
bpf_probe_read(prev_info->ret_data, KPROBES_HTTP2_RET_BUF_SIZE, u_buf);
bpf_memcpy(trace, prev_info, sizeof(http2_grpc_request_t));
bpf_probe_read(trace->ret_data, KPROBES_HTTP2_RET_BUF_SIZE, u_buf);
bpf_ringbuf_submit(trace, get_flags());
}
}
@@ -475,17 +507,16 @@ static __always_inline void process_http2_grpc_frames(pid_connection_info_t *pid
u8 found_data_frame = 0;
http2_conn_stream_t stream = {0};

for (int i = 0; i < 8; i++) {
unsigned char frame_buf[FRAME_HEADER_LEN];
frame_header_t frame = {0};

unsigned char frame_buf[FRAME_HEADER_LEN];
frame_header_t frame = {0};

for (int i = 0; i < 4; i++) {
if (pos >= bytes_len) {
break;
}

bpf_probe_read(&frame_buf, FRAME_HEADER_LEN, (void *)((u8 *)u_buf + pos));
read_http2_grpc_frame_header(&frame, frame_buf, FRAME_HEADER_LEN);

read_http2_grpc_frame_header(&frame, frame_buf, FRAME_HEADER_LEN);
//bpf_dbg_printk("http2 frame type = %d, len = %d, stream_id = %d, flags = %d", frame.type, frame.length, frame.stream_id, frame.flags);

if (is_headers_frame(&frame)) {
@@ -549,7 +580,9 @@ static __always_inline void process_http2_grpc_frames(pid_connection_info_t *pid
u8 req_type = request_type_by_direction(direction, PACKET_TYPE_RESPONSE);
if (prev_info) {
if (req_type == prev_info->type) {
http2_grpc_end(&stream, prev_info, (void *)((u8 *)u_buf + saved_buf_pos));
u32 buf_pos = saved_buf_pos;
bpf_clamp_umax(buf_pos, IO_VEC_MAX_LEN);
http2_grpc_end(&stream, prev_info, (void *)((u8 *)u_buf + buf_pos));
bpf_map_delete_elem(&active_ssl_connections, pid_conn);
} else {
bpf_dbg_printk("grpc request/response mismatch, req_type %d, prev_info->type %d", req_type, prev_info->type);
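Two idioms in the http_sock.h hunks are worth calling out. read_msghdr_buf replaces find_msghdr_buf: instead of returning a single iov_base pointer, it concatenates up to four iovecs into the caller's buffer, clamping offsets and lengths with bpf_clamp_umax so the verifier can bound every access into the scratch buffer. That buffer comes from the new iovec_mem map, the usual per-CPU-array trick for payloads that would not fit on the 512-byte BPF stack. A reduced sketch of the scratch-buffer idiom, mirroring the map and helper added above rather than reproducing the exact Beyla source:

#define IO_VEC_MAX_LEN 512

struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    __type(key, int);
    __type(value, u8[IO_VEC_MAX_LEN * 2]);  // 2x: clamped write offset (<= IO_VEC_MAX_LEN) plus clamped chunk length
    __uint(max_entries, 1);
} iovec_mem SEC(".maps");

static __always_inline u8 *iovec_memory() {
    int zero = 0;
    return bpf_map_lookup_elem(&iovec_mem, &zero);  // one reusable buffer per CPU
}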
2 changes: 1 addition & 1 deletion pkg/internal/discover/attacher.go
@@ -119,7 +119,7 @@ func (ta *TraceAttacher) getTracer(ie *Instrumentable) (*ebpf.ProcessTracer, boo
tracerType = ebpf.Go
programs = filterNotFoundPrograms(newGoTracersGroup(ta.Cfg, ta.Metrics), ie.Offsets)
}
case svc.InstrumentableJava, svc.InstrumentableNodejs, svc.InstrumentableRuby, svc.InstrumentablePython, svc.InstrumentableDotnet, svc.InstrumentableGeneric, svc.InstrumentableRust:
case svc.InstrumentableJava, svc.InstrumentableNodejs, svc.InstrumentableRuby, svc.InstrumentablePython, svc.InstrumentableDotnet, svc.InstrumentableGeneric, svc.InstrumentableRust, svc.InstrumentablePHP:
// We are not instrumenting a Go application, we override the programs
// list with the generic kernel/socket space filters
if ta.reusableTracer != nil {