From 28e725938a19fe071d1bc81aa18584ad740119b4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Pokorn=C3=BD?=
Date: Fri, 24 May 2019 18:31:37 +0200
Subject: [PATCH] tests: ipc: check deadlock-like situation due to mixing
 priorities
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Compared to the outside world, libqb takes a rather unintuitive approach
to priorities within its native event loop (qbloop.h) -- it does not
process priorities exhaustively from high to low in a batched
(clean-the-level) manner, but rather only linearly increases the chance
that the next task to handle is picked from a higher priority level as
opposed to the lower ones.  This has the advantage of limiting the
chances of starvation and deadlock in incorrectly constructed software;
on the other hand, it means that libqb does not truthfully fulfill the
architected intentions regarding what deserves priority, so these
priorities amount to a mere hint rather than an urgency-based
separation.

Consequently, the discovery of such deadlocks is deferred to the (as
Murphy's law has it) least convenient moment, e.g., when said native
event loop is exchanged for another implementation (this time truly
priority-abiding, like GLib) while retaining the same basic notion and
high-level handling of priorities on the libqb side, in the IPC server
(service handling) context.

Hence, demonstrating such degenerate blocking is not trivial, and we
must defer to such an alternative event loop implementation.  After
this hassle, we are rewarded with a practical proof that said
"high-level handling [...] in IPC server (service handling) context"
contains a bug (which we are going to fix subsequently) -- this is
contrasted with libqb's native loop implementation, which works just
fine even prior to that fix.

Signed-off-by: Jan Pokorný
---
 tests/Makefile.am |   5 +
 tests/check_ipc.c | 585 +++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 553 insertions(+), 37 deletions(-)

diff --git a/tests/Makefile.am b/tests/Makefile.am
index d3536ff06..29b3c29fd 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -167,6 +167,11 @@ ipc_test_LDADD = $(top_builddir)/lib/libqb.la @CHECK_LIBS@
 if HAVE_FAILURE_INJECTION
 ipc_test_LDADD += _failure_injection.la
 
+if HAVE_GLIB
+ipc_test_CFLAGS += $(GLIB_CFLAGS)
+ipc_test_LDADD += $(GLIB_LIBS)
+endif
+
 check_LTLIBRARIES += _failure_injection.la
 _failure_injection_la_SOURCES = _failure_injection.c _failure_injection.h
 _failure_injection_la_LDFLAGS = -module
diff --git a/tests/check_ipc.c b/tests/check_ipc.c
index 5ccac6e7c..fb200a8ba 100644
--- a/tests/check_ipc.c
+++ b/tests/check_ipc.c
@@ -25,6 +25,12 @@
 #include
 #include
 #include
+#include
+#include
+
+#ifdef HAVE_GLIB
+#include
+#endif
 
 #include "check_common.h"
 
@@ -63,9 +69,12 @@ static const int MAX_MSG_SIZE = DEFAULT_MAX_MSG_SIZE;
  * this the largests msg we can successfully send.
*/ #define GIANT_MSG_DATA_SIZE MAX_MSG_SIZE - sizeof(struct qb_ipc_response_header) - 8 -static int enforce_server_buffer=0; +static int enforce_server_buffer; static qb_ipcc_connection_t *conn; static enum qb_ipc_type ipc_type; +static enum qb_loop_priority global_loop_prio = QB_LOOP_MED; +static bool global_use_glib; +static int global_pipefd[2]; enum my_msg_ids { IPC_MSG_REQ_TX_RX, @@ -76,12 +85,92 @@ enum my_msg_ids { IPC_MSG_RES_BULK_EVENTS, IPC_MSG_REQ_STRESS_EVENT, IPC_MSG_RES_STRESS_EVENT, + IPC_MSG_REQ_SELF_FEED, + IPC_MSG_RES_SELF_FEED, IPC_MSG_REQ_SERVER_FAIL, IPC_MSG_RES_SERVER_FAIL, IPC_MSG_REQ_SERVER_DISCONNECT, IPC_MSG_RES_SERVER_DISCONNECT, }; + +#ifdef HAVE_GLIB +/* these 2 functions from pacemaker code */ +static gint +conv_prio_libqb2glib(enum qb_loop_priority prio) +{ + gint ret = G_PRIORITY_DEFAULT; + switch (prio) { + case QB_LOOP_LOW: + ret = G_PRIORITY_LOW; + break; + case QB_LOOP_HIGH: + ret = G_PRIORITY_HIGH; + break; + default: + qb_log(LOG_DEBUG, "Invalid libqb's loop priority %d," + " assuming QB_LOOP_MED", prio); + /* fall-through */ + case QB_LOOP_MED: + break; + } + return ret; +} +static enum qb_ipcs_rate_limit +conv_libqb_prio2ratelimit(enum qb_loop_priority prio) +{ + /* this is an inversion of what libqb's qb_ipcs_request_rate_limit does */ + enum qb_ipcs_rate_limit ret = QB_IPCS_RATE_NORMAL; + switch (prio) { + case QB_LOOP_LOW: + ret = QB_IPCS_RATE_SLOW; + break; + case QB_LOOP_HIGH: + ret = QB_IPCS_RATE_FAST; + break; + default: + qb_log(LOG_DEBUG, "Invalid libqb's loop priority %d," + " assuming QB_LOOP_MED", prio); + /* fall-through */ + case QB_LOOP_MED: + break; + } + return ret; +} + +/* these 3 glue functions inspired from pacemaker, too */ +static gboolean +gio_source_prepare(GSource *source, gint *timeout) +{ + qb_enter(); + *timeout = 500; + return FALSE; +} +static gboolean +gio_source_check(GSource *source) +{ + qb_enter(); + return TRUE; +} +static gboolean +gio_source_dispatch(GSource *source, GSourceFunc callback, gpointer user_data) +{ + gboolean ret = G_SOURCE_CONTINUE; + qb_enter(); + if (callback) { + ret = callback(user_data); + } + return ret; +} +static GSourceFuncs gio_source_funcs = { + .prepare = gio_source_prepare, + .check = gio_source_check, + .dispatch = gio_source_dispatch, +}; + +#endif + + /* Test Cases * * 1) basic send & recv different message sizes @@ -148,6 +237,61 @@ set_ipc_name(const char *prefix) } } +static int +pipe_writer(int fd, int revents, void *data) { + qb_enter(); + static const char buf[8] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h' }; + + ssize_t wbytes = 0, wbytes_sum = 0; + + //for (size_t i = 0; i < SIZE_MAX; i++) { + for (size_t i = 0; i < 4096; i++) { + wbytes_sum += wbytes; + if ((wbytes = write(fd, buf, sizeof(buf))) == -1) { + if (errno != EAGAIN) { + perror("write"); + exit(-1); + } + break; + } + } + if (wbytes_sum > 0) { + qb_log(LOG_DEBUG, "written %zd bytes", wbytes_sum); + } + qb_leave(); + return 1; +} + +static int +pipe_reader(int fd, int revents, void *data) { + qb_enter(); + ssize_t rbytes, rbytes_sum = 0; + size_t cnt = SIZE_MAX; + char buf[4096] = { '\0' }; + while ((rbytes = read(fd, buf, sizeof(buf))) > 0 && rbytes < cnt) { + cnt -= rbytes; + rbytes_sum += rbytes; + } + if (rbytes_sum > 0) { + fail_if(buf[0] == '\0'); /* avoid dead store elimination */ + qb_log(LOG_DEBUG, "read %zd bytes", rbytes_sum); + sleep(1); + } + qb_leave(); + return 1; +} + +#if HAVE_GLIB +static gboolean +gio_pipe_reader(void *data) { + return (pipe_reader(*((int *) data), 0, NULL) > 0); +} +static 
gboolean +gio_pipe_writer(void *data) { + return (pipe_writer(*((int *) data), 0, NULL) > 0); +} +#endif + static int32_t s1_msg_process_fn(qb_ipcs_connection_t *c, void *data, size_t size) @@ -268,6 +412,39 @@ s1_msg_process_fn(qb_ipcs_connection_t *c, giant_event_send.hdr.id++; } + } else if (req_pt->id == IPC_MSG_REQ_SELF_FEED) { + if (pipe(global_pipefd) != 0) { + perror("pipefd"); + fail_if(1); + } + fcntl(global_pipefd[0], F_SETFL, O_NONBLOCK); + fcntl(global_pipefd[1], F_SETFL, O_NONBLOCK); + if (global_use_glib) { +#ifdef HAVE_GLIB + GSource *source_r, *source_w; + source_r = g_source_new(&gio_source_funcs, sizeof(GSource)); + source_w = g_source_new(&gio_source_funcs, sizeof(GSource)); + fail_if(source_r == NULL || source_w == NULL); + g_source_set_priority(source_r, conv_prio_libqb2glib(QB_LOOP_HIGH)); + g_source_set_priority(source_w, conv_prio_libqb2glib(QB_LOOP_HIGH)); + g_source_set_can_recurse(source_r, FALSE); + g_source_set_can_recurse(source_w, FALSE); + g_source_set_callback(source_r, gio_pipe_reader, &global_pipefd[0], NULL); + g_source_set_callback(source_w, gio_pipe_writer, &global_pipefd[1], NULL); + g_source_add_unix_fd(source_r, global_pipefd[0], G_IO_IN); + g_source_add_unix_fd(source_w, global_pipefd[1], G_IO_OUT); + g_source_attach(source_r, NULL); + g_source_attach(source_w, NULL); +#else + fail_if(1); +#endif + } else { + qb_loop_poll_add(my_loop, QB_LOOP_HIGH, global_pipefd[1], + POLLOUT|POLLERR, NULL, pipe_writer); + qb_loop_poll_add(my_loop, QB_LOOP_HIGH, global_pipefd[0], + POLLIN|POLLERR, NULL, pipe_reader); + } + } else if (req_pt->id == IPC_MSG_REQ_SERVER_FAIL) { exit(0); } else if (req_pt->id == IPC_MSG_REQ_SERVER_DISCONNECT) { @@ -305,6 +482,122 @@ my_dispatch_del(int32_t fd) return qb_loop_poll_del(my_loop, fd); } + +/* taken from examples/ipcserver.c, with s/my_g/gio/ */ +#ifdef HAVE_GLIB + +#include + +static qb_array_t *gio_map; +static GMainLoop *glib_loop; + +struct gio_to_qb_poll { + int32_t is_used; + int32_t events; + int32_t source; + int32_t fd; + void *data; + qb_ipcs_dispatch_fn_t fn; + enum qb_loop_priority p; +}; + +static gboolean +gio_read_socket(GIOChannel * gio, GIOCondition condition, gpointer data) +{ + struct gio_to_qb_poll *adaptor = (struct gio_to_qb_poll *)data; + gint fd = g_io_channel_unix_get_fd(gio); + + qb_enter(); + + return (adaptor->fn(fd, condition, adaptor->data) == 0); +} + +static void +gio_poll_destroy(gpointer data) +{ + struct gio_to_qb_poll *adaptor = (struct gio_to_qb_poll *)data; + + adaptor->is_used--; + if (adaptor->is_used == 0) { + qb_log(LOG_DEBUG, "fd %d adaptor destroyed\n", adaptor->fd); + adaptor->fd = 0; + adaptor->source = 0; + } +} + +static int32_t +gio_dispatch_update(enum qb_loop_priority p, int32_t fd, int32_t evts, + void *data, qb_ipcs_dispatch_fn_t fn, gboolean is_new) +{ + struct gio_to_qb_poll *adaptor; + GIOChannel *channel; + int32_t res = 0; + + qb_enter(); + + res = qb_array_index(gio_map, fd, (void **)&adaptor); + if (res < 0) { + return res; + } + if (adaptor->is_used && adaptor->source) { + if (is_new) { + return -EEXIST; + } + g_source_remove(adaptor->source); + adaptor->source = 0; + } + + channel = g_io_channel_unix_new(fd); + if (!channel) { + return -ENOMEM; + } + + adaptor->fn = fn; + adaptor->events = evts; + adaptor->data = data; + adaptor->p = p; + adaptor->is_used++; + adaptor->fd = fd; + + adaptor->source = g_io_add_watch_full(channel, conv_prio_libqb2glib(p), + evts, gio_read_socket, adaptor, + gio_poll_destroy); + + /* we are handing the channel off to be managed by 
mainloop now. + * remove our reference. */ + g_io_channel_unref(channel); + + return 0; +} + +static int32_t +gio_dispatch_add(enum qb_loop_priority p, int32_t fd, int32_t evts, + void *data, qb_ipcs_dispatch_fn_t fn) +{ + return gio_dispatch_update(p, fd, evts, data, fn, TRUE); +} + +static int32_t +gio_dispatch_mod(enum qb_loop_priority p, int32_t fd, int32_t evts, + void *data, qb_ipcs_dispatch_fn_t fn) +{ + return gio_dispatch_update(p, fd, evts, data, fn, FALSE); +} + +static int32_t +gio_dispatch_del(int32_t fd) +{ + struct gio_to_qb_poll *adaptor; + if (qb_array_index(gio_map, fd, (void **)&adaptor) == 0) { + g_source_remove(adaptor->source); + adaptor->source = 0; + } + return 0; +} + +#endif /* HAVE_GLIB */ + + static int32_t s1_connection_closed(qb_ipcs_connection_t *c) { @@ -416,13 +709,13 @@ READY_SIGNALLER(usr1_signaller, parent_target) kill(*((pid_t *) parent_target), SIGUSR1); } -#define NEW_PROCESS_RUNNER(name, ready_signaller_arg, signaller_data_arg) \ +#define NEW_PROCESS_RUNNER(name, ready_signaller_arg, signaller_data_arg, data_arg) \ void (name)(ready_signaller_fn ready_signaller_arg, \ - void *signaller_data_arg) -typedef NEW_PROCESS_RUNNER(new_process_runner_fn, , ); + void *signaller_data_arg, void *data_arg) +typedef NEW_PROCESS_RUNNER(new_process_runner_fn, , , ); static -NEW_PROCESS_RUNNER(run_ipc_server, ready_signaller, signaller_data) +NEW_PROCESS_RUNNER(run_ipc_server, ready_signaller, signaller_data, data) { int32_t res; qb_loop_signal_handle handle; @@ -435,12 +728,7 @@ NEW_PROCESS_RUNNER(run_ipc_server, ready_signaller, signaller_data) .connection_closed = s1_connection_closed, }; - struct qb_ipcs_poll_handlers ph = { - .job_add = my_job_add, - .dispatch_add = my_dispatch_add, - .dispatch_mod = my_dispatch_mod, - .dispatch_del = my_dispatch_del, - }; + struct qb_ipcs_poll_handlers ph; uint32_t max_size = MAX_MSG_SIZE; my_loop = qb_loop_create(); @@ -451,6 +739,33 @@ NEW_PROCESS_RUNNER(run_ipc_server, ready_signaller, signaller_data) s1 = qb_ipcs_create(ipc_name, 4, ipc_type, &sh); fail_if(s1 == 0); + if (global_loop_prio != QB_LOOP_MED) { + qb_ipcs_request_rate_limit(s1, + conv_libqb_prio2ratelimit(global_loop_prio)); + } + if (global_use_glib) { +#ifdef HAVE_GLIB + ph = (struct qb_ipcs_poll_handlers) { + .job_add = NULL, + .dispatch_add = gio_dispatch_add, + .dispatch_mod = gio_dispatch_mod, + .dispatch_del = gio_dispatch_del, + }; + glib_loop = g_main_loop_new(NULL, FALSE); + gio_map = qb_array_create_2(16, sizeof(struct gio_to_qb_poll), 1); + fail_if (gio_map == NULL); +#else + fail_if(1); +#endif + } else { + ph = (struct qb_ipcs_poll_handlers) { + .job_add = my_job_add, + .dispatch_add = my_dispatch_add, + .dispatch_mod = my_dispatch_mod, + .dispatch_del = my_dispatch_del, + }; + } + if (enforce_server_buffer) { qb_ipcs_enforce_buffer_size(s1, max_size); } @@ -463,13 +778,20 @@ NEW_PROCESS_RUNNER(run_ipc_server, ready_signaller, signaller_data) ready_signaller(signaller_data); } - qb_loop_run(my_loop); + if (global_use_glib) { +#ifdef HAVE_GLIB + g_main_loop_run(glib_loop); +#endif + } else { + qb_loop_run(my_loop); + } qb_log(LOG_DEBUG, "loop finished - done ..."); } static pid_t run_function_in_new_process(const char *role, - new_process_runner_fn new_process_runner) + new_process_runner_fn new_process_runner, + void *data) { char formatbuf[1024]; pid_t parent_target, pid1, pid2; @@ -507,7 +829,7 @@ run_function_in_new_process(const char *role, qb_log_format_set(QB_LOG_STDERR, formatbuf); } - new_process_runner(usr1_signaller, &parent_target); + 
new_process_runner(usr1_signaller, &parent_target, data); exit(0); } else { waitpid(pid2, NULL, 0); @@ -675,7 +997,7 @@ test_ipc_txrx_timeout(void) pid_t pid; uint32_t max_size = MAX_MSG_SIZE; - pid = run_function_in_new_process("server", run_ipc_server); + pid = run_function_in_new_process("server", run_ipc_server, NULL); fail_if(pid == -1); do { @@ -723,7 +1045,7 @@ test_ipc_txrx(void) pid_t pid; uint32_t max_size = MAX_MSG_SIZE; - pid = run_function_in_new_process("server", run_ipc_server); + pid = run_function_in_new_process("server", run_ipc_server, NULL); fail_if(pid == -1); do { @@ -773,7 +1095,7 @@ test_ipc_exit(void) pid_t pid; uint32_t max_size = MAX_MSG_SIZE; - pid = run_function_in_new_process("server", run_ipc_server); + pid = run_function_in_new_process("server", run_ipc_server, NULL); fail_if(pid == -1); do { @@ -926,14 +1248,21 @@ struct my_res { char message[1024 * 1024]; }; +struct dispatch_data { + pid_t server_pid; + enum my_msg_ids msg_type; + uint32_t repetitions; +}; + static inline -NEW_PROCESS_RUNNER(client_dispatch, ready_signaller, signaller_data) +NEW_PROCESS_RUNNER(client_dispatch, ready_signaller, signaller_data, data) { uint32_t max_size = MAX_MSG_SIZE; int32_t size; int32_t c = 0; int32_t j; - pid_t server_pid = *((pid_t *) signaller_data); + pid_t server_pid = ((struct dispatch_data *) data)->server_pid; + enum my_msg_ids msg_type = ((struct dispatch_data *) data)->msg_type; do { conn = qb_ipcc_connect(ipc_name, max_size); @@ -951,13 +1280,17 @@ NEW_PROCESS_RUNNER(client_dispatch, ready_signaller, signaller_data) } size = QB_MIN(sizeof(struct qb_ipc_request_header), 64); - for (j = 1; j < 19; j++) { - size *= 2; - if (size >= max_size) - break; - if (send_and_check(IPC_MSG_REQ_DISPATCH, size, - recv_timeout, QB_TRUE) < 0) { - break; + + for (uint32_t r = ((struct dispatch_data *) data)->repetitions; + r > 0; r--) { + for (j = 1; j < 19; j++) { + size *= 2; + if (size >= max_size) + break; + if (send_and_check(msg_type, size, + recv_timeout, QB_TRUE) < 0) { + break; + } } } } @@ -966,11 +1299,15 @@ static void test_ipc_dispatch(void) { pid_t pid; + struct dispatch_data data; - pid = run_function_in_new_process(NULL, run_ipc_server); + pid = run_function_in_new_process(NULL, run_ipc_server, NULL); fail_if(pid == -1); + data = (struct dispatch_data){.server_pid = pid, + .msg_type = IPC_MSG_REQ_DISPATCH, + .repetitions = 1}; - client_dispatch(NULL, (void *) &pid); + client_dispatch(NULL, NULL, (void *) &data); request_server_exit(); qb_ipcc_disconnect(conn); @@ -1074,7 +1411,7 @@ test_ipc_stress_connections(void) QB_LOG_FILTER_FILE, "*", LOG_INFO); qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_ENABLED, QB_TRUE); - pid = run_function_in_new_process("server", run_ipc_server); + pid = run_function_in_new_process("server", run_ipc_server, NULL); fail_if(pid == -1); for (connections = 1; connections < 70000; connections++) { @@ -1121,7 +1458,7 @@ test_ipc_bulk_events(void) int32_t fd; uint32_t max_size = MAX_MSG_SIZE; - pid = run_function_in_new_process("server", run_ipc_server); + pid = run_function_in_new_process("server", run_ipc_server, NULL); fail_if(pid == -1); do { @@ -1185,7 +1522,7 @@ test_ipc_stress_test(void) int32_t real_buf_size; enforce_server_buffer = 1; - pid = run_function_in_new_process("server", run_ipc_server); + pid = run_function_in_new_process("server", run_ipc_server, NULL); enforce_server_buffer = 0; fail_if(pid == -1); @@ -1266,7 +1603,6 @@ END_TEST START_TEST(test_ipc_bulk_events_us) { qb_enter(); - send_event_on_created = QB_FALSE; ipc_type = 
QB_IPC_SOCKET; set_ipc_name(__func__); test_ipc_bulk_events(); @@ -1274,6 +1610,93 @@ START_TEST(test_ipc_bulk_events_us) } END_TEST +static +READY_SIGNALLER(connected_signaller, _) +{ + request_server_exit(); +} + +START_TEST(test_ipc_dispatch_us_native_prio_deadlock_provoke) +{ + pid_t server_pid, alphaclient_pid; + struct dispatch_data data; + + qb_enter(); + ipc_type = QB_IPC_SOCKET; + set_ipc_name(__func__); + + /* this is to demonstrate that native event loop can deal even + with "extreme" priority disproportions */ + global_loop_prio = QB_LOOP_LOW; + multiple_connections = QB_TRUE; + recv_timeout = -1; + + server_pid = run_function_in_new_process("server", run_ipc_server, + NULL); + fail_if(server_pid == -1); + data = (struct dispatch_data){.server_pid = server_pid, + .msg_type = IPC_MSG_REQ_SELF_FEED, + .repetitions = 1}; + alphaclient_pid = run_function_in_new_process("alphaclient", + client_dispatch, + (void *) &data); + fail_if(alphaclient_pid == -1); + + //sleep(1); + sched_yield(); + + data.repetitions = 0; + client_dispatch(connected_signaller, NULL, (void *) &data); + verify_graceful_stop(server_pid); + + multiple_connections = QB_FALSE; + qb_leave(); +} +END_TEST + +#if HAVE_GLIB +START_TEST(test_ipc_dispatch_us_glib_prio_deadlock_provoke) +{ + pid_t server_pid, alphaclient_pid; + struct dispatch_data data; + + qb_enter(); + ipc_type = QB_IPC_SOCKET; + set_ipc_name(__func__); + + global_use_glib = QB_TRUE; + /* this is to make the test pass at all, since GLib is strict + on priorities -- QB_LOOP_MED or lower would fail for sure */ + global_loop_prio = QB_LOOP_HIGH; + multiple_connections = QB_TRUE; + recv_timeout = -1; + + server_pid = run_function_in_new_process("server", run_ipc_server, + NULL); + fail_if(server_pid == -1); + data = (struct dispatch_data){.server_pid = server_pid, + .msg_type = IPC_MSG_REQ_SELF_FEED, + .repetitions = 1}; + alphaclient_pid = run_function_in_new_process("alphaclient", + client_dispatch, + (void *) &data); + fail_if(alphaclient_pid == -1); + + //sleep(1); + sched_yield(); + + data.repetitions = 0; + client_dispatch(connected_signaller, NULL, (void *) &data); + verify_graceful_stop(server_pid); + + multiple_connections = QB_FALSE; + global_loop_prio = QB_LOOP_MED; + global_use_glib = QB_FALSE; + qb_leave(); +} +END_TEST +#endif + static void test_ipc_event_on_created(void) { @@ -1287,7 +1710,7 @@ test_ipc_event_on_created(void) num_bulk_events = 1; - pid = run_function_in_new_process("server", run_ipc_server); + pid = run_function_in_new_process("server", run_ipc_server, NULL); fail_if(pid == -1); do { @@ -1341,7 +1764,7 @@ test_ipc_disconnect_after_created(void) int32_t res; uint32_t max_size = MAX_MSG_SIZE; - pid = run_function_in_new_process("server", run_ipc_server); + pid = run_function_in_new_process("server", run_ipc_server, NULL); fail_if(pid == -1); do { @@ -1398,7 +1821,7 @@ test_ipc_server_fail(void) pid_t pid; uint32_t max_size = MAX_MSG_SIZE; - pid = run_function_in_new_process("server", run_ipc_server); + pid = run_function_in_new_process("server", run_ipc_server, NULL); fail_if(pid == -1); do { @@ -1465,6 +1888,87 @@ START_TEST(test_ipc_stress_connections_shm) } END_TEST +START_TEST(test_ipc_dispatch_shm_native_prio_deadlock_provoke) +{ + pid_t server_pid, alphaclient_pid; + struct dispatch_data data; + + qb_enter(); + ipc_type = QB_IPC_SHM; + set_ipc_name(__func__); + + /* this is to demonstrate that native event loop can deal even + with "extreme" priority disproportions */ + global_loop_prio = QB_LOOP_LOW; + 
multiple_connections = QB_TRUE;
+	recv_timeout = -1;
+
+	server_pid = run_function_in_new_process("server", run_ipc_server,
+	                                         NULL);
+	fail_if(server_pid == -1);
+	data = (struct dispatch_data){.server_pid = server_pid,
+	                              .msg_type = IPC_MSG_REQ_SELF_FEED,
+	                              .repetitions = 1};
+	alphaclient_pid = run_function_in_new_process("alphaclient",
+	                                              client_dispatch,
+	                                              (void *) &data);
+	fail_if(alphaclient_pid == -1);
+
+	//sleep(1);
+	sched_yield();
+
+	data.repetitions = 0;
+	client_dispatch(connected_signaller, NULL, (void *) &data);
+	verify_graceful_stop(server_pid);
+
+	multiple_connections = QB_FALSE;
+	qb_leave();
+}
+END_TEST
+
+#if HAVE_GLIB
+START_TEST(test_ipc_dispatch_shm_glib_prio_deadlock_provoke)
+{
+	pid_t server_pid, alphaclient_pid;
+	struct dispatch_data data;
+
+	qb_enter();
+	ipc_type = QB_IPC_SHM;
+	set_ipc_name(__func__);
+
+	global_use_glib = QB_TRUE;
+	/* this is to make the test pass at all, since GLib is strict
+	   on priorities -- QB_LOOP_MED or lower would fail for sure */
+	global_loop_prio = QB_LOOP_HIGH;
+	multiple_connections = QB_TRUE;
+	recv_timeout = -1;
+
+	server_pid = run_function_in_new_process("server", run_ipc_server,
+	                                         NULL);
+	fail_if(server_pid == -1);
+	data = (struct dispatch_data){.server_pid = server_pid,
+	                              .msg_type = IPC_MSG_REQ_SELF_FEED,
+	                              .repetitions = 1};
+	alphaclient_pid = run_function_in_new_process("alphaclient",
+	                                              client_dispatch,
+	                                              (void *) &data);
+	fail_if(alphaclient_pid == -1);
+
+	//sleep(1);
+	sched_yield();
+
+	data.repetitions = 0;
+	client_dispatch(connected_signaller, NULL, (void *) &data);
+	verify_graceful_stop(server_pid);
+
+	multiple_connections = QB_FALSE;
+	global_loop_prio = QB_LOOP_MED;
+	global_use_glib = QB_FALSE;
+	qb_leave();
+}
+END_TEST
+#endif
+
 START_TEST(test_ipc_bulk_events_shm)
 {
 	qb_enter();
@@ -1532,7 +2036,7 @@ test_ipc_service_ref_count(void)
 
 	reference_count_test = QB_TRUE;
 
-	pid = run_function_in_new_process("server", run_ipc_server);
+	pid = run_function_in_new_process("server", run_ipc_server, NULL);
 	fail_if(pid == -1);
 
 	do {
@@ -1636,7 +2140,10 @@ make_shm_suite(void)
 	add_tcase(s, tc, test_ipc_event_on_created_shm, 9);
 	add_tcase(s, tc, test_ipc_service_ref_count_shm, 9);
 	add_tcase(s, tc, test_ipc_stress_connections_shm, 3600 /* ? */);
-
+	add_tcase(s, tc, test_ipc_dispatch_shm_native_prio_deadlock_provoke, 15);
+#if HAVE_GLIB
+	add_tcase(s, tc, test_ipc_dispatch_shm_glib_prio_deadlock_provoke, 15);
+#endif
 #ifdef HAVE_FAILURE_INJECTION
 	add_tcase(s, tc, test_ipcc_truncate_when_unlink_fails_shm, 8);
 #endif
@@ -1668,6 +2175,10 @@ make_soc_suite(void)
 	add_tcase(s, tc, test_ipc_disconnect_after_created_us, 9);
 	add_tcase(s, tc, test_ipc_service_ref_count_us, 9);
 	add_tcase(s, tc, test_ipc_stress_connections_us, 3600 /* ? */);
+	add_tcase(s, tc, test_ipc_dispatch_us_native_prio_deadlock_provoke, 15);
+#if HAVE_GLIB
+	add_tcase(s, tc, test_ipc_dispatch_us_glib_prio_deadlock_provoke, 15);
+#endif
 
 	return s;
 }
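
Appendix (editor's illustration, not part of the patch): the commit message contrasts strict, batched high-to-low dispatching, as GLib's main loop performs, with libqb's native weighted pick that merely favours higher priorities. The stand-alone sketch below models that difference; it is illustrative only, not libqb internals, and the PRIO_* names and the 4/2/1 weights are assumptions made up for the demonstration. A permanently ready high-priority source, analogous to the self-feeding pipe pair installed by IPC_MSG_REQ_SELF_FEED above, starves the low-priority level entirely under the strict scheme, while the weighted scheme still serves it occasionally.

/* Illustrative model only -- not libqb's actual implementation. */
#include <stdio.h>
#include <stdlib.h>

enum prio { PRIO_HIGH, PRIO_MED, PRIO_LOW, PRIO_COUNT };

/* strict: always drain the highest non-empty level first (GLib-like) */
static enum prio
pick_strict(const int ready[PRIO_COUNT])
{
	for (int p = PRIO_HIGH; p < PRIO_COUNT; p++) {
		if (ready[p]) {
			return p;
		}
	}
	return PRIO_LOW;
}

/* weighted: higher levels only get proportionally more turns (libqb-like);
 * the 4/2/1 ratios are assumed for illustration */
static enum prio
pick_weighted(const int ready[PRIO_COUNT])
{
	static const int weight[PRIO_COUNT] = { 4, 2, 1 };
	int total = 0, roll;

	for (int p = 0; p < PRIO_COUNT; p++) {
		total += ready[p] ? weight[p] : 0;
	}
	if (total == 0) {
		return PRIO_LOW;
	}
	roll = rand() % total;
	for (int p = 0; p < PRIO_COUNT; p++) {
		if (!ready[p]) {
			continue;
		}
		if ((roll -= weight[p]) < 0) {
			return p;
		}
	}
	return PRIO_LOW;
}

int
main(void)
{
	/* a self-feeding high-priority source is always ready; the
	 * low-priority source stands in for the IPC service handling */
	int ready[PRIO_COUNT] = { 1, 0, 1 };
	int low_strict = 0, low_weighted = 0;

	for (int i = 0; i < 1000; i++) {
		low_strict += (pick_strict(ready) == PRIO_LOW);
		low_weighted += (pick_weighted(ready) == PRIO_LOW);
	}
	printf("low-priority turns out of 1000: strict=%d weighted=%d\n",
	       low_strict, low_weighted);
	return 0;
}

With these fixed inputs the strict variant reports 0 low-priority turns, whereas the weighted variant reports roughly 200 out of 1000, which mirrors why only the GLib-driven variants of the new tests can exhibit the server-side starvation prior to the follow-up fix, while the native-loop variants pass even without it.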