diff --git a/include/urcu/rculfhash.h b/include/urcu/rculfhash.h
index 69a251c2..fd13877a 100644
--- a/include/urcu/rculfhash.h
+++ b/include/urcu/rculfhash.h
@@ -128,7 +128,7 @@ enum {
 struct cds_lfht_mm_type {
 	struct cds_lfht *(*alloc_cds_lfht)(unsigned long min_nr_alloc_buckets,
 			unsigned long max_nr_buckets, const struct cds_lfht_alloc *alloc);
-	void (*alloc_bucket_table)(struct cds_lfht *ht, unsigned long order);
+	int (*alloc_bucket_table)(struct cds_lfht *ht, unsigned long order);
 	void (*free_bucket_table)(struct cds_lfht *ht, unsigned long order);
 	struct cds_lfht_node *(*bucket_at)(struct cds_lfht *ht,
 			unsigned long index);
diff --git a/src/rculfhash-internal.h b/src/rculfhash-internal.h
index 7225ec99..2cccc975 100644
--- a/src/rculfhash-internal.h
+++ b/src/rculfhash-internal.h
@@ -162,7 +162,8 @@ struct cds_lfht *__default_alloc_cds_lfht(
 	struct cds_lfht *ht;
 
 	ht = alloc->calloc(alloc->state, 1, cds_lfht_size);
-	urcu_posix_assert(ht);
+	if (ht == NULL)
+		return NULL;
 
 	ht->mm = mm;
 	ht->alloc = alloc;
diff --git a/src/rculfhash-mm-chunk.c b/src/rculfhash-mm-chunk.c
index 93931ee7..2b754262 100644
--- a/src/rculfhash-mm-chunk.c
+++ b/src/rculfhash-mm-chunk.c
@@ -11,22 +11,32 @@
 #include "rculfhash-internal.h"
 
 static
-void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
+int cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
 {
 	if (order == 0) {
 		ht->tbl_chunk[0] = ht->alloc->calloc(ht->alloc->state,
 			ht->min_nr_alloc_buckets, sizeof(struct cds_lfht_node));
-		urcu_posix_assert(ht->tbl_chunk[0]);
+		if (ht->tbl_chunk[0] == NULL)
+			return -1;
+
 	} else if (order > ht->min_alloc_buckets_order) {
 		unsigned long i, len = 1UL << (order - 1 - ht->min_alloc_buckets_order);
 
 		for (i = len; i < 2 * len; i++) {
+			urcu_posix_assert(i < ht->max_nr_buckets / ht->min_nr_alloc_buckets);
 			ht->tbl_chunk[i] = ht->alloc->calloc(ht->alloc->state,
 				ht->min_nr_alloc_buckets, sizeof(struct cds_lfht_node));
-			urcu_posix_assert(ht->tbl_chunk[i]);
+			if (ht->tbl_chunk[i] == NULL) {
+				while (i != len) {
+					--i;
+					poison_free(ht->alloc, ht->tbl_chunk[i]);
+				}
+				return -1;
+			}
 		}
 	}
 	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+	return 0;
 }
 
 /*
@@ -69,7 +79,6 @@ struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
 	nr_chunks = max_nr_buckets / min_nr_alloc_buckets;
 	cds_lfht_size = offsetof(struct cds_lfht, tbl_chunk) +
 			sizeof(struct cds_lfht_node *) * nr_chunks;
-	cds_lfht_size = max(cds_lfht_size, sizeof(struct cds_lfht));
 
 	return __default_alloc_cds_lfht(
 			&cds_lfht_mm_chunk, alloc, cds_lfht_size,
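
An editorial aside on the chunk hunk above: when the i-th chunk allocation fails partway through an order, the loop unwinds every chunk it already allocated before returning -1, so a failed grow leaves the table exactly as it found it. A minimal standalone sketch of that all-or-nothing idiom, using plain calloc()/free() instead of the ht allocator (names are illustrative, not part of the patch):

    #include <stdlib.h>

    /* Allocate n blocks of sz bytes; on partial failure, unwind and fail. */
    static int alloc_all_or_nothing(void **blocks, size_t n, size_t sz)
    {
        size_t i;

        for (i = 0; i < n; i++) {
            blocks[i] = calloc(1, sz);
            if (blocks[i] == NULL) {
                while (i != 0) {        /* free in reverse, as the patch does */
                    --i;
                    free(blocks[i]);
                }
                return -1;
            }
        }
        return 0;
    }
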
diff --git a/src/rculfhash-mm-mmap.c b/src/rculfhash-mm-mmap.c
index 2b4bc421..641aa19c 100644
--- a/src/rculfhash-mm-mmap.c
+++ b/src/rculfhash-mm-mmap.c
@@ -46,10 +46,6 @@ void *memory_map(size_t length)
 	void *ret;
 
 	ret = mmap(NULL, length, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-	if (ret == MAP_FAILED) {
-		perror("mmap");
-		abort();
-	}
 	return ret;
 }
 
@@ -65,12 +61,12 @@ void memory_unmap(void *ptr, size_t length)
 #ifdef __CYGWIN__
 /* Set protection to read/write to allocate a memory chunk */
 static
-void memory_populate(void *ptr, size_t length)
+int memory_populate(void *ptr, size_t length)
 {
 	if (mprotect(ptr, length, PROT_READ | PROT_WRITE)) {
-		perror("mprotect");
-		abort();
+		return -1;
 	}
+	return 0;
 }
 
 /* Set protection to none to deallocate a memory chunk */
@@ -86,14 +82,16 @@ void memory_discard(void *ptr, size_t length)
 #else /* __CYGWIN__ */
 
 static
-void memory_populate(void *ptr, size_t length)
+int memory_populate(void *ptr, size_t length)
 {
 	if (mmap(ptr, length, PROT_READ | PROT_WRITE,
 			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
 			-1, 0) != ptr) {
-		perror("mmap");
-		abort();
+		/* ENOMEM on freebsd */
+		urcu_posix_assert(errno != EINVAL);
+		return -1;
 	}
+	return 0;
 }
 
 /*
@@ -106,37 +104,50 @@ void memory_discard(void *ptr, size_t length)
 	if (mmap(ptr, length, PROT_NONE,
 			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
 			-1, 0) != ptr) {
-		perror("mmap");
+		/* ENOMEM on freebsd */
+		urcu_posix_assert(errno != EINVAL);
+#if 0
+		perror("mmap discard");
 		abort();
+#endif
 	}
 }
 
 #endif /* __CYGWIN__ */
 
 static
-void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
+int cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
 {
 	if (order == 0) {
 		if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
 			/* small table */
 			ht->tbl_mmap = ht->alloc->calloc(ht->alloc->state,
 					ht->max_nr_buckets, sizeof(*ht->tbl_mmap));
-			urcu_posix_assert(ht->tbl_mmap);
-			return;
+			if (ht->tbl_mmap == NULL)
+				return -1;
+			return 0;
 		}
 		/* large table */
 		ht->tbl_mmap = memory_map(ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
-		memory_populate(ht->tbl_mmap,
-			ht->min_nr_alloc_buckets * sizeof(*ht->tbl_mmap));
+		if (ht->tbl_mmap == MAP_FAILED)
+			return -1;
+		if (memory_populate(ht->tbl_mmap, ht->min_nr_alloc_buckets
+				* sizeof(*ht->tbl_mmap))) {
+			memory_unmap(ht->tbl_mmap,
+				ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
+			return -1;
+		}
 	} else if (order > ht->min_alloc_buckets_order) {
 		/* large table */
 		unsigned long len = 1UL << (order - 1);
 
 		urcu_posix_assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
-		memory_populate(ht->tbl_mmap + len,
-			len * sizeof(*ht->tbl_mmap));
+		if (memory_populate(ht->tbl_mmap + len,
+				len * sizeof(*ht->tbl_mmap)))
+			return -1;
 	}
 	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+	return 0;
 }
 
 /*
@@ -176,7 +187,7 @@ static
 struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
 		unsigned long max_nr_buckets, const struct cds_lfht_alloc *alloc)
 {
-	unsigned long page_bucket_size;
+	unsigned long page_bucket_size, cds_lfht_size;
 
 	page_bucket_size = getpagesize() / sizeof(struct cds_lfht_node);
 	if (max_nr_buckets <= page_bucket_size) {
@@ -187,9 +198,11 @@ struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
 		min_nr_alloc_buckets = max(min_nr_alloc_buckets,
 				page_bucket_size);
 	}
 
+	cds_lfht_size = offsetof(struct cds_lfht, tbl_mmap) +
+			sizeof(struct cds_lfht_node *);
 	return __default_alloc_cds_lfht(
-		&cds_lfht_mm_mmap, alloc, sizeof(struct cds_lfht),
+		&cds_lfht_mm_mmap, alloc, cds_lfht_size,
 		min_nr_alloc_buckets, max_nr_buckets);
 }
 
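
Aside: this backend reserves the worst-case table up front with PROT_NONE (address space only) and commits pages later, which is why memory_populate() is where ENOMEM can legitimately appear and must now propagate instead of aborting. A self-contained sketch of the reserve-then-commit idea, using mprotect() as on the Cygwin branch (the non-Cygwin branch re-mmaps with MAP_FIXED instead); names are illustrative:

    #include <sys/mman.h>

    /* Reserve 'total' bytes of address space without committing memory. */
    static void *reserve(size_t total)
    {
        void *p = mmap(NULL, total, PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
    }

    /* Commit the first 'used' bytes; may fail under memory pressure. */
    static int commit(void *p, size_t used)
    {
        return mprotect(p, used, PROT_READ | PROT_WRITE) ? -1 : 0;
    }
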
diff --git a/src/rculfhash-mm-order.c b/src/rculfhash-mm-order.c
index 2b0f707e..4e589443 100644
--- a/src/rculfhash-mm-order.c
+++ b/src/rculfhash-mm-order.c
@@ -11,18 +11,24 @@
 #include "rculfhash-internal.h"
 
 static
-void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
+int cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
 {
+	urcu_posix_assert(order <=
+		(unsigned)cds_lfht_get_count_order_ulong(ht->max_nr_buckets));
 	if (order == 0) {
 		ht->tbl_order[0] = ht->alloc->calloc(ht->alloc->state,
 			ht->min_nr_alloc_buckets, sizeof(struct cds_lfht_node));
-		urcu_posix_assert(ht->tbl_order[0]);
+		if (ht->tbl_order[0] == NULL)
+			return -1;
+
 	} else if (order > ht->min_alloc_buckets_order) {
 		ht->tbl_order[order] = ht->alloc->calloc(ht->alloc->state,
 			1UL << (order -1), sizeof(struct cds_lfht_node));
-		urcu_posix_assert(ht->tbl_order[order]);
+		if (ht->tbl_order[order] == NULL)
+			return -1;
 	}
 	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+	return 0;
 }
 
 /*
@@ -64,8 +70,14 @@ static
 struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
 		unsigned long max_nr_buckets, const struct cds_lfht_alloc *alloc)
 {
+	unsigned long max_order, cds_lfht_size;
+
+	max_order = cds_lfht_get_count_order_ulong(max_nr_buckets);
+	cds_lfht_size = offsetof(struct cds_lfht, tbl_order) +
+			sizeof(struct cds_lfht_node *) * (max_order + 1);
+
 	return __default_alloc_cds_lfht(
-		&cds_lfht_mm_order, alloc, sizeof(struct cds_lfht),
+		&cds_lfht_mm_order, alloc, cds_lfht_size,
 		min_nr_alloc_buckets, max_nr_buckets);
 }
 
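
Aside: with these hunks, all three backends size the cds_lfht allocation as offsetof() of the trailing table member plus exactly the slots they need, which is also why mm-chunk.c could drop its max(..., sizeof(struct cds_lfht)) clamp — the tbl_* array is the last member, so offsetof() already accounts for everything before it. The general trailing-array sizing pattern looks like this (hypothetical struct, not from the patch):

    #include <stddef.h>
    #include <stdlib.h>

    struct table {
        unsigned long len;
        void *slot[64];         /* worst case; often mostly unused */
    };

    /* Allocate with only 'n' trailing slots actually backed by memory. */
    static struct table *table_new(size_t n)
    {
        return calloc(1, offsetof(struct table, slot) +
                n * sizeof(void *));
    }
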
diff --git a/src/rculfhash.c b/src/rculfhash.c
index 10f5b8ed..ed493b43 100644
--- a/src/rculfhash.c
+++ b/src/rculfhash.c
@@ -696,7 +696,7 @@ static void ht_init_nr_cpus_mask(void)
 }
 
 static
-void alloc_split_items_count(struct cds_lfht *ht)
+int alloc_split_items_count(struct cds_lfht *ht)
 {
 	if (nr_cpus_mask == NR_CPUS_MASK_UNINITIALIZED) {
 		ht_init_nr_cpus_mask();
@@ -713,10 +713,12 @@ void alloc_split_items_count(struct cds_lfht *ht)
 	if (ht->flags & CDS_LFHT_ACCOUNTING) {
 		ht->split_count = ht->alloc->calloc(ht->alloc->state, split_count_mask + 1,
 				sizeof(struct ht_items_count));
-		urcu_posix_assert(ht->split_count);
+		if (ht->split_count == NULL)
+			return -1;
 	} else {
 		ht->split_count = NULL;
 	}
+	return 0;
 }
 
 static
@@ -925,7 +927,7 @@ unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
 }
 
 static
-void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
+int cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
 {
 	return ht->mm->alloc_bucket_table(ht, order);
 }
@@ -1400,7 +1402,7 @@ void init_table_populate(struct cds_lfht *ht, unsigned long i,
 }
 
 static
-void init_table(struct cds_lfht *ht,
+int init_table(struct cds_lfht *ht,
 		unsigned long first_order, unsigned long last_order)
 {
 	unsigned long i;
@@ -1418,7 +1420,12 @@ void init_table(struct cds_lfht *ht,
 		if (CMM_LOAD_SHARED(ht->resize_target) < (1UL << i))
 			break;
 
-		cds_lfht_alloc_bucket_table(ht, i);
+		/* Also stop if out of memory */
+		if (cds_lfht_alloc_bucket_table(ht, i)) {
+			dbg_printf("cannot grow %ld to %ld\n",
+				first_order, last_order);
+			return -1;
+		}
 
 		/*
 		 * Set all bucket nodes reverse hash values for a level and
@@ -1437,6 +1444,7 @@ void init_table(struct cds_lfht *ht,
 		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
 			break;
 	}
+	return 0;
 }
 
 /*
@@ -1561,13 +1569,14 @@ void fini_table(struct cds_lfht *ht,
  * Never called with size < 1.
  */
 static
-void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
+int cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
 {
 	struct cds_lfht_node *prev, *node;
 	unsigned long order, len, i;
 	int bucket_order;
 
-	cds_lfht_alloc_bucket_table(ht, 0);
+	if (cds_lfht_alloc_bucket_table(ht, 0))
+		return -1;
 
 	dbg_printf("create bucket: order 0 index 0 hash 0\n");
 	node = bucket_at(ht, 0);
@@ -1579,7 +1588,15 @@ void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
 
 	for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
 		len = 1UL << (order - 1);
-		cds_lfht_alloc_bucket_table(ht, order);
+
+		if (cds_lfht_alloc_bucket_table(ht, order)) {
+			/* destroy down to 0 */
+			do {
+				--order;
+				cds_lfht_free_bucket_table(ht, order);
+			} while (order);
+			return -1;
+		}
 
 		for (i = 0; i < len; i++) {
 			/*
@@ -1606,6 +1623,7 @@ void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
 			prev->next = flag_bucket(node);
 		}
 	}
+	return 0;
 }
 
 #if (CAA_BITS_PER_LONG > 32)
@@ -1684,8 +1702,9 @@ struct cds_lfht *_cds_lfht_new_with_alloc(unsigned long init_size,
 	init_size = min(init_size, max_nr_buckets);
 
 	ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets, alloc ? : &cds_lfht_default_alloc);
+	if (ht == NULL)
+		return NULL;
 
-	urcu_posix_assert(ht);
 	urcu_posix_assert(ht->mm == mm);
 	urcu_posix_assert(ht->bucket_at == mm->bucket_at);
 
@@ -1694,14 +1713,25 @@ struct cds_lfht *_cds_lfht_new_with_alloc(unsigned long init_size,
 	ht->caller_resize_attr = attr;
 	if (attr)
 		ht->resize_attr = *attr;
-	alloc_split_items_count(ht);
 	/* this mutex should not nest in read-side C.S. */
-	pthread_mutex_init(&ht->resize_mutex, NULL);
+	if (pthread_mutex_init(&ht->resize_mutex, NULL))
+		goto fail;
+	if (alloc_split_items_count(ht))
+		goto fail1;
 	order = cds_lfht_get_count_order_ulong(init_size);
 	ht->resize_target = 1UL << order;
-	cds_lfht_create_bucket(ht, 1UL << order);
+	if (cds_lfht_create_bucket(ht, 1UL << order))
+		goto fail2;
 	ht->size = 1UL << order;
 	return ht;
+
+fail2:
+	free_split_items_count(ht);
+fail1:
+	pthread_mutex_destroy(&ht->resize_mutex);
+fail:
+	poison_free(ht->alloc, ht);
+	return NULL;
 }
 
 struct cds_lfht *_cds_lfht_new(unsigned long init_size,
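
The net effect of the hunks above is that table creation can now fail cleanly: each partially-initialized resource is released in reverse order through the fail* labels and NULL is returned instead of aborting. Callers are therefore expected to check the result, for example (a usage sketch; flags as in the usual cds_lfht examples):

    struct cds_lfht *ht;

    ht = cds_lfht_new(1, 1, 0,
            CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
    if (!ht)
        return -ENOMEM;     /* allocation failed; nothing to clean up */
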
@@ -1724,6 +1754,7 @@ void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
 	struct cds_lfht_node *node, *next, *bucket;
 	unsigned long reverse_hash, size;
 
+	urcu_posix_assert(ht);
 	cds_lfht_iter_debug_set_ht(ht, iter);
 
 	reverse_hash = bit_reverse_ulong(hash);
@@ -1825,6 +1856,7 @@ void cds_lfht_next(struct cds_lfht *ht __attribute__((unused)),
 
 void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
+	urcu_posix_assert(ht);
 	cds_lfht_iter_debug_set_ht(ht, iter);
 	/*
 	 * Get next after first bucket node.  The first bucket node is the
@@ -1839,6 +1871,7 @@ void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
 {
 	unsigned long size;
 
+	urcu_posix_assert(ht);
 	node->reverse_hash = bit_reverse_ulong(hash);
 	size = uatomic_load(&ht->size, CMM_ACQUIRE);
 	_cds_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
@@ -1854,6 +1887,7 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
 	unsigned long size;
 	struct cds_lfht_iter iter;
 
+	urcu_posix_assert(ht);
 	node->reverse_hash = bit_reverse_ulong(hash);
 	size = uatomic_load(&ht->size, CMM_ACQUIRE);
 	_cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
@@ -1871,6 +1905,7 @@ struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
 	unsigned long size;
 	struct cds_lfht_iter iter;
 
+	urcu_posix_assert(ht);
 	node->reverse_hash = bit_reverse_ulong(hash);
 	size = uatomic_load(&ht->size, CMM_ACQUIRE);
 	for (;;) {
@@ -1894,6 +1929,7 @@ int cds_lfht_replace(struct cds_lfht *ht,
 {
 	unsigned long size;
 
+	urcu_posix_assert(ht);
 	new_node->reverse_hash = bit_reverse_ulong(hash);
 	if (!old_iter->node)
 		return -ENOENT;
@@ -1911,6 +1947,7 @@ int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
 	unsigned long size;
 	int ret;
 
+	urcu_posix_assert(ht);
 	size = uatomic_load(&ht->size, CMM_ACQUIRE);
 	ret = _cds_lfht_del(ht, size, node);
 	if (!ret) {
@@ -2016,6 +2053,7 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 {
 	int ret;
 
+	urcu_posix_assert(ht);
 	if (ht->flags & CDS_LFHT_AUTO_RESIZE) {
 		/*
 		 * Perform error-checking for emptiness before queuing
@@ -2061,6 +2099,7 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
 	struct cds_lfht_node *node, *next;
 	unsigned long nr_bucket = 0, nr_removed = 0;
 
+	urcu_posix_assert(ht);
 	*approx_before = 0;
 	if (ht->split_count) {
 		int i;
@@ -2103,7 +2142,7 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
 
 /* called with resize mutex held */
 static
-void _do_cds_lfht_grow(struct cds_lfht *ht,
+int _do_cds_lfht_grow(struct cds_lfht *ht,
 		unsigned long old_size, unsigned long new_size)
 {
 	unsigned long old_order, new_order;
@@ -2113,7 +2152,7 @@ void _do_cds_lfht_grow(struct cds_lfht *ht,
 	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
 		   old_size, old_order, new_size, new_order);
 	urcu_posix_assert(new_size > old_size);
-	init_table(ht, old_order + 1, new_order);
+	return init_table(ht, old_order + 1, new_order);
 }
 
 /* called with resize mutex held */
@@ -2134,15 +2173,19 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht,
 	fini_table(ht, new_order + 1, old_order);
 }
 
-
-/* called with resize mutex held */
+/* takes the resize mutex */
 static
-void _do_cds_lfht_resize(struct cds_lfht *ht)
+void do_cds_lfht_resize(struct cds_lfht *ht)
 {
 	unsigned long new_size, old_size;
+	int ret = 0;
 
+	mutex_lock(&ht->resize_mutex);
+
 	/*
 	 * Resize table, re-do if the target size has changed under us.
+	 * If cannot grow, leave the loop at whatever size we've got
+	 * but tweak resize_target because unless it tracks ht->size
+	 * automatic resizes will be blocked!
 	 */
 	do {
 		if (uatomic_load(&ht->in_progress_destroy, CMM_RELAXED))
@@ -2153,14 +2196,20 @@ void _do_cds_lfht_resize(struct cds_lfht *ht)
 
 		old_size = ht->size;
 		new_size = uatomic_load(&ht->resize_target, CMM_RELAXED);
 		if (old_size < new_size)
-			_do_cds_lfht_grow(ht, old_size, new_size);
+			ret = _do_cds_lfht_grow(ht, old_size, new_size);
 		else if (old_size > new_size)
 			_do_cds_lfht_shrink(ht, old_size, new_size);
 		uatomic_store(&ht->resize_initiated, 0, CMM_RELAXED);
+		if (ret) {
+			uatomic_store(&ht->resize_target, ht->size, CMM_RELAXED);
+			break;
+		}
 		/* write resize_initiated before read resize_target */
 		cmm_smp_mb();
 	} while (ht->size != uatomic_load(&ht->resize_target, CMM_RELAXED));
+
+	mutex_unlock(&ht->resize_mutex);
 }
 
 static
@@ -2180,16 +2229,17 @@ void resize_target_update_count(struct cds_lfht *ht,
 }
 
 void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
 {
+	urcu_posix_assert(ht);
 	resize_target_update_count(ht, new_size);
-
 	/*
-	 * Set flags has early as possible even in contention case.
+	 * In case someone tries to mix manual and automatic resizes
+	 * set the flag early to avoid queuing an automatic one.
+	 * Set it once again after the resize mutex is taken.
+	 * Clear it before the mutex is released.
 	 */
 	uatomic_store(&ht->resize_initiated, 1, CMM_RELAXED);
-	mutex_lock(&ht->resize_mutex);
-	_do_cds_lfht_resize(ht);
-	mutex_unlock(&ht->resize_mutex);
+	do_cds_lfht_resize(ht);
 }
 
 static
@@ -2199,12 +2249,12 @@ void do_resize_cb(struct urcu_work *work)
 		caa_container_of(work, struct resize_work, work);
 	struct cds_lfht *ht = resize_work->ht;
 
+	poison_free(ht->alloc, work);
 	ht->flavor->register_thread();
-	mutex_lock(&ht->resize_mutex);
-	_do_cds_lfht_resize(ht);
-	mutex_unlock(&ht->resize_mutex);
+
+	do_cds_lfht_resize(ht);
+
 	ht->flavor->unregister_thread();
-	poison_free(ht->alloc, work);
 }
 
 static
@@ -2215,21 +2265,31 @@ void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
 	/*
 	 * Store to resize_target is before read resize_initiated as guaranteed
 	 * by either cmpxchg or _uatomic_xchg_monotonic_increase.
+	 * The thread doing the resize clears resize_initiated briefly before
+	 * checking if resize_target has changed.
 	 */
-	if (!uatomic_load(&ht->resize_initiated, CMM_RELAXED)) {
-		if (uatomic_load(&ht->in_progress_destroy, CMM_RELAXED)) {
-			return;
-		}
-		work = ht->alloc->malloc(ht->alloc->state, sizeof(*work));
-		if (work == NULL) {
-			dbg_printf("error allocating resize work, bailing out\n");
-			return;
-		}
-		work->ht = ht;
-		urcu_workqueue_queue_work(cds_lfht_workqueue,
-			&work->work, do_resize_cb);
-		uatomic_store(&ht->resize_initiated, 1, CMM_RELAXED);
+	if (uatomic_load(&ht->resize_initiated, CMM_RELAXED))
+		return;
+	if (uatomic_load(&ht->in_progress_destroy, CMM_RELAXED))
+		return;
+
+	work = ht->alloc->malloc(ht->alloc->state, sizeof(*work));
+	if (work == NULL) {
+		dbg_printf("error allocating resize work, bailing out\n");
+		return;
 	}
+	work->ht = ht;
+	/*
+	 * Set the flag early to hinder other threads from queuing a resize.
+	 * Remember that the automatic resize is only checked for when
+	 * ht->count is a power of 2. We don't want to miss a resize because
+	 * of races, so we prefer to err on the side of queuing an extra
+	 * work item sometimes.
+	 */
+	uatomic_store(&ht->resize_initiated, 1, CMM_RELAXED);
+
+	urcu_workqueue_queue_work(cds_lfht_workqueue,
+			&work->work, do_resize_cb);
 }
 
 static
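
Aside: the protocol above deliberately tolerates an occasional extra work item rather than risk missing a resize — the queuing side sets resize_initiated before queuing, and the worker clears it before re-checking resize_target. A compressed sketch of that flag/worker handshake in C11 atomics (illustrative only; the patch itself uses uatomic and urcu_workqueue):

    #include <stdatomic.h>

    static atomic_int resize_initiated;

    /* Queue at most one resize work item at a time (sketch only). */
    static void maybe_queue_resize(void)
    {
        if (atomic_load_explicit(&resize_initiated, memory_order_relaxed))
            return;             /* a resize is already queued or running */
        atomic_store_explicit(&resize_initiated, 1, memory_order_relaxed);
        /* queue_work(do_resize);  -- hypothetical work-queue call */
    }

    static void do_resize(void)
    {
        /* ... grow or shrink to the current target ... */
        atomic_store_explicit(&resize_initiated, 0, memory_order_relaxed);
        /* re-check the target here and loop if it moved meanwhile */
    }
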
@@ -2245,6 +2305,7 @@ void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int grow
 }
 
 /*
+ * Called periodically when accounting is on. Count = desired size.
  * We favor grow operations over shrink. A shrink operation never occurs
  * if a grow operation is queued for lazy execution. A grow operation
  * cancels any pending shrink lazy execution.
@@ -2255,6 +2316,7 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 {
 	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
 		return;
+	count = max(count, MIN_TABLE_SIZE);
 	count = min(count, ht->max_nr_buckets);
 
 	if (count == size)
@@ -2268,7 +2330,7 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 
 			s = uatomic_cmpxchg(&ht->resize_target, size, count);
 			if (s == size)
-				break;	/* no resize needed */
+				break;	/* set resize_target = count */
 			if (s > size)
 				return;	/* growing is/(was just) in progress */
 			if (s <= count)
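
Aside: the cmpxchg loop above is what keeps a shrink proposal from ever overwriting a larger target that raced in first. A standalone rendering of the same rule with C11 atomics (illustrative, not the library's API):

    #include <stdatomic.h>

    /* Lower 'target' to 'count' only if no grow is in flight. */
    static void propose_shrink(atomic_ulong *target, unsigned long size,
            unsigned long count)
    {
        for (;;) {
            unsigned long s = size;

            if (atomic_compare_exchange_strong(target, &s, count))
                return;         /* set target = count */
            if (s > size)
                return;         /* a grow won the race; keep it */
            if (s <= count)
                return;         /* someone already shrank enough */
            size = s;           /* retry against the newer value */
        }
    }
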
diff --git a/src/urcu-call-rcu-impl.h b/src/urcu-call-rcu-impl.h
index 2ea1efcd..0652c04b 100644
--- a/src/urcu-call-rcu-impl.h
+++ b/src/urcu-call-rcu-impl.h
@@ -130,9 +130,8 @@ static void alloc_cpu_call_rcu_data(void)
 	if (cpus_array_len <= 0) {
 		return;
 	}
-	p = malloc(cpus_array_len * sizeof(*per_cpu_call_rcu_data));
+	p = calloc(cpus_array_len, sizeof(*per_cpu_call_rcu_data));
 	if (p != NULL) {
-		memset(p, '\0', cpus_array_len * sizeof(*per_cpu_call_rcu_data));
 		rcu_set_pointer(&per_cpu_call_rcu_data, p);
 	} else {
 		if (!warned) {
@@ -198,7 +197,7 @@ int set_thread_cpu_affinity(struct call_rcu_data *crdp)
 	if (crdp->cpu_affinity < 0)
 		return 0;
 
-	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
+	if (crdp->gp_count++ & SET_AFFINITY_CHECK_PERIOD_MASK)
 		return 0;
 	if (urcu_sched_getcpu() == crdp->cpu_affinity)
 		return 0;
@@ -318,9 +317,6 @@ static void *call_rcu_thread(void *arg)
 	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
 	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
 
-	if (set_thread_cpu_affinity(crdp))
-		urcu_die(errno);
-
 	/*
 	 * If callbacks take a read-side lock, we need to be registered.
 	 */
@@ -333,7 +329,7 @@ static void *call_rcu_thread(void *arg)
 		cmm_smp_mb();
 	}
 	for (;;) {
-		struct cds_wfcq_head cbs_tmp_head;
+		struct __cds_wfcq_head cbs_tmp_head;
 		struct cds_wfcq_tail cbs_tmp_tail;
 		struct cds_wfcq_node *cbs, *cbs_tmp_n;
 		enum cds_wfcq_ret splice_ret;
@@ -358,7 +354,7 @@ static void *call_rcu_thread(void *arg)
 			rcu_register_thread();
 		}
 
-		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
+		__cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
 		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
 			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
 		urcu_posix_assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
@@ -377,8 +373,11 @@ static void *call_rcu_thread(void *arg)
 			}
 			uatomic_sub(&crdp->qlen, cbcount);
 		}
+		/*cds_wfcq_destroy(&cbs_tmp_head, &cbs_tmp_tail);*/
+
 		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
 			break;
+
 		rcu_thread_offline();
 		if (!rt) {
 			if (cds_wfcq_empty(&crdp->cbs_head,
@@ -425,10 +424,10 @@ static void call_rcu_data_init(struct call_rcu_data **crdpp,
 	int ret;
 	sigset_t newmask, oldmask;
 
-	crdp = malloc(sizeof(*crdp));
+	crdp = calloc(1, sizeof(*crdp));
 	if (crdp == NULL)
 		urcu_die(errno);
-	memset(crdp, '\0', sizeof(*crdp));
+
 	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
 	crdp->qlen = 0;
 	crdp->futex = 0;
diff --git a/src/workqueue.c b/src/workqueue.c
index 10b9fdee..6cb1644f 100644
--- a/src/workqueue.c
+++ b/src/workqueue.c
@@ -86,7 +86,7 @@ static int set_thread_cpu_affinity(struct urcu_workqueue *workqueue)
 	if (workqueue->cpu_affinity < 0)
 		return 0;
 
-	if (++workqueue->loop_count & SET_AFFINITY_CHECK_PERIOD_MASK)
+	if (workqueue->loop_count++ & SET_AFFINITY_CHECK_PERIOD_MASK)
 		return 0;
 	if (urcu_sched_getcpu() == workqueue->cpu_affinity)
 		return 0;
@@ -163,6 +163,7 @@ static void *workqueue_thread(void *arg)
 	struct urcu_workqueue *workqueue = (struct urcu_workqueue *) arg;
 	int rt = !!(uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_RT);
 
+	/* XXX needed? */
 	if (set_thread_cpu_affinity(workqueue))
 		urcu_die(errno);
 
@@ -175,7 +176,7 @@ static void *workqueue_thread(void *arg)
 		cmm_smp_mb();
 	}
 	for (;;) {
-		struct cds_wfcq_head cbs_tmp_head;
+		struct __cds_wfcq_head cbs_tmp_head;
 		struct cds_wfcq_tail cbs_tmp_tail;
 		struct cds_wfcq_node *cbs, *cbs_tmp_n;
 		enum cds_wfcq_ret splice_ret;
@@ -202,7 +203,7 @@ static void *workqueue_thread(void *arg)
 			workqueue->worker_after_resume_fct(workqueue, workqueue->priv);
 		}
 
-		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
+		__cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
 		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
 			&cbs_tmp_tail, &workqueue->cbs_head, &workqueue->cbs_tail);
 		urcu_posix_assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
@@ -222,8 +223,11 @@ static void *workqueue_thread(void *arg)
 			}
 			uatomic_sub(&workqueue->qlen, cbcount);
 		}
+		/*cds_wfcq_destroy(&cbs_tmp_head, &cbs_tmp_tail);*/
+
 		if (uatomic_read(&workqueue->flags) & URCU_WORKQUEUE_STOP)
 			break;
+
 		if (workqueue->worker_before_wait_fct)
 			workqueue->worker_before_wait_fct(workqueue, workqueue->priv);
 		if (!rt) {
@@ -272,10 +276,10 @@ struct urcu_workqueue *urcu_workqueue_create(unsigned long flags,
 	int ret;
 	sigset_t newmask, oldmask;
 
-	workqueue = malloc(sizeof(*workqueue));
+	workqueue = calloc(1, sizeof(*workqueue));
 	if (workqueue == NULL)
 		urcu_die(errno);
-	memset(workqueue, '\0', sizeof(*workqueue));
+
 	cds_wfcq_init(&workqueue->cbs_head, &workqueue->cbs_tail);
 	workqueue->qlen = 0;
 	workqueue->futex = 0;
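
A note on the ++count vs. count++ change made in both files above: with pre-increment the counter is 1 on the first call, the masked test is non-zero, and the affinity check is skipped until a whole period has elapsed; post-increment tests the old value (0), so the check also runs on the very first iteration. A tiny demonstration (the PERIOD_MASK value is hypothetical):

    #include <stdio.h>

    #define PERIOD_MASK 0xf     /* check every 16th call */

    static unsigned long count_pre, count_post;

    static int skip_pre(void)  { return (++count_pre) & PERIOD_MASK; }
    static int skip_post(void) { return count_post++ & PERIOD_MASK; }

    int main(void)
    {
        /* First call: pre-increment skips, post-increment checks. */
        printf("pre: %s, post: %s\n",
                skip_pre() ? "skip" : "check",
                skip_post() ? "skip" : "check");
        return 0;
    }
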