diff --git a/allchblk.c b/allchblk.c
index 159c06aee..8cb6e4792 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -29,22 +29,21 @@
  * Adjacent free blocks are coalesced.
  */
 
-
-# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
+#define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
         /* largest block we will allocate starting on a black   */
         /* listed block.  Must be >= HBLKSIZE.                  */
 
-# define UNIQUE_THRESHOLD 32
+#define UNIQUE_THRESHOLD 32
         /* Sizes up to this many HBLKs each have their own free list */
 
-# define HUGE_THRESHOLD 256
+#define HUGE_THRESHOLD 256
         /* Sizes of at least this many heap blocks are mapped to a   */
         /* single free list.                                         */
 
-# define FL_COMPRESSION 8
+#define FL_COMPRESSION 8
         /* In between sizes map this many distinct sizes to a single */
         /* bin.                                                      */
 
-# define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD) / FL_COMPRESSION \
+#define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD) / FL_COMPRESSION \
                      + UNIQUE_THRESHOLD)
 
 #ifndef GC_GCJ_SUPPORT
@@ -102,14 +101,14 @@ STATIC int GC_hblk_fl_from_blocks(size_t blocks_needed)
            + UNIQUE_THRESHOLD;
 }
 
-# define PHDR(hhdr) HDR((hhdr) -> hb_prev)
-# define NHDR(hhdr) HDR((hhdr) -> hb_next)
+#define PHDR(hhdr) HDR((hhdr) -> hb_prev)
+#define NHDR(hhdr) HDR((hhdr) -> hb_next)
 
-# ifdef USE_MUNMAP
+#ifdef USE_MUNMAP
 # define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
-# else
+#else
 # define IS_MAPPED(hhdr) TRUE
-# endif /* !USE_MUNMAP */
+#endif /* !USE_MUNMAP */
 #if !defined(NO_DEBUGGING) || defined(GC_ASSERTIONS)
   static void GC_CALLBACK add_hb_sz(struct hblk *h, int i,
@@ -132,7 +131,7 @@ STATIC int GC_hblk_fl_from_blocks(size_t blocks_needed)
   }
 #endif /* !NO_DEBUGGING || GC_ASSERTIONS */
 
-# if !defined(NO_DEBUGGING)
+#if !defined(NO_DEBUGGING)
   static void GC_CALLBACK print_hblkfreelist_item(struct hblk *h, int i,
                                                   void *prev_index_ptr)
   {
@@ -168,10 +167,10 @@
                (unsigned long)total);
   }
 
-/* Return the free-list index on which the block described by the header */
-/* appears, or -1 if it appears nowhere. */
-static int free_list_index_of(const hdr *wanted)
-{
+  /* Return the free-list index on which the block described by the header */
+  /* appears, or -1 if it appears nowhere. */
+  static int free_list_index_of(const hdr *wanted)
+  {
     int i;
 
     for (i = 0; i <= N_HBLK_FLS; ++i) {
@@ -184,10 +183,10 @@ static int free_list_index_of(const hdr *wanted)
       }
     }
     return -1;
-}
+  }
 
-GC_API void GC_CALL GC_dump_regions(void)
-{
+  GC_API void GC_CALL GC_dump_regions(void)
+  {
     unsigned i;
 
     for (i = 0; i < GC_n_heap_sects; ++i) {
@@ -213,7 +212,7 @@ GC_API void GC_CALL GC_dump_regions(void)
       }
       if (HBLK_IS_FREE(hhdr)) {
         int correct_index = GC_hblk_fl_from_blocks(
-                                (size_t)divHBLKSZ(hhdr -> hb_sz));
+                                divHBLKSZ(hhdr -> hb_sz));
         int actual_index;
 
         GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n",
@@ -235,9 +234,8 @@ GC_API void GC_CALL GC_dump_regions(void)
       }
     }
   }
-}
-
-# endif /* NO_DEBUGGING */
+  }
+#endif /* NO_DEBUGGING */
 
 /* Initialize hdr for a block containing the indicated size and  */
 /* kind of objects.  Return FALSE on failure.                    */
@@ -352,8 +350,7 @@ STATIC void GC_remove_from_fl_at(hdr *hhdr, int index)
 /* size-appropriate free list).                                   */
 GC_INLINE void GC_remove_from_fl(hdr *hhdr)
 {
-  GC_remove_from_fl_at(hhdr, GC_hblk_fl_from_blocks(
-                                (size_t)divHBLKSZ(hhdr -> hb_sz)));
+  GC_remove_from_fl_at(hhdr, GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz)));
 }
 
 /* Return a pointer to the block ending just before h, if any. */
@@ -395,7 +392,7 @@ STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
 /* We maintain individual free lists sorted by address. */
 STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
 {
-  int index = GC_hblk_fl_from_blocks((size_t)divHBLKSZ(hhdr -> hb_sz));
+  int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
   struct hblk *second = GC_hblkfreelist[index];
 
 # if defined(GC_ASSERTIONS) && !defined(USE_MUNMAP)
@@ -543,7 +540,7 @@ GC_INNER void GC_merge_unmapped(void)
     while (h != 0) {
       struct hblk *next;
       hdr *hhdr, *nexthdr;
-      word size, nextsize;
+      size_t size, next_size;
 
       GET_HDR(h, hhdr);
       size = hhdr -> hb_sz;
@@ -551,7 +548,7 @@ GC_INNER void GC_merge_unmapped(void)
       GET_HDR(next, nexthdr);
       /* Coalesce with successor, if possible. */
       if (nexthdr != NULL && HBLK_IS_FREE(nexthdr)
-          && !((size + (nextsize = nexthdr -> hb_sz)) & SIGNB)
+          && ((size + (next_size = nexthdr -> hb_sz)) & SIZET_SIGNB) == 0
              /* no overflow */) {
         /* Note that we usually try to avoid adjacent free blocks   */
         /* that are either both mapped or both unmapped.  But that  */
@@ -560,20 +557,20 @@
         /* not hold if the merged block would be too big. */
         if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
           /* Make both consistent, so that we can merge. */
-          if (size > nextsize) {
+          if (size > next_size) {
             GC_adjust_num_unmapped(next, nexthdr);
-            GC_remap((ptr_t)next, nextsize);
+            GC_remap((ptr_t)next, next_size);
           } else {
             GC_adjust_num_unmapped(h, hhdr);
             GC_unmap((ptr_t)h, size);
-            GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
+            GC_unmap_gap((ptr_t)h, size, (ptr_t)next, next_size);
             hhdr -> hb_flags |= WAS_UNMAPPED;
           }
         } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
-          if (size > nextsize) {
+          if (size > next_size) {
             GC_adjust_num_unmapped(next, nexthdr);
-            GC_unmap((ptr_t)next, nextsize);
-            GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
+            GC_unmap((ptr_t)next, next_size);
+            GC_unmap_gap((ptr_t)h, size, (ptr_t)next, next_size);
           } else {
             GC_adjust_num_unmapped(h, hhdr);
             GC_remap((ptr_t)h, size);
@@ -582,7 +579,7 @@
           }
         } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
           /* Unmap any gap in the middle */
-          GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
+          GC_unmap_gap((ptr_t)h, size, (ptr_t)next, next_size);
         }
         /* If they are both unmapped, we merge, but leave unmapped. */
         GC_remove_from_fl_at(hhdr, i);
@@ -611,27 +608,27 @@
  * If the return value is not 0, then hhdr is the header for it.
  */
 STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
-                                       size_t bytes, int index)
+                                       size_t size_needed, int index)
 {
   size_t total_size;
   struct hblk * rest;
   hdr * rest_hdr;
 
   GC_ASSERT(I_HOLD_LOCK());
-  GC_ASSERT(modHBLKSZ(bytes) == 0);
-  total_size = (size_t)(hhdr -> hb_sz);
+  GC_ASSERT(modHBLKSZ(size_needed) == 0);
+  total_size = hhdr -> hb_sz;
   GC_ASSERT(modHBLKSZ(total_size) == 0);
   GC_remove_from_fl_at(hhdr, index);
-  if (total_size == bytes) return h;
+  if (total_size == size_needed) return h;
 
-  rest = (struct hblk *)((word)h + bytes);
+  rest = (struct hblk *)((word)h + size_needed);
   rest_hdr = GC_install_header(rest);
   if (EXPECT(NULL == rest_hdr, FALSE)) {
     /* FIXME: This is likely to be very bad news ... */
    WARN("Header allocation failed: dropping block\n", 0);
     return NULL;
   }
-  rest_hdr -> hb_sz = total_size - bytes;
+  rest_hdr -> hb_sz = total_size - size_needed;
   rest_hdr -> hb_flags = 0;
 # ifdef GC_ASSERTIONS
     /* Mark h not free, to avoid assertion about adjacent free blocks. */
@@ -654,15 +651,14 @@ STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
 STATIC void GC_split_block(struct hblk *hbp, hdr *hhdr, struct hblk *last_hbp,
                            hdr *last_hdr, int index /* of free list */)
 {
-  word total_size = hhdr -> hb_sz;
-  word h_size = (word)((ptr_t)last_hbp - (ptr_t)hbp);
+  size_t h_size = (size_t)((ptr_t)last_hbp - (ptr_t)hbp);
   struct hblk *prev = hhdr -> hb_prev;
   struct hblk *next = hhdr -> hb_next;
 
   /* Replace hbp with last_hbp on its free list. */
   last_hdr -> hb_prev = prev;
   last_hdr -> hb_next = next;
-  last_hdr -> hb_sz = total_size - h_size;
+  last_hdr -> hb_sz = hhdr -> hb_sz - h_size;
   last_hdr -> hb_flags = 0;
   if (prev /* != NULL */) { /* CPPCHECK */
     HDR(prev) -> hb_next = last_hbp;
@@ -759,11 +755,11 @@ STATIC unsigned GC_drop_blacklisted_count = 0;
 #define ALIGN_PAD_SZ(p, align_m1) \
                 (((align_m1) + 1 - (size_t)ADDR(p)) & (align_m1))
 
-static GC_bool next_hblk_fits_better(const hdr *hhdr, word size_avail,
-                                     word size_needed, size_t align_m1)
+static GC_bool next_hblk_fits_better(const hdr *hhdr, size_t size_avail,
+                                     size_t size_needed, size_t align_m1)
 {
   const hdr *nexthdr;
-  word next_size;
+  size_t next_size;
   size_t next_ofs;
   struct hblk *next_hbp = hhdr -> hb_next;
 
@@ -777,8 +773,8 @@ static GC_bool next_hblk_fits_better(const hdr *hhdr, word size_avail,
          && !GC_is_black_listed(next_hbp + divHBLKSZ(next_ofs), size_needed);
 }
 
-static struct hblk *find_nonbl_hblk(struct hblk *last_hbp, word size_remain,
-                                    word eff_size_needed, size_t align_m1)
+static struct hblk *find_nonbl_hblk(struct hblk *last_hbp, size_t size_remain,
+                                    size_t eff_size_needed, size_t align_m1)
 {
   ptr_t search_end = PTR_ALIGN_DOWN((ptr_t)last_hbp + size_remain,
                                     align_m1 + 1);
@@ -798,7 +794,7 @@ static struct hblk *find_nonbl_hblk(struct hblk *last_hbp, word size_remain,
 /* that we will recover some later.  hhdr should correspond to hbp. */
 static void drop_hblk_in_chunks(int n, struct hblk *hbp, hdr *hhdr)
 {
-  size_t total_size = (size_t)(hhdr -> hb_sz);
+  size_t total_size = hhdr -> hb_sz;
   const struct hblk *limit = hbp + divHBLKSZ(total_size);
 
   GC_ASSERT(HDR(hbp) == hhdr);
@@ -852,7 +848,7 @@ STATIC struct hblk *GC_allochblk_nth(size_t lb_adjusted, int k,
 {
   struct hblk *hbp, *last_hbp;
   hdr *hhdr; /* header corresponding to hbp */
-  word size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS_CHECKED(lb_adjusted);
+  size_t size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS_CHECKED(lb_adjusted);
                 /* number of bytes in requested objects */
 
   GC_ASSERT(I_HOLD_LOCK());
@@ -861,7 +857,7 @@
  retry:
   /* Search for a big enough block in free list. */
   for (hbp = GC_hblkfreelist[index];; hbp = hhdr -> hb_next) {
-    word size_avail; /* bytes available in this block */
+    size_t size_avail; /* bytes available in this block */
     size_t align_ofs;
 
     if (hbp /* != NULL */) {
@@ -978,11 +974,11 @@
     }
 # endif
   /* hbp may be on the wrong free list; the parameter index is important. */
-  hbp = GC_get_first_part(hbp, hhdr, (size_t)size_needed, index);
+  hbp = GC_get_first_part(hbp, hhdr, size_needed, index);
   if (EXPECT(NULL == hbp, FALSE)) return NULL;
 
   /* Add it to map of valid blocks. */
-  if (EXPECT(!GC_install_counts(hbp, (size_t)size_needed), FALSE))
+  if (EXPECT(!GC_install_counts(hbp, size_needed), FALSE))
     return NULL; /* This leaks memory under very rare conditions. */
 
   /* Set up the header. */
@@ -992,7 +988,7 @@ STATIC struct hblk *GC_allochblk_nth(size_t lb_adjusted, int k,
     /* Result is always true, not checked to avoid a cppcheck warning. */
 # else
     if (EXPECT(!setup_header(hhdr, hbp, lb_adjusted, k, flags), FALSE)) {
-      GC_remove_counts(hbp, (size_t)size_needed);
+      GC_remove_counts(hbp, size_needed);
       return NULL; /* ditto */
     }
 # endif
@@ -1040,17 +1036,17 @@ GC_INNER void GC_freehblk(struct hblk *hbp)
 {
   struct hblk *next, *prev;
   hdr *hhdr, *prevhdr, *nexthdr;
-  word size;
+  size_t size;
 
   GET_HDR(hbp, hhdr);
   size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
-  if ((size & SIGNB) != 0)
+  if ((size & SIZET_SIGNB) != 0)
     ABORT("Deallocating excessively large block.  Too large an allocation?");
     /* Probably possible if we try to allocate more than half the address */
     /* space at once.  If we don't catch it here, strange things happen   */
     /* later.                                                             */
-  GC_remove_counts(hbp, (size_t)size);
+  GC_remove_counts(hbp, size);
   hhdr -> hb_sz = size;
 # ifdef USE_MUNMAP
     hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
@@ -1069,7 +1065,8 @@ GC_INNER void GC_freehblk(struct hblk *hbp)
   prev = GC_free_block_ending_at(hbp);
   /* Coalesce with successor, if possible. */
   if (nexthdr != NULL && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)
-      && !((hhdr -> hb_sz + nexthdr -> hb_sz) & SIGNB) /* no overflow */) {
+      && ((hhdr -> hb_sz + nexthdr -> hb_sz) & SIZET_SIGNB) == 0
+         /* no overflow */) {
     GC_remove_from_fl(nexthdr);
     hhdr -> hb_sz += nexthdr -> hb_sz;
     GC_remove_header(next);
@@ -1079,7 +1076,7 @@ GC_INNER void GC_freehblk(struct hblk *hbp)
   if (prev /* != NULL */) { /* CPPCHECK */
     prevhdr = HDR(prev);
     if (IS_MAPPED(prevhdr)
-        && !((hhdr -> hb_sz + prevhdr -> hb_sz) & SIGNB)) {
+        && ((hhdr -> hb_sz + prevhdr -> hb_sz) & SIZET_SIGNB) == 0) {
      GC_remove_from_fl(prevhdr);
       prevhdr -> hb_sz += hhdr -> hb_sz;
 # ifdef USE_MUNMAP
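The SIGNB tests in the coalescing and free paths above become SIZET_SIGNB checks, matching the new size_t type of hb_sz: legitimate block sizes each stay below half the size_t range, so a set top bit in their sum can only mean overflow. A minimal standalone sketch of the guard (SIZE_MAX stands in for GC_SIZE_MAX; the function name is illustrative, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Top bit of size_t, derived the same way as SIZET_SIGNB below in */
    /* gc_priv.h.                                                      */
    #define SIZET_SIGNB (SIZE_MAX ^ (SIZE_MAX >> 1))

    /* Both sizes are kept below SIZE_MAX/2 by the allocator, so a set */
    /* top bit in the sum can only mean the merged block would be      */
    /* implausibly large; refuse to coalesce in that case.             */
    static int merge_would_overflow(size_t a, size_t b)
    {
      return ((a + b) & SIZET_SIGNB) != 0;
    }

    int main(void)
    {
      printf("%d\n", merge_would_overflow(4096, 8192));         /* 0 */
      printf("%d\n", merge_would_overflow(SIZE_MAX / 2, 4096)); /* 1 */
      return 0;
    }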
diff --git a/alloc.c b/alloc.c
index 14ed09c44..e3519c139 100644
--- a/alloc.c
+++ b/alloc.c
@@ -1003,7 +1003,7 @@ GC_INNER void GC_set_fl_marks(ptr_t q)
       q2 = (ptr_t)obj_link(q);
 #   endif
     for (;;) {
-      unsigned bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
+      size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)q - (ptr_t)h), sz);
 
       if (!mark_bit_from_hdr(hhdr, bit_no)) {
         set_mark_bit_from_hdr(hhdr, bit_no);
@@ -1093,7 +1093,7 @@ STATIC void GC_clear_fl_marks(ptr_t q)
     size_t sz = hhdr -> hb_sz; /* Normally set only once. */
 
     for (;;) {
-      unsigned bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
+      size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)q - (ptr_t)h), sz);
 
       if (mark_bit_from_hdr(hhdr, bit_no)) {
         size_t n_marks = hhdr -> hb_n_marks;
@@ -1394,9 +1394,9 @@ GC_INNER ptr_t GC_os_get_mem(size_t bytes)
   return (ptr_t)space;
 }
 
-/* Use the chunk of memory starting at h of size bytes as part of the heap. */
-/* Assumes h is HBLKSIZE aligned, bytes argument is a multiple of HBLKSIZE. */
-STATIC void GC_add_to_heap(struct hblk *h, size_t bytes)
+/* Use the chunk of memory starting at h of size sz as part of the     */
+/* heap.  Assumes h is HBLKSIZE aligned, sz is a multiple of HBLKSIZE. */
+STATIC void GC_add_to_heap(struct hblk *h, size_t sz)
 {
   hdr *hhdr;
   ptr_t endp;
@@ -1408,8 +1408,8 @@ STATIC void GC_add_to_heap(struct hblk *h, size_t bytes)
 
   GC_ASSERT(I_HOLD_LOCK());
   GC_ASSERT(ADDR(h) % HBLKSIZE == 0);
-  GC_ASSERT(bytes % HBLKSIZE == 0);
-  GC_ASSERT(bytes > 0);
+  GC_ASSERT(sz % HBLKSIZE == 0);
+  GC_ASSERT(sz > 0);
   GC_ASSERT(GC_all_nils != NULL);
 
   if (EXPECT(GC_n_heap_sects == GC_capacity_heap_sects, FALSE)) {
@@ -1445,13 +1445,13 @@ STATIC void GC_add_to_heap(struct hblk *h, size_t bytes)
   while (EXPECT(ADDR(h) <= HBLKSIZE, FALSE)) {
     /* Can't handle memory near address zero. */
     ++h;
-    bytes -= HBLKSIZE;
-    if (0 == bytes) return;
+    sz -= HBLKSIZE;
+    if (0 == sz) return;
   }
-  endp = (ptr_t)h + bytes;
+  endp = (ptr_t)h + sz;
   while (EXPECT(ADDR_GE((ptr_t)h, endp), FALSE)) {
-    bytes -= HBLKSIZE;
-    if (0 == bytes) return;
+    sz -= HBLKSIZE;
+    if (0 == sz) return;
     endp -= HBLKSIZE;
   }
   hhdr = GC_install_header(h);
@@ -1474,12 +1474,12 @@ STATIC void GC_add_to_heap(struct hblk *h, size_t bytes)
     }
 # endif
   GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)h;
-  GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
+  GC_heap_sects[GC_n_heap_sects].hs_bytes = sz;
   GC_n_heap_sects++;
-  hhdr -> hb_sz = bytes;
+  hhdr -> hb_sz = sz;
   hhdr -> hb_flags = 0;
   GC_freehblk(h);
-  GC_heapsize += bytes;
+  GC_heapsize += sz;
 
   if (ADDR_GE((ptr_t)GC_least_plausible_heap_addr, (ptr_t)h)
       || EXPECT(NULL == GC_least_plausible_heap_addr, FALSE)) {
@@ -1555,7 +1555,7 @@ GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
 
 word GC_max_retries = 0;
 
-GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes)
+GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t sz)
 {
   size_t page_offset;
   size_t displ = 0;
@@ -1564,15 +1564,15 @@ GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes)
 
   GC_ASSERT(I_HOLD_LOCK());
   if (NULL == ptr) return;
-  GC_ASSERT(bytes != 0);
+  GC_ASSERT(sz != 0);
   GC_ASSERT(GC_page_size != 0);
   /* TODO: Assert correct memory flags if GWW_VDB */
 
   page_offset = ADDR(ptr) & (GC_page_size-1);
   if (page_offset != 0) displ = GC_page_size - page_offset;
-  recycled_bytes = bytes > displ ? (bytes - displ) & ~(GC_page_size - 1) : 0;
+  recycled_bytes = sz > displ ? (sz - displ) & ~(GC_page_size - 1) : 0;
   GC_COND_LOG_PRINTF("Recycle %lu/%lu scratch-allocated bytes at %p\n",
-                     (unsigned long)recycled_bytes, (unsigned long)bytes, ptr);
+                     (unsigned long)recycled_bytes, (unsigned long)sz, ptr);
   if (recycled_bytes > 0)
     GC_add_to_heap((struct hblk *)((ptr_t)ptr + displ), recycled_bytes);
 }
@@ -1583,7 +1583,7 @@ GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes)
 /* Returns FALSE on failure. */
 GC_INNER GC_bool GC_expand_hp_inner(word n)
 {
-  size_t bytes;
+  size_t sz;
   struct hblk * space;
   word expansion_slop;  /* Number of bytes by which we expect */
                         /* the heap to expand soon.           */
@@ -1591,22 +1591,22 @@ GC_INNER GC_bool GC_expand_hp_inner(word n)
   GC_ASSERT(I_HOLD_LOCK());
   GC_ASSERT(GC_page_size != 0);
   if (0 == n) n = 1;
-  bytes = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
+  sz = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
   GC_DBGLOG_PRINT_HEAP_IN_USE();
   if (GC_max_heapsize != 0
-      && (GC_max_heapsize < (word)bytes
-          || GC_heapsize > GC_max_heapsize - (word)bytes)) {
+      && (GC_max_heapsize < (word)sz
+          || GC_heapsize > GC_max_heapsize - (word)sz)) {
     /* Exceeded self-imposed limit */
     return FALSE;
   }
-  space = (struct hblk *)GC_os_get_mem(bytes);
+  space = (struct hblk *)GC_os_get_mem(sz);
   if (EXPECT(NULL == space, FALSE)) {
-    WARN("Failed to expand heap by %" WARN_PRIuPTR " KiB\n", bytes >> 10);
+    WARN("Failed to expand heap by %" WARN_PRIuPTR " KiB\n", sz >> 10);
     return FALSE;
   }
   GC_last_heap_growth_gc_no = GC_gc_no;
   GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
-                    TO_KiB_UL(GC_heapsize + bytes),
+                    TO_KiB_UL(GC_heapsize + sz),
                     (unsigned long)GC_bytes_allocd);
 
   /* Adjust heap limits generously for blacklisting to work better. */
@@ -1615,14 +1615,14 @@ GC_INNER GC_bool GC_expand_hp_inner(word n)
   expansion_slop = min_bytes_allocd() + 4 * MAXHINCR * HBLKSIZE;
   if ((0 == GC_last_heap_addr && (ADDR(space) & SIGNB) == 0)
       || (GC_last_heap_addr != 0 && GC_last_heap_addr < ADDR(space))) {
-    /* Assume the heap is growing up. */
-    ptr_t new_limit = (ptr_t)space + bytes + expansion_slop;
+    /* Assume the heap is growing up.  */
+    ptr_t new_limit = (ptr_t)space + sz + expansion_slop;
 
     if (ADDR_LT((ptr_t)space, new_limit)
         && ADDR_LT((ptr_t)GC_greatest_plausible_heap_addr, new_limit))
       GC_greatest_plausible_heap_addr = new_limit;
   } else {
-    /* Heap is growing down. */
+    /* Heap is growing down.  */
     ptr_t new_limit = (ptr_t)space - expansion_slop - sizeof(word);
 
     if (ADDR_LT(new_limit, (ptr_t)space)
@@ -1631,7 +1631,7 @@ GC_INNER GC_bool GC_expand_hp_inner(word n)
   }
   GC_last_heap_addr = ADDR(space);
 
-  GC_add_to_heap(space, bytes);
+  GC_add_to_heap(space, sz);
   if (GC_on_heap_resize)
     (*GC_on_heap_resize)(GC_heapsize);
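GC_expand_hp_inner keeps its heap-limit test in the subtraction form `GC_heapsize > GC_max_heapsize - sz` rather than comparing against `GC_heapsize + sz`, so a sum that could wrap around is never formed. A sketch of the same idiom with stand-in names (not the collector's globals):

    #include <stdint.h>

    /* Equivalent to heapsize + incr > max_heapsize, but written so   */
    /* the sum is never computed and thus cannot wrap around.         */
    static int would_exceed_limit(uintptr_t heapsize, uintptr_t max_heapsize,
                                  uintptr_t incr)
    {
      return max_heapsize != 0
             && (max_heapsize < incr || heapsize > max_heapsize - incr);
    }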
diff --git a/backgraph.c b/backgraph.c
index 18024fe15..821d6a840 100644
--- a/backgraph.c
+++ b/backgraph.c
@@ -9,7 +9,6 @@
  * Permission to modify the code and to distribute modified code is granted,
  * provided the above notices are retained, and a notice that the code was
  * modified is included with the above copyright notice.
- *
  */
 
 #include "private/dbg_mlc.h"
@@ -290,7 +289,7 @@ static void add_edge(ptr_t p, ptr_t q)
 # endif
 }
 
-typedef void (*per_object_func)(ptr_t p, size_t n_bytes, word gc_descr);
+typedef void (*per_object_func)(ptr_t p, size_t sz, word descr);
 
 static GC_CALLBACK void per_object_helper(struct hblk *h, void *fn_ptr)
 {
@@ -311,10 +310,10 @@ GC_INLINE void GC_apply_to_each_object(per_object_func fn)
   GC_apply_to_all_blocks(per_object_helper, &fn);
 }
 
-static void reset_back_edge(ptr_t p, size_t n_bytes, word gc_descr)
+static void reset_back_edge(ptr_t p, size_t sz, word descr)
 {
-  UNUSED_ARG(n_bytes);
-  UNUSED_ARG(gc_descr);
+  UNUSED_ARG(sz);
+  UNUSED_ARG(descr);
   GC_ASSERT(I_HOLD_LOCK());
   /* Skip any free-list links, or dropped blocks. */
   if (GC_HAS_DEBUG_INFO(p)) {
@@ -349,16 +348,16 @@
   }
 }
 
-static void add_back_edges(ptr_t p, size_t n_bytes, word gc_descr)
+static void add_back_edges(ptr_t p, size_t sz, word descr)
 {
   ptr_t current_p = p + sizeof(oh);
 
   /* For now, fix up non-length descriptors conservatively. */
-  if((gc_descr & GC_DS_TAGS) != GC_DS_LENGTH) {
-    gc_descr = n_bytes;
+  if ((descr & GC_DS_TAGS) != GC_DS_LENGTH) {
+    descr = sz;
   }
-  for (; ADDR_LT(current_p, p + gc_descr); current_p += sizeof(word)) {
+  for (; ADDR_LT(current_p, p + descr); current_p += sizeof(word)) {
     ptr_t q;
 
     LOAD_WORD_OR_CONTINUE(q, current_p);
@@ -467,10 +466,10 @@ STATIC ptr_t GC_deepest_obj = NULL;
 /* next GC.                                                          */
 /* Set GC_max_height to be the maximum height we encounter, and     */
 /* GC_deepest_obj to be the corresponding object.                   */
-static void update_max_height(ptr_t p, size_t n_bytes, word gc_descr)
+static void update_max_height(ptr_t p, size_t sz, word descr)
 {
-  UNUSED_ARG(n_bytes);
-  UNUSED_ARG(gc_descr);
+  UNUSED_ARG(sz);
+  UNUSED_ARG(descr);
   GC_ASSERT(I_HOLD_LOCK());
   if (GC_is_marked(p) && GC_HAS_DEBUG_INFO(p)) {
     word p_height = 0;
diff --git a/blacklst.c b/blacklst.c
index bf8d6eacd..df1271988 100644
--- a/blacklst.c
+++ b/blacklst.c
@@ -196,7 +196,7 @@ GC_INNER void GC_unpromote_black_lists(void)
     GC_ASSERT(I_HOLD_LOCK());
 # endif
   if (GC_modws_valid_offsets[ADDR(p) & (sizeof(word)-1)]) {
-    word index = PHT_HASH(p);
+    size_t index = PHT_HASH(p);
 
     if (NULL == HDR(p)
         || get_pht_entry_from_index(GC_old_normal_bl, index)) {
 # ifdef PRINT_BLACK_LIST
@@ -217,7 +217,7 @@ GC_INNER void GC_unpromote_black_lists(void)
   GC_INNER void GC_add_to_black_list_stack(ptr_t p)
 #endif
 {
-  word index = PHT_HASH(p);
+  size_t index = PHT_HASH(p);
 
 # ifndef PARALLEL_MARK
     GC_ASSERT(I_HOLD_LOCK());
@@ -242,7 +242,7 @@ GC_INNER void GC_unpromote_black_lists(void)
 GC_API struct GC_hblk_s *GC_CALL GC_is_black_listed(struct GC_hblk_s *h,
                                                     size_t len)
 {
-  size_t index = (size_t)PHT_HASH(h);
+  size_t index = PHT_HASH(h);
   size_t i, nblocks;
 
   if (!GC_all_interior_pointers
@@ -260,12 +260,12 @@ GC_API struct GC_hblk_s *GC_CALL GC_is_black_listed(struct GC_hblk_s *h,
     } else {
       if (get_pht_entry_from_index(GC_old_stack_bl, index)
           || get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
-        return h + (i+1);
+        return &h[i + 1];
       }
       i++;
     }
     if (i >= nblocks) break;
-    index = (size_t)PHT_HASH(h + i);
+    index = PHT_HASH(h + i);
   }
   return NULL;
 }
@@ -280,7 +280,7 @@ STATIC word GC_number_stack_black_listed(struct hblk *start,
   word result = 0;
 
   for (h = start; ADDR_LT((ptr_t)h, (ptr_t)endp1); h++) {
-    word index = PHT_HASH(h);
+    size_t index = PHT_HASH(h);
 
     if (get_pht_entry_from_index(GC_old_stack_bl, index)) result++;
   }
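blacklst.c indexes the page hash tables with the size_t result of PHT_HASH and the get_pht_entry_from_index bit accessor. A simplified standalone model of that bitset, assuming illustrative constants rather than the library's configuration:

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified page_hash_table: one bit per hashed block address. */
    #define LOG_HBLKSIZE 12
    #define PHT_ENTRIES  (1u << 16)
    #define WORDSZ       (sizeof(uintptr_t) * 8)

    static uintptr_t page_bits[PHT_ENTRIES / WORDSZ];

    static size_t pht_hash(const void *p)
    {
      return (size_t)(((uintptr_t)p >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1));
    }

    static int pht_get(size_t index)
    {
      return (int)((page_bits[index / WORDSZ] >> (index % WORDSZ)) & 1);
    }

    static void pht_set(size_t index)
    {
      page_bits[index / WORDSZ] |= (uintptr_t)1 << (index % WORDSZ);
    }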
diff --git a/dbg_mlc.c b/dbg_mlc.c
index 2aeecc0d6..f374a218e 100644
--- a/dbg_mlc.c
+++ b/dbg_mlc.c
@@ -36,7 +36,7 @@
 GC_INNER int GC_has_other_debug_info(ptr_t base)
 {
   ptr_t body = (ptr_t)((oh *)base + 1);
-  word sz = GC_size(base);
+  size_t sz = GC_size(base);
 
   if (HBLKPTR(base) != HBLKPTR(body)
       || sz < DEBUG_BYTES + EXTRA_BYTES) {
@@ -250,7 +250,7 @@
 #define CROSSES_HBLK(p, sz) \
         ((ADDR((p) + sizeof(oh) + (sz) - 1) ^ ADDR(p)) >= HBLKSIZE)
 
-GC_INNER void *GC_store_debug_info_inner(void *base, word sz,
+GC_INNER void *GC_store_debug_info_inner(void *base, size_t sz,
                                          const char *string, int linenum)
 {
   GC_uintptr_t *result = (GC_uintptr_t *)((oh *)base + 1);
@@ -293,7 +293,7 @@ static void *store_debug_info(void *base, size_t lb,
   LOCK();
   if (!GC_debugging_started)
     GC_start_debugging_inner();
-  result = GC_store_debug_info_inner(base, (word)lb, s, i);
+  result = GC_store_debug_info_inner(base, lb, s, i);
   ADD_CALL_CHAIN(base, ra);
   UNLOCK();
   return result;
@@ -306,7 +306,7 @@ static void *store_debug_info(void *base, size_t lb,
 STATIC ptr_t GC_check_annotated_obj(oh *ohdr)
 {
   ptr_t body = (ptr_t)(ohdr + 1);
-  word gc_sz = GC_size(ohdr);
+  size_t gc_sz = GC_size(ohdr);
 
   if (ohdr -> oh_sz + DEBUG_BYTES > (GC_uintptr_t)gc_sz) {
     return (ptr_t)(&(ohdr -> oh_sz));
@@ -471,7 +471,7 @@ GC_INNER void GC_start_debugging_inner(void)
 # endif
   GC_print_heap_obj = GC_debug_print_heap_obj_proc;
   GC_debugging_started = TRUE;
-  GC_register_displacement_inner((word)sizeof(oh));
+  GC_register_displacement_inner(sizeof(oh));
 # if defined(CPPCHECK)
     GC_noop1(GC_debug_header_size);
 # endif
@@ -487,7 +487,7 @@ GC_API void GC_CALL GC_debug_register_displacement(size_t offset)
 {
   LOCK();
   GC_register_displacement_inner(offset);
-  GC_register_displacement_inner((word)sizeof(oh) + offset);
+  GC_register_displacement_inner(sizeof(oh) + offset);
   UNLOCK();
 }
@@ -585,7 +585,7 @@ STATIC void * GC_debug_generic_malloc(size_t lb, int k, GC_EXTRA_PARAMS)
     }
     if (!GC_debugging_started)
       GC_start_debugging_inner();
-    result = GC_store_debug_info_inner(base, (word)lb, "INTERNAL", 0);
+    result = GC_store_debug_info_inner(base, lb, "INTERNAL", 0);
     ADD_CALL_CHAIN_INNER(base);
     return result;
 }
@@ -747,7 +747,7 @@ GC_API void GC_CALL GC_debug_free(void * p)
   } else {
 #   ifndef SHORT_DBG_HDRS
       ptr_t clobbered = GC_check_annotated_obj((oh *)base);
-      word sz = GC_size(base);
+      size_t sz = GC_size(base);
 
       if (clobbered != NULL) {
         GC_SET_HAVE_ERRORS(); /* no "release" barrier is needed */
@@ -791,7 +791,7 @@ GC_API void GC_CALL GC_debug_free(void * p)
     /* is deferred. */
     LOCK();
 #   ifdef LINT2
-      GC_incr_bytes_freed((size_t)sz);
+      GC_incr_bytes_freed(sz);
 #   else
       GC_bytes_freed += sz;
 #   endif
diff --git a/finalize.c b/finalize.c
index fe655252c..6f63d2205 100644
--- a/finalize.c
+++ b/finalize.c
@@ -26,9 +26,9 @@ typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
 
 #define HASH3(addr,size,log_size) \
-        (((ADDR(addr) >> 3) ^ (ADDR(addr) >> (3 + (log_size)))) \
+        ((size_t)((ADDR(addr) >> 3) ^ (ADDR(addr) >> (3 + (log_size)))) \
          & ((size)-1))
-#define HASH2(addr,log_size) HASH3(addr, (word)1 << (log_size), log_size)
+#define HASH2(addr,log_size) HASH3(addr, (size_t)1 << (log_size), log_size)
 
 struct hash_chain_entry {
   word hidden_key;
@@ -53,9 +53,9 @@ struct finalizable_object {
                 /* is on finalize_now queue. */
 # define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
 # define fo_set_next(x,y) ((x)->prolog.next = (struct hash_chain_entry *)(y))
-  GC_finalization_proc fo_fn;   /* Finalizer. */
+  GC_finalization_proc fo_fn;   /* finalizer */
   ptr_t fo_client_data;
-  word fo_object_size;          /* In bytes. */
+  size_t fo_object_sz;          /* in bytes */
   finalization_mark_proc fo_mark_proc; /* Mark-through procedure */
 };
@@ -91,14 +91,14 @@ GC_API void GC_CALL GC_push_finalizer_structures(void)
 /* current size.  May be a no-op.  *table is a pointer to an array of */
 /* hash headers.  We update both *table and *log_size_ptr on success. */
 STATIC void GC_grow_table(struct hash_chain_entry ***table,
-                          unsigned *log_size_ptr, const word *entries_ptr)
+                          unsigned *log_size_ptr, const size_t *entries_ptr)
 {
-  word i;
+  size_t i;
   struct hash_chain_entry *p;
   unsigned log_old_size = *log_size_ptr;
   unsigned log_new_size = log_old_size + 1;
-  word old_size = *table == NULL ? 0 : (word)1 << log_old_size;
-  word new_size = (word)1 << log_new_size;
+  size_t old_size = NULL == *table ? 0 : (size_t)1 << log_old_size;
+  size_t new_size = (size_t)1 << log_new_size;
         /* FIXME: Power of 2 size often gets rounded up to one more page. */
   struct hash_chain_entry **new_table;
@@ -114,24 +114,22 @@ STATIC void GC_grow_table(struct hash_chain_entry ***table,
     GC_gcollect_inner();
     RESTORE_CANCEL(cancel_state);
     /* GC_finalize might decrease entries value. */
-    if (*entries_ptr < ((word)1 << log_old_size) - (*entries_ptr >> 2))
+    if (*entries_ptr < ((size_t)1 << log_old_size) - (*entries_ptr >> 2))
       return;
   }
   new_table = (struct hash_chain_entry **)
                 GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
-                    (size_t)new_size * sizeof(struct hash_chain_entry *),
-                    NORMAL);
-  if (new_table == 0) {
-    if (*table == 0) {
+                    new_size * sizeof(struct hash_chain_entry *), NORMAL);
+  if (NULL == new_table) {
+    if (NULL == *table) {
       ABORT("Insufficient space for initial table allocation");
     } else {
       return;
     }
   }
   for (i = 0; i < old_size; i++) {
-    p = (*table)[i];
-    while (p != 0) {
+    for (p = (*table)[i]; p != NULL;) {
       ptr_t real_key = (ptr_t)GC_REVEAL_POINTER(p -> hidden_key);
       struct hash_chain_entry *next = p -> next;
       size_t new_hash = HASH3(real_key, new_size, log_new_size);
@@ -174,7 +172,7 @@ STATIC int GC_register_disappearing_link_inner(
   GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
   if (EXPECT(NULL == dl_hashtbl -> head, FALSE)
       || EXPECT(dl_hashtbl -> entries
-                > ((word)1 << dl_hashtbl -> log_size), FALSE)) {
+                > ((size_t)1 << dl_hashtbl -> log_size), FALSE)) {
     GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl -> head,
                   &dl_hashtbl -> log_size, &dl_hashtbl -> entries);
     GC_COND_LOG_PRINTF("Grew %s table to %u entries\n", tbl_log_name,
@@ -705,7 +703,8 @@ STATIC void GC_register_finalizer_inner(void * obj,
   if (mp == GC_unreachable_finalize_mark_proc)
     need_unreachable_finalization = TRUE;
   if (EXPECT(NULL == GC_fnlz_roots.fo_head, FALSE)
-      || EXPECT(GC_fo_entries > ((word)1 << GC_log_fo_table_size), FALSE)) {
+      || EXPECT(GC_fo_entries
+                > ((size_t)1 << GC_log_fo_table_size), FALSE)) {
     GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
                   &GC_log_fo_table_size, &GC_fo_entries);
     GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
@@ -717,7 +716,7 @@ STATIC void GC_register_finalizer_inner(void * obj,
 
   index = HASH2(obj, GC_log_fo_table_size);
   curr_fo = GC_fnlz_roots.fo_head[index];
-  while (curr_fo != 0) {
+  while (curr_fo != NULL) {
     GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
     if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(obj)) {
       /* Interruption by a signal in the middle of this */
@@ -811,7 +810,7 @@ STATIC void GC_register_finalizer_inner(void * obj,
   new_fo -> fo_hidden_base = GC_HIDE_POINTER(obj);
   new_fo -> fo_fn = fn;
   new_fo -> fo_client_data = (ptr_t)cd;
-  new_fo -> fo_object_size = hhdr -> hb_sz;
+  new_fo -> fo_object_sz = hhdr -> hb_sz;
   new_fo -> fo_mark_proc = mp;
   fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
   GC_dirty(new_fo);
@@ -903,9 +902,9 @@ GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
 #endif /* !NO_DEBUGGING */
 
 #ifndef SMALL_CONFIG
-  STATIC word GC_old_dl_entries = 0; /* for stats printing */
+  STATIC size_t GC_old_dl_entries = 0; /* for stats printing */
 # ifndef GC_LONG_REFS_NOT_NEEDED
-    STATIC word GC_old_ll_entries = 0;
+    STATIC size_t GC_old_ll_entries = 0;
 # endif
 #endif /* !SMALL_CONFIG */
@@ -1070,9 +1069,8 @@ GC_INNER void GC_finalize(void)
           /* see it. */
           curr_fo -> fo_hidden_base =
                         (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
-          GC_bytes_finalized +=
-                curr_fo -> fo_object_size
-                + sizeof(struct finalizable_object);
+          GC_bytes_finalized += (word)(curr_fo -> fo_object_sz)
+                                + sizeof(struct finalizable_object);
           GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
           curr_fo = next_fo;
         } else {
@@ -1121,8 +1119,8 @@ GC_INNER void GC_finalize(void)
             GC_dirty(prev_fo);
           }
           curr_fo -> fo_hidden_base = GC_HIDE_POINTER(real_ptr);
-          GC_bytes_finalized -=
-              (curr_fo -> fo_object_size) + sizeof(struct finalizable_object);
+          GC_bytes_finalized -= (word)(curr_fo -> fo_object_sz)
+                                + sizeof(struct finalizable_object);
 
           i = HASH2(real_ptr, GC_log_fo_table_size);
           fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
@@ -1199,8 +1197,8 @@ STATIC unsigned GC_interrupt_finalizers = 0;
         /* see it. */
         curr_fo -> fo_hidden_base =
                         (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
-        GC_bytes_finalized +=
-            curr_fo -> fo_object_size + sizeof(struct finalizable_object);
+        GC_bytes_finalized += (word)(curr_fo -> fo_object_sz)
+                              + sizeof(struct finalizable_object);
         curr_fo = next_fo;
       }
   }
diff --git a/gcj_mlc.c b/gcj_mlc.c
index 20ba588fd..c8d539677 100644
--- a/gcj_mlc.c
+++ b/gcj_mlc.c
@@ -211,7 +211,7 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_gcj_malloc(size_t lb,
   if (!GC_debugging_started) {
     GC_start_debugging_inner();
   }
-  result = GC_store_debug_info_inner(base, (word)lb, s, i);
+  result = GC_store_debug_info_inner(base, lb, s, i);
   ADD_CALL_CHAIN(base, ra);
   UNLOCK();
   GC_dirty(result);
diff --git a/headers.c b/headers.c
index 093d43067..55a78c8e8 100644
--- a/headers.c
+++ b/headers.c
@@ -354,7 +354,7 @@ GC_API void GC_CALL GC_apply_to_all_blocks(GC_walk_hblk_fn fn,
 GC_INNER struct hblk * GC_next_block(struct hblk *h, GC_bool allow_free)
 {
   REGISTER bottom_index * bi;
-  REGISTER word j = (ADDR(h) >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
+  REGISTER size_t j = (size_t)(ADDR(h) >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
 
   GC_ASSERT(I_HOLD_READER_LOCK());
   GET_BI(h, bi);
@@ -369,6 +369,7 @@ GC_INNER struct hblk * GC_next_block(struct hblk *h, GC_bool allow_free)
   while (bi != 0) {
     while (j < BOTTOM_SZ) {
       hdr * hhdr = bi -> index[j];
+
       if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
         j++;
       } else {
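finalize.c grows its disappearing-link and finalizer tables by doubling a power-of-two bucket array and rehashing every chain entry under the wider HASH3 mask. A minimal sketch of the same doubling-and-rehash pattern, with standalone names (hash is any function reduced modulo the new table size):

    #include <stdlib.h>

    struct entry {
      struct entry *next;
      void *key;
    };

    /* Rehash a power-of-two chained table into one twice as large,   */
    /* mirroring the GC_grow_table() loop; returns NULL if the        */
    /* allocation fails, in which case the old table stays usable.    */
    static struct entry **grow_table(struct entry **old, unsigned log_old,
                                     size_t (*hash)(void *, size_t))
    {
      size_t i, old_size = (size_t)1 << log_old, new_size = old_size << 1;
      struct entry **new_table = calloc(new_size, sizeof(struct entry *));

      if (NULL == new_table) return NULL;
      for (i = 0; i < old_size; i++) {
        struct entry *p, *next;

        for (p = old[i]; p != NULL; p = next) {
          size_t h = hash(p -> key, new_size); /* new, wider mask */

          next = p -> next;
          p -> next = new_table[h];
          new_table[h] = p;
        }
      }
      free(old);
      return new_table;
    }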
diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
index c0b256d21..606652d98 100644
--- a/include/private/gc_priv.h
+++ b/include/private/gc_priv.h
@@ -889,6 +889,7 @@ EXTERN_C_BEGIN
 #define divWORDSZ(n) ((n) / CPP_WORDSZ)
 
 #define SIGNB ((word)1 << (CPP_WORDSZ-1))
+#define SIZET_SIGNB (GC_SIZE_MAX ^ (GC_SIZE_MAX >> 1))
 
 #if CPP_WORDSZ / 8 != ALIGNMENT
 # define UNALIGNED_PTRS
@@ -970,7 +971,7 @@ EXTERN_C_BEGIN
 #define LOG_HBLKSIZE ((size_t)CPP_LOG_HBLKSIZE)
 #define HBLKSIZE ((size_t)1 << CPP_LOG_HBLKSIZE)
 
-#define GC_SQRT_SIZE_MAX ((((size_t)1) << (CPP_WORDSZ / 2)) - 1)
+#define GC_SQRT_SIZE_MAX ((((size_t)1) << (sizeof(size_t) * 8 / 2)) - 1)
 
 /* Max size objects supported by free list (larger objects are     */
 /* allocated directly with allchblk(), by rounding to the next     */
@@ -1057,7 +1058,7 @@ EXTERN_C_BEGIN
 #define PHT_SIZE (PHT_ENTRIES > CPP_WORDSZ ? PHT_ENTRIES / CPP_WORDSZ : 1)
 typedef word page_hash_table[PHT_SIZE];
 
-#define PHT_HASH(addr) ((ADDR(addr) >> LOG_HBLKSIZE) & (PHT_ENTRIES-1))
+#define PHT_HASH(p) ((size_t)((ADDR(p) >> LOG_HBLKSIZE) & (PHT_ENTRIES-1)))
 
 #define get_pht_entry_from_index(bl, index) \
                 (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
@@ -1327,7 +1328,7 @@ struct finalizable_object;
 
 struct dl_hashtbl_s {
   struct disappearing_link **head;
-  word entries;
+  size_t entries;
   unsigned log_size;
 };
@@ -1535,7 +1536,7 @@ struct _GC_arrays {
   word _n_heap_sects; /* Number of separately added heap sections. */
 # ifdef ANY_MSWIN
 #   define GC_n_heap_bases GC_arrays._n_heap_bases
-    word _n_heap_bases; /* See GC_heap_bases. */
+    size_t _n_heap_bases; /* see GC_heap_bases[] */
 # endif
 # ifdef USE_PROC_FOR_LIBRARIES
 #   define GC_n_memory GC_arrays._n_memory
@@ -1577,7 +1578,7 @@ struct _GC_arrays {
 # endif
 # define n_root_sets GC_arrays._n_root_sets
 # define GC_excl_table_entries GC_arrays._excl_table_entries
-  int _n_root_sets;     /* GC_static_roots[0..n_root_sets) contains the */
+  size_t _n_root_sets;  /* GC_static_roots[0..n_root_sets) contains the */
                         /* valid root sets.                             */
   size_t _excl_table_entries; /* Number of entries in use. */
 # define GC_ed_size GC_arrays._ed_size
@@ -1885,7 +1886,8 @@ struct GC_traced_stack_sect_s {
 #ifdef IA64
   /* Similar to GC_push_all_stack_sections() but for IA-64 registers store. */
   GC_INNER void GC_push_all_register_sections(ptr_t bs_lo, ptr_t bs_hi,
-                  int eager, struct GC_traced_stack_sect_s *traced_stack_sect);
+                  GC_bool eager,
+                  struct GC_traced_stack_sect_s *traced_stack_sect);
 #endif /* IA64 */
 
                         /* Marks are in a reserved area in */
@@ -1927,7 +1929,7 @@ struct GC_traced_stack_sect_s {
 #endif /* !USE_MARK_BYTES */
 
 #ifdef MARK_BIT_PER_OBJ
-# define MARK_BIT_NO(offset, sz) (((unsigned)(offset))/(sz))
+# define MARK_BIT_NO(offset, sz) ((offset) / (sz))
         /* Get the mark bit index corresponding to the given byte */
         /* offset and size (in bytes).                            */
 # define MARK_BIT_OFFSET(sz) 1
@@ -1935,7 +1937,7 @@ struct GC_traced_stack_sect_s {
 # define FINAL_MARK_BIT(sz) ((sz) > MAXOBJBYTES ? 1 : HBLK_OBJS(sz))
         /* Position of final, always set, mark bit. */
 #else
-# define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES((unsigned)(offset))
+# define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES(offset)
 # define MARK_BIT_OFFSET(sz) BYTES_TO_GRANULES(sz)
 # define FINAL_MARK_BIT(sz) \
                 ((sz) > MAXOBJBYTES ? MARK_BITS_PER_HBLK \
@@ -2276,7 +2278,7 @@ GC_INNER ptr_t GC_scratch_alloc(size_t bytes);
 #else
 # define GC_scratch_recycle_no_gww GC_scratch_recycle_inner
 #endif
-GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes);
+GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t sz);
                         /* Reuse the memory region by the heap. */
 
 #ifndef MARK_BIT_PER_OBJ
@@ -2642,7 +2644,7 @@ GC_EXTERN GC_bool GC_print_back_height;
   GC_INNER GC_bool GC_page_was_dirty(struct hblk *h);
                         /* Read retrieved dirty bits. */
 
-  GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
+  GC_INNER void GC_remove_protection(struct hblk *h, size_t nblocks,
                                      GC_bool is_ptrfree);
                 /* Block h is about to be written or allocated shortly. */
                 /* Ensure that all pages containing any part of the     */
@@ -2894,7 +2896,7 @@ GC_INNER void GC_start_debugging_inner(void); /* defined in dbg_mlc.c. */
 
 /* Store debugging info into p.  Return displaced pointer. */
 /* Assume we hold the allocator lock.                      */
-GC_INNER void *GC_store_debug_info_inner(void *p, word sz, const char *str,
+GC_INNER void *GC_store_debug_info_inner(void *p, size_t sz, const char *str,
                                          int linenum);
 
 #if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER) \
diff --git a/include/private/pthread_support.h b/include/private/pthread_support.h
index 1550f6cc6..8d3e320ec 100644
--- a/include/private/pthread_support.h
+++ b/include/private/pthread_support.h
@@ -96,7 +96,7 @@ typedef struct GC_StackContext_Rep {
 # endif
 
 # ifdef E2K
-    size_t ps_ofs; /* the current offset of the procedure stack */
+    size_t ps_ofs; /* the current offset in the procedure stack */
 # endif
 
 # ifndef GC_NO_FINALIZATION
@@ -406,7 +406,7 @@ GC_INNER void GC_wait_for_gc_completion(GC_bool);
 #endif
 
 #if defined(GC_ENABLE_SUSPEND_THREAD) && defined(SIGNAL_BASED_STOP_WORLD)
-  GC_INNER void GC_suspend_self_inner(GC_thread me, word suspend_cnt);
+  GC_INNER void GC_suspend_self_inner(GC_thread me, size_t suspend_cnt);
   GC_INNER void GC_suspend_self_blocked(ptr_t thread_me, void *context);
                         /* Wrapper over GC_suspend_self_inner. */
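In gc_priv.h, GC_SQRT_SIZE_MAX is now derived from the bit width of size_t itself, which keeps the multiplication-overflow guard correct even where size_t is narrower than word. A sketch of how such a constant is typically used (SQRT_SIZE_MAX and the function name are illustrative):

    #include <stdint.h>

    /* Largest value whose square still fits in size_t: the bit width */
    /* of size_t, halved, mirroring the definition above.             */
    #define SQRT_SIZE_MAX ((((size_t)1) << (sizeof(size_t) * 8 / 2)) - 1)

    /* If both factors are at most SQRT_SIZE_MAX, the product cannot  */
    /* exceed SIZE_MAX, so the division check is skipped entirely on  */
    /* the common path.                                               */
    static int product_may_overflow(size_t n, size_t item_sz)
    {
      return (n | item_sz) > SQRT_SIZE_MAX  /* quick filter */
             && n != 0 && item_sz > SIZE_MAX / n;
    }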
diff --git a/include/private/specific.h b/include/private/specific.h
index 18aa624d6..77e5019d6 100644
--- a/include/private/specific.h
+++ b/include/private/specific.h
@@ -68,10 +68,10 @@ EXTERN_C_BEGIN
 /* value.  This invariant must be preserved at ALL times, since */
 /* asynchronous reads are allowed.                              */
 typedef struct thread_specific_entry {
-    volatile AO_t qtid;   /* quick thread id, only for cache */
-    ts_entry_value_t value;
-    struct thread_specific_entry *next;
-    pthread_t thread;
+  volatile AO_t qtid;     /* quick thread id, only for cache */
+  ts_entry_value_t value;
+  struct thread_specific_entry *next;
+  pthread_t thread;
 } tse;
@@ -86,9 +86,9 @@ typedef struct thread_specific_entry {
 /* or at least thread stack separation, is at least 4 KB.               */
 /* Must be defined so that it never returns 0.  (Page 0 can't really be */
 /* part of any stack, since that would make 0 a valid stack pointer.)   */
-#define ts_quick_thread_id() (ADDR(GC_approx_sp()) >> 12)
+#define ts_quick_thread_id() ((size_t)(ADDR(GC_approx_sp()) >> 12))
 
-#define INVALID_QTID ((word)0)
+#define INVALID_QTID ((size_t)0)
 #define INVALID_THREADID ((pthread_t)0)
 
 union ptse_ao_u {
@@ -113,21 +113,22 @@ GC_INNER int GC_setspecific(tsd * key, void * value);
 GC_INNER void GC_remove_specific_after_fork(tsd * key, pthread_t t);
 
 /* An internal version of getspecific that assumes a cache miss. */
-GC_INNER void * GC_slow_getspecific(tsd * key, word qtid,
+GC_INNER void * GC_slow_getspecific(tsd * key, size_t qtid,
                                     tse * volatile * cache_entry);
 
 GC_INLINE void * GC_getspecific(tsd * key)
 {
-    word qtid = ts_quick_thread_id();
-    tse * volatile * entry_ptr = &(key -> cache[TS_CACHE_HASH(qtid)]);
-    const tse * entry = *entry_ptr; /* must be loaded only once */
-
-    GC_ASSERT(qtid != INVALID_QTID);
-    if (EXPECT(entry -> qtid == qtid, TRUE)) {
-      GC_ASSERT(entry -> thread == pthread_self());
-      return TS_REVEAL_PTR(entry -> value);
-    }
-    return GC_slow_getspecific(key, qtid, entry_ptr);
+  size_t qtid = ts_quick_thread_id();
+  tse * volatile * entry_ptr = &(key -> cache[TS_CACHE_HASH(qtid)]);
+  const tse * entry = *entry_ptr; /* must be loaded only once */
+
+  GC_ASSERT(qtid != INVALID_QTID);
+  if (EXPECT(entry -> qtid == qtid, TRUE)) {
+    GC_ASSERT(entry -> thread == pthread_self());
+    return TS_REVEAL_PTR(entry -> value);
+  }
+
+  return GC_slow_getspecific(key, qtid, entry_ptr);
 }
 
 EXTERN_C_END
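The fast path in specific.h hashes a "quick thread id" (the stack pointer shifted by the assumed 4 KiB minimum stack separation) into a per-key cache slot and validates a hit by comparing the stored qtid. A standalone sketch of the id derivation, where quick_thread_id stands in for ts_quick_thread_id()/GC_approx_sp():

    #include <stddef.h>
    #include <stdint.h>

    /* Nonzero, stable within one call frame, and distinct across    */
    /* threads as long as stacks are at least a page apart.          */
    static size_t quick_thread_id(void)
    {
      int dummy;                 /* its address approximates the sp */

      return (size_t)((uintptr_t)&dummy >> 12);
    }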
diff --git a/mark.c b/mark.c
index b0ab2b7f9..4bf19d714 100644
--- a/mark.c
+++ b/mark.c
@@ -190,12 +190,12 @@ GC_INNER void GC_clear_hdr_marks(hdr *hhdr)
 /* Set all mark bits in the header.  Used for uncollectible blocks. */
 GC_INNER void GC_set_hdr_marks(hdr *hhdr)
 {
-  unsigned i;
+  size_t i;
   size_t sz = hhdr -> hb_sz;
-  unsigned n_marks = (unsigned)FINAL_MARK_BIT(sz);
+  size_t n_marks = FINAL_MARK_BIT(sz);
 
 # ifdef USE_MARK_BYTES
-    for (i = 0; i <= n_marks; i += (unsigned)MARK_BIT_OFFSET(sz)) {
+    for (i = 0; i <= n_marks; i += MARK_BIT_OFFSET(sz)) {
       hhdr -> hb_marks[i] = 1;
     }
 # else
@@ -236,7 +236,7 @@ GC_API void GC_CALL GC_set_mark_bit(const void *p)
 {
   struct hblk *h = HBLKPTR(p);
   hdr * hhdr = HDR(h);
-  word bit_no = MARK_BIT_NO((word)((ptr_t)p - (ptr_t)h), hhdr -> hb_sz);
+  size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)p - (ptr_t)h), hhdr -> hb_sz);
 
   if (!mark_bit_from_hdr(hhdr, bit_no)) {
     set_mark_bit_from_hdr(hhdr, bit_no);
@@ -248,7 +248,7 @@ GC_API void GC_CALL GC_clear_mark_bit(const void *p)
 {
   struct hblk *h = HBLKPTR(p);
   hdr * hhdr = HDR(h);
-  word bit_no = MARK_BIT_NO((word)((ptr_t)p - (ptr_t)h), hhdr -> hb_sz);
+  size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)p - (ptr_t)h), hhdr -> hb_sz);
 
   if (mark_bit_from_hdr(hhdr, bit_no)) {
     size_t n_marks = hhdr -> hb_n_marks;
@@ -272,7 +272,7 @@ GC_API int GC_CALL GC_is_marked(const void *p)
 {
   struct hblk *h = HBLKPTR(p);
   hdr * hhdr = HDR(h);
-  word bit_no = MARK_BIT_NO((word)((ptr_t)p - (ptr_t)h), hhdr -> hb_sz);
+  size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)p - (ptr_t)h), hhdr -> hb_sz);
 
   return (int)mark_bit_from_hdr(hhdr, bit_no); /* 0 or 1 */
 }
@@ -969,20 +969,20 @@ GC_INNER void GC_wait_for_markers_init(void)
 }
 
 /* Steal mark stack entries starting at mse low into mark stack local */
-/* until we either steal mse high, or we have max entries.            */
+/* until we either steal mse high, or we have n_to_get entries.       */
 /* Return a pointer to the top of the local mark stack.               */
 /* (*next) is replaced by a pointer to the next unscanned mark stack  */
 /* entry.                                                             */
 STATIC mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
-                                 unsigned max, mse **next)
+                                 size_t n_to_get, mse **next)
 {
   mse *p;
   mse *top = local - 1;
-  unsigned i = 0;
+  size_t i = 0;
 
   GC_ASSERT(ADDR_GE((ptr_t)high, (ptr_t)(low - 1))
             && (word)(high - low + 1) <= GC_mark_stack_size);
-  for (p = low; ADDR_GE((ptr_t)high, (ptr_t)p) && i <= max; ++p) {
+  for (p = low; ADDR_GE((ptr_t)high, (ptr_t)p) && i <= n_to_get; ++p) {
     word descr = (word)AO_load(&p->mse_descr.ao);
 
     if (descr != 0) {
       /* Must be ordered after read of descr: */
@@ -1000,7 +1000,8 @@ STATIC mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
                 || ADDR(p -> mse_start) >= GC_greatest_real_heap_addr);
       /* If this is a big object, count it as size/256 + 1 objects. */
       ++i;
-      if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (int)(descr >> 8);
+      if ((descr & GC_DS_TAGS) == GC_DS_LENGTH)
+        i += (size_t)(descr >> 8);
     }
   }
   *next = p;
@@ -1120,10 +1121,8 @@ STATIC void GC_mark_local(mse *local_mark_stack, int id)
   GC_VERBOSE_LOG_PRINTF("Starting mark helper %d\n", id);
   GC_release_mark_lock();
   for (;;) {
-    size_t n_on_stack;
-    unsigned n_to_get;
-    mse * my_top;
-    mse * local_top;
+    size_t n_on_stack, n_to_get;
+    mse *my_top, *local_top;
     mse * global_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
 
     GC_ASSERT(ADDR_GE((ptr_t)my_first_nonempty, (ptr_t)GC_mark_stack)
@@ -1906,7 +1905,7 @@ STATIC void GC_push_marked(struct hblk *h, const hdr *hhdr)
   size_t sz = hhdr -> hb_sz;
   word descr = hhdr -> hb_descr;
   ptr_t p;
-  word bit_no;
+  size_t bit_no;
   ptr_t lim;
   mse * mark_stack_top;
   mse * mark_stack_limit = GC_mark_stack_limit;
diff --git a/mark_rts.c b/mark_rts.c
index 06f1b56aa..e26ccc5ac 100644
--- a/mark_rts.c
+++ b/mark_rts.c
@@ -43,7 +43,7 @@ int GC_no_dls = 0;      /* Register dynamic library data segments. */
   /* Should return the same value as GC_root_size. */
   GC_INNER word GC_compute_root_size(void)
   {
-    int i;
+    size_t i;
     word size = 0;
 
     for (i = 0; i < n_root_sets; i++) {
@@ -57,7 +57,7 @@ int GC_no_dls = 0;      /* Register dynamic library data segments. */
   /* For debugging: */
   void GC_print_static_roots(void)
   {
-    int i;
+    size_t i;
     word size;
 
     for (i = 0; i < n_root_sets; i++) {
@@ -79,8 +79,8 @@ int GC_no_dls = 0;      /* Register dynamic library data segments. */
 /* Is the address p in one of the registered static root sections? */
 GC_INNER GC_bool GC_is_static_root(ptr_t p)
 {
-  static int last_root_set = MAX_ROOT_SETS;
-  int i;
+  static size_t last_root_set = MAX_ROOT_SETS;
+  size_t i;
 
 # if defined(CPPCHECK)
     if (n_root_sets > MAX_ROOT_SETS) ABORT("Bad n_root_sets");
 # endif
@@ -111,7 +111,7 @@ int GC_no_dls = 0;      /* Register dynamic library data segments. */
         -- really defined in gc_priv.h
 */
 
-  GC_INLINE int rt_hash(ptr_t addr)
+  GC_INLINE size_t rt_hash(ptr_t addr)
   {
     word val = ADDR(addr);
 
@@ -122,14 +122,14 @@ int GC_no_dls = 0;      /* Register dynamic library data segments. */
       val ^= val >> (4*LOG_RT_SIZE);
 #   endif
     val ^= val >> (2*LOG_RT_SIZE);
-    return ((val >> LOG_RT_SIZE) ^ val) & (RT_SIZE-1);
+    return (size_t)((val >> LOG_RT_SIZE) ^ val) & (RT_SIZE-1);
   }
 
   /* Is a range starting at b already in the table?  If so, return a */
   /* pointer to it, else NULL.                                       */
   GC_INNER void * GC_roots_present(ptr_t b)
   {
-    int h;
+    size_t h;
     struct roots *p;
 
     GC_ASSERT(I_HOLD_READER_LOCK());
@@ -143,7 +143,7 @@ int GC_no_dls = 0;      /* Register dynamic library data segments. */
   /* Add the given root structure to the index. */
   GC_INLINE void add_roots_to_index(struct roots *p)
   {
-    int h = rt_hash(p -> r_start);
+    size_t h = rt_hash(p -> r_start);
 
     p -> r_next = GC_root_index[h];
     GC_root_index[h] = p;
@@ -186,7 +186,7 @@ void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
         /* virtually guaranteed to be dominated by the time it */
         /* takes to scan the roots.                            */
     {
-      int i;
+      size_t i;
       struct roots * old = NULL; /* initialized to prevent warning. */
 
       for (i = 0; i < n_root_sets; i++) {
@@ -257,8 +257,8 @@ void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
     }
 # ifdef DEBUG_ADD_DEL_ROOTS
-    GC_log_printf("Adding data root section %d: %p .. %p%s\n",
-                  n_root_sets, (void *)b, (void *)e,
+    GC_log_printf("Adding data root section %u: %p .. %p%s\n",
+                  (unsigned)n_root_sets, (void *)b, (void *)e,
                   tmp ? " (temporary)" : "");
 # endif
   GC_static_roots[n_root_sets].r_start = (ptr_t)b;
@@ -290,9 +290,10 @@ GC_API void GC_CALL GC_clear_roots(void)
   UNLOCK();
 }
 
-STATIC void GC_remove_root_at_pos(int i)
+STATIC void GC_remove_root_at_pos(size_t i)
 {
   GC_ASSERT(I_HOLD_LOCK());
+  GC_ASSERT(i < n_root_sets);
 # ifdef DEBUG_ADD_DEL_ROOTS
-    GC_log_printf("Remove data root section at %d: %p .. %p%s\n", i,
-                  (void *)GC_static_roots[i].r_start,
+    GC_log_printf("Remove data root section at %u: %p .. %p%s\n",
+                  (unsigned)i, (void *)GC_static_roots[i].r_start,
                   (void *)GC_static_roots[i].r_end,
                   GC_static_roots[i].r_tmp ? " (temporary)" : "");
 # endif
@@ -310,7 +311,8 @@ STATIC void GC_remove_root_at_pos(int i)
 #ifndef ANY_MSWIN
   STATIC void GC_rebuild_root_index(void)
   {
-    int i;
+    size_t i;
+
     BZERO(GC_root_index, RT_SIZE * sizeof(void *));
     for (i = 0; i < n_root_sets; i++)
       add_roots_to_index(GC_static_roots + i);
@@ -320,9 +322,9 @@ STATIC void GC_remove_root_at_pos(int i)
 #if defined(DYNAMIC_LOADING) || defined(ANY_MSWIN) || defined(PCR)
   STATIC void GC_remove_tmp_roots(void)
   {
-    int i;
+    size_t i;
 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
-      int old_n_roots = n_root_sets;
+      size_t old_n_roots = n_root_sets;
 # endif
 
     GC_ASSERT(I_HOLD_LOCK());
@@ -356,9 +358,9 @@ GC_API void GC_CALL GC_remove_roots(void *b, void *e)
 
 STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
 {
-  int i;
+  size_t i;
 # ifndef ANY_MSWIN
-    int old_n_roots = n_root_sets;
+    size_t old_n_roots = n_root_sets;
 # endif
 
   GC_ASSERT(I_HOLD_LOCK());
@@ -379,7 +381,7 @@ STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
 #ifdef USE_PROC_FOR_LIBRARIES
   /* Exchange the elements of the roots table.  Requires rebuild of */
   /* the roots index table after the swap.                          */
-  GC_INLINE void swap_static_roots(int i, int j)
+  GC_INLINE void swap_static_roots(size_t i, size_t j)
   {
     ptr_t r_start = GC_static_roots[i].r_start;
     ptr_t r_end = GC_static_roots[i].r_end;
@@ -399,7 +401,7 @@ STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
   /* this function is called repeatedly by GC_register_map_entries. */
   GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e)
   {
-    int i;
+    size_t i;
     GC_bool rebuild = FALSE;
 
     GC_ASSERT(I_HOLD_LOCK());
@@ -410,7 +412,8 @@ STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
       if (GC_static_roots[i].r_tmp) {
         /* The remaining roots are skipped as they are all temporary. */
 # ifdef GC_ASSERTIONS
-          int j;
+          size_t j;
+
           for (j = i + 1; j < n_root_sets; j++) {
             GC_ASSERT(GC_static_roots[j].r_tmp);
           }
@@ -430,7 +433,7 @@ STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
         GC_static_roots[i].r_end = b;
         /* No need to rebuild as hash does not use r_end value. */
         if (ADDR_LT(e, r_end)) {
-          int j;
+          size_t j;
 
           if (rebuild) {
             GC_rebuild_root_index();
@@ -452,9 +455,9 @@ STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
           GC_static_roots[i].r_start = e;
         } else {
           GC_remove_root_at_pos(i);
-          if (i < n_root_sets - 1 && GC_static_roots[i].r_tmp
+          if (i + 1 < n_root_sets && GC_static_roots[i].r_tmp
               && !GC_static_roots[i + 1].r_tmp) {
-            int j;
+            size_t j;
 
             for (j = i + 2; j < n_root_sets; j++)
               if (GC_static_roots[j].r_tmp)
@@ -480,20 +483,20 @@ STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
 GC_API int GC_CALL GC_is_tmp_root(void *p)
 {
 # ifndef HAS_REAL_READER_LOCK
-    static int last_root_set; /* initialized to 0; no shared access */
+    static size_t last_root_set; /* initialized to 0; no shared access */
 # elif defined(AO_HAVE_load) || defined(AO_HAVE_store)
     static volatile AO_t last_root_set;
 # else
-    static volatile int last_root_set;
+    static volatile size_t last_root_set;
                 /* A race is acceptable, it's just a cached index. */
 # endif
-  int i;
+  size_t i;
   int res;
 
   READER_LOCK();
   /* First try the cached root. */
 # if defined(AO_HAVE_load) && defined(HAS_REAL_READER_LOCK)
-    i = (int)(unsigned)AO_load(&last_root_set);
+    i = AO_load(&last_root_set);
 # else
     i = last_root_set;
 # endif
@@ -508,7 +511,7 @@ STATIC void GC_remove_roots_inner(ptr_t b, ptr_t e)
                      GC_static_roots[i].r_end)) {
     res = (int)GC_static_roots[i].r_tmp;
 # if defined(AO_HAVE_store) && defined(HAS_REAL_READER_LOCK)
-      AO_store(&last_root_set, (AO_t)(unsigned)i);
+      AO_store(&last_root_set, i);
 # else
       last_root_set = i;
 # endif
@@ -688,7 +691,8 @@ STATIC void GC_push_conditional_with_exclusions(ptr_t bottom, ptr_t top,
 #ifdef IA64
   /* Similar to GC_push_all_stack_sections() but for IA-64 registers store. */
   GC_INNER void GC_push_all_register_sections(ptr_t bs_lo, ptr_t bs_hi,
-                  int eager, struct GC_traced_stack_sect_s *traced_stack_sect)
+                  GC_bool eager,
+                  struct GC_traced_stack_sect_s *traced_stack_sect)
   {
     GC_ASSERT(I_HOLD_LOCK());
     while (traced_stack_sect != NULL) {
@@ -920,7 +924,7 @@ STATIC void GC_push_regs_and_stack(ptr_t cold_gc_frame)
 /* that it is OK to miss some register values. */
 GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame)
 {
-  int i;
+  size_t i;
   unsigned kind;
 
   GC_ASSERT(I_HOLD_LOCK());
@@ -951,6 +955,7 @@ GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame)
   /* marking the freelists. */
   for (kind = 0; kind < GC_n_kinds; kind++) {
     const void *base = GC_base(GC_obj_kinds[kind].ok_freelist);
+
     if (base != NULL) {
       GC_set_mark_bit(base);
     }
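With root-set indices widened to size_t, the mark_rts.c logging must cast them for the %u conversion; passing a size_t straight to %d (as the "Remove data root section" call originally did) is undefined behavior. Both portable options, in one illustrative helper:

    #include <stdio.h>

    static void log_root_section(size_t i, void *b, void *e)
    {
      /* The collector's style: cast down to unsigned for %u. */
      printf("Adding data root section %u: %p .. %p\n", (unsigned)i, b, e);
      /* The C99 alternative: the %zu length modifier.        */
      printf("Adding data root section %zu: %p .. %p\n", i, b, e);
    }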
diff --git a/misc.c b/misc.c
index 74b1d0c3c..cc8ba158d 100644
--- a/misc.c
+++ b/misc.c
@@ -281,13 +281,13 @@ STATIC void GC_init_size_map(void)
 # else
     STATIC word GC_stack_last_cleared = 0;
                         /* GC_gc_no value when we last did this. */
+    STATIC word GC_bytes_allocd_at_reset = 0;
     STATIC ptr_t GC_min_sp = NULL;
                         /* Coolest stack pointer value from which     */
                         /* we've already cleared the stack.           */
     STATIC ptr_t GC_high_water = NULL;
                         /* "hottest" stack pointer value we have seen */
                         /* recently.  Degrades over time.             */
-    STATIC word GC_bytes_allocd_at_reset = 0;
 # define DEGRADE_RATE 50
 # endif
@@ -430,7 +430,7 @@ GC_API void * GC_CALL GC_base(void * p)
   bottom_index *bi;
   hdr *hhdr;
   ptr_t limit;
-  word sz;
+  size_t sz;
 
   if (!EXPECT(GC_is_initialized, TRUE)) return NULL;
   h = HBLKPTR(r);
@@ -1331,6 +1331,7 @@ GC_API void GC_CALL GC_init(void)
     }
 # endif
 # if !defined(CPPCHECK)
+    GC_STATIC_ASSERT(sizeof(size_t) <= sizeof(ptrdiff_t));
     GC_STATIC_ASSERT(sizeof(ptrdiff_t) == sizeof(word));
     GC_STATIC_ASSERT(sizeof(signed_word) == sizeof(word));
     GC_STATIC_ASSERT(sizeof(ptr_t) == sizeof(GC_uintptr_t));
@@ -1406,7 +1407,7 @@ GC_API void GC_CALL GC_init(void)
   }
   if (GC_all_interior_pointers)
     GC_initialize_offsets();
-  GC_register_displacement_inner(0L);
+  GC_register_displacement_inner(0);
 # if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC)
     if (!GC_all_interior_pointers) {
       /* TLS ABI uses pointer-sized offsets for dtv. */
@@ -2154,7 +2155,7 @@ GC_API void ** GC_CALL GC_new_free_list_inner(void)
   GC_ASSERT(I_HOLD_LOCK());
   result = GC_INTERNAL_MALLOC((MAXOBJGRANULES+1) * sizeof(ptr_t), PTRFREE);
   if (NULL == result) ABORT("Failed to allocate free list for new kind");
-  BZERO(result, (MAXOBJGRANULES+1)*sizeof(ptr_t));
+  BZERO(result, (MAXOBJGRANULES+1) * sizeof(ptr_t));
   return (void **)result;
 }
@@ -2305,17 +2306,19 @@ GC_API void * GC_CALL GC_call_with_stack_base(GC_stack_base_func fn, void *arg)
 
 #ifndef THREADS
 
-GC_INNER ptr_t GC_blocked_sp = NULL;
+  GC_INNER ptr_t GC_blocked_sp = NULL;
                 /* NULL value means we are not inside GC_do_blocking() call. */
+
 # ifdef IA64
     STATIC ptr_t GC_blocked_register_sp = NULL;
 # endif
 
-GC_INNER struct GC_traced_stack_sect_s *GC_traced_stack_sect = NULL;
+  GC_INNER struct GC_traced_stack_sect_s *GC_traced_stack_sect = NULL;
 
-/* This is nearly the same as in pthread_support.c. */
-GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn, void *client_data)
-{
+  /* This is nearly the same as in pthread_support.c. */
+  GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
+                                               void *client_data)
+  {
     struct GC_traced_stack_sect_s stacksect;
 
     GC_ASSERT(GC_is_initialized);
@@ -2362,11 +2365,11 @@ GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn, void *client_data)
     GC_blocked_sp = stacksect.saved_stack_ptr;
 
     return client_data; /* result */
-}
+  }
 
-/* This is nearly the same as in pthread_support.c. */
-STATIC void GC_do_blocking_inner(ptr_t data, void *context)
-{
+  /* This is nearly the same as in pthread_support.c. */
+  STATIC void GC_do_blocking_inner(ptr_t data, void *context)
+  {
     struct blocking_data * d = (struct blocking_data *)data;
 
     UNUSED_ARG(context);
@@ -2392,7 +2395,7 @@ STATIC void GC_do_blocking_inner(ptr_t data, void *context)
       GC_noop1_ptr(GC_blocked_sp);
 # endif
     GC_blocked_sp = NULL;
-}
+  }
 
 GC_API void GC_CALL GC_set_stackbottom(void *gc_thread_handle,
                                        const struct GC_stack_base *sb)
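misc.c adds a compile-time check that size_t is no wider than ptrdiff_t, which the size_t migration relies on. The classic C89-compatible trick behind GC_STATIC_ASSERT-style macros is an expression whose type becomes ill-formed when the condition fails; a sketch (the macro here is illustrative, not the collector's exact definition):

    #include <stddef.h>

    /* Fails to compile (negative array size) when expr is false. */
    #define MY_STATIC_ASSERT(expr) ((void)sizeof(char[(expr) ? 1 : -1]))

    void size_type_sanity(void)
    {
      MY_STATIC_ASSERT(sizeof(size_t) <= sizeof(ptrdiff_t));
    }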
If we're running under win32s, assume that no */ + /* DLLs will be loaded. I doubt anyone still runs win32s, but... */ + DWORD v = GetVersion(); - GC_wnt = !(v & (DWORD)0x80000000UL); - GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3); -# endif -# ifdef USE_MUNMAP - if (GC_no_win32_dlls) { - /* Turn off unmapping for safety (since may not work well with */ - /* GlobalAlloc). */ - GC_unmap_threshold = 0; - } -# endif - } + GC_wnt = !(v & (DWORD)0x80000000UL); + GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3); +# endif +# ifdef USE_MUNMAP + if (GC_no_win32_dlls) { + /* Turn off unmapping for safety (since may not work well */ + /* with GlobalAlloc). */ + GC_unmap_threshold = 0; + } +# endif + } - /* Return the smallest address a such that VirtualQuery */ - /* returns correct results for all addresses between a and start. */ - /* Assumes VirtualQuery returns correct information for start. */ - STATIC ptr_t GC_least_described_address(ptr_t start) - { - MEMORY_BASIC_INFORMATION buf; - ptr_t limit = (ptr_t)GC_sysinfo.lpMinimumApplicationAddress; - ptr_t p = PTR_ALIGN_DOWN(start, GC_page_size); + /* Return the smallest address p such that VirtualQuery returns */ + /* correct results for all addresses between p and start. Assumes */ + /* VirtualQuery() returns correct information for start. */ + STATIC ptr_t GC_least_described_address(ptr_t start) + { + ptr_t limit = (ptr_t)GC_sysinfo.lpMinimumApplicationAddress; + ptr_t p = PTR_ALIGN_DOWN(start, GC_page_size); - GC_ASSERT(GC_page_size != 0); - for (;;) { + GC_ASSERT(GC_page_size != 0); + for (;;) { + MEMORY_BASIC_INFORMATION buf; size_t result; ptr_t q = p - GC_page_size; @@ -1824,160 +1827,165 @@ void GC_register_data_segments(void) result = VirtualQuery((LPVOID)q, &buf, sizeof(buf)); if (result != sizeof(buf) || 0 == buf.AllocationBase) break; p = (ptr_t)buf.AllocationBase; + } + return p; } - return p; - } -# endif /* MSWIN32 */ -# if defined(USE_WINALLOC) && !defined(REDIRECT_MALLOC) - /* We maintain a linked list of AllocationBase values that we know */ - /* correspond to malloc heap sections. Currently this is only called */ - /* during a GC. But there is some hope that for long running */ - /* programs we will eventually see most heap sections. */ - - /* In the long run, it would be more reliable to occasionally walk */ - /* the malloc heap with HeapWalk on the default heap. But that */ - /* apparently works only for NT-based Windows. */ - - STATIC size_t GC_max_root_size = 100000; /* Appr. largest root size. */ - - /* In the long run, a better data structure would also be nice ... */ - STATIC struct GC_malloc_heap_list { - void * allocation_base; - struct GC_malloc_heap_list *next; - } *GC_malloc_heap_l = 0; + STATIC void GC_register_root_section(ptr_t static_root) + { + ptr_t p, base, limit; - /* Is p the base of one of the malloc heap sections we already know */ - /* about? 
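
The rewritten GC_register_root_section above walks the address space with VirtualQuery, coalescing adjacent committed writable regions before registering them as roots. A simplified sketch of that walk (dump_writable_regions and the protection mask are stand-ins for the collector's is_writable() test; Windows only):

    #include <windows.h>
    #include <stdio.h>

    static void dump_writable_regions(char *p, char *max_addr)
    {
        while (p < max_addr) {
            MEMORY_BASIC_INFORMATION buf;

            if (VirtualQuery(p, &buf, sizeof(buf)) != sizeof(buf))
                break;              /* unmapped or query failed */
            if (buf.State == MEM_COMMIT
                && (buf.Protect & (PAGE_READWRITE | PAGE_WRITECOPY
                                   | PAGE_EXECUTE_READWRITE
                                   | PAGE_EXECUTE_WRITECOPY)) != 0)
                printf("%p .. %p\n", buf.BaseAddress,
                       (void *)((char *)buf.BaseAddress + buf.RegionSize));
            p = (char *)buf.BaseAddress + buf.RegionSize; /* next region */
        }
    }
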
*/ - STATIC GC_bool GC_is_malloc_heap_base(const void *p) - { - struct GC_malloc_heap_list *q; + GC_ASSERT(I_HOLD_LOCK()); + if (!GC_no_win32_dlls) return; - for (q = GC_malloc_heap_l; q != NULL; q = q -> next) { - if (q -> allocation_base == p) return TRUE; - } - return FALSE; - } + p = GC_least_described_address(static_root); + base = limit = p; + while (ADDR_LT(p, (ptr_t)GC_sysinfo.lpMaximumApplicationAddress)) { + MEMORY_BASIC_INFORMATION buf; + size_t result = VirtualQuery((LPVOID)p, &buf, sizeof(buf)); - STATIC void *GC_get_allocation_base(void *p) - { - MEMORY_BASIC_INFORMATION buf; - size_t result = VirtualQuery(p, &buf, sizeof(buf)); - if (result != sizeof(buf)) { - ABORT("Weird VirtualQuery result"); - } - return buf.AllocationBase; - } + if (result != sizeof(buf) || 0 == buf.AllocationBase + || GC_is_heap_base(buf.AllocationBase)) break; + if (ADDR(p) > GC_WORD_MAX - buf.RegionSize) break; /* overflow */ - GC_INNER void GC_add_current_malloc_heap(void) - { - struct GC_malloc_heap_list *new_l = (struct GC_malloc_heap_list *) - malloc(sizeof(struct GC_malloc_heap_list)); - void *candidate; - - if (NULL == new_l) return; - new_l -> allocation_base = NULL; - /* to suppress maybe-uninitialized gcc warning */ - - candidate = GC_get_allocation_base(new_l); - if (GC_is_malloc_heap_base(candidate)) { - /* Try a little harder to find malloc heap. */ - size_t req_size = 10000; - do { - void *p = malloc(req_size); - if (0 == p) { - free(new_l); - return; + if (buf.State == MEM_COMMIT && is_writable(buf.Protect)) { + if (p != limit) { + if (base != limit) GC_add_roots_inner(base, limit, FALSE); + base = p; } - candidate = GC_get_allocation_base(p); - free(p); - req_size *= 2; - } while (GC_is_malloc_heap_base(candidate) - && req_size < GC_max_root_size/10 && req_size < 500000); - if (GC_is_malloc_heap_base(candidate)) { - free(new_l); - return; + limit = p + buf.RegionSize; } + p += buf.RegionSize; + } + if (base != limit) GC_add_roots_inner(base, limit, FALSE); } - GC_COND_LOG_PRINTF("Found new system malloc AllocationBase at %p\n", - candidate); - new_l -> allocation_base = candidate; - new_l -> next = GC_malloc_heap_l; - GC_malloc_heap_l = new_l; - } +# endif /* MSWIN32 */ - /* Free all the linked list nodes. Could be invoked at process exit */ - /* to avoid memory leak complains of a dynamic code analysis tool. */ - STATIC void GC_free_malloc_heap_list(void) - { - struct GC_malloc_heap_list *q = GC_malloc_heap_l; +# ifdef ANY_MSWIN - GC_malloc_heap_l = NULL; - while (q != NULL) { - struct GC_malloc_heap_list *next = q -> next; - free(q); - q = next; - } - } -# endif /* USE_WINALLOC && !REDIRECT_MALLOC */ +# if defined(USE_WINALLOC) && !defined(REDIRECT_MALLOC) + /* We maintain a linked list of AllocationBase values that we */ + /* know correspond to malloc heap sections. Currently this */ + /* is only called during a GC. But there is some hope that */ + /* for long running programs we will eventually see most heap */ + /* sections. */ + + /* In the long run, it would be more reliable to occasionally */ + /* walk the malloc heap with HeapWalk on the default heap. */ + /* But that apparently works only for NT-based Windows. */ + + STATIC size_t GC_max_root_size = 100000; /* approx. largest root size */ + + /* In the long run, a better data structure would also be nice... */ + STATIC struct GC_malloc_heap_list { + void * allocation_base; + struct GC_malloc_heap_list *next; + } *GC_malloc_heap_l = 0; + + /* Is p the base of one of the malloc heap sections we already */ + /* know about? 
*/ + STATIC GC_bool GC_is_malloc_heap_base(const void *p) + { + struct GC_malloc_heap_list *q; - /* Is p the start of either the malloc heap, or of one of our */ - /* heap sections? */ - GC_INNER GC_bool GC_is_heap_base(const void *p) - { - int i; + for (q = GC_malloc_heap_l; q != NULL; q = q -> next) { + if (q -> allocation_base == p) return TRUE; + } + return FALSE; + } -# if defined(USE_WINALLOC) && !defined(REDIRECT_MALLOC) - if (GC_root_size > GC_max_root_size) - GC_max_root_size = GC_root_size; - if (GC_is_malloc_heap_base(p)) - return TRUE; -# endif - for (i = 0; i < (int)GC_n_heap_bases; i++) { - if (GC_heap_bases[i] == p) return TRUE; - } - return FALSE; - } + STATIC void *GC_get_allocation_base(void *p) + { + MEMORY_BASIC_INFORMATION buf; + size_t result = VirtualQuery(p, &buf, sizeof(buf)); -#ifdef MSWIN32 - STATIC void GC_register_root_section(ptr_t static_root) - { - MEMORY_BASIC_INFORMATION buf; - ptr_t p, base, limit; + if (result != sizeof(buf)) { + ABORT("Weird VirtualQuery result"); + } + return buf.AllocationBase; + } - GC_ASSERT(I_HOLD_LOCK()); - if (!GC_no_win32_dlls) return; - p = base = limit = GC_least_described_address(static_root); - while (ADDR_LT(p, (ptr_t)GC_sysinfo.lpMaximumApplicationAddress)) { - size_t result = VirtualQuery((LPVOID)p, &buf, sizeof(buf)); - DWORD protect; + GC_INNER void GC_add_current_malloc_heap(void) + { + struct GC_malloc_heap_list *new_l = (struct GC_malloc_heap_list *) + malloc(sizeof(struct GC_malloc_heap_list)); + void *candidate; - if (result != sizeof(buf) || buf.AllocationBase == 0 - || GC_is_heap_base(buf.AllocationBase)) break; - if (ADDR(p) > GC_WORD_MAX - buf.RegionSize) break; /* overflow */ + if (NULL == new_l) return; + new_l -> allocation_base = NULL; + /* to suppress maybe-uninitialized gcc warning */ + + candidate = GC_get_allocation_base(new_l); + if (GC_is_malloc_heap_base(candidate)) { + /* Try a little harder to find malloc heap. */ + size_t req_size = 10000; - protect = buf.Protect; - if (buf.State == MEM_COMMIT - && is_writable(protect)) { - if (p != limit) { - if (base != limit) GC_add_roots_inner(base, limit, FALSE); - base = p; + do { + void *p = malloc(req_size); + + if (NULL == p) { + free(new_l); + return; } - limit = p + buf.RegionSize; + candidate = GC_get_allocation_base(p); + free(p); + req_size *= 2; + } while (GC_is_malloc_heap_base(candidate) + && req_size < GC_max_root_size / 10 && req_size < 500000); + if (GC_is_malloc_heap_base(candidate)) { + free(new_l); + return; + } } - p += buf.RegionSize; + GC_COND_LOG_PRINTF("Found new system malloc AllocationBase at %p\n", + candidate); + new_l -> allocation_base = candidate; + new_l -> next = GC_malloc_heap_l; + GC_malloc_heap_l = new_l; } - if (base != limit) GC_add_roots_inner(base, limit, FALSE); - } -#endif /* MSWIN32 */ - void GC_register_data_segments(void) - { -# ifdef MSWIN32 - GC_register_root_section((ptr_t)&GC_pages_executable); - /* any other GC global variable would fit too. */ -# endif - } + /* Free all the linked list nodes. Could be invoked at process */ + /* exit to avoid memory leak complains of a dynamic code analysis */ + /* tool. */ + STATIC void GC_free_malloc_heap_list(void) + { + struct GC_malloc_heap_list *q = GC_malloc_heap_l; + + GC_malloc_heap_l = NULL; + while (q != NULL) { + struct GC_malloc_heap_list *next = q -> next; + + free(q); + q = next; + } + } +# endif /* USE_WINALLOC && !REDIRECT_MALLOC */ + + /* Is p the start of either the malloc heap, or of one of our */ + /* heap sections? 
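
GC_add_current_malloc_heap above discovers a new malloc heap section by allocating a probe object and asking VirtualQuery for its AllocationBase. The bookkeeping reduces to a membership-tested singly linked list; a stand-alone sketch (names here are local stand-ins):

    #include <stdlib.h>

    struct base_list { void *base; struct base_list *next; };
    static struct base_list *known_bases = NULL;

    static int is_known_base(const void *p)
    {
        const struct base_list *q;

        for (q = known_bases; q != NULL; q = q -> next)
            if (q -> base == p) return 1;
        return 0;
    }

    static int remember_base(void *p)
    {
        struct base_list *n;

        if (is_known_base(p)) return 1;   /* already recorded */
        n = (struct base_list *)malloc(sizeof(*n));
        if (NULL == n) return 0;          /* mirror the silent failure above */
        n -> base = p;
        n -> next = known_bases;
        known_bases = n;
        return 1;
    }
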
*/ + GC_INNER GC_bool GC_is_heap_base(const void *p) + { + size_t i; + +# if defined(USE_WINALLOC) && !defined(REDIRECT_MALLOC) + if (GC_root_size > GC_max_root_size) + GC_max_root_size = GC_root_size; + if (GC_is_malloc_heap_base(p)) + return TRUE; +# endif + for (i = 0; i < GC_n_heap_bases; i++) { + if (GC_heap_bases[i] == p) return TRUE; + } + return FALSE; + } + + void GC_register_data_segments(void) + { +# ifdef MSWIN32 + GC_register_root_section((ptr_t)&GC_pages_executable); + /* any other GC global variable would fit too */ +# endif + } # else /* !ANY_MSWIN */ @@ -2023,142 +2031,142 @@ void GC_register_data_segments(void) } # endif /* SVR4 || AIX || DGUX */ -#ifdef DATASTART_USES_BSDGETDATASTART -/* It's unclear whether this should be identical to the above, or */ -/* whether it should apply to non-x86 architectures. */ -/* For now we don't assume that there is always an empty page after */ -/* etext. But in some cases there actually seems to be slightly more. */ -/* This also deals with holes between read-only data and writable data. */ - GC_INNER ptr_t GC_FreeBSDGetDataStart(size_t max_page_size, - ptr_t etext_addr) - { - volatile ptr_t result = PTR_ALIGN_UP(etext_addr, sizeof(ptr_t)); - volatile ptr_t next_page = PTR_ALIGN_UP(etext_addr, max_page_size); - - GC_ASSERT(max_page_size % sizeof(ptr_t) == 0); - GC_setup_temporary_fault_handler(); - if (SETJMP(GC_jmp_buf) == 0) { - /* Try reading at the address. */ - /* This should happen before there is another thread. */ - for (; ADDR_LT(next_page, DATAEND); next_page += max_page_size) - GC_noop1((word)(*(volatile unsigned char *)next_page)); - GC_reset_fault_handler(); - } else { - GC_reset_fault_handler(); - /* As above, we go to plan B. */ - result = (ptr_t)GC_find_limit(DATAEND, FALSE); - } - return result; - } -#endif /* DATASTART_USES_BSDGETDATASTART */ - -#ifdef AMIGA - -# define GC_AMIGA_DS -# include "extra/AmigaOS.c" -# undef GC_AMIGA_DS +# ifdef DATASTART_USES_BSDGETDATASTART + /* It's unclear whether this should be identical to the above, or */ + /* whether it should apply to non-x86 architectures. For now we */ + /* do not assume that there is always an empty page after etext. */ + /* But in some cases there actually seems to be slightly more. */ + /* It also deals with holes between read-only and writable data. */ + GC_INNER ptr_t GC_FreeBSDGetDataStart(size_t max_page_size, + ptr_t etext_addr) + { + volatile ptr_t result = PTR_ALIGN_UP(etext_addr, sizeof(ptr_t)); + volatile ptr_t next_page = PTR_ALIGN_UP(etext_addr, max_page_size); -#elif defined(OPENBSD) + GC_ASSERT(max_page_size % sizeof(ptr_t) == 0); + GC_setup_temporary_fault_handler(); + if (SETJMP(GC_jmp_buf) == 0) { + /* Try reading at the address. */ + /* This should happen before there is another thread. */ + for (; ADDR_LT(next_page, DATAEND); next_page += max_page_size) + GC_noop1((word)(*(volatile unsigned char *)next_page)); + GC_reset_fault_handler(); + } else { + GC_reset_fault_handler(); + /* As above, we go to plan B. */ + result = (ptr_t)GC_find_limit(DATAEND, FALSE); + } + return result; + } +# endif /* DATASTART_USES_BSDGETDATASTART */ -/* Depending on arch alignment, there can be multiple holes */ -/* between DATASTART and DATAEND. Scan in DATASTART .. DATAEND */ -/* and register each region. 
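
GC_FreeBSDGetDataStart below probes pages under a temporary fault handler: read a byte, and if that traps, fall back to GC_find_limit. The pattern in isolation (a POSIX-only sketch; the collector's handler setup also covers SIGBUS on some targets and is more careful than this):

    #include <setjmp.h>
    #include <signal.h>
    #include <string.h>

    static sigjmp_buf probe_env;

    static void probe_handler(int sig)
    {
        (void)sig;
        siglongjmp(probe_env, 1);
    }

    /* Returns nonzero if *p can be read without faulting. */
    static int page_is_readable(const volatile unsigned char *p)
    {
        struct sigaction sa, old_sa;
        int readable;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = probe_handler;
        sigaction(SIGSEGV, &sa, &old_sa);
        if (sigsetjmp(probe_env, 1) == 0) {
            (void)*p;           /* may fault */
            readable = 1;
        } else {
            readable = 0;       /* faulted: caller goes to plan B */
        }
        sigaction(SIGSEGV, &old_sa, NULL);
        return readable;
    }
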
*/ -void GC_register_data_segments(void) -{ - ptr_t region_start = DATASTART; +# ifdef AMIGA +# define GC_AMIGA_DS +# include "extra/AmigaOS.c" +# undef GC_AMIGA_DS - GC_ASSERT(I_HOLD_LOCK()); - if (ADDR(region_start) - 1U >= ADDR(DATAEND)) - ABORT_ARG2("Wrong DATASTART/END pair", - ": %p .. %p", (void *)region_start, (void *)DATAEND); - for (;;) { - ptr_t region_end = GC_find_limit_with_bound(region_start, TRUE, DATAEND); +# elif defined(OPENBSD) + /* Depending on arch alignment, there can be multiple holes */ + /* between DATASTART and DATAEND. Scan in DATASTART .. DATAEND */ + /* and register each region. */ + void GC_register_data_segments(void) + { + ptr_t region_start = DATASTART; - GC_add_roots_inner(region_start, region_end, FALSE); - if (ADDR_GE(region_end, DATAEND)) - break; - region_start = GC_skip_hole_openbsd(region_end, DATAEND); - } -} + GC_ASSERT(I_HOLD_LOCK()); + if (ADDR(region_start) - 1U >= ADDR(DATAEND)) + ABORT_ARG2("Wrong DATASTART/END pair", + ": %p .. %p", (void *)region_start, (void *)DATAEND); + for (;;) { + ptr_t region_end = GC_find_limit_with_bound(region_start, TRUE, + DATAEND); -# else /* !AMIGA && !OPENBSD */ + GC_add_roots_inner(region_start, region_end, FALSE); + if (ADDR_GE(region_end, DATAEND)) + break; + region_start = GC_skip_hole_openbsd(region_end, DATAEND); + } + } -# if !defined(PCR) && !defined(MACOS) && defined(REDIRECT_MALLOC) \ - && defined(GC_SOLARIS_THREADS) - EXTERN_C_BEGIN - extern caddr_t sbrk(int); - EXTERN_C_END -# endif +# else /* !AMIGA && !OPENBSD */ +# if !defined(PCR) && !defined(MACOS) && defined(REDIRECT_MALLOC) \ + && defined(GC_SOLARIS_THREADS) + EXTERN_C_BEGIN + extern caddr_t sbrk(int); + EXTERN_C_END +# endif - void GC_register_data_segments(void) - { - GC_ASSERT(I_HOLD_LOCK()); -# if !defined(DYNAMIC_LOADING) && defined(GC_DONT_REGISTER_MAIN_STATIC_DATA) - /* Avoid even referencing DATASTART and DATAEND as they are */ - /* unnecessary and cause linker errors when bitcode is enabled. */ - /* GC_register_data_segments() is not called anyway. */ -# elif defined(PCR) || (defined(DYNAMIC_LOADING) && defined(DARWIN)) - /* No-op. GC_register_main_static_data() always returns false. */ -# elif defined(MACOS) + void GC_register_data_segments(void) { -# if defined(THINK_C) - extern void *GC_MacGetDataStart(void); - - /* Globals begin above stack and end at a5. */ - GC_add_roots_inner((ptr_t)GC_MacGetDataStart(), - (ptr_t)LMGetCurrentA5(), FALSE); -# elif defined(__MWERKS__) && defined(M68K) - extern void *GC_MacGetDataStart(void); -# if __option(far_data) - extern void *GC_MacGetDataEnd(void); - - /* Handle Far Globals (CW Pro 3) located after the QD globals. */ - GC_add_roots_inner((ptr_t)GC_MacGetDataStart(), - (ptr_t)GC_MacGetDataEnd(), FALSE); -# else - GC_add_roots_inner((ptr_t)GC_MacGetDataStart(), - (ptr_t)LMGetCurrentA5(), FALSE); -# endif -# elif defined(__MWERKS__) && defined(POWERPC) - extern char __data_start__[], __data_end__[]; + GC_ASSERT(I_HOLD_LOCK()); +# if !defined(DYNAMIC_LOADING) \ + && defined(GC_DONT_REGISTER_MAIN_STATIC_DATA) + /* Avoid even referencing DATASTART and DATAEND as they are */ + /* unnecessary and cause linker errors when bitcode is */ + /* enabled. GC_register_data_segments is not called anyway. */ +# elif defined(PCR) || (defined(DYNAMIC_LOADING) && defined(DARWIN)) + /* No-op. GC_register_main_static_data() always returns false. */ +# elif defined(MACOS) + { +# if defined(THINK_C) + extern void *GC_MacGetDataStart(void); + + /* Globals begin above stack and end at a5. 
*/ + GC_add_roots_inner((ptr_t)GC_MacGetDataStart(), + (ptr_t)LMGetCurrentA5(), FALSE); +# elif defined(__MWERKS__) && defined(M68K) + extern void *GC_MacGetDataStart(void); +# if __option(far_data) + extern void *GC_MacGetDataEnd(void); + + /* Handle Far Globals (CW Pro 3) located after */ + /* the QD globals. */ + GC_add_roots_inner((ptr_t)GC_MacGetDataStart(), + (ptr_t)GC_MacGetDataEnd(), FALSE); +# else + GC_add_roots_inner((ptr_t)GC_MacGetDataStart(), + (ptr_t)LMGetCurrentA5(), FALSE); +# endif +# elif defined(__MWERKS__) && defined(POWERPC) + extern char __data_start__[], __data_end__[]; - GC_add_roots_inner((ptr_t)&__data_start__, - (ptr_t)&__data_end__, FALSE); + GC_add_roots_inner((ptr_t)&__data_start__, + (ptr_t)&__data_end__, FALSE); +# endif + } +# elif defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS) + /* As of Solaris 2.3, the Solaris threads implementation */ + /* allocates the data structure for the initial thread with */ + /* sbrk at process startup. It needs to be scanned, so */ + /* that we don't lose some malloc allocated data structures */ + /* hanging from it. We're on thin ice here... */ + GC_ASSERT(DATASTART); + { + ptr_t p = (ptr_t)sbrk(0); + + if (ADDR_LT(DATASTART, p)) + GC_add_roots_inner(DATASTART, p, FALSE); + } +# else + if (ADDR(DATASTART) - 1U >= ADDR(DATAEND)) { + /* Subtract one to check also for NULL */ + /* without a compiler warning. */ + ABORT_ARG2("Wrong DATASTART/END pair", + ": %p .. %p", (void *)DATASTART, (void *)DATAEND); + } + GC_add_roots_inner(DATASTART, DATAEND, FALSE); +# ifdef GC_HAVE_DATAREGION2 + if (ADDR(DATASTART2) - 1U >= ADDR(DATAEND2)) + ABORT_ARG2("Wrong DATASTART/END2 pair", + ": %p .. %p", (void *)DATASTART2, (void *)DATAEND2); + GC_add_roots_inner(DATASTART2, DATAEND2, FALSE); +# endif # endif + /* Dynamic libraries are added at every collection, since they */ + /* may change. */ } -# elif defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS) - /* As of Solaris 2.3, the Solaris threads implementation */ - /* allocates the data structure for the initial thread with */ - /* sbrk at process startup. It needs to be scanned, so that */ - /* we don't lose some malloc allocated data structures */ - /* hanging from it. We're on thin ice here ... */ - GC_ASSERT(DATASTART); - { - ptr_t p = (ptr_t)sbrk(0); +# endif /* !AMIGA && !OPENBSD */ - if (ADDR_LT(DATASTART, p)) - GC_add_roots_inner(DATASTART, p, FALSE); - } -# else - if (ADDR(DATASTART) - 1U >= ADDR(DATAEND)) { - /* Subtract one to check also for NULL */ - /* without a compiler warning. */ - ABORT_ARG2("Wrong DATASTART/END pair", - ": %p .. %p", (void *)DATASTART, (void *)DATAEND); - } - GC_add_roots_inner(DATASTART, DATAEND, FALSE); -# ifdef GC_HAVE_DATAREGION2 - if (ADDR(DATASTART2) - 1U >= ADDR(DATAEND2)) - ABORT_ARG2("Wrong DATASTART/END2 pair", - ": %p .. %p", (void *)DATASTART2, (void *)DATAEND2); - GC_add_roots_inner(DATASTART2, DATAEND2, FALSE); -# endif -# endif - /* Dynamic libraries are added at every collection, since they may */ - /* change. */ - } - -# endif /* !AMIGA && !OPENBSD */ # endif /* !ANY_MSWIN */ #endif /* !OS2 */ @@ -2369,7 +2377,7 @@ ptr_t GC_unix_get_mem(size_t bytes) ptr_t GC_wince_get_mem(size_t bytes) { ptr_t result = 0; /* initialized to prevent warning. 
*/ - word i; + size_t i; GC_ASSERT(GC_page_size != 0); bytes = ROUNDUP_PAGESIZE(bytes); @@ -2514,7 +2522,8 @@ ptr_t GC_unix_get_mem(size_t bytes) if (GLOBAL_ALLOC_TEST) # endif { - while (GC_n_heap_bases-- > 0) { + while (GC_n_heap_bases > 0) { + GC_n_heap_bases--; # ifdef CYGWIN32 /* FIXME: Is it OK to use non-GC free() here? */ # else @@ -2957,7 +2966,8 @@ GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void) /* Add all pages in pht2 to pht1. */ STATIC void GC_or_pages(page_hash_table pht1, const word *pht2) { - unsigned i; + size_t i; + for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i]; } #endif /* CHECKSUMS && GWW_VDB || PROC_VDB */ @@ -2982,13 +2992,13 @@ GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void) GC_INLINE void GC_gww_read_dirty(GC_bool output_unneeded) { - word i; + size_t i; GC_ASSERT(I_HOLD_LOCK()); if (!output_unneeded) BZERO(GC_grungy_pages, sizeof(GC_grungy_pages)); - for (i = 0; i != GC_n_heap_sects; ++i) { + for (i = 0; i < GC_n_heap_sects; ++i) { GC_ULONG_PTR count; do { @@ -3026,11 +3036,12 @@ GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void) " Falling back to marking all pages dirty\n", start); } if (!output_unneeded) { - unsigned j; + size_t j; for (j = 0; j < nblocks; ++j) { - word hash = PHT_HASH(start + j); - set_pht_entry_from_index(GC_grungy_pages, hash); + size_t index = PHT_HASH(start + j); + + set_pht_entry_from_index(GC_grungy_pages, index); } } count = 1; /* Done with this section. */ @@ -3367,7 +3378,7 @@ GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void) /* and then to have the thread stopping code set the dirty */ /* flag, if necessary. */ for (i = 0; i < divHBLKSZ(GC_page_size); i++) { - word index = PHT_HASH(h+i); + size_t index = PHT_HASH(h + i); async_set_pht_entry_from_index(GC_dirty_pages, index); } @@ -3513,7 +3524,7 @@ GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void) STATIC void GC_protect_heap(void) { - unsigned i; + size_t i; GC_ASSERT(GC_page_size != 0); for (i = 0; i < GC_n_heap_sects; i++) { @@ -3537,7 +3548,7 @@ STATIC void GC_protect_heap(void) current_start = (struct hblk *)start; limit = start + len; for (current = current_start;;) { - word nblocks = 0; + size_t nblocks = 0; GC_bool is_ptrfree = TRUE; if (ADDR_LT((ptr_t)current, limit)) { @@ -3588,7 +3599,7 @@ STATIC void GC_protect_heap(void) /* Remove protection for the entire heap not updating GC_dirty_pages. 
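
The GC_n_heap_bases loop rewrite above is not cosmetic: with the old "while (GC_n_heap_bases-- > 0)" form, the unsigned counter is decremented once more on the final, failing test and wraps to its maximum value. A tiny demonstration:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t n = 3;

        while (n-- > 0) { /* empty */ }
        printf("post-decrement form leaves n = %zu\n", n); /* SIZE_MAX */

        n = 3;
        while (n > 0) { n--; }
        printf("explicit decrement leaves n = %zu\n", n);  /* 0 */
        return 0;
    }
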
*/ STATIC void GC_unprotect_all_heap(void) { - unsigned i; + size_t i; GC_ASSERT(I_HOLD_LOCK()); GC_ASSERT(GC_auto_incremental); @@ -3717,9 +3728,8 @@ GC_INNER GC_bool GC_dirty_init(void) GC_INLINE void GC_proc_read_dirty(GC_bool output_unneeded) { - int nmaps; + size_t i, nmaps; char * bufp = GC_proc_buf; - int i; GC_ASSERT(I_HOLD_LOCK()); # ifndef THREADS @@ -3764,10 +3774,10 @@ GC_INLINE void GC_proc_read_dirty(GC_bool output_unneeded) } /* Copy dirty bits into GC_grungy_pages */ - nmaps = ((struct prpageheader *)bufp) -> pr_nmap; + nmaps = (size_t)(((struct prpageheader *)bufp) -> pr_nmap); # ifdef DEBUG_DIRTY_BITS GC_log_printf("Proc VDB read: pr_nmap= %u, pr_npage= %lu\n", - nmaps, ((struct prpageheader *)bufp)->pr_npage); + (unsigned)nmaps, ((struct prpageheader *)bufp)->pr_npage); # endif # if defined(GC_NO_SYS_FAULT_H) && defined(CPPCHECK) GC_noop1(((struct prpageheader *)bufp)->dummy[0]); @@ -3801,7 +3811,7 @@ GC_INLINE void GC_proc_read_dirty(GC_bool output_unneeded) # endif for (h = (struct hblk *)vaddr; ADDR_LT((ptr_t)h, next_vaddr); h++) { - word index = PHT_HASH(h); + size_t index = PHT_HASH(h); set_pht_entry_from_index(GC_grungy_pages, index); } @@ -4107,7 +4117,7 @@ GC_INLINE void GC_proc_read_dirty(GC_bool output_unneeded) if (EXPECT(ADDR_LT(vaddr, start), FALSE)) h = (struct hblk *)start; for (; ADDR_LT((ptr_t)h, next_vaddr); h++) { - word index = PHT_HASH(h); + size_t index = PHT_HASH(h); /* Filter out the blocks without pointers. It might worth */ /* for the case when the heap is large enough for the hash */ @@ -4177,26 +4187,26 @@ GC_INLINE void GC_proc_read_dirty(GC_bool output_unneeded) # endif if (!output_unneeded) { - word i; + size_t i; BZERO(GC_grungy_pages, sizeof(GC_grungy_pages)); pagemap_buf_len = 0; /* invalidate soft_vdb_buf */ - for (i = 0; i != GC_n_heap_sects; ++i) { + for (i = 0; i < GC_n_heap_sects; ++i) { ptr_t start = GC_heap_sects[i].hs_start; soft_set_grungy_pages(start, start + GC_heap_sects[i].hs_bytes, - i < GC_n_heap_sects-1 ? - GC_heap_sects[i+1].hs_start : NULL, + i + 1 < GC_n_heap_sects + ? GC_heap_sects[i+1].hs_start : NULL, FALSE); } # ifndef NO_VDB_FOR_STATIC_ROOTS - for (i = 0; (int)i < n_root_sets; ++i) { + for (i = 0; i < n_root_sets; ++i) { soft_set_grungy_pages((ptr_t)HBLKPTR(GC_static_roots[i].r_start), GC_static_roots[i].r_end, - (int)i < n_root_sets-1 ? - GC_static_roots[i+1].r_start : NULL, + i + 1 < n_root_sets + ? GC_static_roots[i+1].r_start : NULL, TRUE); } # endif @@ -4240,7 +4250,7 @@ GC_INNER GC_bool GC_dirty_init(void) /* dirties the entire object. */ GC_INNER void GC_dirty_inner(const void *p) { - word index = PHT_HASH(p); + size_t index = PHT_HASH(p); # if defined(MPROTECT_VDB) /* Do not update GC_dirty_pages if it should be followed by the */ @@ -4289,8 +4299,9 @@ GC_INNER GC_bool GC_dirty_init(void) # elif defined(PCR_VDB) /* lazily enable dirty bits on newly added heap sects */ { - static int onhs = 0; - int nhs = GC_n_heap_sects; + static size_t onhs = 0; + size_t nhs = GC_n_heap_sects; + for (; onhs < nhs; onhs++) { PCR_VD_WriteProtectEnable( GC_heap_sects[onhs].hs_start, @@ -4330,7 +4341,7 @@ GC_INNER GC_bool GC_dirty_init(void) /* side of labeling pages as dirty (and this implementation does). 
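
The PHT_HASH/set_pht_entry_from_index calls throughout this file operate on a fixed-size bit vector indexed by a hash of the page address; a collision can only mark an extra page dirty, which is safe because the virtual dirty-bit machinery is allowed to over-approximate. A miniature model (the table size and the 4 KB page shift are assumptions, not the collector's actual parameters):

    #include <stddef.h>

    #define PHT_ENTRIES ((size_t)1 << 16)
    #define WORD_BITS   (8 * sizeof(unsigned long))

    static unsigned long dirty_pages[PHT_ENTRIES / WORD_BITS];

    static size_t pht_hash(const void *addr)
    {
        return ((size_t)addr >> 12) & (PHT_ENTRIES - 1);
    }

    static void set_pht_entry(unsigned long *pht, size_t index)
    {
        pht[index / WORD_BITS] |= 1UL << (index % WORD_BITS);
    }

    static int get_pht_entry(const unsigned long *pht, size_t index)
    {
        return (int)((pht[index / WORD_BITS] >> (index % WORD_BITS)) & 1);
    }
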
*/ GC_INNER GC_bool GC_page_was_dirty(struct hblk *h) { - word index; + size_t index; # ifdef PCR_VDB if (!GC_manual_vdb) { @@ -4358,7 +4369,7 @@ GC_INNER GC_bool GC_dirty_init(void) GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h) { # if defined(GWW_VDB) || defined(PROC_VDB) || defined(SOFT_VDB) - word index; + size_t index; # ifdef MPROTECT_VDB if (!GC_GWW_AVAILABLE()) @@ -4381,7 +4392,7 @@ GC_INNER GC_bool GC_dirty_init(void) } # endif /* CHECKSUMS || PROC_VDB */ - GC_INNER void GC_remove_protection(struct hblk *h, word nblocks, + GC_INNER void GC_remove_protection(struct hblk *h, size_t nblocks, GC_bool is_ptrfree) { # ifdef MPROTECT_VDB @@ -4408,7 +4419,7 @@ GC_INNER GC_bool GC_dirty_init(void) /* whether the page at h_trunc has already been marked */ /* dirty as there could be a hash collision. */ for (current = h_trunc; ADDR_LT((ptr_t)current, h_end); ++current) { - word index = PHT_HASH(current); + size_t index = PHT_HASH(current); # ifndef DONT_PROTECT_PTRFREE if (!is_ptrfree @@ -4904,7 +4915,7 @@ STATIC kern_return_t GC_forward_exception(mach_port_t thread, mach_port_t task, exception_data_t data, mach_msg_type_number_t data_count) { - unsigned int i; + size_t i; kern_return_t r; mach_port_t port; exception_behavior_t behavior; @@ -4913,11 +4924,11 @@ STATIC kern_return_t GC_forward_exception(mach_port_t thread, mach_port_t task, thread_state_data_t thread_state; mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX; - for (i = 0; i < GC_old_exc_ports.count; i++) { + for (i = 0; (int)i < GC_old_exc_ports.count; i++) { if ((GC_old_exc_ports.masks[i] & ((exception_mask_t)1 << exception)) != 0) break; } - if (i == GC_old_exc_ports.count) + if (i == (size_t)GC_old_exc_ports.count) ABORT("No handler for exception!"); port = GC_old_exc_ports.ports[i]; @@ -5094,7 +5105,8 @@ catch_exception_raise(mach_port_t exception_port, mach_port_t thread, # endif UNPROTECT(h, GC_page_size); for (i = 0; i < divHBLKSZ(GC_page_size); i++) { - word index = PHT_HASH(h+i); + size_t index = PHT_HASH(h + i); + async_set_pht_entry_from_index(GC_dirty_pages, index); } } else if (GC_mprotect_state == GC_MP_DISCARDING) { diff --git a/pthread_stop_world.c b/pthread_stop_world.c index 47c387e42..4256f5bf2 100644 --- a/pthread_stop_world.c +++ b/pthread_stop_world.c @@ -318,7 +318,7 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy, void *context) # endif IF_CANCEL(int cancel_state;) # ifdef GC_ENABLE_SUSPEND_THREAD - word suspend_cnt; + AO_t suspend_cnt; # endif AO_t my_stop_count = ao_load_acquire_async(&GC_stop_count); /* After the barrier, this thread should see */ @@ -360,7 +360,7 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy, void *context) crtn -> backing_store_ptr = bs_lo + stack_size; # endif # ifdef GC_ENABLE_SUSPEND_THREAD - suspend_cnt = (word)ao_load_async(&(me -> ext_suspend_cnt)); + suspend_cnt = ao_load_async(&(me -> ext_suspend_cnt)); # endif /* Tell the thread that wants to stop the world that this */ @@ -385,8 +385,7 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy, void *context) } while (ao_load_acquire_async(&GC_stop_count) == my_stop_count # ifdef GC_ENABLE_SUSPEND_THREAD || ((suspend_cnt & 1) != 0 - && (word)ao_load_async(&(me -> ext_suspend_cnt)) - == suspend_cnt) + && ao_load_async(&(me -> ext_suspend_cnt)) == suspend_cnt) # endif ); @@ -599,7 +598,7 @@ STATIC void GC_restart_handler(int sig) (void)select(0, 0, 0, 0, &tv); } - GC_INNER void GC_suspend_self_inner(GC_thread me, word suspend_cnt) { + GC_INNER void GC_suspend_self_inner(GC_thread me, size_t 
suspend_cnt) { IF_CANCEL(int cancel_state;) GC_ASSERT((suspend_cnt & 1) != 0); @@ -607,8 +606,7 @@ STATIC void GC_restart_handler(int sig) # ifdef DEBUG_THREADS GC_log_printf("Suspend self: %p\n", (void *)(me -> id)); # endif - while ((word)ao_load_acquire_async(&(me -> ext_suspend_cnt)) - == suspend_cnt) { + while (ao_load_acquire_async(&(me -> ext_suspend_cnt)) == suspend_cnt) { /* TODO: Use sigsuspend() even for self-suspended threads. */ GC_brief_async_signal_safe_sleep(); } @@ -621,7 +619,7 @@ STATIC void GC_restart_handler(int sig) GC_API void GC_CALL GC_suspend_thread(GC_SUSPEND_THREAD_ID thread) { GC_thread t; AO_t next_stop_count; - word suspend_cnt; + AO_t suspend_cnt; IF_CANCEL(int cancel_state;) LOCK(); @@ -630,21 +628,21 @@ STATIC void GC_restart_handler(int sig) UNLOCK(); return; } - suspend_cnt = (word)(t -> ext_suspend_cnt); + suspend_cnt = t -> ext_suspend_cnt; if ((suspend_cnt & 1) != 0) /* already suspended? */ { GC_ASSERT(!THREAD_EQUAL((pthread_t)thread, pthread_self())); UNLOCK(); return; } if ((t -> flags & (FINISHED | DO_BLOCKING)) != 0) { - t -> ext_suspend_cnt = (AO_t)(suspend_cnt | 1); /* suspend */ + t -> ext_suspend_cnt = suspend_cnt | 1; /* suspend */ /* Terminated but not joined yet, or in do-blocking state. */ UNLOCK(); return; } if (THREAD_EQUAL((pthread_t)thread, pthread_self())) { - t -> ext_suspend_cnt = (AO_t)(suspend_cnt | 1); + t -> ext_suspend_cnt = suspend_cnt | 1; GC_with_callee_saves_pushed(GC_suspend_self_blocked, (ptr_t)t); UNLOCK(); return; @@ -677,7 +675,7 @@ STATIC void GC_restart_handler(int sig) AO_store(&GC_stop_count, next_stop_count); /* Set the flag making the change visible to the signal handler. */ - AO_store_release(&(t -> ext_suspend_cnt), (AO_t)(suspend_cnt | 1)); + AO_store_release(&(t -> ext_suspend_cnt), suspend_cnt | 1); /* TODO: Support GC_retry_signals (not needed for TSan) */ switch (raise_signal(t, GC_sig_suspend)) { @@ -706,12 +704,12 @@ STATIC void GC_restart_handler(int sig) LOCK(); t = GC_lookup_by_pthread((pthread_t)thread); if (t != NULL) { - word suspend_cnt = (word)(t -> ext_suspend_cnt); + AO_t suspend_cnt = t -> ext_suspend_cnt; if ((suspend_cnt & 1) != 0) /* is suspended? */ { GC_ASSERT((GC_stop_count & THREAD_RESTARTED) != 0); /* Mark the thread as not suspended - it will be resumed shortly. */ - AO_store(&(t -> ext_suspend_cnt), (AO_t)(suspend_cnt + 1)); + AO_store(&(t -> ext_suspend_cnt), suspend_cnt + 1); if ((t -> flags & (FINISHED | DO_BLOCKING)) == 0) { int result = raise_signal(t, GC_sig_thr_restart); @@ -1022,8 +1020,8 @@ GC_INNER void GC_stop_world(void) # endif GC_ASSERT(I_HOLD_LOCK()); /* Make sure all free list construction has stopped before we start. */ - /* No new construction can start, since a free list construction is */ - /* required to acquire and release the allocator lock before start. */ + /* No new construction can start, since it is required to acquire and */ + /* release the allocator lock before start. */ GC_ASSERT(GC_thr_initialized); # ifdef DEBUG_THREADS diff --git a/pthread_support.c b/pthread_support.c index a81c347ab..73a867ff3 100644 --- a/pthread_support.c +++ b/pthread_support.c @@ -1932,7 +1932,7 @@ GC_INNER void GC_do_blocking_inner(ptr_t data, void *context) /* otherwise there could be a static analysis tool warning */ /* (false positive) about unlock without a matching lock. 
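
The ext_suspend_cnt manipulation above encodes state in the counter's parity: an odd value means a suspension request is pending, and a suspended thread spins while the counter still equals the odd value it observed. A reduced sketch with C11 atomics (the real code uses libatomic_ops; synchronization around the writers is omitted here, as the collector holds the allocator lock at those points):

    #include <stdatomic.h>

    static atomic_size_t suspend_cnt;   /* even: running, odd: suspend */

    static void request_suspend(void)
    {
        atomic_store_explicit(&suspend_cnt,
                              atomic_load(&suspend_cnt) | 1,
                              memory_order_release);
    }

    static void request_resume(void)
    {
        atomic_store(&suspend_cnt, atomic_load(&suspend_cnt) + 1);
    }

    static void self_suspend_loop(size_t observed_odd_value)
    {
        while (atomic_load_explicit(&suspend_cnt, memory_order_acquire)
               == observed_odd_value) {
            /* sleep briefly, as GC_brief_async_signal_safe_sleep() does */
        }
    }
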
*/ while (EXPECT((me -> ext_suspend_cnt & 1) != 0, FALSE)) { - word suspend_cnt = (word)(me -> ext_suspend_cnt); + size_t suspend_cnt = me -> ext_suspend_cnt; /* read suspend counter (number) before unlocking */ READER_UNLOCK_RELEASE(); @@ -1960,7 +1960,7 @@ GC_INNER void GC_do_blocking_inner(ptr_t data, void *context) /* function. */ do_blocking_enter(&topOfStackUnset, me); while ((me -> ext_suspend_cnt & 1) != 0) { - word suspend_cnt = (word)(me -> ext_suspend_cnt); + size_t suspend_cnt = me -> ext_suspend_cnt; UNLOCK(); GC_suspend_self_inner(me, suspend_cnt); @@ -2069,7 +2069,7 @@ GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn, void *client_data) # if defined(GC_ENABLE_SUSPEND_THREAD) && defined(SIGNAL_BASED_STOP_WORLD) while (EXPECT((me -> ext_suspend_cnt & 1) != 0, FALSE)) { - word suspend_cnt = (word)(me -> ext_suspend_cnt); + size_t suspend_cnt = me -> ext_suspend_cnt; READER_UNLOCK_RELEASE(); GC_suspend_self_inner(me, suspend_cnt); @@ -2674,7 +2674,7 @@ GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb) # endif return; } - for (; pause_length <= SPIN_MAX; pause_length <<= 1) { + for (; pause_length <= (unsigned)SPIN_MAX; pause_length <<= 1) { for (i = 0; i < pause_length; ++i) { GC_pause(); } @@ -2737,20 +2737,19 @@ GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb) GC_INNER void GC_lock(void) { - unsigned my_spin_max; - unsigned my_last_spins; - unsigned i; + AO_t my_spin_max, my_last_spins_half; + size_t i; if (EXPECT(AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR, TRUE)) { return; } - my_spin_max = (unsigned)AO_load(&spin_max); - my_last_spins = (unsigned)AO_load(&last_spins); + my_spin_max = AO_load(&spin_max); + my_last_spins_half = AO_load(&last_spins) / 2; for (i = 0; i < my_spin_max; i++) { if (is_collecting() || GC_nprocs == 1) goto yield; - if (i < my_last_spins/2) { + if (i < my_last_spins_half) { GC_pause(); continue; } @@ -2761,13 +2760,13 @@ GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb) * against the other process with which we were contending. * Thus it makes sense to spin longer the next time. */ - AO_store(&last_spins, (AO_t)i); - AO_store(&spin_max, (AO_t)high_spin_max); + AO_store(&last_spins, i); + AO_store(&spin_max, high_spin_max); return; } } /* We are probably being scheduled against the other process. Sleep. 
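
The GC_lock rework above keeps the adaptive strategy: spin quietly for half of the previously successful spin count, then start testing the lock, and raise or lower the spin budget depending on whether spinning paid off. A compact C11 rendering (the constants and the is_collecting()/GC_nprocs checks are omitted; all names are stand-ins):

    #include <stdatomic.h>
    #include <sched.h>

    #define LOW_SPIN_MAX   30
    #define HIGH_SPIN_MAX  1000

    static atomic_flag lock_flag = ATOMIC_FLAG_INIT;
    static atomic_size_t spin_max = LOW_SPIN_MAX;
    static atomic_size_t last_spins = 0;

    static void adaptive_lock(void)
    {
        size_t i, my_spin_max, my_last_spins_half;

        if (!atomic_flag_test_and_set_explicit(&lock_flag,
                                               memory_order_acquire))
            return;                       /* uncontended fast path */
        my_spin_max = atomic_load(&spin_max);
        my_last_spins_half = atomic_load(&last_spins) / 2;
        for (i = 0; i < my_spin_max; i++) {
            if (i < my_last_spins_half)
                continue;                 /* GC_pause() in the real code */
            if (!atomic_flag_test_and_set_explicit(&lock_flag,
                                                   memory_order_acquire)) {
                /* Acquired while spinning: spin longer next time. */
                atomic_store(&last_spins, i);
                atomic_store(&spin_max, (size_t)HIGH_SPIN_MAX);
                return;
            }
        }
        /* Probably scheduled against the lock holder: stop spinning. */
        atomic_store(&spin_max, (size_t)LOW_SPIN_MAX);
        while (atomic_flag_test_and_set_explicit(&lock_flag,
                                                 memory_order_acquire))
            sched_yield();
    }
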
*/ - AO_store(&spin_max, (AO_t)low_spin_max); + AO_store(&spin_max, low_spin_max); yield: for (i = 0;; ++i) { if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) { diff --git a/ptr_chck.c b/ptr_chck.c index 1f829572c..847b4093f 100644 --- a/ptr_chck.c +++ b/ptr_chck.c @@ -31,7 +31,7 @@ GC_API void * GC_CALL GC_same_obj(void *p, void *q) { hdr *hhdr; ptr_t base, limit; - word sz; + size_t sz; if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); hhdr = HDR(p); @@ -63,9 +63,7 @@ GC_API void * GC_CALL GC_same_obj(void *p, void *q) } } else { size_t offset; - size_t pdispl = HBLKDISPL(p); - offset = pdispl % sz; if (HBLKPTR(p) != HBLKPTR(q)) { /* W/o this check, we might miss an error if */ /* q points to the first object on a page, and */ @@ -73,6 +71,7 @@ GC_API void * GC_CALL GC_same_obj(void *p, void *q) GC_same_obj_print_proc((ptr_t)p, (ptr_t)q); return p; } + offset = HBLKDISPL(p) % sz; base = (ptr_t)p - offset; limit = base + sz; } @@ -97,10 +96,9 @@ GC_valid_ptr_print_proc_t GC_is_valid_displacement_print_proc = GC_API void * GC_CALL GC_is_valid_displacement(void *p) { hdr *hhdr; - word pdispl; - word offset; + size_t offset; struct hblk *h; - word sz; + size_t sz; if (!EXPECT(GC_is_initialized, TRUE)) GC_init(); if (NULL == p) return NULL; @@ -114,8 +112,7 @@ GC_API void * GC_CALL GC_is_valid_displacement(void *p) return p; } sz = hhdr -> hb_sz; - pdispl = HBLKDISPL(p); - offset = pdispl % sz; + offset = HBLKDISPL(p) % sz; if ((sz > MAXOBJBYTES && ADDR_GE((ptr_t)p, (ptr_t)h + sz)) || !GC_valid_offsets[offset] || (ADDR_LT((ptr_t)(h + 1), (ptr_t)p + sz - offset) @@ -206,8 +203,8 @@ GC_API void * GC_CALL GC_is_visible(void *p) if (EXPECT(NULL == type_descr, FALSE)) goto fail; /* see comment in GC_mark_from */ descr = *(word *)(type_descr - - (descr - (word)(GC_DS_PER_OBJECT - - GC_INDIR_PER_OBJ_BIAS))); + - ((signed_word)descr + (GC_INDIR_PER_OBJ_BIAS + - GC_DS_PER_OBJECT))); } goto retry; } diff --git a/reclaim.c b/reclaim.c index 1df4d8430..9cbd7c11a 100644 --- a/reclaim.c +++ b/reclaim.c @@ -146,7 +146,7 @@ STATIC GC_bool GC_block_nearly_full(const hdr *hhdr, size_t sz) /* TODO: This should perhaps again be specialized for USE_MARK_BYTES */ /* and USE_MARK_BITS cases. 
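
The displacement arithmetic that GC_same_obj and GC_is_valid_displacement above now share (offset = HBLKDISPL(p) % sz) recovers an object's base from an interior pointer within a block of equally sized objects. In isolation, with 4096 standing in for HBLKSIZE:

    #include <stddef.h>
    #include <stdint.h>

    #define BLK_SIZE 4096u

    static void *obj_base(void *p, size_t obj_sz)
    {
        uintptr_t addr = (uintptr_t)p;
        uintptr_t blk = addr & ~(uintptr_t)(BLK_SIZE - 1); /* HBLKPTR(p) */
        size_t displ = (size_t)(addr - blk);               /* HBLKDISPL(p) */

        return (char *)p - displ % obj_sz;
    }
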
*/ -GC_INLINE word *GC_clear_block(word *p, word sz, word *pcount) +GC_INLINE word *GC_clear_block(word *p, size_t sz, word *pcount) { word *q = (word *)((ptr_t)p + sz); @@ -178,7 +178,7 @@ GC_INLINE word *GC_clear_block(word *p, word sz, word *pcount) STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, const hdr *hhdr, size_t sz, ptr_t list, word *pcount) { - word bit_no; + size_t bit_no; ptr_t p, plim; GC_ASSERT(hhdr == GC_find_header(hbp)); @@ -210,7 +210,7 @@ STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, const hdr *hhdr, size_t sz, STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, const hdr *hhdr, size_t sz, ptr_t list, word *pcount) { - word bit_no; + size_t bit_no; word n_bytes_found = 0; ptr_t p, plim; @@ -241,7 +241,7 @@ STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, const hdr *hhdr, size_t sz, STATIC ptr_t GC_disclaim_and_reclaim(struct hblk *hbp, hdr *hhdr, size_t sz, ptr_t list, word *pcount) { - word bit_no; + size_t bit_no; ptr_t p, plim; int (GC_CALLBACK *disclaim)(void *) = GC_obj_kinds[hhdr -> hb_obj_kind].ok_disclaim_proc; @@ -274,7 +274,7 @@ STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, const hdr *hhdr, size_t sz, /* Don't really reclaim objects, just check for unmarked ones: */ STATIC void GC_reclaim_check(struct hblk *hbp, const hdr *hhdr, size_t sz) { - word bit_no; + size_t bit_no; ptr_t p, plim; # ifndef THREADS @@ -351,8 +351,8 @@ STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp, size_t sz, struct obj_kind *ok = &GC_obj_kinds[hhdr -> hb_obj_kind]; void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]); - *flh = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init, - (ptr_t)(*flh), (word *)&GC_bytes_found); + *flh = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init, (ptr_t)(*flh), + (/* unsigned */ word *)&GC_bytes_found); } } @@ -372,8 +372,8 @@ STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp, size_t sz, flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]); hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no; - flh_next = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init, - (ptr_t)(*flh), (word *)&GC_bytes_found); + flh_next = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init, (ptr_t)(*flh), + (/* unsigned */ word *)&GC_bytes_found); if (hhdr -> hb_n_marks) { *flh = flh_next; } else { @@ -461,7 +461,7 @@ STATIC void GC_CALLBACK GC_reclaim_block(struct hblk *hbp, { ptr_t p = hbp -> hb_body; ptr_t plim = p + HBLKSIZE - sz; - word bit_no; + size_t bit_no; for (bit_no = 0; ADDR_GE(plim, p); bit_no += MARK_BIT_OFFSET(sz), p += sz) { @@ -499,122 +499,118 @@ STATIC void GC_CALLBACK GC_reclaim_block(struct hblk *hbp, /* doing it here avoids some silly lock contention in */ /* GC_malloc_many. */ if (IS_PTRFREE_SAFE(hhdr)) { - GC_atomic_in_use += sz * hhdr -> hb_n_marks; + GC_atomic_in_use += (word)sz * hhdr -> hb_n_marks; } else { - GC_composite_in_use += sz * hhdr -> hb_n_marks; + GC_composite_in_use += (word)sz * hhdr -> hb_n_marks; } } } #if !defined(NO_DEBUGGING) -/* Routines to gather and print heap block info */ -/* intended for debugging. Otherwise should be called */ -/* with the allocator lock held. */ + /* Routines to gather and print heap block info intended for */ + /* debugging. Otherwise should be called with the allocator lock */ + /* held. 
*/ -struct Print_stats -{ - size_t number_of_blocks; - size_t total_bytes; -}; - -EXTERN_C_BEGIN /* to avoid "no previous prototype" clang warning */ -unsigned GC_n_set_marks(const hdr *); -EXTERN_C_END - -#ifdef USE_MARK_BYTES + struct Print_stats + { + size_t number_of_blocks; + size_t total_bytes; + }; -/* Return the number of set mark bits in the given header. */ -/* Remains externally visible as used by GNU GCJ currently. */ -/* There could be a race between GC_clear_hdr_marks and this */ -/* function but the latter is for a debug purpose. */ -GC_ATTR_NO_SANITIZE_THREAD -unsigned GC_n_set_marks(const hdr *hhdr) -{ - unsigned result = 0; - size_t i; - size_t offset = MARK_BIT_OFFSET(hhdr -> hb_sz); - size_t limit = FINAL_MARK_BIT(hhdr -> hb_sz); + EXTERN_C_BEGIN /* to avoid "no previous prototype" clang warning */ + unsigned GC_n_set_marks(const hdr *); + EXTERN_C_END - for (i = 0; i < limit; i += offset) { +# ifdef USE_MARK_BYTES + /* Return the number of set mark bits in the given header. */ + /* Remains externally visible as used by GNU GCJ currently. */ + /* There could be a race between GC_clear_hdr_marks and this */ + /* function but the latter is for a debug purpose. */ + GC_ATTR_NO_SANITIZE_THREAD + unsigned GC_n_set_marks(const hdr *hhdr) + { + unsigned result = 0; + size_t i; + size_t offset = MARK_BIT_OFFSET(hhdr -> hb_sz); + size_t limit = FINAL_MARK_BIT(hhdr -> hb_sz); + + for (i = 0; i < limit; i += offset) { result += hhdr -> hb_marks[i]; + } + GC_ASSERT(hhdr -> hb_marks[limit]); /* the one set past the end */ + return result; } - GC_ASSERT(hhdr -> hb_marks[limit]); /* the one set past the end */ - return result; -} - -#else - -/* Number of set bits in a word. Not performance critical. */ -static unsigned count_ones(word n) -{ - unsigned result = 0; - for (; n > 0; n >>= 1) - if (n & 1) result++; - - return result; -} - -unsigned GC_n_set_marks(const hdr *hhdr) -{ - unsigned result = 0; - size_t sz = hhdr -> hb_sz; - size_t i; -# ifdef MARK_BIT_PER_OBJ - size_t n_objs = HBLK_OBJS(sz); - size_t n_mark_words = divWORDSZ(n_objs > 0 - ? n_objs : 1); /* round down */ +# else + /* Number of set bits in a word. Not performance critical. */ + static unsigned count_ones(word v) + { + unsigned result = 0; - for (i = 0; i <= n_mark_words; i++) { - result += count_ones(hhdr -> hb_marks[i]); + for (; v > 0; v >>= 1) { + if (v & 1) result++; } -# else + return result; + } - for (i = 0; i < HB_MARKS_SZ; i++) { + unsigned GC_n_set_marks(const hdr *hhdr) + { + unsigned result = 0; + size_t sz = hhdr -> hb_sz; + size_t i; +# ifdef MARK_BIT_PER_OBJ + size_t n_objs = HBLK_OBJS(sz); + size_t n_mark_words + = divWORDSZ(n_objs > 0 ? n_objs : 1); /* round down */ + + for (i = 0; i <= n_mark_words; i++) { result += count_ones(hhdr -> hb_marks[i]); - } -# endif - GC_ASSERT(result > 0); - result--; /* exclude the one bit set past the end */ -# ifndef MARK_BIT_PER_OBJ - if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) { - size_t lg = BYTES_TO_GRANULES(sz); - - /* As mentioned in GC_set_hdr_marks(), all the bits are set */ - /* instead of every n-th, thus the result should be adjusted. 
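
count_ones above deliberately trades speed for simplicity, shifting through every bit position. The usual constant-per-set-bit alternative, for comparison (not part of the patch):

    /* Brian Kernighan's method: v &= v - 1 clears the lowest set bit,
     * so the loop runs once per set bit instead of once per position. */
    static unsigned count_ones_fast(unsigned long v)
    {
        unsigned result = 0;

        for (; v != 0; v &= v - 1)
            result++;
        return result;
    }
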
*/ - GC_ASSERT((unsigned)lg != 0 && result % lg == 0); - result /= (unsigned)lg; - } -# endif - return result; -} + } +# else -#endif /* !USE_MARK_BYTES */ + for (i = 0; i < HB_MARKS_SZ; i++) { + result += count_ones(hhdr -> hb_marks[i]); + } +# endif + GC_ASSERT(result > 0); + result--; /* exclude the one bit set past the end */ +# ifndef MARK_BIT_PER_OBJ + if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) { + size_t lg = BYTES_TO_GRANULES(sz); + + /* As mentioned in GC_set_hdr_marks(), all the bits are set */ + /* instead of every n-th, thus the result should be adjusted. */ + GC_ASSERT((unsigned)lg != 0 && result % lg == 0); + result /= (unsigned)lg; + } +# endif + return result; + } +# endif /* !USE_MARK_BYTES */ -GC_API unsigned GC_CALL GC_count_set_marks_in_hblk(const void *p) { + GC_API unsigned GC_CALL GC_count_set_marks_in_hblk(const void *p) { return GC_n_set_marks(HDR(p)); -} + } STATIC void GC_CALLBACK GC_print_block_descr(struct hblk *h, void *raw_ps) { const hdr *hhdr = HDR(h); size_t sz = hhdr -> hb_sz; struct Print_stats *ps = (struct Print_stats *)raw_ps; - unsigned n_marks = GC_n_set_marks(hhdr); - unsigned n_objs = (unsigned)HBLK_OBJS(sz); + size_t n_marks = (size_t)GC_n_set_marks(hhdr); + size_t n_objs = HBLK_OBJS(sz); # ifndef PARALLEL_MARK - GC_ASSERT(hhdr -> hb_n_marks == n_marks); + GC_ASSERT(hhdr -> hb_n_marks == n_marks); # endif # if defined(CPPCHECK) GC_noop1_ptr(h); # endif GC_ASSERT((n_objs > 0 ? n_objs : 1) >= n_marks); - GC_printf("%u,%u,%u,%u\n", - hhdr -> hb_obj_kind, (unsigned)sz, n_marks, n_objs); + GC_printf("%u,%u,%u,%u\n", hhdr -> hb_obj_kind, (unsigned)sz, + (unsigned)n_marks, (unsigned)n_objs); ps -> number_of_blocks++; - ps -> total_bytes += - (word)(sz + HBLKSIZE-1) & ~(word)(HBLKSIZE-1); /* round up */ + ps -> total_bytes += (sz + HBLKSIZE-1) & ~(HBLKSIZE-1); /* round up */ } void GC_print_block_list(void) @@ -630,8 +626,8 @@ GC_API unsigned GC_CALL GC_count_set_marks_in_hblk(const void *p) { (unsigned long)pstats.total_bytes); } -GC_API void GC_CALL GC_print_free_list(int k, size_t lg) -{ + GC_API void GC_CALL GC_print_free_list(int k, size_t lg) + { void *flh_next; int n; @@ -639,12 +635,11 @@ GC_API void GC_CALL GC_print_free_list(int k, size_t lg) GC_ASSERT(lg <= MAXOBJGRANULES); flh_next = GC_obj_kinds[k].ok_freelist[lg]; for (n = 0; flh_next != NULL; n++) { - GC_printf("Free object in heap block %p [%d]: %p\n", - (void *)HBLKPTR(flh_next), n, flh_next); - flh_next = obj_link(flh_next); + GC_printf("Free object in heap block %p [%d]: %p\n", + (void *)HBLKPTR(flh_next), n, flh_next); + flh_next = obj_link(flh_next); } -} - + } #endif /* !NO_DEBUGGING */ /* @@ -787,7 +782,7 @@ GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old) /* It's likely we'll need it this time, too */ /* It's been touched recently, so this */ /* shouldn't trigger paging. */ - GC_reclaim_small_nonempty_block(hbp, hhdr->hb_sz, FALSE); + GC_reclaim_small_nonempty_block(hbp, hhdr -> hb_sz, FALSE); } } } diff --git a/specific.c b/specific.c index 78ba86ea2..7d2f452df 100644 --- a/specific.c +++ b/specific.c @@ -145,7 +145,7 @@ GC_INNER void GC_remove_specific_after_fork(tsd * key, pthread_t t) } /* Note that even the slow path doesn't lock. 
*/ -GC_INNER void * GC_slow_getspecific(tsd * key, word qtid, +GC_INNER void * GC_slow_getspecific(tsd * key, size_t qtid, tse * volatile * cache_ptr) { pthread_t self = pthread_self(); diff --git a/typd_mlc.c b/typd_mlc.c index 95beb6916..fc7c6d122 100644 --- a/typd_mlc.c +++ b/typd_mlc.c @@ -11,7 +11,6 @@ * Permission to modify the code and to distribute modified code is granted, * provided the above notices are retained, and a notice that the code was * modified is included with the above copyright notice. - * */ #include "private/gc_pmark.h" @@ -29,10 +28,10 @@ * This is done because the environment field is too small, and the collector * must trace the complex_descriptor. * - * Note that descriptors inside objects may appear cleared, if we encounter a - * false reference to an object on a free list. In the GC_descr case, this - * is OK, since a 0 descriptor corresponds to examining no fields. - * In the complex_descriptor case, we explicitly check for that case. + * Note that descriptors inside objects may appear cleared, if we encounter + * a false reference to an object on a free list. In the case of a simple + * object, this is OK, since a zero descriptor corresponds to examining no + * fields. In the complex_descriptor case, we explicitly check for that case. * * MAJOR PARTS OF THIS CODE HAVE NOT BEEN TESTED AT ALL and are not testable, * since they are not accessible through the current interface. @@ -62,19 +61,19 @@ STATIC void GC_push_typed_structures_proc(void) /* Add a multi-word bitmap to GC_ext_descriptors arrays. */ /* Returns starting index on success, -1 otherwise. */ -STATIC signed_word GC_add_ext_descriptor(const word * bm, word nbits) +STATIC signed_word GC_add_ext_descriptor(const word * bm, size_t nbits) { - size_t nwords = divWORDSZ(nbits + CPP_WORDSZ-1); signed_word result; size_t i; + size_t nwords = divWORDSZ(nbits + CPP_WORDSZ-1); LOCK(); while (EXPECT(GC_avail_descr + nwords >= GC_ed_size, FALSE)) { typed_ext_descr_t *newExtD; size_t new_size; - word ed_size = GC_ed_size; + size_t ed_size = GC_ed_size; - if (ed_size == 0) { + if (0 == ed_size) { GC_ASSERT(ADDR(&GC_ext_descriptors) % sizeof(word) == 0); GC_push_typed_structures = GC_push_typed_structures_proc; UNLOCK(); @@ -155,7 +154,7 @@ STATIC void GC_init_explicit_typing(void) GC_bm_table[0] = GC_DS_BITMAP; for (i = 1; i < CPP_WORDSZ / 2; i++) { - GC_bm_table[i] = (((word)-1) << (CPP_WORDSZ - i)) | GC_DS_BITMAP; + GC_bm_table[i] = (GC_WORD_MAX << (CPP_WORDSZ - i)) | GC_DS_BITMAP; } } @@ -220,7 +219,7 @@ GC_API GC_descr GC_CALL GC_make_descriptor(const GC_word * bm, size_t len) UNLOCK(); # endif - while (last_set_bit >= 0 && !GC_get_bit(bm, (word)last_set_bit)) + while (last_set_bit >= 0 && !GC_get_bit(bm, (size_t)last_set_bit)) last_set_bit--; if (last_set_bit < 0) return 0; /* no pointers */ @@ -242,8 +241,8 @@ GC_API GC_descr GC_CALL GC_make_descriptor(const GC_word * bm, size_t len) if (last_set_bit < BITMAP_BITS) { signed_word i; - /* Hopefully the common case. */ - /* Build bitmap descriptor (with bits reversed) */ + /* Hopefully the common case. Build the bitmap descriptor */ + /* (with the bits reversed). */ d = SIGNB; for (i = last_set_bit - 1; i >= 0; i--) { d >>= 1; @@ -319,9 +318,9 @@ GC_API GC_ATTR_MALLOC void * GC_CALL struct LeafDescriptor { /* Describes simple array. */ word ld_tag; # define LEAF_TAG 1 - word ld_size; /* Bytes per element; non-zero, */ + size_t ld_size; /* Bytes per element; non-zero, */ /* multiple of ALIGNMENT. */ - word ld_nelements; /* Number of elements. 
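
GC_make_descriptor's bitmap case above seeds d with SIGNB and shifts downward, so source bit i (word i of the object may contain a pointer) ends up at descriptor bit WORDSZ-1-i, leaving the low bits free for the GC_DS_BITMAP tag. A reconstruction of that loop with local stand-in names (the diff context elides part of the body, so treat this as an approximation):

    #include <limits.h>
    #include <stddef.h>

    #define WORDSZ    (sizeof(unsigned long) * CHAR_BIT)
    #define SIGN_BIT  (1UL << (WORDSZ - 1))
    #define DS_BITMAP 1UL   /* stand-in for the GC_DS_BITMAP tag */

    static unsigned long make_bitmap_descr(const unsigned long *bm,
                                           int last_set_bit)
    {
        unsigned long d = SIGN_BIT;   /* the known-set last bit */
        int i;

        for (i = last_set_bit - 1; i >= 0; i--) {
            d >>= 1;
            if ((bm[i / WORDSZ] >> (i % WORDSZ)) & 1)
                d |= SIGN_BIT;
        }
        return d | DS_BITMAP;
    }
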
*/ + size_t ld_nelements; /* Number of elements. */ GC_descr ld_descriptor; /* A simple length, bitmap, */ /* or procedure descriptor. */ }; @@ -329,7 +328,7 @@ struct LeafDescriptor { /* Describes simple array. */ struct ComplexArrayDescriptor { word ad_tag; # define ARRAY_TAG 2 - word ad_nelements; + size_t ad_nelements; union ComplexDescriptor *ad_element_descr; }; @@ -346,7 +345,7 @@ typedef union ComplexDescriptor { struct SequenceDescriptor sd; } complex_descriptor; -STATIC complex_descriptor *GC_make_leaf_descriptor(word size, word nelements, +STATIC complex_descriptor *GC_make_leaf_descriptor(word size, size_t nelements, GC_descr d) { complex_descriptor *result = (complex_descriptor *) @@ -443,11 +442,11 @@ STATIC int GC_make_array_descriptor(size_t nelements, size_t size, if (COMPLEX == result) { beginning = *pcomplex_d; } else { - beginning = SIMPLE == result ? - GC_make_leaf_descriptor(size, 1, *psimple_d) : - GC_make_leaf_descriptor(pleaf -> ld_size, - pleaf -> ld_nelements, - pleaf -> ld_descriptor); + beginning = SIMPLE == result + ? GC_make_leaf_descriptor(size, 1, *psimple_d) + : GC_make_leaf_descriptor(pleaf -> ld_size, + pleaf -> ld_nelements, + pleaf -> ld_descriptor); if (EXPECT(NULL == beginning, FALSE)) return NO_MEM; } *pcomplex_d = GC_make_sequence_descriptor(beginning, one_element); @@ -490,20 +489,21 @@ GC_API int GC_CALL GC_calloc_prepare_explicitly_typed( return 0; /* failure */ } - pctd -> descr_type = GC_make_array_descriptor((word)n, (word)lb, d, - &(pctd -> simple_d), &(pctd -> complex_d), - &(pctd -> leaf)); + pctd -> descr_type = GC_make_array_descriptor(n, lb, d, + &(pctd -> simple_d), + &(pctd -> complex_d), + &(pctd -> leaf)); switch (pctd -> descr_type) { case NO_MEM: case SIMPLE: pctd -> alloc_lb = (word)lb * n; break; case LEAF: - pctd -> alloc_lb = (word)SIZET_SAT_ADD(lb * n, + pctd -> alloc_lb = SIZET_SAT_ADD(lb * n, sizeof(struct LeafDescriptor) + TYPD_EXTRA_BYTES); break; case COMPLEX: - pctd -> alloc_lb = (word)SIZET_SAT_ADD(lb * n, TYPD_EXTRA_BYTES); + pctd -> alloc_lb = SIZET_SAT_ADD(lb * n, TYPD_EXTRA_BYTES); break; } return 1; /* success */ @@ -593,9 +593,9 @@ GC_API GC_ATTR_MALLOC void * GC_CALL GC_calloc_explicitly_typed(size_t n, /* Return the size of the object described by complex_d. It would be */ /* faster to store this directly, or to compute it as part of */ /* GC_push_complex_descriptor, but hopefully it does not matter. */ -STATIC word GC_descr_obj_size(complex_descriptor *complex_d) +STATIC size_t GC_descr_obj_size(complex_descriptor *complex_d) { - switch(complex_d -> ad.ad_tag) { + switch (complex_d -> ad.ad_tag) { case LEAF_TAG: return complex_d -> ld.ld_nelements * complex_d -> ld.ld_size; case ARRAY_TAG: @@ -617,13 +617,12 @@ STATIC mse *GC_push_complex_descriptor(word *addr, mse *msp, mse *msl) { ptr_t current = (ptr_t)addr; - word nelements; - word sz; - word i; + size_t i, nelements; + size_t sz; GC_descr d; complex_descriptor *element_descr; - switch(complex_d -> ad.ad_tag) { + switch (complex_d -> ad.ad_tag) { case LEAF_TAG: d = complex_d -> ld.ld_descriptor; nelements = complex_d -> ld.ld_nelements;
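
The complex-descriptor changes end with GC_descr_obj_size and GC_push_complex_descriptor recursing over the LEAF/ARRAY/SEQUENCE tags. A stand-alone model of the size computation (the struct here flattens the unions of typd_mlc.c, and the SEQUENCE case is inferred from the tag set rather than shown in the excerpt):

    #include <stddef.h>

    enum tag { LEAF = 1, ARRAY = 2, SEQUENCE = 3 };

    struct descr {
        enum tag tag;
        size_t size, nelements;        /* LEAF, ARRAY */
        struct descr *element;         /* ARRAY */
        struct descr *first, *second;  /* SEQUENCE */
    };

    static size_t descr_obj_size(const struct descr *d)
    {
        switch (d -> tag) {
        case LEAF:
            return d -> nelements * d -> size;
        case ARRAY:
            return d -> nelements * descr_obj_size(d -> element);
        case SEQUENCE:
            return descr_obj_size(d -> first) + descr_obj_size(d -> second);
        default:
            return 0;  /* unreachable for well-formed descriptors */
        }
    }
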