Cleanup: Removed dead code, added comments

Justin Ethier 2019-11-04 14:10:46 -05:00
parent 1b7b3198f3
commit e0388e892a
2 changed files with 28 additions and 46 deletions

gc.c

@@ -387,14 +387,11 @@ uint64_t gc_heap_free_size(gc_heap *h) {
* The caller must hold the necessary locks.
* @param heap_type Define the size of objects that will be allocated on this heap
* @param size Requested size (unpadded) of the heap
* @param max_size Define the heap page max size parameter
* @param chunk_size Define the heap chunk size parameter
* @param thd Calling mutator's thread data object
* @return Pointer to the newly allocated heap page, or NULL
* if the allocation failed.
*/
gc_heap *gc_heap_create(int heap_type, size_t size, size_t max_size,
size_t chunk_size, gc_thread_data *thd)
gc_heap *gc_heap_create(int heap_type, size_t size, gc_thread_data *thd)
{
gc_free_list *free, *next;
gc_heap *h;
@@ -408,15 +405,11 @@ gc_heap *gc_heap_create(int heap_type, size_t size, size_t max_size,
h->size = size;
h->ttl = 10;
h->next_free = h;
h->next_frees = NULL;
h->last_alloc_size = 0;
thd->cached_heap_total_sizes[heap_type] += size;
thd->cached_heap_free_sizes[heap_type] += size;
h->chunk_size = chunk_size;
h->max_size = max_size;
h->data = (char *)gc_heap_align(sizeof(h->data) + (uintptr_t) & (h->data));
h->next = NULL;
//h->num_children = 1;
h->num_unswept_children = 0;
free = h->free_list = (gc_free_list *) h->data;
next = (gc_free_list *) (((char *)free) + gc_heap_align(gc_free_chunk_size));
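
For reference, this is roughly how a caller now creates a page with the simplified three-argument signature (a usage sketch mirroring the gc_thread_data_init calls further down; the error handling shown is illustrative):

  /* Usage sketch: allocate one heap page for this mutator's thread data. */
  gc_heap *page = gc_heap_create(HEAP_REST, INITIAL_HEAP_SIZE, thd);
  if (page == NULL) {
    fprintf(stderr, "unable to allocate initial heap page\n");
    exit(1);
  }
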
@@ -728,15 +721,6 @@ gc_heap *gc_sweep_fixed_size(gc_heap * h, int heap_type, gc_thread_data *thd)
return rv;
}
gc_heap *gc_find_heap_with_chunk_size(gc_heap *h, size_t chunk_size)
{
while (h) {
if (h->chunk_size == chunk_size) return h;
h = h->next;
}
return NULL;
}
/**
* @brief Free a page of the heap
* @param page Page to free
@@ -1010,7 +994,6 @@ char *gc_copy_obj(object dest, char *obj, gc_thread_data * thd)
* @param h Heap to be expanded
* @param heap_type Define the size of objects that will be allocated on this heap
* @param size Not applicable, can set to 0
* @param chunk_size Heap chunk size, or 0 if not applicable
* @param thd Thread data for the mutator using this heap
* @return A true value if the heap was grown, or 0 otherwise
*
@@ -1021,7 +1004,7 @@ char *gc_copy_obj(object dest, char *obj, gc_thread_data * thd)
increasing size using the Fibonacci sequence until reaching the
* max size.
*/
int gc_grow_heap(gc_heap * h, int heap_type, size_t size, size_t chunk_size, gc_thread_data *thd)
int gc_grow_heap(gc_heap * h, int heap_type, size_t size, gc_thread_data *thd)
{
size_t /*cur_size,*/ new_size;
gc_heap *h_last = h, *h_new;
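
As a rough sketch of the Fibonacci-style growth described in the comment above (the helper name and the cap parameter are assumptions for illustration, not the exact computation inside gc_grow_heap):

  /* Assumed sketch: the next page size is the sum of the previous two page
   * sizes (one Fibonacci step), clamped to a caller-supplied maximum. */
  static size_t next_page_size(size_t prev_size, size_t cur_size, size_t cap)
  {
    size_t next = prev_size + cur_size;
    return (next > cap) ? cap : next;
  }
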
@@ -1073,7 +1056,7 @@ int gc_grow_heap(gc_heap * h, int heap_type, size_t size, size_t chunk_size, gc_
// allocate larger pages if size will not fit on the page
//new_size = gc_heap_align(((cur_size > size) ? cur_size : size));
// Done with computing new page size
h_new = gc_heap_create(heap_type, new_size, h_last->max_size, chunk_size, thd);
h_new = gc_heap_create(heap_type, new_size, thd);
h_last->next = h_new;
//pthread_mutex_unlock(&(thd->heap_lock));
#if GC_DEBUG_TRACE
@@ -1198,8 +1181,6 @@ void *gc_try_alloc_slow(gc_heap *h_passed, gc_heap *h, int heap_type, size_t siz
}
//thd->cached_heap_free_sizes[heap_type] -= prev_free_size;
thd->cached_heap_total_sizes[heap_type] -= h_size;
//h_passed->num_children--;
//h_passed->num_unswept_children--;
continue;
}
}
@@ -1314,8 +1295,6 @@ void *gc_try_alloc_slow_fixed_size(gc_heap *h_passed, gc_heap *h, int heap_type,
}
//thd->cached_heap_free_sizes[heap_type] -= prev_free_size;
thd->cached_heap_total_sizes[heap_type] -= h_size;
//h_passed->num_children--;
//h_passed->num_unswept_children--;
continue;
}
}
@@ -1464,10 +1443,8 @@ fprintf(stderr, "slow alloc of %p\n", result);
/* A vanilla mark&sweep collector would collect now, but unfortunately */
/* we can't do that because we have to go through multiple stages, some */
/* of which are asynchronous. So... no choice but to grow the heap. */
gc_grow_heap(h, heap_type, size, 0, thd);
gc_grow_heap(h, heap_type, size, thd);
*heap_grown = 1;
//h_passed->num_children++;
//h_passed->num_unswept_children++;
// TODO: would be nice if gc_grow_heap returns new page (maybe it does) then we can start from there
// otherwise will be a bit of a bottleneck since with lazy sweeping there is no guarantee we are at
// the end of the heap anymore
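
Reduced to a sketch, the strategy in this slow path is to grow the heap and retry the allocation rather than forcing a collection; try_alloc_on_page below is a hypothetical stand-in for the real free-list search, shown only to make the control flow explicit:

  result = try_alloc_on_page(h, size);        /* hypothetical helper */
  if (result == NULL) {
    gc_grow_heap(h, heap_type, size, thd);    /* no luck, append a new page */
    *heap_grown = 1;
    result = try_alloc_on_page(h, size);      /* retry on the grown heap */
  }
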
@@ -2761,13 +2738,13 @@ void gc_thread_data_init(gc_thread_data * thd, int mut_num, char *stack_base,
thd->cached_heap_total_sizes = calloc(5, sizeof(uintptr_t));
thd->heap = calloc(1, sizeof(gc_heap_root));
thd->heap->heap = calloc(1, sizeof(gc_heap *) * NUM_HEAP_TYPES);
thd->heap->heap[HEAP_REST] = gc_heap_create(HEAP_REST, INITIAL_HEAP_SIZE, 0, 0, thd);
thd->heap->heap[HEAP_SM] = gc_heap_create(HEAP_SM, INITIAL_HEAP_SIZE, 0, 0, thd);
thd->heap->heap[HEAP_64] = gc_heap_create(HEAP_64, INITIAL_HEAP_SIZE, 0, 0, thd);
thd->heap->heap[HEAP_REST] = gc_heap_create(HEAP_REST, INITIAL_HEAP_SIZE, thd);
thd->heap->heap[HEAP_SM] = gc_heap_create(HEAP_SM, INITIAL_HEAP_SIZE, thd);
thd->heap->heap[HEAP_64] = gc_heap_create(HEAP_64, INITIAL_HEAP_SIZE, thd);
if (sizeof(void *) == 8) { // Only use this heap on 64-bit platforms
thd->heap->heap[HEAP_96] = gc_heap_create(HEAP_96, INITIAL_HEAP_SIZE, 0, 0, thd);
thd->heap->heap[HEAP_96] = gc_heap_create(HEAP_96, INITIAL_HEAP_SIZE, thd);
}
thd->heap->heap[HEAP_HUGE] = gc_heap_create(HEAP_HUGE, 1024, 0, 0, thd);
thd->heap->heap[HEAP_HUGE] = gc_heap_create(HEAP_HUGE, 1024, thd);
}
/**


@@ -199,37 +199,43 @@ struct gc_free_list_t {
/**
* Heap page
*
* @brief Contains data for a single page of the heap.
*
* Note there are groups of parameters to support:
* - Bump-allocation - This type of allocation is faster but only applicable when a page is first created or empty.
* - Lazy sweep
*/
typedef struct gc_heap_t gc_heap;
struct gc_heap_t {
gc_heap_type type;
/** Size of the heap page in bytes */
unsigned int size;
/** 0 for any size; non-zero and heap will only alloc chunks of that size */
unsigned int chunk_size;
unsigned int max_size;
/** Keep empty page alive this many times before freeing */
unsigned int ttl;
/** Bump: Track remaining space; this is useful for bump&pop style allocation */
unsigned int remaining;
/** For fixed-size heaps, only allocate blocks of this size */
unsigned block_size;
/** End of the data when using bump allocation or NULL when using free lists */
char *data_end;
// Lazy-sweep related data
/** Lazy-sweep: Amount of heap data that is free */
unsigned int free_size;
/** Lazy-sweep: Determine if the heap is full */
unsigned char is_full;
/** Lazy-sweep: Determine if the heap has been swept */
unsigned char is_unswept;
//int num_children;
/** Lazy-sweep: Start GC cycle if fewer than this many heap pages are unswept */
int num_unswept_children;
//
/** Last size of object that was allocated, allows for optimizations */
unsigned int last_alloc_size;
//unsigned int free_size;
/** Next page that has free space, lets alloc find that page faster */
gc_heap *next_free;
gc_heap **next_frees;
//
/** Linked list of free memory blocks in this page */
gc_free_list *free_list;
/** Next page in this heap */
gc_heap *next; // TBD, linked list is not very efficient, but easy to work with as a start
/** Actual data in this page */
char *data;
};
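
To make the bump-allocation fields above concrete, here is an illustrative sketch of how remaining and data_end could work together (an assumption for explanation, not the allocator's actual code; a real allocation would also align size):

  /* Sketch: bump-style allocation from a page's data area. The next free
   * byte sits at data_end - remaining; returning NULL signals the caller
   * to fall back to the free list. */
  static void *bump_alloc(gc_heap *h, size_t size)
  {
    if (h->data_end == NULL || h->remaining < size)
      return NULL;
    void *p = h->data_end - h->remaining;
    h->remaining -= size;
    return p;
  }
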
@@ -363,14 +369,13 @@ void gc_remove_mutator(gc_thread_data * thd);
int gc_is_mutator_active(gc_thread_data *thd);
int gc_is_mutator_new(gc_thread_data *thd);
void gc_sleep_ms(int ms);
gc_heap *gc_heap_create(int heap_type, size_t size, size_t max_size,
size_t chunk_size, gc_thread_data *thd);
gc_heap *gc_heap_create(int heap_type, size_t size, gc_thread_data *thd);
gc_heap *gc_heap_free(gc_heap *page, gc_heap *prev_page);
void gc_heap_merge(gc_heap *hdest, gc_heap *hsrc);
void gc_merge_all_heaps(gc_thread_data *dest, gc_thread_data *src);
int gc_is_heap_empty(gc_heap *h);
void gc_print_stats(gc_heap * h);
int gc_grow_heap(gc_heap * h, int heap_type, size_t size, size_t chunk_size, gc_thread_data *thd);
int gc_grow_heap(gc_heap * h, int heap_type, size_t size, gc_thread_data *thd);
char *gc_copy_obj(object hp, char *obj, gc_thread_data * thd);
void *gc_try_alloc(gc_heap * h, int heap_type, size_t size, char *obj,
gc_thread_data * thd);
@@ -382,8 +387,8 @@ size_t gc_allocated_bytes(object obj, gc_free_list * q, gc_free_list * r);
gc_heap *gc_heap_last(gc_heap * h);
void gc_heap_create_rest(gc_heap *h, gc_thread_data *thd);
int gc_grow_heap_rest(gc_heap * h, int heap_type, size_t size, size_t chunk_size, gc_thread_data *thd);
void *gc_try_alloc_rest(gc_heap * h, int heap_type, size_t size, size_t chunk_size, char *obj, gc_thread_data * thd);
int gc_grow_heap_rest(gc_heap * h, int heap_type, size_t size, gc_thread_data *thd);
void *gc_try_alloc_rest(gc_heap * h, int heap_type, size_t size, char *obj, gc_thread_data * thd);
void *gc_alloc_rest(gc_heap_root * hrt, size_t size, char *obj, gc_thread_data * thd, int *heap_grown);
void gc_init_fixed_size_free_list(gc_heap *h);