From 8461a974e05e727db024a9ab6b9aa07ffc51266a Mon Sep 17 00:00:00 2001
From: j-hui
Date: Fri, 11 Feb 2022 10:42:30 -0500
Subject: [PATCH] Fix hooks, no more errors, but seemingly unreliable

---
 src/ssm-mem.c | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/src/ssm-mem.c b/src/ssm-mem.c
index 793c363..cfa570b 100644
--- a/src/ssm-mem.c
+++ b/src/ssm-mem.c
@@ -91,11 +91,11 @@ static inline size_t find_pool_size(size_t size) {
 static inline void alloc_pool(size_t p) {
   block_t *new_page = alloc_page();
   SSM_ASSERT(END_OF_FREELIST < new_page);
-  VALGRIND_CREATE_MEMPOOL(new_page, 0, 0);
   struct mem_pool *pool = &mem_pools[p];
   size_t last_block = BLOCKS_PER_PAGE - SSM_MEM_POOL_SIZE(p) / sizeof(block_t);
   new_page[last_block].free_list_next = pool->free_list_head;
   pool->free_list_head = new_page;
+  VALGRIND_MAKE_MEM_NOACCESS(new_page, SSM_MEM_PAGE_SIZE);
 }
 
 void ssm_mem_init(void *(*alloc_page_handler)(void),
@@ -105,8 +105,10 @@ void ssm_mem_init(void *(*alloc_page_handler)(void),
   alloc_mem = alloc_mem_handler;
   free_mem = free_mem_handler;
 
-  for (size_t p = 0; p < SSM_MEM_POOL_COUNT; p++)
+  for (size_t p = 0; p < SSM_MEM_POOL_COUNT; p++) {
     mem_pools[p].free_list_head = END_OF_FREELIST;
+    VALGRIND_CREATE_MEMPOOL(&mem_pools[p], 0, 1);
+  }
 }
 
 void ssm_mem_prealloc(size_t size, size_t num_pages) {
@@ -132,12 +134,16 @@ void *ssm_mem_alloc(size_t size) {
     alloc_pool(p);
 
   void *buf = pool->free_list_head->block_buf;
+  VALGRIND_MEMPOOL_ALLOC(pool, buf, size);
 
   if (pool->free_list_head->free_list_next == UNINITIALIZED_FREE_BLOCK)
     pool->free_list_head += SSM_MEM_POOL_SIZE(p) / sizeof(block_t);
   else
     pool->free_list_head = pool->free_list_head->free_list_next;
+
+  VALGRIND_MAKE_MEM_UNDEFINED(buf, size);
+
   return buf;
 #endif
 }
@@ -147,16 +153,19 @@ void ssm_mem_free(void *m, size_t size) {
   free_mem(m, size);
 #else
-  size_t pool = find_pool_size(size);
-  if (pool >= SSM_MEM_POOL_COUNT) {
+  size_t p = find_pool_size(size);
+  if (p >= SSM_MEM_POOL_COUNT) {
     free_mem(m, size);
     return;
   }
-  VALGRIND_MEMPOOL_FREE(pool, m);
+
+  struct mem_pool *pool = &mem_pools[p];
 
   block_t *new_head = m;
-  new_head->free_list_next = mem_pools[pool].free_list_head;
-  mem_pools[pool].free_list_head = new_head;
+  new_head->free_list_next = pool->free_list_head;
+  pool->free_list_head = new_head;
+
+  VALGRIND_MEMPOOL_FREE(pool, m);
 #endif
 }
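
Note on the ordering this patch settles on: the mempool is anchored on the pool descriptor (&mem_pools[p]) rather than on each page, fresh pages are hidden with VALGRIND_MAKE_MEM_NOACCESS, VALGRIND_MEMPOOL_ALLOC is paired with VALGRIND_MAKE_MEM_UNDEFINED, and VALGRIND_MEMPOOL_FREE only runs after the free-list link has been written back into the block. These are the standard Valgrind client requests from <valgrind/valgrind.h> and <valgrind/memcheck.h>. Below is a minimal, self-contained sketch of the same annotation pattern on a toy fixed-size pool; it is not part of the ssm runtime, the names toy_pool, toy_block, BLOCK_SIZE, and NBLOCKS are invented for illustration, and it assumes the Valgrind development headers are installed (the macros compile to cheap no-ops when the program is not run under Valgrind).

/*
 * Minimal sketch (not the ssm runtime): a fixed-size block pool annotated
 * with the same Valgrind client requests the patch uses. All names here
 * (toy_pool, toy_block, BLOCK_SIZE, NBLOCKS) are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>

#define BLOCK_SIZE 64          /* bytes per block; must hold a pointer */
#define NBLOCKS    16          /* blocks in the static arena */

struct toy_block { struct toy_block *next; };

struct toy_pool {
  struct toy_block *head;      /* free-list head, threaded through blocks */
};

static unsigned char arena[BLOCK_SIZE * NBLOCKS];
static struct toy_pool pool;

static void toy_pool_init(void) {
  /* Thread every block onto the free list while the arena is still
   * addressable, then hide the whole arena from memcheck. */
  pool.head = NULL;
  for (size_t i = 0; i < NBLOCKS; i++) {
    struct toy_block *b = (struct toy_block *)(arena + i * BLOCK_SIZE);
    b->next = pool.head;
    pool.head = b;
  }
  /* The pool descriptor, not the backing page, anchors the mempool,
   * mirroring the patch's move of VALGRIND_CREATE_MEMPOOL to &mem_pools[p]. */
  VALGRIND_CREATE_MEMPOOL(&pool, 0, 0);
  VALGRIND_MAKE_MEM_NOACCESS(arena, sizeof arena);
}

static void *toy_alloc(void) {
  struct toy_block *b = pool.head;
  if (!b)
    return NULL;
  /* Tell memcheck the block is now live (addressable but undefined). */
  VALGRIND_MEMPOOL_ALLOC(&pool, b, BLOCK_SIZE);
  /* The free-list link is real data written earlier, so mark just that
   * field defined before reading it back out. */
  VALGRIND_MAKE_MEM_DEFINED(&b->next, sizeof b->next);
  pool.head = b->next;
  /* Hand the caller a block whose contents read as uninitialized. */
  VALGRIND_MAKE_MEM_UNDEFINED(b, BLOCK_SIZE);
  return b;
}

static void toy_free(void *p) {
  struct toy_block *b = p;
  /* Write the link while the block is still "allocated" to memcheck;
   * after VALGRIND_MEMPOOL_FREE it becomes no-access again. This is the
   * ordering the patch restores in ssm_mem_free. */
  b->next = pool.head;
  pool.head = b;
  VALGRIND_MEMPOOL_FREE(&pool, b);
}

int main(void) {
  toy_pool_init();
  char *s = toy_alloc();
  memset(s, 'x', BLOCK_SIZE);        /* define the block before use */
  s[BLOCK_SIZE - 1] = '\0';
  printf("%s\n", s);
  toy_free(s);
  VALGRIND_DESTROY_MEMPOOL(&pool);
  return 0;
}

Compiled against the Valgrind headers and run under memcheck, this sketch should report no errors; swapping the order in toy_free (calling VALGRIND_MEMPOOL_FREE before writing the link) should instead be flagged as an invalid write into freed pool memory, which is the kind of report the patch's reordering in ssm_mem_free avoids.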