From 566e81451ed79b5d5b286f8e1f5eeb776a9e2e7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=A1niel=20B=C3=A1tyai?= Date: Tue, 6 Aug 2019 17:59:49 +0200 Subject: [PATCH] Add realloc function to jmem (#2998) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch extends jmem functionality by adding a realloc function. This opens up other paths of optimization which can result in smaller peak memory usage and faster execution times, due to not having to duplicate memory when we need to extend blocks. JerryScript-DCO-1.0-Signed-off-by: Dániel Bátyai dbatyai@inf.u-szeged.hu --- jerry-core/jmem/jmem-heap.c | 246 +++++++++++++++++++++++++++++++----- jerry-core/jmem/jmem.h | 1 + tests/unit-core/test-jmem.c | 110 ++++++++++++++++ 3 files changed, 328 insertions(+), 29 deletions(-) create mode 100644 tests/unit-core/test-jmem.c diff --git a/jerry-core/jmem/jmem-heap.c b/jerry-core/jmem/jmem-heap.c index badb079a2e..05b85463a2 100644 --- a/jerry-core/jmem/jmem-heap.c +++ b/jerry-core/jmem/jmem-heap.c @@ -353,25 +353,16 @@ jmem_heap_alloc_block_null_on_error (const size_t size) /**< required memory siz return block_p; } /* jmem_heap_alloc_block_null_on_error */ +#if !ENABLED (JERRY_SYSTEM_ALLOCATOR) /** - * Internal method for freeing a memory block. 
+ * Finds the block in the free block list which precedes the argument block
+ *
+ * @return pointer to the preceding block
+ * + * Note: + * 'jmem_heap_find_prev' can and should be used to find the previous free block + */ +static void +jmem_heap_insert_block (jmem_heap_free_t *block_p, /**< block to insert */ + jmem_heap_free_t *prev_p, /**< the free block after which to insert 'block_p' */ + const size_t size) /**< size of the inserted block */ +{ + JERRY_ASSERT ((uintptr_t) block_p % JMEM_ALIGNMENT == 0); + JERRY_ASSERT (size % JMEM_ALIGNMENT == 0); - JMEM_VALGRIND_DEFINED_SPACE (block_p, sizeof (jmem_heap_free_t)); JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t)); + jmem_heap_free_t *next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset); + + JMEM_VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t)); + JMEM_VALGRIND_DEFINED_SPACE (block_p, sizeof (jmem_heap_free_t)); + + const uint32_t block_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (block_p); + /* Update prev. */ if (jmem_heap_get_region_end (prev_p) == block_p) { /* Can be merged. */ - prev_p->size += (uint32_t) aligned_size; + prev_p->size += (uint32_t) size; JMEM_VALGRIND_NOACCESS_SPACE (block_p, sizeof (jmem_heap_free_t)); block_p = prev_p; } else { - block_p->size = (uint32_t) aligned_size; + block_p->size = (uint32_t) size; prev_p->next_offset = block_offset; } - JMEM_VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t)); /* Update next. */ if (jmem_heap_get_region_end (block_p) == next_p) { @@ -439,28 +446,209 @@ jmem_heap_free_block_internal (void *ptr, /**< pointer to beginning of data spac JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t)); JMEM_VALGRIND_NOACCESS_SPACE (block_p, size); JMEM_VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t)); +} /* jmem_heap_insert_block */ +#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */ +/** + * Internal method for freeing a memory block. 
+ */ +void JERRY_ATTR_HOT +jmem_heap_free_block_internal (void *ptr, /**< pointer to beginning of data space of the block */ + const size_t size) /**< size of allocated region */ +{ + JERRY_ASSERT (size > 0); + JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size)); JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) > 0); + +#if !ENABLED (JERRY_SYSTEM_ALLOCATOR) + /* checking that ptr points to the heap */ + JERRY_ASSERT (jmem_is_heap_pointer (ptr)); + JERRY_ASSERT ((uintptr_t) ptr % JMEM_ALIGNMENT == 0); + + const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT; + + jmem_heap_free_t * const block_p = (jmem_heap_free_t *) ptr; + jmem_heap_free_t * const prev_p = jmem_heap_find_prev (block_p); + jmem_heap_insert_block (block_p, prev_p, aligned_size); + JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_size; + JMEM_VALGRIND_NOACCESS_SPACE (ptr, size); + JMEM_VALGRIND_FREELIKE_SPACE (ptr); +#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */ + JERRY_CONTEXT (jmem_heap_allocated_size) -= size; + free (ptr); +#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */ while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_GC_LIMIT <= JERRY_CONTEXT (jmem_heap_limit)) { JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_GC_LIMIT; } - JMEM_VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t)); JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size)); +} /* jmem_heap_free_block_internal */ + +/** + * Reallocates the memory region pointed to by 'ptr', changing the size of the allocated region. 
+ * + * @return pointer to the reallocated region + */ +void * JERRY_ATTR_HOT +jmem_heap_realloc_block (void *ptr, /**< memory region to reallocate */ + const size_t old_size, /**< current size of the region */ + const size_t new_size) /**< desired new size */ +{ +#if !ENABLED (JERRY_SYSTEM_ALLOCATOR) + JERRY_ASSERT (jmem_is_heap_pointer (ptr)); + JERRY_ASSERT ((uintptr_t) ptr % JMEM_ALIGNMENT == 0); + JERRY_ASSERT (old_size != 0); + JERRY_ASSERT (new_size != 0); + + jmem_heap_free_t * const block_p = (jmem_heap_free_t *) ptr; + const size_t aligned_new_size = (new_size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT; + const size_t aligned_old_size = (old_size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT; + + if (aligned_old_size == aligned_new_size) + { + JMEM_VALGRIND_NOACCESS_SPACE (block_p, old_size); + JMEM_VALGRIND_DEFINED_SPACE (block_p, new_size); + JMEM_HEAP_STAT_FREE (old_size); + JMEM_HEAP_STAT_ALLOC (new_size); + return block_p; + } + + jmem_heap_free_t *prev_p = jmem_heap_find_prev (block_p); + + if (aligned_new_size < aligned_old_size) + { + JMEM_VALGRIND_NOACCESS_SPACE (block_p, old_size); + JMEM_VALGRIND_DEFINED_SPACE (block_p, new_size); + JMEM_HEAP_STAT_FREE (old_size); + JMEM_HEAP_STAT_ALLOC (new_size); + jmem_heap_insert_block ((jmem_heap_free_t *)((uint8_t *) block_p + aligned_new_size), + prev_p, + aligned_old_size - aligned_new_size); + + JERRY_CONTEXT (jmem_heap_allocated_size) -= (aligned_old_size - aligned_new_size); + while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_GC_LIMIT <= JERRY_CONTEXT (jmem_heap_limit)) + { + JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_GC_LIMIT; + } + + return block_p; + } + + void *ret_block_p = NULL; + const size_t required_size = aligned_new_size - aligned_old_size; + +#if !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) + if (JERRY_CONTEXT (jmem_heap_allocated_size) + required_size >= JERRY_CONTEXT (jmem_heap_limit)) +#endif /* !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) */ + { + 
ecma_free_unused_memory (JMEM_PRESSURE_LOW);
+  }
+
+  JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
+  jmem_heap_free_t * const next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
+  JMEM_VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
+
+  /* Check if block can be extended at the end */
+  if (((jmem_heap_free_t *) ((uint8_t *) block_p + aligned_old_size)) == next_p)
+  {
+    if (required_size <= next_p->size)
+    {
+      /* Block can be extended, update the list. */
+      if (required_size == next_p->size)
+      {
+        prev_p->next_offset = next_p->next_offset;
+      }
+      else
+      {
+        jmem_heap_free_t *const new_next_p = (jmem_heap_free_t *) ((uint8_t *) next_p + required_size);
+        JMEM_VALGRIND_DEFINED_SPACE (new_next_p, sizeof (jmem_heap_free_t));
+        new_next_p->next_offset = next_p->next_offset;
+        new_next_p->size = (uint32_t) (next_p->size - required_size);
+        JMEM_VALGRIND_NOACCESS_SPACE (new_next_p, sizeof (jmem_heap_free_t));
+        prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (new_next_p);
+      }
+
+      JMEM_VALGRIND_NOACCESS_SPACE ((uint8_t *) block_p, old_size);
+      JMEM_VALGRIND_DEFINED_SPACE ((uint8_t *) block_p, new_size);
+      ret_block_p = block_p;
+    }
+  }
+  /*
+   * Check if block can be extended at the front.
+   * This is less optimal because we need to copy the data, but still better than allocating a new block.
+ */ + else if (jmem_heap_get_region_end (prev_p) == block_p) + { + if (required_size <= prev_p->size) + { + if (required_size == prev_p->size) + { + prev_p = jmem_heap_find_prev (prev_p); + JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t)); + prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p); + } + else + { + prev_p->size = (uint32_t) (prev_p->size - required_size); + } + + ret_block_p = (uint8_t *) block_p - required_size; + memmove (ret_block_p, block_p, old_size); + JMEM_VALGRIND_NOACCESS_SPACE ((uint8_t *) block_p, old_size); + JMEM_VALGRIND_DEFINED_SPACE ((uint8_t *) ret_block_p, new_size); + } + } + + if (ret_block_p != NULL) + { + /* Managed to extend the block. Update memory usage and the skip pointer. */ + JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p; + JERRY_CONTEXT (jmem_heap_allocated_size) += required_size; + + while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit)) + { + JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT; + } + } + else + { + /* Could not extend block. Allocate new region and copy the data. */ + /* jmem_heap_alloc_block_internal will adjust the allocated_size, but insert_block will not, + so we reduce it here first, so that the limit calculation remains consistent. 
*/ + JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_old_size; + + ret_block_p = jmem_heap_alloc_block_internal (new_size); + memcpy (ret_block_p, block_p, old_size); + jmem_heap_insert_block (block_p, prev_p, aligned_old_size); + } + + JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t)); + JMEM_VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t)); + + JMEM_HEAP_STAT_FREE (old_size); + JMEM_HEAP_STAT_ALLOC (new_size); + return ret_block_p; #else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */ - JERRY_CONTEXT (jmem_heap_allocated_size) -= size; + JERRY_CONTEXT (jmem_heap_allocated_size) += (new_size - old_size); + + while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit)) + { + JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT; + } while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_GC_LIMIT <= JERRY_CONTEXT (jmem_heap_limit)) { JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_GC_LIMIT; } - free (ptr); + JMEM_HEAP_STAT_FREE (old_size); + JMEM_HEAP_STAT_ALLOC (new_size); + return realloc (ptr, new_size); #endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */ -} /* jmem_heap_free_block_internal */ +} /* jmem_heap_realloc_block */ /** * Free memory block diff --git a/jerry-core/jmem/jmem.h b/jerry-core/jmem/jmem.h index 083bf11e95..18ad2f3833 100644 --- a/jerry-core/jmem/jmem.h +++ b/jerry-core/jmem/jmem.h @@ -108,6 +108,7 @@ void jmem_finalize (void); void *jmem_heap_alloc_block (const size_t size); void *jmem_heap_alloc_block_null_on_error (const size_t size); +void *jmem_heap_realloc_block (void *ptr, const size_t old_size, const size_t new_size); void jmem_heap_free_block (void *ptr, const size_t size); #if ENABLED (JERRY_MEM_STATS) diff --git a/tests/unit-core/test-jmem.c b/tests/unit-core/test-jmem.c new file mode 100644 index 0000000000..6799939aea --- /dev/null +++ b/tests/unit-core/test-jmem.c @@ -0,0 +1,110 @@ +/* Copyright JS Foundation and other contributors, http://js.foundation + * + * Licensed under the Apache License, 
Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
*/ + + for (uint8_t i = 0; i < BASIC_SIZE * 2; i++) + { + TEST_ASSERT (block2_p[i] == i); + } + + /* Shrink */ + block2_p = (uint8_t *) jmem_heap_realloc_block (block2_p, BASIC_SIZE * 3, BASIC_SIZE); + + /* [block1 64] [free 64] [block2 64] [free 128] [block4 128] [...] */ + + for (uint8_t i = 0; i < BASIC_SIZE; i++) + { + TEST_ASSERT (block2_p[i] == i); + } + + for (uint8_t i = 0; i < BASIC_SIZE; i++) + { + block1_p[i] = i; + } + + /* Grow in place */ + block1_p = (uint8_t *) jmem_heap_realloc_block (block1_p, BASIC_SIZE, BASIC_SIZE * 2); + + /* [block1 128] [block2 64] [free 128] [block4 128] [...] */ + + for (uint8_t i = 0; i < BASIC_SIZE; i++) + { + TEST_ASSERT (block1_p[i] == i); + } + + jmem_heap_free_block (block1_p, BASIC_SIZE * 2); + jmem_heap_free_block (block2_p, BASIC_SIZE); + jmem_heap_free_block (block4_p, BASIC_SIZE * 2); + } + + ecma_finalize (); + jmem_finalize (); + + return 0; +} /* main */