Import of the watch repository from Pebble

Matthieu Jeanson 2024-12-12 16:43:03 -08:00 committed by Katharine Berry
commit 3b92768480
10334 changed files with 2564465 additions and 0 deletions

View file: jmem-allocator-internal.h

@@ -0,0 +1,36 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef JMEM_ALLOCATOR_INTERNAL_H
#define JMEM_ALLOCATOR_INTERNAL_H
#ifndef JMEM_ALLOCATOR_INTERNAL
# error "This header is for internal routines of the memory allocator component. Do not use these routines directly."
#endif /* !JMEM_ALLOCATOR_INTERNAL */
#include <stdbool.h>
#include <stddef.h>
/** \addtogroup mem Memory allocation
* @{
*/
extern void jmem_run_free_unused_memory_callbacks (jmem_free_unused_memory_severity_t, size_t, bool);
/**
* @}
*/
#endif /* !JMEM_ALLOCATOR_INTERNAL_H */

View file: jmem-allocator.c

@@ -0,0 +1,183 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
* Copyright 2016 University of Szeged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Allocator implementation
*/
#include "jcontext.h"
#include "jmem-allocator.h"
#include "jmem-heap.h"
#include "jmem-poolman.h"
#include "jrt-libc-includes.h"
#define JMEM_ALLOCATOR_INTERNAL
#include "jmem-allocator-internal.h"
#ifdef JERRY_CPOINTER_32_BIT
/* This check will go away when we support 64 bit compressed pointers. */
JERRY_STATIC_ASSERT (sizeof (uintptr_t) <= sizeof (jmem_cpointer_t),
size_of_uintptr_t_must_not_exceed_size_of_jmem_cpointer_t);
#endif
/**
* Initialize memory allocators.
*/
void
jmem_init (void)
{
jmem_heap_init ();
} /* jmem_init */
/**
* Finalize memory allocators.
*/
void
jmem_finalize (void)
{
jmem_pools_finalize ();
#ifdef JMEM_STATS
if (JERRY_CONTEXT (jerry_init_flags) & JERRY_INIT_MEM_STATS)
{
jmem_stats_print ();
}
#endif /* JMEM_STATS */
jmem_heap_finalize ();
} /* jmem_finalize */
/**
* Compress pointer
*
* @return packed pointer
*/
inline jmem_cpointer_t __attr_always_inline___
jmem_compress_pointer (const void *pointer_p) /**< pointer to compress */
{
JERRY_ASSERT (pointer_p != NULL);
JERRY_ASSERT (jmem_is_heap_pointer (pointer_p));
uintptr_t uint_ptr = (uintptr_t) pointer_p;
JERRY_ASSERT (uint_ptr % JMEM_ALIGNMENT == 0);
#ifdef JERRY_CPOINTER_32_BIT
JERRY_ASSERT (((jmem_cpointer_t) uint_ptr) == uint_ptr);
#else /* !JERRY_CPOINTER_32_BIT */
const uintptr_t heap_start = (uintptr_t) &JERRY_HEAP_CONTEXT (first);
uint_ptr -= heap_start;
uint_ptr >>= JMEM_ALIGNMENT_LOG;
JERRY_ASSERT (uint_ptr <= UINT16_MAX);
JERRY_ASSERT (uint_ptr != JMEM_CP_NULL);
#endif /* JERRY_CPOINTER_32_BIT */
return (jmem_cpointer_t) uint_ptr;
} /* jmem_compress_pointer */
/**
* Decompress pointer
*
* @return unpacked pointer
*/
inline void * __attr_always_inline___
jmem_decompress_pointer (uintptr_t compressed_pointer) /**< pointer to decompress */
{
JERRY_ASSERT (compressed_pointer != JMEM_CP_NULL);
uintptr_t uint_ptr = compressed_pointer;
JERRY_ASSERT (((jmem_cpointer_t) uint_ptr) == uint_ptr);
#ifdef JERRY_CPOINTER_32_BIT
JERRY_ASSERT (uint_ptr % JMEM_ALIGNMENT == 0);
#else /* !JERRY_CPOINTER_32_BIT */
const uintptr_t heap_start = (uintptr_t) &JERRY_HEAP_CONTEXT (first);
uint_ptr <<= JMEM_ALIGNMENT_LOG;
uint_ptr += heap_start;
JERRY_ASSERT (jmem_is_heap_pointer ((void *) uint_ptr));
#endif /* JERRY_CPOINTER_32_BIT */
return (void *) uint_ptr;
} /* jmem_decompress_pointer */
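/* Round-trip example (editor's sketch, not part of the original import;
* 16 bit representation): with JMEM_ALIGNMENT_LOG == 3 and
* heap_start = (uintptr_t) &JERRY_HEAP_CONTEXT (first):
*
*   p  = heap_start + 0x1238;          // 8-byte aligned heap pointer
*   cp = jmem_compress_pointer ((void *) p); // (0x1238 >> 3) == 0x247
*   q  = jmem_decompress_pointer (cp); // heap_start + (0x247 << 3) == p
*/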
/**
* Register specified 'try to give memory back' callback routine
*/
void
jmem_register_free_unused_memory_callback (jmem_free_unused_memory_callback_t callback) /**< callback routine */
{
/* Currently only one callback is supported */
JERRY_ASSERT (JERRY_CONTEXT (jmem_free_unused_memory_callback) == NULL);
JERRY_CONTEXT (jmem_free_unused_memory_callback) = callback;
} /* jmem_register_free_unused_memory_callback */
/**
* Unregister specified 'try to give memory back' callback routine
*/
void
jmem_unregister_free_unused_memory_callback (jmem_free_unused_memory_callback_t callback) /**< callback routine */
{
/* Currently only one callback is supported */
JERRY_ASSERT (JERRY_CONTEXT (jmem_free_unused_memory_callback) == callback);
JERRY_CONTEXT (jmem_free_unused_memory_callback) = NULL;
} /* jmem_unregister_free_unused_memory_callback */
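/* Usage sketch (editor's illustration; 'try_shrink_caches' is a hypothetical
* client routine, not part of this import). As the asserts above enforce,
* only one callback may be registered at a time:
*
*   static void
*   try_shrink_caches (jmem_free_unused_memory_severity_t severity,
*                      size_t requested_size_bytes,
*                      bool fatal_if_not_freed)
*   {
*     // release cached objects; try harder for SEVERITY_HIGH requests
*   }
*
*   jmem_register_free_unused_memory_callback (try_shrink_caches);
*   // ... run scripts ...
*   jmem_unregister_free_unused_memory_callback (try_shrink_caches);
*/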
/**
* Run 'try to give memory back' callbacks with specified severity
*/
void
jmem_run_free_unused_memory_callbacks (jmem_free_unused_memory_severity_t severity, /**< severity of the request */
size_t requested_size_bytes, /**< number of bytes to be allocated */
bool fatal_if_not_freed) /**< run-time will terminate if not enough memory is freed */
{
if (JERRY_CONTEXT (jmem_free_unused_memory_callback) != NULL)
{
JERRY_CONTEXT (jmem_free_unused_memory_callback) (severity, requested_size_bytes, fatal_if_not_freed);
}
jmem_pools_collect_empty ();
} /* jmem_run_free_unused_memory_callbacks */
#ifdef JMEM_STATS
/**
* Reset peak values in memory usage statistics
*/
void
jmem_stats_reset_peak (void)
{
jmem_heap_stats_reset_peak ();
jmem_pools_stats_reset_peak ();
} /* jmem_stats_reset_peak */
/**
* Print memory usage statistics
*/
void
jmem_stats_print (void)
{
jmem_heap_stats_print ();
jmem_pools_stats_print ();
} /* jmem_stats_print */
#endif /* JMEM_STATS */

View file: jmem-allocator.h

@@ -0,0 +1,172 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Allocator interface
*/
#ifndef JMEM_ALLOCATOR_H
#define JMEM_ALLOCATOR_H
#include "jrt.h"
#include "jmem-config.h"
#include "jmem-heap.h"
#include "jmem-poolman.h"
/** \addtogroup mem Memory allocation
* @{
*/
/**
* Compressed pointer representations
*
* 16 bit representation:
* The jmem_cpointer_t is defined as uint16_t
* and it can contain any sixteen bit value.
*
* 32 bit representation:
* The jmem_cpointer_t is defined as uint32_t.
* The lower JMEM_ALIGNMENT_LOG bits must be zero.
* The other bits can have any value.
*
* The 16 bit representation always encodes an offset from
* a heap base. The 32 bit representation currently encodes
* raw 32 bit JMEM_ALIGNMENT aligned pointers on 32 bit systems.
* This can be extended to encode a 32 bit offset from a heap
* base on 64 bit systems in the future. There are no plans
* to support more than 4G address space for JerryScript.
*/
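/* Worked example (editor's note): with JMEM_ALIGNMENT_LOG == 3 the 16 bit
* form stores (pointer - heap_base) >> 3, so it can address at most
* (UINT16_MAX + 1) << 3 == 512 KB of heap. The 32 bit form stores the raw
* 8-byte-aligned pointer itself, so only its low 3 bits must be zero. */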
/**
* Compressed pointer
*/
#ifdef JERRY_CPOINTER_32_BIT
typedef uint32_t jmem_cpointer_t;
#else /* !JERRY_CPOINTER_32_BIT */
typedef uint16_t jmem_cpointer_t;
#endif /* JERRY_CPOINTER_32_BIT */
/**
* Width of compressed memory pointer
*/
#ifdef JERRY_CPOINTER_32_BIT
#define JMEM_CP_WIDTH 32
#else /* !JERRY_CPOINTER_32_BIT */
#define JMEM_CP_WIDTH 16
#endif /* JERRY_CPOINTER_32_BIT */
/**
* Representation of NULL value for compressed pointers
*/
#define JMEM_CP_NULL ((jmem_cpointer_t) 0)
/**
* Required alignment for allocated units/blocks
*/
#define JMEM_ALIGNMENT (1u << JMEM_ALIGNMENT_LOG)
/**
* Severity of a 'try give memory back' request
*
* The requests are posted sequentially, from low to high
* severity, until enough memory is freed.
*
* If a high severity request still does not free enough memory,
* the engine is shut down with ERR_OUT_OF_MEMORY.
*/
typedef enum
{
JMEM_FREE_UNUSED_MEMORY_SEVERITY_LOW, /* 'low' severity */
JMEM_FREE_UNUSED_MEMORY_SEVERITY_HIGH, /* 'high' severity */
} jmem_free_unused_memory_severity_t;
/**
* Free region node
*/
typedef struct
{
uint32_t next_offset; /* Offset of next region in list */
uint32_t size; /* Size of region */
} jmem_heap_free_t;
/**
* Node for free chunk list
*/
typedef struct jmem_pools_chunk_t
{
struct jmem_pools_chunk_t *next_p; /* pointer to next pool chunk */
} jmem_pools_chunk_t;
/**
* A free memory callback routine type.
*/
typedef void (*jmem_free_unused_memory_callback_t) (jmem_free_unused_memory_severity_t, size_t, bool);
/**
* Get value of pointer from specified non-null compressed pointer value
*/
#define JMEM_CP_GET_NON_NULL_POINTER(type, cp_value) \
((type *) (jmem_decompress_pointer (cp_value)))
/**
* Get value of pointer from specified compressed pointer value
*/
#define JMEM_CP_GET_POINTER(type, cp_value) \
(((unlikely ((cp_value) == JMEM_CP_NULL)) ? NULL : JMEM_CP_GET_NON_NULL_POINTER (type, cp_value)))
/**
* Set value of non-null compressed pointer so that it will correspond
* to specified non_compressed_pointer
*/
#define JMEM_CP_SET_NON_NULL_POINTER(cp_value, non_compressed_pointer) \
(cp_value) = jmem_compress_pointer (non_compressed_pointer)
/**
* Set value of compressed pointer so that it will correspond
* to specified non_compressed_pointer
*/
#define JMEM_CP_SET_POINTER(cp_value, non_compressed_pointer) \
do \
{ \
void *ptr_value = (void *) non_compressed_pointer; \
\
if (unlikely ((ptr_value) == NULL)) \
{ \
(cp_value) = JMEM_CP_NULL; \
} \
else \
{ \
JMEM_CP_SET_NON_NULL_POINTER (cp_value, ptr_value); \
} \
} while (false)
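/* Usage sketch (editor's illustration; 'node_t', 'node_cp' and 'node_p' are
* hypothetical names):
*
*   jmem_cpointer_t node_cp;
*   node_t *node_p = (node_t *) jmem_heap_alloc_block (sizeof (node_t));
*
*   JMEM_CP_SET_POINTER (node_cp, node_p);                  // NULL-safe store
*   node_t *same_p = JMEM_CP_GET_POINTER (node_t, node_cp); // NULL-safe load
*   JERRY_ASSERT (same_p == node_p);
*/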
extern void jmem_init (void);
extern void jmem_finalize (void);
extern jmem_cpointer_t jmem_compress_pointer (const void *);
extern void *jmem_decompress_pointer (uintptr_t);
extern void jmem_register_free_unused_memory_callback (jmem_free_unused_memory_callback_t);
extern void jmem_unregister_free_unused_memory_callback (jmem_free_unused_memory_callback_t);
#ifdef JMEM_STATS
extern void jmem_stats_reset_peak (void);
extern void jmem_stats_print (void);
#endif /* JMEM_STATS */
/**
* @}
*/
#endif /* !JMEM_ALLOCATOR_H */

View file: jmem-config.h

@@ -0,0 +1,31 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef JMEM_CONFIG_H
#define JMEM_CONFIG_H
#include "config.h"
/**
* Size of heap
*/
#define JMEM_HEAP_SIZE ((size_t) (CONFIG_MEM_HEAP_AREA_SIZE))
/**
* Logarithm of required alignment for allocated units/blocks
*/
#define JMEM_ALIGNMENT_LOG 3
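/* Editor's note: JMEM_ALIGNMENT (defined in jmem-allocator.h) is therefore
* 1u << 3 == 8 bytes, so every request is rounded up to a multiple of 8;
* e.g. a 13 byte allocation occupies 16 bytes on the heap. */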
#endif /* !JMEM_CONFIG_H */

View file: jmem-heap.c

@@ -0,0 +1,716 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
* Copyright 2016 University of Szeged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Heap implementation
*/
#include "jcontext.h"
#include "jmem-allocator.h"
#include "jmem-config.h"
#include "jmem-heap.h"
#include "jrt-bit-fields.h"
#include "jrt-libc-includes.h"
#define JMEM_ALLOCATOR_INTERNAL
#include "jmem-allocator-internal.h"
/** \addtogroup mem Memory allocation
* @{
*
* \addtogroup heap Heap
* @{
*/
/*
* Valgrind-related options and headers
*/
#ifdef JERRY_VALGRIND
# include "memcheck.h"
# define VALGRIND_NOACCESS_SPACE(p, s) VALGRIND_MAKE_MEM_NOACCESS((p), (s))
# define VALGRIND_UNDEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_UNDEFINED((p), (s))
# define VALGRIND_DEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_DEFINED((p), (s))
#else /* !JERRY_VALGRIND */
# define VALGRIND_NOACCESS_SPACE(p, s)
# define VALGRIND_UNDEFINED_SPACE(p, s)
# define VALGRIND_DEFINED_SPACE(p, s)
#endif /* JERRY_VALGRIND */
#ifdef JERRY_VALGRIND_FREYA
# include "memcheck.h"
/**
* Called by pool manager before a heap allocation or free.
*/
void jmem_heap_valgrind_freya_mempool_request (void)
{
JERRY_CONTEXT (valgrind_freya_mempool_request) = true;
} /* jmem_heap_valgrind_freya_mempool_request */
# define VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST \
bool mempool_request = JERRY_CONTEXT (valgrind_freya_mempool_request); \
JERRY_CONTEXT (valgrind_freya_mempool_request) = false
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s) \
if (!mempool_request) \
{ \
VALGRIND_MALLOCLIKE_BLOCK((p), (s), 0, 0); \
}
# define VALGRIND_FREYA_FREELIKE_SPACE(p) \
if (!mempool_request) \
{ \
VALGRIND_FREELIKE_BLOCK((p), 0); \
}
#else /* !JERRY_VALGRIND_FREYA */
# define VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s)
# define VALGRIND_FREYA_FREELIKE_SPACE(p)
#endif /* JERRY_VALGRIND_FREYA */
/**
* End of list marker.
*/
#define JMEM_HEAP_END_OF_LIST ((uint32_t) 0xffffffff)
#if UINTPTR_MAX > UINT32_MAX
#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) ((uint8_t *) (p) - JERRY_HEAP_CONTEXT (area)))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (JERRY_HEAP_CONTEXT (area) + (u)))
#else /* UINTPTR_MAX <= UINT32_MAX */
/* In this case we simply store the pointer, since it fits anyway. */
#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) (p))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (u))
#endif /* UINTPTR_MAX > UINT32_MAX */
/**
* Get end of region
*/
static inline jmem_heap_free_t * __attr_always_inline___ __attr_pure___
jmem_heap_get_region_end (jmem_heap_free_t *curr_p) /**< current region */
{
return (jmem_heap_free_t *)((uint8_t *) curr_p + curr_p->size);
} /* jmem_heap_get_region_end */
/**
* Check size of heap is corresponding to configuration
*/
JERRY_STATIC_ASSERT (sizeof (jmem_heap_t) <= JMEM_HEAP_SIZE,
size_of_mem_heap_must_be_less_than_or_equal_to_MEM_HEAP_SIZE);
#ifdef JMEM_STATS
static void jmem_heap_stat_init (void);
static void jmem_heap_stat_alloc (size_t num);
static void jmem_heap_stat_free (size_t num);
static void jmem_heap_stat_skip (void);
static void jmem_heap_stat_nonskip (void);
static void jmem_heap_stat_alloc_iter (void);
static void jmem_heap_stat_free_iter (void);
# define JMEM_HEAP_STAT_INIT() jmem_heap_stat_init ()
# define JMEM_HEAP_STAT_ALLOC(v1) jmem_heap_stat_alloc (v1)
# define JMEM_HEAP_STAT_FREE(v1) jmem_heap_stat_free (v1)
# define JMEM_HEAP_STAT_SKIP() jmem_heap_stat_skip ()
# define JMEM_HEAP_STAT_NONSKIP() jmem_heap_stat_nonskip ()
# define JMEM_HEAP_STAT_ALLOC_ITER() jmem_heap_stat_alloc_iter ()
# define JMEM_HEAP_STAT_FREE_ITER() jmem_heap_stat_free_iter ()
#else /* !JMEM_STATS */
# define JMEM_HEAP_STAT_INIT()
# define JMEM_HEAP_STAT_ALLOC(v1)
# define JMEM_HEAP_STAT_FREE(v1)
# define JMEM_HEAP_STAT_SKIP()
# define JMEM_HEAP_STAT_NONSKIP()
# define JMEM_HEAP_STAT_ALLOC_ITER()
# define JMEM_HEAP_STAT_FREE_ITER()
#endif /* JMEM_STATS */
/**
* Startup initialization of heap
*/
void
jmem_heap_init (void)
{
#ifndef JERRY_CPOINTER_32_BIT
JERRY_STATIC_ASSERT (((UINT16_MAX + 1) << JMEM_ALIGNMENT_LOG) >= JMEM_HEAP_SIZE,
maximum_heap_size_for_16_bit_compressed_pointers_is_512K);
#endif /* !JERRY_CPOINTER_32_BIT */
JERRY_ASSERT ((uintptr_t) JERRY_HEAP_CONTEXT (area) % JMEM_ALIGNMENT == 0);
JERRY_CONTEXT (jmem_heap_limit) = CONFIG_MEM_HEAP_DESIRED_LIMIT;
jmem_heap_free_t *const region_p = (jmem_heap_free_t *) JERRY_HEAP_CONTEXT (area);
region_p->size = JMEM_HEAP_AREA_SIZE;
region_p->next_offset = JMEM_HEAP_END_OF_LIST;
JERRY_HEAP_CONTEXT (first).size = 0;
JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (region_p);
JERRY_CONTEXT (jmem_heap_list_skip_p) = &JERRY_HEAP_CONTEXT (first);
VALGRIND_NOACCESS_SPACE (JERRY_HEAP_CONTEXT (area), JMEM_HEAP_AREA_SIZE);
JMEM_HEAP_STAT_INIT ();
} /* jmem_heap_init */
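/* Editor's note (illustrative): after jmem_heap_init () the free list is a
* zero-sized sentinel followed by one region covering the whole area:
*
*   JERRY_HEAP_CONTEXT (first) { size = 0 }
*     -> region at area start { size = JMEM_HEAP_AREA_SIZE,
*                               next_offset = JMEM_HEAP_END_OF_LIST }
*
* and jmem_heap_list_skip_p points at the sentinel.
*/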
/**
* Finalize heap
*/
void jmem_heap_finalize (void)
{
JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) == 0);
VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_t));
} /* jmem_heap_finalize */
/**
* Allocation of memory region.
*
* See also:
* jmem_heap_alloc_block
*
* @return pointer to allocated memory block - if allocation is successful,
* NULL - if there is not enough memory.
*/
static __attr_hot___
void *jmem_heap_alloc_block_internal (const size_t size)
{
// Align size
const size_t required_size = ((size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT) * JMEM_ALIGNMENT;
jmem_heap_free_t *data_space_p = NULL;
VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
// Fast path for 8 byte chunks; the first region is guaranteed to be sufficient
if (required_size == JMEM_ALIGNMENT
&& likely (JERRY_HEAP_CONTEXT (first).next_offset != JMEM_HEAP_END_OF_LIST))
{
data_space_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
JERRY_ASSERT (jmem_is_heap_pointer (data_space_p));
VALGRIND_DEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));
JERRY_CONTEXT (jmem_heap_allocated_size) += JMEM_ALIGNMENT;
JMEM_HEAP_STAT_ALLOC_ITER ();
if (data_space_p->size == JMEM_ALIGNMENT)
{
JERRY_HEAP_CONTEXT (first).next_offset = data_space_p->next_offset;
}
else
{
JERRY_ASSERT (data_space_p->size > JMEM_ALIGNMENT);
jmem_heap_free_t *remaining_p;
remaining_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset) + 1;
VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
remaining_p->size = data_space_p->size - JMEM_ALIGNMENT;
remaining_p->next_offset = data_space_p->next_offset;
VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));
JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
}
VALGRIND_UNDEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));
if (unlikely (data_space_p == JERRY_CONTEXT (jmem_heap_list_skip_p)))
{
JERRY_CONTEXT (jmem_heap_list_skip_p) = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
}
}
// Slow path for larger regions
else
{
uint32_t current_offset = JERRY_HEAP_CONTEXT (first).next_offset;
jmem_heap_free_t *prev_p = &JERRY_HEAP_CONTEXT (first);
while (current_offset != JMEM_HEAP_END_OF_LIST)
{
jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (current_offset);
JERRY_ASSERT (jmem_is_heap_pointer (current_p));
VALGRIND_DEFINED_SPACE (current_p, sizeof (jmem_heap_free_t));
JMEM_HEAP_STAT_ALLOC_ITER ();
const uint32_t next_offset = current_p->next_offset;
JERRY_ASSERT (next_offset == JMEM_HEAP_END_OF_LIST
|| jmem_is_heap_pointer (JMEM_HEAP_GET_ADDR_FROM_OFFSET (next_offset)));
if (current_p->size >= required_size)
{
// Region is sufficiently big, store address
data_space_p = current_p;
JERRY_CONTEXT (jmem_heap_allocated_size) += required_size;
// Region was larger than necessary
if (current_p->size > required_size)
{
// Get address of remaining space
jmem_heap_free_t *const remaining_p = (jmem_heap_free_t *) ((uint8_t *) current_p + required_size);
// Update metadata
VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
remaining_p->size = current_p->size - (uint32_t) required_size;
remaining_p->next_offset = next_offset;
VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));
// Update list
VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
}
// Block is an exact fit
else
{
// Remove the region from the list
VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
prev_p->next_offset = next_offset;
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
}
JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;
// Found enough space
break;
}
VALGRIND_NOACCESS_SPACE (current_p, sizeof (jmem_heap_free_t));
// Next in list
prev_p = current_p;
current_offset = next_offset;
}
}
while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
{
JERRY_CONTEXT (jmem_heap_limit) += CONFIG_MEM_HEAP_DESIRED_LIMIT;
}
VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
if (unlikely (!data_space_p))
{
return NULL;
}
JERRY_ASSERT ((uintptr_t) data_space_p % JMEM_ALIGNMENT == 0);
VALGRIND_UNDEFINED_SPACE (data_space_p, size);
JMEM_HEAP_STAT_ALLOC (size);
return (void *) data_space_p;
} /* jmem_heap_alloc_block_internal */
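/* Editor's walk-through (illustrative numbers): requesting 20 bytes rounds
* up to required_size == 24. With a single 64 byte free region, the slow
* path hands out the region's start and links the remainder back in:
*
*   before: first -> { size = 64 } -> END_OF_LIST
*   after:  first -> { size = 40 } -> END_OF_LIST   (at region start + 24)
*
* and the 24 byte block at the old region start is returned to the caller.
*/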
/**
* Allocation of memory block, running 'try to give memory back' callbacks, if there is not enough memory.
*
* Note:
* if there is still not enough memory after running the callbacks
* - NULL will be returned if the parameter 'ret_null_on_error' is true
* - the engine will terminate with ERR_OUT_OF_MEMORY if 'ret_null_on_error' is false
*
* @return NULL, if the required memory size is 0
* also NULL, if 'ret_null_on_error' is true and the allocation fails because there is not enough memory
*/
static void *
jmem_heap_gc_and_alloc_block (const size_t size, /**< required memory size */
bool ret_null_on_error) /**< indicates whether return null or terminate
with ERR_OUT_OF_MEMORY on out of memory */
{
if (unlikely (size == 0))
{
return NULL;
}
VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST;
#ifdef JMEM_GC_BEFORE_EACH_ALLOC
jmem_run_free_unused_memory_callbacks (JMEM_FREE_UNUSED_MEMORY_SEVERITY_HIGH, size, !ret_null_on_error);
#endif /* JMEM_GC_BEFORE_EACH_ALLOC */
if (JERRY_CONTEXT (jmem_heap_allocated_size) + size >= JERRY_CONTEXT (jmem_heap_limit))
{
jmem_run_free_unused_memory_callbacks (JMEM_FREE_UNUSED_MEMORY_SEVERITY_LOW, size, !ret_null_on_error);
}
void *data_space_p = jmem_heap_alloc_block_internal (size);
if (likely (data_space_p != NULL))
{
VALGRIND_FREYA_MALLOCLIKE_SPACE (data_space_p, size);
return data_space_p;
}
for (jmem_free_unused_memory_severity_t severity = JMEM_FREE_UNUSED_MEMORY_SEVERITY_LOW;
severity <= JMEM_FREE_UNUSED_MEMORY_SEVERITY_HIGH;
severity = (jmem_free_unused_memory_severity_t) (severity + 1))
{
jmem_run_free_unused_memory_callbacks (severity, size, !ret_null_on_error);
data_space_p = jmem_heap_alloc_block_internal (size);
if (likely (data_space_p != NULL))
{
VALGRIND_FREYA_MALLOCLIKE_SPACE (data_space_p, size);
return data_space_p;
}
}
JERRY_ASSERT (data_space_p == NULL);
if (!ret_null_on_error)
{
jerry_fatal (ERR_OUT_OF_MEMORY);
}
return data_space_p;
} /* jmem_heap_gc_and_alloc_block */
/**
* Allocation of memory block, running 'try to give memory back' callbacks, if there is not enough memory.
*
* Note:
* If there is still not enough memory after running the callbacks, then the engine will be
* terminated with ERR_OUT_OF_MEMORY.
*
* @return NULL, if the required memory is 0
* pointer to allocated memory block, otherwise
*/
void * __attr_hot___ __attr_always_inline___
jmem_heap_alloc_block (const size_t size) /**< required memory size */
{
return jmem_heap_gc_and_alloc_block (size, false);
} /* jmem_heap_alloc_block */
/**
* Allocation of memory block, running 'try to give memory back' callbacks, if there is not enough memory.
*
* Note:
* If there is still not enough memory after running the callbacks, NULL will be returned.
*
* @return NULL, if the required memory size is 0
* also NULL, if the allocation has failed
* pointer to the allocated memory block, otherwise
*/
void * __attr_hot___ __attr_always_inline___
jmem_heap_alloc_block_null_on_error (const size_t size) /**< required memory size */
{
return jmem_heap_gc_and_alloc_block (size, true);
} /* jmem_heap_alloc_block_null_on_error */
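/* Usage sketch (editor's illustration, not part of the original import):
*
*   uint8_t *buf_p = (uint8_t *) jmem_heap_alloc_block (64);
*   // buf_p is never NULL: on OOM the engine terminates instead.
*
*   uint8_t *opt_p = (uint8_t *) jmem_heap_alloc_block_null_on_error (1024);
*   if (opt_p != NULL)
*   {
*     // ... use opt_p ...
*     jmem_heap_free_block (opt_p, 1024); // size must match the request
*   }
*   jmem_heap_free_block (buf_p, 64);
*/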
/**
* Free the memory block.
*/
void __attr_hot___
jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the block */
const size_t size) /**< size of allocated region */
{
VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST;
/* checking that ptr points to the heap */
JERRY_ASSERT (jmem_is_heap_pointer (ptr));
JERRY_ASSERT (size > 0);
JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
VALGRIND_FREYA_FREELIKE_SPACE (ptr);
VALGRIND_NOACCESS_SPACE (ptr, size);
JMEM_HEAP_STAT_FREE_ITER ();
jmem_heap_free_t *block_p = (jmem_heap_free_t *) ptr;
jmem_heap_free_t *prev_p;
jmem_heap_free_t *next_p;
VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
if (block_p > JERRY_CONTEXT (jmem_heap_list_skip_p))
{
prev_p = JERRY_CONTEXT (jmem_heap_list_skip_p);
JMEM_HEAP_STAT_SKIP ();
}
else
{
prev_p = &JERRY_HEAP_CONTEXT (first);
JMEM_HEAP_STAT_NONSKIP ();
}
JERRY_ASSERT (jmem_is_heap_pointer (block_p));
const uint32_t block_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (block_p);
VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
// Find position of region in the list
while (prev_p->next_offset < block_offset)
{
jmem_heap_free_t *const next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
JERRY_ASSERT (jmem_is_heap_pointer (next_p));
VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
prev_p = next_p;
JMEM_HEAP_STAT_FREE_ITER ();
}
next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
/* Realign size */
const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
VALGRIND_DEFINED_SPACE (block_p, sizeof (jmem_heap_free_t));
VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
// Update prev
if (jmem_heap_get_region_end (prev_p) == block_p)
{
// Can be merged
prev_p->size += (uint32_t) aligned_size;
VALGRIND_NOACCESS_SPACE (block_p, sizeof (jmem_heap_free_t));
block_p = prev_p;
}
else
{
block_p->size = (uint32_t) aligned_size;
prev_p->next_offset = block_offset;
}
VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
// Update next
if (jmem_heap_get_region_end (block_p) == next_p)
{
if (unlikely (next_p == JERRY_CONTEXT (jmem_heap_list_skip_p)))
{
JERRY_CONTEXT (jmem_heap_list_skip_p) = block_p;
}
// Can be merged
block_p->size += next_p->size;
block_p->next_offset = next_p->next_offset;
}
else
{
block_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p);
}
JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
VALGRIND_NOACCESS_SPACE (block_p, size);
VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t));
JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) > 0);
JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_size;
while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_MEM_HEAP_DESIRED_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
{
JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_MEM_HEAP_DESIRED_LIMIT;
}
VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
JMEM_HEAP_STAT_FREE (size);
} /* jmem_heap_free_block */
/**
* Check whether the pointer points to the heap
*
* Note:
* the routine should be used only for assertion checks
*
* @return true - if pointer points to the heap,
* false - otherwise
*/
bool
jmem_is_heap_pointer (const void *pointer) /**< pointer */
{
return ((uint8_t *) pointer >= JERRY_HEAP_CONTEXT (area)
&& (uint8_t *) pointer <= (JERRY_HEAP_CONTEXT (area) + JMEM_HEAP_AREA_SIZE));
} /* jmem_is_heap_pointer */
#ifdef JMEM_STATS
/**
* Calculate the size of the largest contiguous free block on the heap
*/
static void
jmem_heap_stats_calculate_largest_free_block (void)
{
jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
size_t *largest_free_block_bytes_ptr =
&(JERRY_CONTEXT (jmem_heap_stats)).largest_free_block_bytes;
*largest_free_block_bytes_ptr = 0;
while (jmem_is_heap_pointer (current_p))
{
const uint32_t next_offset = current_p->next_offset;
if (current_p->size > *largest_free_block_bytes_ptr)
{
*largest_free_block_bytes_ptr = current_p->size;
}
// Next in list
current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (next_offset);
}
} /* jmem_heap_stats_calculate_largest_free_block */
/**
* Get heap memory usage statistics
*/
void
jmem_heap_get_stats (jmem_heap_stats_t *out_heap_stats_p) /**< [out] heap stats */
{
JERRY_ASSERT (out_heap_stats_p != NULL);
jmem_heap_stats_calculate_largest_free_block ();
*out_heap_stats_p = JERRY_CONTEXT (jmem_heap_stats);
} /* jmem_heap_get_stats */
/**
* Reset peak values in memory usage statistics
*/
void
jmem_heap_stats_reset_peak (void)
{
JERRY_CONTEXT (jmem_heap_stats).peak_allocated_bytes = JERRY_CONTEXT (jmem_heap_stats).allocated_bytes;
JERRY_CONTEXT (jmem_heap_stats).peak_waste_bytes = JERRY_CONTEXT (jmem_heap_stats).waste_bytes;
} /* jmem_heap_stats_reset_peak */
/**
* Print heap memory usage statistics
*/
void
jmem_heap_stats_print (void)
{
jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);
JERRY_DEBUG_MSG ("Heap stats:\n"
" Heap size = %zu bytes\n"
" Allocated = %zu bytes\n"
" Waste = %zu bytes\n"
" Peak allocated = %zu bytes\n"
" Peak waste = %zu bytes\n"
" Skip-ahead ratio = %zu.%04zu\n"
" Average alloc iteration = %zu.%04zu\n"
" Average free iteration = %zu.%04zu\n"
"\n",
heap_stats->size,
heap_stats->allocated_bytes,
heap_stats->waste_bytes,
heap_stats->peak_allocated_bytes,
heap_stats->peak_waste_bytes,
heap_stats->skip_count / heap_stats->nonskip_count,
heap_stats->skip_count % heap_stats->nonskip_count * 10000 / heap_stats->nonskip_count,
heap_stats->alloc_iter_count / heap_stats->alloc_count,
heap_stats->alloc_iter_count % heap_stats->alloc_count * 10000 / heap_stats->alloc_count,
heap_stats->free_iter_count / heap_stats->free_count,
heap_stats->free_iter_count % heap_stats->free_count * 10000 / heap_stats->free_count);
} /* jmem_heap_stats_print */
/**
* Initialize heap memory usage statistics account structure
*/
static void
jmem_heap_stat_init ()
{
JERRY_CONTEXT (jmem_heap_stats).size = JMEM_HEAP_AREA_SIZE;
} /* jmem_heap_stat_init */
/**
* Account allocation
*/
static void
jmem_heap_stat_alloc (size_t size) /**< Size of allocated block */
{
const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
const size_t waste_bytes = aligned_size - size;
jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);
heap_stats->allocated_bytes += aligned_size;
heap_stats->waste_bytes += waste_bytes;
heap_stats->alloc_count++;
if (heap_stats->allocated_bytes > heap_stats->peak_allocated_bytes)
{
heap_stats->peak_allocated_bytes = heap_stats->allocated_bytes;
}
if (heap_stats->allocated_bytes > heap_stats->global_peak_allocated_bytes)
{
heap_stats->global_peak_allocated_bytes = heap_stats->allocated_bytes;
}
if (heap_stats->waste_bytes > heap_stats->peak_waste_bytes)
{
heap_stats->peak_waste_bytes = heap_stats->waste_bytes;
}
if (heap_stats->waste_bytes > heap_stats->global_peak_waste_bytes)
{
heap_stats->global_peak_waste_bytes = heap_stats->waste_bytes;
}
} /* jmem_heap_stat_alloc */
/**
* Account freeing
*/
static void
jmem_heap_stat_free (size_t size) /**< Size of freed block */
{
const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
const size_t waste_bytes = aligned_size - size;
jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);
heap_stats->free_count++;
heap_stats->allocated_bytes -= aligned_size;
heap_stats->waste_bytes -= waste_bytes;
} /* jmem_heap_stat_free */
/**
* Counts number of skip-aheads during insertion of free block
*/
static void
jmem_heap_stat_skip ()
{
JERRY_CONTEXT (jmem_heap_stats).skip_count++;
} /* jmem_heap_stat_skip */
/**
* Counts number of times we could not skip ahead during free block insertion
*/
static void
jmem_heap_stat_nonskip ()
{
JERRY_CONTEXT (jmem_heap_stats).nonskip_count++;
} /* jmem_heap_stat_nonskip */
/**
* Count number of iterations required for allocations
*/
static void
jmem_heap_stat_alloc_iter ()
{
JERRY_CONTEXT (jmem_heap_stats).alloc_iter_count++;
} /* jmem_heap_stat_alloc_iter */
/**
* Counts number of iterations required for inserting free blocks
*/
static void
jmem_heap_stat_free_iter ()
{
JERRY_CONTEXT (jmem_heap_stats).free_iter_count++;
} /* jmem_heap_stat_free_iter */
#endif /* JMEM_STATS */
/**
* @}
* @}
*/

View file: jmem-heap.h

@@ -0,0 +1,124 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
* Copyright 2016 University of Szeged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Heap allocator interface
*/
#ifndef JMEM_HEAP_H
#define JMEM_HEAP_H
#include "jrt.h"
/** \addtogroup mem Memory allocation
* @{
*
* \addtogroup heap Heap
* @{
*/
extern void jmem_heap_init (void);
extern void jmem_heap_finalize (void);
extern void *jmem_heap_alloc_block (const size_t);
extern void *jmem_heap_alloc_block_null_on_error (const size_t);
extern void jmem_heap_free_block (void *, const size_t);
extern bool jmem_is_heap_pointer (const void *);
#ifdef JMEM_STATS
/**
* Heap memory usage statistics
*/
typedef struct jmem_heap_stats_t
{
size_t size; /**< size */
size_t allocated_bytes; /**< currently allocated bytes */
size_t peak_allocated_bytes; /**< peak allocated bytes */
size_t global_peak_allocated_bytes; /**< non-resettable peak allocated bytes */
size_t waste_bytes; /**< bytes waste due to blocks filled partially
and due to block headers */
size_t peak_waste_bytes; /**< peak bytes waste */
size_t global_peak_waste_bytes; /**< non-resettable peak bytes waste */
size_t largest_free_block_bytes; /**< largest, contiguous block of unallocated memory,
updated when calling jmem_heap_get_stats() */
size_t skip_count; /**< number of skip-aheads during free list search */
size_t nonskip_count; /**< number of times the skip pointer could not be used */
size_t alloc_count; /**< number of allocations */
size_t alloc_iter_count; /**< number of iterations spent searching during allocation */
size_t free_count; /**< number of frees */
size_t free_iter_count; /**< number of iterations spent searching during free */
} jmem_heap_stats_t;
extern void jmem_heap_get_stats (jmem_heap_stats_t *);
extern void jmem_heap_stats_reset_peak (void);
extern void jmem_heap_stats_print (void);
#endif /* JMEM_STATS */
#ifdef JERRY_VALGRIND_FREYA
#ifdef JERRY_VALGRIND
#error Valgrind and valgrind-freya modes are not compatible.
#endif /* JERRY_VALGRIND */
extern void jmem_heap_valgrind_freya_mempool_request (void);
#define JMEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST() jmem_heap_valgrind_freya_mempool_request ()
#else /* !JERRY_VALGRIND_FREYA */
#define JMEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST()
#endif /* JERRY_VALGRIND_FREYA */
/**
* Define a local array variable and allocate memory for the array on the heap.
*
* If requested number of elements is zero, assign NULL to the variable.
*
* Warning:
* if there is not enough memory on the heap, the engine is shut down with ERR_OUT_OF_MEMORY.
*/
#define JMEM_DEFINE_LOCAL_ARRAY(var_name, number, type) \
{ \
size_t var_name ## ___size = (size_t) (number) * sizeof (type); \
type *var_name = (type *) (jmem_heap_alloc_block (var_name ## ___size));
/**
* Free the previously defined local array variable, freeing corresponding block on the heap,
* if it was allocated (i.e. if the array's size was non-zero).
*/
#define JMEM_FINALIZE_LOCAL_ARRAY(var_name) \
if (var_name != NULL) \
{ \
JERRY_ASSERT (var_name ## ___size != 0); \
\
jmem_heap_free_block (var_name, var_name ## ___size); \
} \
else \
{ \
JERRY_ASSERT (var_name ## ___size == 0); \
} \
}
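/* Usage sketch (editor's illustration; 'length' is a hypothetical variable).
* The two macros open and close a scope, so they must be paired in the same
* block:
*
*   JMEM_DEFINE_LOCAL_ARRAY (utf8_buf, length, uint8_t);
*
*   // ... use utf8_buf[0 .. length - 1] ...
*
*   JMEM_FINALIZE_LOCAL_ARRAY (utf8_buf);
*/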
/**
* @}
* @}
*/
#endif /* !JMEM_HEAP_H */

View file: jmem-poolman.c

@@ -0,0 +1,343 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
* Copyright 2016 University of Szeged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Memory pool manager implementation
*/
#include "jcontext.h"
#include "jmem-allocator.h"
#include "jmem-heap.h"
#include "jmem-poolman.h"
#include "jrt-libc-includes.h"
#define JMEM_ALLOCATOR_INTERNAL
#include "jmem-allocator-internal.h"
/** \addtogroup mem Memory allocation
* @{
*
* \addtogroup poolman Memory pool manager
* @{
*/
#ifdef JMEM_STATS
static void jmem_pools_stat_free_pool (void);
static void jmem_pools_stat_new_alloc (void);
static void jmem_pools_stat_reuse (void);
static void jmem_pools_stat_dealloc (void);
# define JMEM_POOLS_STAT_FREE_POOL() jmem_pools_stat_free_pool ()
# define JMEM_POOLS_STAT_NEW_ALLOC() jmem_pools_stat_new_alloc ()
# define JMEM_POOLS_STAT_REUSE() jmem_pools_stat_reuse ()
# define JMEM_POOLS_STAT_DEALLOC() jmem_pools_stat_dealloc ()
#else /* !JMEM_STATS */
# define JMEM_POOLS_STAT_FREE_POOL()
# define JMEM_POOLS_STAT_NEW_ALLOC()
# define JMEM_POOLS_STAT_REUSE()
# define JMEM_POOLS_STAT_DEALLOC()
#endif /* JMEM_STATS */
/*
* Valgrind-related options and headers
*/
#ifdef JERRY_VALGRIND
# include "memcheck.h"
# define VALGRIND_NOACCESS_SPACE(p, s) VALGRIND_MAKE_MEM_NOACCESS((p), (s))
# define VALGRIND_UNDEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_UNDEFINED((p), (s))
# define VALGRIND_DEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_DEFINED((p), (s))
#else /* !JERRY_VALGRIND */
# define VALGRIND_NOACCESS_SPACE(p, s)
# define VALGRIND_UNDEFINED_SPACE(p, s)
# define VALGRIND_DEFINED_SPACE(p, s)
#endif /* JERRY_VALGRIND */
#ifdef JERRY_VALGRIND_FREYA
# include "memcheck.h"
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s) VALGRIND_MALLOCLIKE_BLOCK((p), (s), 0, 0)
# define VALGRIND_FREYA_FREELIKE_SPACE(p) VALGRIND_FREELIKE_BLOCK((p), 0)
#else /* !JERRY_VALGRIND_FREYA */
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s)
# define VALGRIND_FREYA_FREELIKE_SPACE(p)
#endif /* JERRY_VALGRIND_FREYA */
/**
* Finalize pool manager
*/
void
jmem_pools_finalize (void)
{
jmem_pools_collect_empty ();
JERRY_ASSERT (JERRY_CONTEXT (jmem_free_8_byte_chunk_p) == NULL);
#ifdef JERRY_CPOINTER_32_BIT
JERRY_ASSERT (JERRY_CONTEXT (jmem_free_16_byte_chunk_p) == NULL);
#endif /* JERRY_CPOINTER_32_BIT */
} /* jmem_pools_finalize */
/**
* Allocate a chunk of specified size
*
* @return pointer to allocated chunk, if allocation was successful,
* or NULL - if not enough memory.
*/
inline void * __attr_hot___ __attr_always_inline___
jmem_pools_alloc (size_t size) /**< size of the chunk */
{
#ifdef JMEM_GC_BEFORE_EACH_ALLOC
/* Match the three-argument signature declared in jmem-allocator-internal.h;
* pool chunks are backed by jmem_heap_alloc_block, which is fatal on OOM. */
jmem_run_free_unused_memory_callbacks (JMEM_FREE_UNUSED_MEMORY_SEVERITY_HIGH, size, true);
#endif /* JMEM_GC_BEFORE_EACH_ALLOC */
if (size <= 8)
{
if (JERRY_CONTEXT (jmem_free_8_byte_chunk_p) != NULL)
{
const jmem_pools_chunk_t *const chunk_p = JERRY_CONTEXT (jmem_free_8_byte_chunk_p);
JMEM_POOLS_STAT_REUSE ();
VALGRIND_DEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
JERRY_CONTEXT (jmem_free_8_byte_chunk_p) = chunk_p->next_p;
VALGRIND_UNDEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
return (void *) chunk_p;
}
else
{
JMEM_POOLS_STAT_NEW_ALLOC ();
return (void *) jmem_heap_alloc_block (8);
}
}
#ifdef JERRY_CPOINTER_32_BIT
JERRY_ASSERT (size <= 16);
if (JERRY_CONTEXT (jmem_free_16_byte_chunk_p) != NULL)
{
const jmem_pools_chunk_t *const chunk_p = JERRY_CONTEXT (jmem_free_16_byte_chunk_p);
JMEM_POOLS_STAT_REUSE ();
VALGRIND_DEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
JERRY_CONTEXT (jmem_free_16_byte_chunk_p) = chunk_p->next_p;
VALGRIND_UNDEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
return (void *) chunk_p;
}
else
{
JMEM_POOLS_STAT_NEW_ALLOC ();
return (void *) jmem_heap_alloc_block (16);
}
#else /* !JERRY_CPOINTER_32_BIT */
JERRY_UNREACHABLE ();
return NULL;
#endif /* JERRY_CPOINTER_32_BIT */
} /* jmem_pools_alloc */
/**
* Free the chunk
*/
inline void __attr_hot___ __attr_always_inline___
jmem_pools_free (void *chunk_p, /**< pointer to the chunk */
size_t size) /**< size of the chunk */
{
JERRY_ASSERT (chunk_p != NULL);
jmem_pools_chunk_t *const chunk_to_free_p = (jmem_pools_chunk_t *) chunk_p;
VALGRIND_DEFINED_SPACE (chunk_to_free_p, size);
if (size <= 8)
{
chunk_to_free_p->next_p = JERRY_CONTEXT (jmem_free_8_byte_chunk_p);
JERRY_CONTEXT (jmem_free_8_byte_chunk_p) = chunk_to_free_p;
}
else
{
#ifdef JERRY_CPOINTER_32_BIT
JERRY_ASSERT (size <= 16);
chunk_to_free_p->next_p = JERRY_CONTEXT (jmem_free_16_byte_chunk_p);
JERRY_CONTEXT (jmem_free_16_byte_chunk_p) = chunk_to_free_p;
#else /* !JERRY_CPOINTER_32_BIT */
JERRY_UNREACHABLE ();
#endif /* JERRY_CPOINTER_32_BIT */
}
VALGRIND_NOACCESS_SPACE (chunk_to_free_p, size);
JMEM_POOLS_STAT_FREE_POOL ();
} /* jmem_pools_free */
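/* Usage sketch (editor's illustration): chunks are fixed-size and the caller
* passes the same size back on free; 16 byte chunks exist only with 32 bit
* compressed pointers:
*
*   void *chunk_p = jmem_pools_alloc (8);
*   // ... use the 8 byte chunk ...
*   jmem_pools_free (chunk_p, 8);
*/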
/**
* Collect empty pool chunks
*/
void
jmem_pools_collect_empty ()
{
jmem_pools_chunk_t *chunk_p = JERRY_CONTEXT (jmem_free_8_byte_chunk_p);
JERRY_CONTEXT (jmem_free_8_byte_chunk_p) = NULL;
while (chunk_p)
{
VALGRIND_DEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
jmem_pools_chunk_t *const next_p = chunk_p->next_p;
VALGRIND_NOACCESS_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
jmem_heap_free_block (chunk_p, 8);
JMEM_POOLS_STAT_DEALLOC ();
chunk_p = next_p;
}
#ifdef JERRY_CPOINTER_32_BIT
chunk_p = JERRY_CONTEXT (jmem_free_16_byte_chunk_p);
JERRY_CONTEXT (jmem_free_16_byte_chunk_p) = NULL;
while (chunk_p)
{
VALGRIND_DEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
jmem_pools_chunk_t *const next_p = chunk_p->next_p;
VALGRIND_NOACCESS_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
jmem_heap_free_block (chunk_p, 16);
JMEM_POOLS_STAT_DEALLOC ();
chunk_p = next_p;
}
#endif /* JERRY_CPOINTER_32_BIT */
} /* jmem_pools_collect_empty */
#ifdef JMEM_STATS
/**
* Get pools memory usage statistics
*/
void
jmem_pools_get_stats (jmem_pools_stats_t *out_pools_stats_p) /**< [out] pools' stats */
{
JERRY_ASSERT (out_pools_stats_p != NULL);
*out_pools_stats_p = JERRY_CONTEXT (jmem_pools_stats);
} /* jmem_pools_get_stats */
/**
* Reset peak values in memory usage statistics
*/
void
jmem_pools_stats_reset_peak (void)
{
JERRY_CONTEXT (jmem_pools_stats).peak_pools_count = JERRY_CONTEXT (jmem_pools_stats).pools_count;
} /* jmem_pools_stats_reset_peak */
/**
* Print pools memory usage statistics
*/
void
jmem_pools_stats_print (void)
{
jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats);
JERRY_DEBUG_MSG ("Pools stats:\n"
" Pool chunks: %zu\n"
" Peak pool chunks: %zu\n"
" Free chunks: %zu\n"
" Pool reuse ratio: %zu.%04zu\n",
pools_stats->pools_count,
pools_stats->peak_pools_count,
pools_stats->free_chunks,
pools_stats->reused_count / pools_stats->new_alloc_count,
pools_stats->reused_count % pools_stats->new_alloc_count * 10000 / pools_stats->new_alloc_count);
} /* jmem_pools_stats_print */
/**
* Account for allocation of new pool chunk
*/
static void
jmem_pools_stat_new_alloc (void)
{
jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats);
pools_stats->pools_count++;
pools_stats->new_alloc_count++;
if (pools_stats->pools_count > pools_stats->peak_pools_count)
{
pools_stats->peak_pools_count = pools_stats->pools_count;
}
if (pools_stats->pools_count > pools_stats->global_peak_pools_count)
{
pools_stats->global_peak_pools_count = pools_stats->pools_count;
}
} /* jmem_pools_stat_new_alloc */
/**
* Account for reuse of pool chunk
*/
static void
jmem_pools_stat_reuse (void)
{
jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats);
pools_stats->pools_count++;
pools_stats->free_chunks--;
pools_stats->reused_count++;
if (pools_stats->pools_count > pools_stats->peak_pools_count)
{
pools_stats->peak_pools_count = pools_stats->pools_count;
}
if (pools_stats->pools_count > pools_stats->global_peak_pools_count)
{
pools_stats->global_peak_pools_count = pools_stats->pools_count;
}
} /* jmem_pools_stat_reuse */
/**
* Account for freeing a chunk
*/
static void
jmem_pools_stat_free_pool (void)
{
jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats);
JERRY_ASSERT (pools_stats->pools_count > 0);
pools_stats->pools_count--;
pools_stats->free_chunks++;
} /* jmem_pools_stat_free_pool */
/**
* Account for deallocating a chunk back to the heap
*/
static void
jmem_pools_stat_dealloc (void)
{
JERRY_CONTEXT (jmem_pools_stats).free_chunks--;
} /* jmem_pools_stat_dealloc */
#endif /* JMEM_STATS */
/**
* @}
* @}
*/

View file: jmem-poolman.h

@@ -0,0 +1,72 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
* Copyright 2016 University of Szeged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Pool manager interface
*/
#ifndef JMEM_POOLMAN_H
#define JMEM_POOLMAN_H
#include "jrt.h"
/** \addtogroup mem Memory allocation
* @{
*
* \addtogroup poolman Memory pool manager
* @{
*/
extern void jmem_pools_finalize (void);
extern void *jmem_pools_alloc (size_t);
extern void jmem_pools_free (void *, size_t);
extern void jmem_pools_collect_empty (void);
#ifdef JMEM_STATS
/**
* Pools' memory usage statistics
*/
typedef struct
{
/** pools' count */
size_t pools_count;
/** peak pools' count */
size_t peak_pools_count;
/** non-resettable peak pools' count */
size_t global_peak_pools_count;
/** free chunks count */
size_t free_chunks;
/** number of newly allocated pool chunks */
size_t new_alloc_count;
/** number of reused pool chunks */
size_t reused_count;
} jmem_pools_stats_t;
extern void jmem_pools_get_stats (jmem_pools_stats_t *);
extern void jmem_pools_stats_reset_peak (void);
extern void jmem_pools_stats_print (void);
#endif /* JMEM_STATS */
/**
* @}
* @}
*/
#endif /* !JMEM_POOLMAN_H */