Compare commits

3 Commits

- 17c897976c
- 7883c7749f
- 807a9a3e17
```diff
@@ -14,6 +14,11 @@
 #    if !defined(SYS_write) && defined(__NR_write)
 #      define SYS_write __NR_write
 #    endif
+#    if defined(SYS_open) && defined(__aarch64__)
+      /* Android headers may define SYS_open to __NR_open even though
+       * __NR_open may not exist on AArch64 (superseded by __NR_openat). */
+#      undef SYS_open
+#    endif
 #    include <sys/uio.h>
 #  endif
 #  include <pthread.h>
```
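On AArch64 Linux the legacy `open` syscall is generally not wired up, so `__NR_open`/`SYS_open` may be absent (or, with some Android headers, misleadingly defined), and `openat` with `AT_FDCWD` is the usual replacement; the later hunks in this comparison apply exactly that fallback at the raw `syscall` call sites. A rough standalone sketch of the same pattern, not part of the diff (the helper name `open_readonly` is hypothetical):

```c
#include <fcntl.h>        /* AT_FDCWD, O_RDONLY, open() */
#include <unistd.h>       /* syscall() */
#include <sys/syscall.h>  /* SYS_open / SYS_openat, when present */

/* Hypothetical helper: prefer a raw syscall, fall back to libc open(). */
static int
open_readonly(const char *path) {
#if defined(SYS_open)
    return (int)syscall(SYS_open, path, O_RDONLY);
#elif defined(SYS_openat)
    return (int)syscall(SYS_openat, AT_FDCWD, path, O_RDONLY);
#else
    return open(path, O_RDONLY);
#endif
}
```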
src/arena.c (104 changed lines)

```diff
@@ -424,12 +424,15 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
 
 static bool
 arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
-    bool remove, bool zero)
+    bool remove, bool zero, bool commit)
 {
     arena_chunk_t *chunk;
     arena_chunk_map_misc_t *miscelm;
     size_t flag_dirty, flag_decommitted, run_ind, need_pages;
     size_t flag_unzeroed_mask;
+    bool committed;
 
+    assert(!zero || commit);
+
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
     miscelm = arena_run_to_miscelm(run);
@@ -439,9 +442,15 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
     need_pages = (size >> LG_PAGE);
     assert(need_pages > 0);
 
-    if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
-        run_ind << LG_PAGE, size, arena->ind))
-        return (true);
+    if (commit && flag_decommitted != 0) {
+        if (arena->chunk_hooks.commit(chunk, chunksize, run_ind <<
+            LG_PAGE, size, arena->ind)) {
+            return true;
+        }
+        committed = true;
+    } else {
+        committed = false;
+    }
 
     if (remove) {
         arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
```
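The rewritten branch above only invokes `arena->chunk_hooks.commit()` when the caller actually asked for committed pages (`commit` is true) and the run is currently decommitted; otherwise `committed` stays false and the decommitted state is preserved in the map bits further down. For context, a commit hook in jemalloc 4.x follows the documented `chunk_commit_t` shape: make `length` bytes at `offset` within the chunk usable and return false on success. A minimal sketch of such a hook, assuming that contract (the name `my_chunk_commit` and the `mprotect`-based strategy are illustrative, not the allocator's built-in implementation):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/*
 * Hypothetical user hook matching the documented chunk_commit_t signature:
 * make [chunk+offset, chunk+offset+length) readable/writable.
 * Returning false reports success, true reports failure.
 */
static bool
my_chunk_commit(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind) {
    (void)size; (void)arena_ind;
    void *addr = (void *)((uintptr_t)chunk + offset);
    return mprotect(addr, length, PROT_READ | PROT_WRITE) != 0;
}
```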
```diff
@@ -449,7 +458,7 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
     }
 
     if (zero) {
-        if (flag_decommitted != 0) {
+        if (committed) {
             /* The run is untouched, and therefore zeroed. */
             JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
                 *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
@@ -485,28 +494,34 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
      * Set the last element first, in case the run only contains one page
      * (i.e. both statements set the same element).
      */
-    flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+    flag_unzeroed_mask = (flag_dirty == 0 && !committed) ?
         CHUNK_MAP_UNZEROED : 0;
+    flag_decommitted = (!commit && flag_decommitted != 0) ?
+        CHUNK_MAP_DECOMMITTED : 0;
     arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
         (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
-        run_ind+need_pages-1)));
+        run_ind+need_pages-1)) | flag_decommitted);
     arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
-        (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
+        (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)) |
+        flag_decommitted);
     return (false);
 }
 
 static bool
-arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
+arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero,
+    bool commit)
 {
 
-    return (arena_run_split_large_helper(arena, run, size, true, zero));
+    return (arena_run_split_large_helper(arena, run, size, true, zero,
+        commit));
 }
 
 static bool
 arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
 {
 
-    return (arena_run_split_large_helper(arena, run, size, false, zero));
+    return (arena_run_split_large_helper(arena, run, size, false, zero,
+        true));
 }
 
 static bool
```
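The map bits written above now record whether each run is committed: `CHUNK_MAP_DECOMMITTED` is kept when the caller passed `commit == false`, so those pages stay as reserved-but-unusable address space until something commits them. As a rough illustration of that reserve-then-commit idea in plain POSIX terms, under the assumption of 4 KiB pages (this is not necessarily how jemalloc's default hooks behave on every platform):

```c
#include <stddef.h>
#include <sys/mman.h>

int
main(void) {
    size_t reservation = (size_t)2 << 20;   /* 2 MiB of address space */
    size_t page = 4096;                     /* assumed page size */

    /* Reserve only: PROT_NONE pages cannot be touched yet. */
    void *base = mmap(NULL, reservation, PROT_NONE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED)
        return 1;

    /* "Commit" the first page on demand, as a commit hook would. */
    if (mprotect(base, page, PROT_READ | PROT_WRITE) != 0)
        return 1;
    ((char *)base)[0] = 1;                  /* now safe to write */

    munmap(base, reservation);
    return 0;
}
```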
```diff
@@ -585,6 +600,18 @@ arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, size_t sn, bool zero,
     return (chunk_register(chunk, &chunk->node, gdump));
 }
 
+static arena_chunk_t *
+arena_chunk_header_commit(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, arena_chunk_t *chunk, size_t sn, bool zero) {
+    if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
+        LG_PAGE, arena->ind)) {
+        chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
+            chunksize, sn, zero, false);
+        return NULL;
+    }
+    return chunk;
+}
+
 static arena_chunk_t *
 arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
@@ -599,13 +626,8 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
     chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
         NULL, chunksize, chunksize, &sn, zero, commit);
     if (chunk != NULL && !*commit) {
-        /* Commit header. */
-        if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
-            LG_PAGE, arena->ind)) {
-            chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
-                (void *)chunk, chunksize, sn, *zero, *commit);
-            chunk = NULL;
-        }
+        chunk = arena_chunk_header_commit(tsdn, arena, chunk_hooks,
+            chunk, sn, *zero);
     }
     if (chunk != NULL) {
         bool gdump;
@@ -641,6 +663,10 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
 
     chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
         chunksize, &sn, zero, commit, true);
+    if (chunk != NULL && !*commit) {
+        chunk = arena_chunk_header_commit(tsdn, arena, &chunk_hooks,
+            chunk, sn, *zero);
+    }
     if (chunk != NULL) {
         bool gdump;
         if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
@@ -721,7 +747,7 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
         }
     }
     arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
-        flag_unzeroed);
+        flag_unzeroed | flag_decommitted);
 
     return (chunk);
 }
@@ -1145,18 +1171,20 @@ arena_run_first_best_fit(arena_t *arena, size_t size)
 }
 
 static arena_run_t *
-arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero,
+    bool commit)
 {
     arena_run_t *run = arena_run_first_best_fit(arena, size);
     if (run != NULL) {
-        if (arena_run_split_large(arena, run, size, zero))
+        if (arena_run_split_large(arena, run, size, zero, commit))
             run = NULL;
     }
     return (run);
 }
 
 static arena_run_t *
-arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
+arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero,
+    bool commit)
 {
     arena_chunk_t *chunk;
     arena_run_t *run;
@@ -1165,7 +1193,7 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
     assert(size == PAGE_CEILING(size));
 
     /* Search the arena's chunks for the lowest best fit. */
-    run = arena_run_alloc_large_helper(arena, size, zero);
+    run = arena_run_alloc_large_helper(arena, size, zero, commit);
     if (run != NULL)
         return (run);
 
@@ -1175,7 +1203,7 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
     chunk = arena_chunk_alloc(tsdn, arena);
     if (chunk != NULL) {
         run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
-        if (arena_run_split_large(arena, run, size, zero))
+        if (arena_run_split_large(arena, run, size, zero, commit))
             run = NULL;
         return (run);
     }
@@ -1185,7 +1213,7 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
      * sufficient memory available while this one dropped arena->lock in
      * arena_chunk_alloc(), so search one more time.
      */
-    return (arena_run_alloc_large_helper(arena, size, zero));
+    return (arena_run_alloc_large_helper(arena, size, zero, commit));
 }
 
 static arena_run_t *
@@ -1657,7 +1685,8 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
                 arena_chunk_alloc(tsdn, arena);
 
             /* Temporarily allocate the free dirty run. */
-            arena_run_split_large(arena, run, run_size, false);
+            arena_run_split_large(arena, run, run_size, false,
+                false);
             /* Stash. */
             if (false)
                 qr_new(rdelm, rd_link); /* Redundant. */
@@ -2240,9 +2269,10 @@ arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
     assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
     arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
         (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
-        pageind+head_npages-1)));
+        pageind+head_npages-1)) | flag_decommitted);
     arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
-        (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
+        (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)) |
+        flag_decommitted);
 
     if (config_debug) {
         UNUSED size_t tail_npages = newsize >> LG_PAGE;
@@ -2253,7 +2283,7 @@ arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
     }
     arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
         flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
-        pageind+head_npages)));
+        pageind+head_npages)) | flag_decommitted);
 
     arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
         0));
@@ -2283,9 +2313,10 @@ arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
     assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
     arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
         (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
-        pageind+head_npages-1)));
+        pageind+head_npages-1)) | flag_decommitted);
     arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
-        (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
+        (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)) |
+        flag_decommitted);
 
     if (config_debug) {
         UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
@@ -2296,7 +2327,7 @@ arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
     }
     arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
         flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
-        pageind+head_npages)));
+        pageind+head_npages)) | flag_decommitted);
 
     tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
     tail_run = &tail_miscelm->run;
@@ -2667,7 +2698,7 @@ arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
         random_offset = ((uintptr_t)r) << LG_CACHELINE;
     } else
         random_offset = 0;
-    run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
+    run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero, true);
     if (run == NULL) {
         malloc_mutex_unlock(tsdn, &arena->lock);
         return (NULL);
@@ -2748,7 +2779,7 @@ arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     alloc_size = usize + large_pad + alignment - PAGE;
 
     malloc_mutex_lock(tsdn, &arena->lock);
-    run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
+    run = arena_run_alloc_large(tsdn, arena, alloc_size, false, false);
     if (run == NULL) {
         malloc_mutex_unlock(tsdn, &arena->lock);
         return (NULL);
@@ -3151,7 +3182,7 @@ arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
             goto label_fail;
 
         run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
-        if (arena_run_split_large(arena, run, splitsize, zero))
+        if (arena_run_split_large(arena, run, splitsize, zero, true))
             goto label_fail;
 
         if (config_cache_oblivious && zero) {
@@ -3811,6 +3842,9 @@ init_thp_initially_huge(void) {
 #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
     fd = (int)syscall(SYS_open,
         "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
+#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
+    fd = (int)syscall(SYS_openat,
+        AT_FDCWD, "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
 #else
     fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
 #endif
```
```diff
@@ -250,9 +250,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
     *sn = extent_node_sn_get(node);
     zeroed = extent_node_zeroed_get(node);
-    if (zeroed)
-        *zero = true;
     committed = extent_node_committed_get(node);
+    if (zeroed && committed)
+        *zero = true;
     if (committed)
         *commit = true;
     /* Split the lead. */
@@ -304,7 +304,8 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
         arena_chunk_cache_maybe_insert(arena, node, cache);
         node = NULL;
     }
-    if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
+    if (*commit && !committed && chunk_hooks->commit(ret, size, 0, size,
+        arena->ind)) {
         malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
         chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
             cache, ret, size, *sn, zeroed, committed);
```
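With this change, `chunk_recycle()` leaves a cached-but-decommitted chunk alone unless the caller requested committed memory (`*commit`), and `*zero` is only reported for chunks that are both zeroed and committed. The hooks it consults are per-arena and, in jemalloc 4.x, replaceable through the documented `"arena.<i>.chunk_hooks"` mallctl. A hedged sketch of installing a custom commit hook that way (the no-op hook and both function names are hypothetical, and a prefixed build would use `je_mallctl`):

```c
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Hypothetical hook: report success without doing anything. */
static bool
noop_commit(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind) {
    (void)chunk; (void)size; (void)offset; (void)length; (void)arena_ind;
    return false;   /* false == success in the chunk hook protocol */
}

static int
install_commit_hook(void) {
    chunk_hooks_t hooks;
    size_t len = sizeof(hooks);

    /* Read-modify-write so the remaining hooks keep their defaults. */
    if (mallctl("arena.0.chunk_hooks", &hooks, &len, NULL, 0) != 0)
        return -1;
    hooks.commit = noop_commit;
    return mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));
}
```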
```diff
@@ -20,6 +20,8 @@ static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT;
 
 /* Base address of the DSS. */
 static void *dss_base;
+/* Atomic boolean indicating whether a thread is currently extending DSS. */
+static unsigned dss_extending;
 /* Atomic boolean indicating whether the DSS is exhausted. */
 static unsigned dss_exhausted;
 /* Atomic current upper limit on DSS addresses. */
@@ -63,29 +65,13 @@ chunk_dss_prec_set(dss_prec_t dss_prec)
 static void *
 chunk_dss_max_update(void *new_addr)
 {
-    void *max_cur;
-    spin_t spinner;
+    void *max_cur = chunk_dss_sbrk(0);
 
-    /*
-     * Get the current end of the DSS as max_cur and assure that dss_max is
-     * up to date.
-     */
-    spin_init(&spinner);
-    while (true) {
-        void *max_prev = atomic_read_p(&dss_max);
-
-        max_cur = chunk_dss_sbrk(0);
-        if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
-            /*
-             * Another thread optimistically updated dss_max. Wait
-             * for it to finish.
-             */
-            spin_adaptive(&spinner);
-            continue;
-        }
-        if (!atomic_cas_p(&dss_max, max_prev, max_cur))
-            break;
-    }
+    if (max_cur == (void *)-1) {
+        return NULL;
+    }
+    atomic_write_p(&dss_max, max_cur);
+
     /* Fixed new_addr can only be supported if it is at the edge of DSS. */
     if (new_addr != NULL && max_cur != new_addr)
         return (NULL);
@@ -93,6 +79,26 @@ chunk_dss_max_update(void *new_addr)
     return (max_cur);
 }
 
+static void
+chunk_dss_extending_start(void) {
+    spin_t spinner;
+
+    spin_init(&spinner);
+    while (true) {
+        unsigned expected = 0;
+        if (!atomic_cas_u(&dss_extending, expected, 1)) {
+            break;
+        }
+        spin_adaptive(&spinner);
+    }
+}
+
+static void
+chunk_dss_extending_finish(void) {
+    assert(atomic_read_u(&dss_extending));
+    atomic_write_u(&dss_extending, 0);
+}
+
 void *
 chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
     size_t alignment, bool *zero, bool *commit)
```
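The two new functions above act as a tiny test-and-set spin lock around DSS extension: `chunk_dss_extending_start()` spins until it flips `dss_extending` from 0 to 1, and `chunk_dss_extending_finish()` releases it, replacing the old optimistic-CAS-and-rollback handling of `dss_max`. A standalone sketch of the same pattern using C11 atomics instead of jemalloc's internal `atomic_cas_u()`/`spin_adaptive()` helpers (names are hypothetical):

```c
#include <stdatomic.h>
#include <sched.h>

static atomic_uint extending;   /* 0 == free, 1 == some thread is extending */

static void
extending_start(void) {
    unsigned expected = 0;
    /* Spin until we are the thread that flips 0 -> 1. */
    while (!atomic_compare_exchange_weak(&extending, &expected, 1)) {
        expected = 0;        /* a failed CAS overwrites expected; reset it */
        sched_yield();       /* stand-in for spin_adaptive() back-off */
    }
}

static void
extending_finish(void) {
    atomic_store(&extending, 0);
}
```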
```diff
@@ -108,6 +114,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
     if ((intptr_t)size < 0)
         return (NULL);
 
+    chunk_dss_extending_start();
     if (!atomic_read_u(&dss_exhausted)) {
         /*
          * The loop is necessary to recover from races with other
@@ -152,19 +159,14 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
             assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
                 size);
 
-            /*
-             * Optimistically update dss_max, and roll back below if
-             * sbrk() fails. No other thread will try to extend the
-             * DSS while dss_max is greater than the current DSS
-             * max reported by sbrk(0).
-             */
-            if (atomic_cas_p(&dss_max, max_cur, dss_next))
-                continue;
-
             /* Try to allocate. */
             dss_prev = chunk_dss_sbrk(incr);
             if (dss_prev == max_cur) {
                 /* Success. */
+                atomic_write_p(&dss_max, dss_next);
+                chunk_dss_extending_finish();
+
                 if (gap_size_chunk != 0) {
                     chunk_hooks_t chunk_hooks =
                         CHUNK_HOOKS_INITIALIZER;
@@ -186,13 +188,8 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 
             /*
              * Failure, whether due to OOM or a race with a raw
-             * sbrk() call from outside the allocator. Try to roll
-             * back optimistic dss_max update; if rollback fails,
-             * it's due to another caller of this function having
-             * succeeded since this invocation started, in which
-             * case rollback is not necessary.
+             * sbrk() call from outside the allocator.
              */
-            atomic_cas_p(&dss_max, dss_next, max_cur);
             if (dss_prev == (void *)-1) {
                 /* OOM. */
                 atomic_write_u(&dss_exhausted, (unsigned)true);
@@ -201,6 +198,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
         }
     }
 label_oom:
+    chunk_dss_extending_finish();
     return (NULL);
 }
 
@@ -240,6 +238,7 @@ chunk_dss_boot(void)
     cassert(have_dss);
 
     dss_base = chunk_dss_sbrk(0);
+    atomic_write_u(&dss_extending, 0);
     dss_exhausted = (unsigned)(dss_base == (void *)-1);
     dss_max = dss_base;
 }
```
```diff
@@ -250,6 +250,9 @@ os_overcommits_proc(void)
 
 #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
     fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
+#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
+    fd = (int)syscall(SYS_openat,
+        AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY);
 #else
     fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
 #endif
```