Allow a range for the nfill passed to arena_cache_bin_fill_small

Shirui Cheng 2024-08-01 10:24:09 -07:00 committed by Qi Wang
parent f68effe4ac
commit 14d5dc136a
3 changed files with 25 additions and 13 deletions
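
In short: the refill path now takes a [nfill_min, nfill_max] range instead of a single nfill target. The loop fills until at least nfill_min objects are cached, and opportunistically consumes a slab's entire free run whenever the running total stays at or below nfill_max. A minimal standalone sketch of that batch-size choice (illustrative names, not jemalloc code):

/*
 * Standalone sketch of the fill-range policy this commit introduces.
 * Given `filled` objects already taken and a slab with `nfree` free
 * regions, pick the next batch size. Callers guarantee filled < nfill_min.
 */
static unsigned
fill_batch_size(unsigned filled, unsigned nfree, unsigned nfill_min,
    unsigned nfill_max) {
	unsigned cnt = nfree;			/* take the whole free run... */
	if (filled + cnt > nfill_max) {		/* ...unless it overshoots the cap, */
		cnt = nfill_min - filled;	/* then only top up to the minimum. */
	}
	return cnt;
}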

@@ -63,7 +63,8 @@ void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
 void arena_reset(tsd_t *tsd, arena_t *arena);
 void arena_destroy(tsd_t *tsd, arena_t *arena);
 void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
-    cache_bin_t *cache_bin, szind_t binind, const cache_bin_sz_t nfill);
+    cache_bin_t *cache_bin, szind_t binind, const cache_bin_sz_t nfill_min,
+    const cache_bin_sz_t nfill_max);
 void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
     szind_t ind, bool zero, bool slab);
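
Callers that want the old single-target behavior can pass the same value for both bounds; the experimental tcache GC path in the last hunk below is what actually supplies a widened range. For example (mirroring the unchanged branch of that call site):

	/* Degenerate range: behaves exactly like the old nfill parameter. */
	arena_cache_bin_fill_small(tsdn, arena, cache_bin, binind,
	    /* nfill_min */ nfill, /* nfill_max */ nfill);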

@@ -1047,14 +1047,16 @@ arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
 void
 arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
-    cache_bin_t *cache_bin, szind_t binind, const cache_bin_sz_t nfill) {
+    cache_bin_t *cache_bin, szind_t binind, const cache_bin_sz_t nfill_min,
+    const cache_bin_sz_t nfill_max) {
 	assert(cache_bin_ncached_get_local(cache_bin) == 0);
-	assert(nfill != 0);
+	assert(nfill_min > 0 && nfill_min <= nfill_max);
+	assert(nfill_max <= cache_bin_ncached_max_get(cache_bin));
 	const bin_info_t *bin_info = &bin_infos[binind];
-	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
-	cache_bin_init_ptr_array_for_fill(cache_bin, &ptrs, nfill);
+	CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill_max);
+	cache_bin_init_ptr_array_for_fill(cache_bin, &ptrs, nfill_max);
 	/*
 	 * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
 	 * slabs. After both are exhausted, new slabs will be allocated through
@@ -1101,13 +1103,19 @@ label_refill:
 	malloc_mutex_lock(tsdn, &bin->lock);
 	arena_bin_flush_batch_after_lock(tsdn, arena, bin, binind, &batch_flush_state);
-	while (filled < nfill) {
+	while (filled < nfill_min) {
 		/* Try batch-fill from slabcur first. */
 		edata_t *slabcur = bin->slabcur;
 		if (slabcur != NULL && edata_nfree_get(slabcur) > 0) {
-			unsigned tofill = nfill - filled;
-			unsigned nfree = edata_nfree_get(slabcur);
-			unsigned cnt = tofill < nfree ? tofill : nfree;
+			/*
+			 * Use up the slab's free slots as long as the total
+			 * filled stays <= nfill_max; otherwise, fall back to
+			 * nfill_min for more conservative memory usage.
+			 */
+			unsigned cnt = edata_nfree_get(slabcur);
+			if (cnt + filled > nfill_max) {
+				cnt = nfill_min - filled;
+			}
 			arena_slab_reg_alloc_batch(slabcur, bin_info, cnt,
 			    &ptrs.ptr[filled]);
@@ -1144,7 +1152,7 @@ label_refill:
 		assert(fresh_slab == NULL);
 		assert(!alloc_and_retry);
 		break;
-	} /* while (filled < nfill) loop. */
+	} /* while (filled < nfill_min) loop. */
 	if (config_stats && !alloc_and_retry) {
 		bin->stats.nmalloc += filled;
@@ -1162,7 +1170,7 @@ label_refill:
 	if (alloc_and_retry) {
 		assert(fresh_slab == NULL);
-		assert(filled < nfill);
+		assert(filled < nfill_min);
 		assert(made_progress);
 		fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
@@ -1173,7 +1181,8 @@ label_refill:
 		made_progress = false;
 		goto label_refill;
 	}
-	assert(filled == nfill || (fresh_slab == NULL && !made_progress));
+	assert((filled >= nfill_min && filled <= nfill_max) ||
+	    (fresh_slab == NULL && !made_progress));
 	/* Release if allocated but not used. */
 	if (fresh_slab != NULL) {
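
To see how the rewritten loop satisfies that final assertion, here is a toy walkthrough (not jemalloc code) with nfill_min = 9, nfill_max = 16, and two successive slabs holding 5 and 8 free regions:

#include <stdio.h>

int main(void) {
	unsigned nfill_min = 9, nfill_max = 16;
	unsigned nfree[] = {5, 8};	/* free regions in successive slabs */
	unsigned filled = 0;
	for (unsigned i = 0; i < 2 && filled < nfill_min; i++) {
		unsigned cnt = nfree[i];
		if (filled + cnt > nfill_max) {
			cnt = nfill_min - filled;
		}
		filled += cnt;
	}
	/* First slab: 5 taken (still < 9). Second slab: all 8 fit under the
	 * cap of 16, so filled ends at 13 -- within [9, 16], as asserted. */
	printf("filled = %u\n", filled);
	return 0;
}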

@@ -254,7 +254,9 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena,
 	if (nfill == 0) {
 		nfill = 1;
 	}
-	arena_cache_bin_fill_small(tsdn, arena, cache_bin, binind, nfill);
+	arena_cache_bin_fill_small(tsdn, arena, cache_bin, binind,
+	    /* nfill_min */ opt_experimental_tcache_gc ?
+	    ((nfill >> 1) + 1) : nfill, /* nfill_max */ nfill);
 	tcache_slow->bin_refilled[binind] = true;
 	ret = cache_bin_alloc(cache_bin, tcache_success);
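
With opt_experimental_tcache_gc enabled, a refill that previously fetched exactly nfill objects now requests between (nfill >> 1) + 1 and nfill of them, e.g. nfill = 8 gives the range [5, 8]; with the option disabled the range collapses to [nfill, nfill] and behavior is unchanged. The same call-site policy, unpacked for readability (a fragment mirroring the hunk above, not a drop-in replacement):

	cache_bin_sz_t nfill_max = nfill;
	cache_bin_sz_t nfill_min = opt_experimental_tcache_gc ?
	    (cache_bin_sz_t)((nfill >> 1) + 1) :	/* allow half-sized fills */
	    nfill;					/* old exact-fill behavior */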