Remove `strict_min_purge_interval` option

Option `experimental_hpa_strict_min_purge_interval` was intended to be
temporary, to simplify the staged rollout of a purging-logic bugfix. Now
that the bugfix rollout is complete, it is safe to remove the option.
Dmitry Ilvokhin 2024-08-29 10:49:31 -07:00 committed by Qi Wang
parent 6cc42173cb
commit 4f4fd42447
7 changed files with 21 additions and 41 deletions

View File

@@ -50,14 +50,6 @@ struct hpa_shard_opts_s {
 	 */
 	uint64_t min_purge_interval_ms;
 	/*
-	 * Strictly respect minimum amout of time between purges.
-	 *
-	 * This is an option to provide backward compatibility for staged rollout of
-	 * purging logic fix.
-	 */
-	bool experimental_strict_min_purge_interval;
-	/*
 	 * Maximum number of hugepages to purge on each purging attempt.
 	 */
@@ -83,8 +75,6 @@ struct hpa_shard_opts_s {
 	10 * 1000, \
 	/* min_purge_interval_ms */ \
 	5 * 1000, \
-	/* experimental_strict_min_purge_interval */ \
-	false, \
 	/* experimental_max_purge_nhp */ \
 	-1 \
 }
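
The field and its slot in the positional `HPA_SHARD_OPTS_DEFAULT` initializer go away together. For context, a minimal sketch of how callers override the remaining purge knob, mirroring what the updated tests below do (the type and macro are from the hunk above; the wrapper function is illustrative):

```c
#include "jemalloc/internal/hpa_opts.h"

static hpa_shard_opts_t
make_opts(void) {
	/* Start from the shipped defaults, then override one knob. With the
	 * flag gone, min_purge_interval_ms alone gates purge frequency. */
	hpa_shard_opts_t opts = HPA_SHARD_OPTS_DEFAULT;
	opts.min_purge_interval_ms = 0; /* 0 disables the rate limit */
	return opts;
}
```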

View File

@@ -103,7 +103,6 @@ CTL_PROTO(opt_hpa_slab_max_alloc)
 CTL_PROTO(opt_hpa_hugification_threshold)
 CTL_PROTO(opt_hpa_hugify_delay_ms)
 CTL_PROTO(opt_hpa_min_purge_interval_ms)
-CTL_PROTO(opt_experimental_hpa_strict_min_purge_interval)
 CTL_PROTO(opt_experimental_hpa_max_purge_nhp)
 CTL_PROTO(opt_hpa_dirty_mult)
 CTL_PROTO(opt_hpa_sec_nshards)
@@ -462,8 +461,6 @@ static const ctl_named_node_t opt_node[] = {
 	    CTL(opt_hpa_hugification_threshold)},
 	{NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)},
 	{NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)},
-	{NAME("experimental_hpa_strict_min_purge_interval"),
-	    CTL(opt_experimental_hpa_strict_min_purge_interval)},
 	{NAME("experimental_hpa_max_purge_nhp"),
 	    CTL(opt_experimental_hpa_max_purge_nhp)},
 	{NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)},
@@ -2202,8 +2199,6 @@ CTL_RO_NL_GEN(opt_hpa_hugification_threshold,
 CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t)
 CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms,
     uint64_t)
-CTL_RO_NL_GEN(opt_experimental_hpa_strict_min_purge_interval,
-    opt_hpa_opts.experimental_strict_min_purge_interval, bool)
 CTL_RO_NL_GEN(opt_experimental_hpa_max_purge_nhp,
     opt_hpa_opts.experimental_max_purge_nhp, ssize_t)
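
With its node gone from `opt_node`, the name no longer resolves through the mallctl tree. A hedged probe for the removal (`je_` prefix as used elsewhere in this commit; mallctl documents ENOENT for unknown names):

```c
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static bool
strict_option_present(void) {
	bool v;
	size_t sz = sizeof(v);
	/* Name lookup fails in the ctl tree after this commit, so mallctl
	 * returns ENOENT instead of 0. */
	return je_mallctl("opt.experimental_hpa_strict_min_purge_interval",
	    &v, &sz, NULL, 0) != ENOENT;
}
```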

View File

@@ -512,14 +512,9 @@ hpa_try_hugify(tsdn_t *tsdn, hpa_shard_t *shard) {
 static bool
 hpa_min_purge_interval_passed(tsdn_t *tsdn, hpa_shard_t *shard) {
 	malloc_mutex_assert_owner(tsdn, &shard->mtx);
-	if (shard->opts.experimental_strict_min_purge_interval) {
-		uint64_t since_last_purge_ms = shard->central->hooks.ms_since(
-		    &shard->last_purge);
-		if (since_last_purge_ms < shard->opts.min_purge_interval_ms) {
-			return false;
-		}
-	}
-	return true;
+	uint64_t since_last_purge_ms = shard->central->hooks.ms_since(
+	    &shard->last_purge);
+	return since_last_purge_ms >= shard->opts.min_purge_interval_ms;
 }
 
 /*
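
The rewritten predicate drops the flag check, so the minimum interval between purges is now enforced unconditionally. A standalone sketch of the resulting semantics (signature simplified; the real function takes `tsdn`/`shard` and reads elapsed time through `shard->central->hooks.ms_since`):

```c
#include <stdbool.h>
#include <stdint.h>

/* Purging may proceed only once min_purge_interval_ms has elapsed since
 * the last purge. Before this commit, the check was skipped entirely
 * unless experimental_strict_min_purge_interval was set. */
static bool
min_purge_interval_passed(uint64_t since_last_purge_ms,
    uint64_t min_purge_interval_ms) {
	return since_last_purge_ms >= min_purge_interval_ms;
}
```

With a `min_purge_interval_ms` of 0 the predicate is always true, which is why the renamed `test_no_min_purge_interval` below sets the interval to 0 explicitly instead of relying on the old flag defaulting to false.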

View File

@@ -1571,10 +1571,6 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
 			    "hpa_min_purge_interval_ms", 0, 0,
 			    CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false);
-			CONF_HANDLE_BOOL(
-			    opt_hpa_opts.experimental_strict_min_purge_interval,
-			    "experimental_hpa_strict_min_purge_interval");
 			CONF_HANDLE_SSIZE_T(
 			    opt_hpa_opts.experimental_max_purge_nhp,
 			    "experimental_hpa_max_purge_nhp", -1, SSIZE_MAX);

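With the `CONF_HANDLE_BOOL` clause gone, config strings that still name the option fall through to the unknown-option path during startup parsing. A hedged sketch of the now-stale usage (the compiled-in `malloc_conf` string is jemalloc's standard hook, named `je_malloc_conf` in prefixed builds; the exact warning text is an assumption):

```c
/* jemalloc reads this application-provided string at option init. After
 * this commit the pair below matches no handler, so startup reports a
 * conf error (e.g. "Invalid conf pair") and ignores it; with
 * abort_conf:true set, startup would abort instead. */
const char *malloc_conf = "experimental_hpa_strict_min_purge_interval:true";
```
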
View File

@@ -1564,7 +1564,6 @@ stats_general_print(emitter_t *emitter) {
 	OPT_WRITE_SIZE_T("hpa_hugification_threshold")
 	OPT_WRITE_UINT64("hpa_hugify_delay_ms")
 	OPT_WRITE_UINT64("hpa_min_purge_interval_ms")
-	OPT_WRITE_BOOL("experimental_hpa_strict_min_purge_interval")
 	OPT_WRITE_SSIZE_T("experimental_hpa_max_purge_nhp")
 	if (je_mallctl("opt.hpa_dirty_mult", (void *)&u32v, &u32sz, NULL, 0)
 	    == 0) {
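
The option's line likewise disappears from the options block of the stats dump. A quick way to observe the change (standard stats API; the `je_` prefix matches its use in the hunk above):

```c
#include <jemalloc/jemalloc.h>

int main(void) {
	/* Emits the general stats section built by stats_general_print();
	 * experimental_hpa_strict_min_purge_interval no longer appears
	 * among the opt.* lines. */
	je_malloc_stats_print(NULL, NULL, NULL);
	return 0;
}
```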

View File

@@ -34,8 +34,6 @@ static hpa_shard_opts_t test_hpa_shard_opts_default = {
 	10 * 1000,
 	/* min_purge_interval_ms */
 	5 * 1000,
-	/* experimental_strict_min_purge_interval */
-	false,
 	/* experimental_max_purge_nhp */
 	-1
 };
@@ -53,8 +51,6 @@ static hpa_shard_opts_t test_hpa_shard_opts_purge = {
 	0,
 	/* min_purge_interval_ms */
 	5 * 1000,
-	/* experimental_strict_min_purge_interval */
-	false,
 	/* experimental_max_purge_nhp */
 	-1
 };
@@ -506,7 +502,7 @@ TEST_BEGIN(test_purge_no_infinite_loop) {
 }
 TEST_END
 
-TEST_BEGIN(test_no_experimental_strict_min_purge_interval) {
+TEST_BEGIN(test_no_min_purge_interval) {
 	test_skip_if(!hpa_supported());
 
 	hpa_hooks_t hooks;
@@ -520,6 +516,7 @@ TEST_BEGIN(test_no_experimental_strict_min_purge_interval) {
 	hpa_shard_opts_t opts = test_hpa_shard_opts_default;
 	opts.deferral_allowed = true;
+	opts.min_purge_interval_ms = 0;
 
 	hpa_shard_t *shard = create_test_data(&hooks, &opts);
@@ -547,7 +544,7 @@ TEST_BEGIN(test_no_experimental_strict_min_purge_interval) {
 }
 TEST_END
 
-TEST_BEGIN(test_experimental_strict_min_purge_interval) {
+TEST_BEGIN(test_min_purge_interval) {
 	test_skip_if(!hpa_supported());
 
 	hpa_hooks_t hooks;
@@ -561,7 +558,6 @@ TEST_BEGIN(test_experimental_strict_min_purge_interval) {
 	hpa_shard_opts_t opts = test_hpa_shard_opts_default;
 	opts.deferral_allowed = true;
-	opts.experimental_strict_min_purge_interval = true;
 
 	hpa_shard_t *shard = create_test_data(&hooks, &opts);
@@ -631,6 +627,7 @@ TEST_BEGIN(test_purge) {
 		pai_dalloc(tsdn, &shard->pai, edatas[i],
 		    &deferred_work_generated);
 	}
+	nstime_init2(&defer_curtime, 6, 0);
 	hpa_shard_do_deferred_work(tsdn, shard);
 	expect_zu_eq(0, ndefer_hugify_calls, "Hugified too early");
@@ -642,9 +639,15 @@ TEST_BEGIN(test_purge) {
 	expect_zu_eq(2, ndefer_purge_calls, "Expect purges");
 	ndefer_purge_calls = 0;
 
+	nstime_init2(&defer_curtime, 12, 0);
 	hpa_shard_do_deferred_work(tsdn, shard);
-	expect_zu_eq(0, ndefer_hugify_calls, "Hugified too early");
+	/*
+	 * We are still having 5 active hugepages and now they are
+	 * matching hugification criteria long enough to actually hugify them.
+	 */
+	expect_zu_eq(5, ndefer_hugify_calls, "Expect hugification");
+	ndefer_hugify_calls = 0;
 	expect_zu_eq(0, ndefer_dehugify_calls, "Dehugified too early");
 	/*
 	 * We still have completely dirty hugepage, but we are below
@@ -691,6 +694,7 @@ TEST_BEGIN(test_experimental_max_purge_nhp) {
 		pai_dalloc(tsdn, &shard->pai, edatas[i],
 		    &deferred_work_generated);
 	}
+	nstime_init2(&defer_curtime, 6, 0);
 	hpa_shard_do_deferred_work(tsdn, shard);
 	expect_zu_eq(0, ndefer_hugify_calls, "Hugified too early");
@@ -702,14 +706,17 @@ TEST_BEGIN(test_experimental_max_purge_nhp) {
 	expect_zu_eq(1, ndefer_purge_calls, "Expect purges");
 	ndefer_purge_calls = 0;
 
+	nstime_init2(&defer_curtime, 12, 0);
 	hpa_shard_do_deferred_work(tsdn, shard);
-	expect_zu_eq(0, ndefer_hugify_calls, "Hugified too early");
+	expect_zu_eq(5, ndefer_hugify_calls, "Expect hugification");
+	ndefer_hugify_calls = 0;
 	expect_zu_eq(0, ndefer_dehugify_calls, "Dehugified too early");
 	/* We still above the limit for dirty pages. */
 	expect_zu_eq(1, ndefer_purge_calls, "Expect purge");
 	ndefer_purge_calls = 0;
 
+	nstime_init2(&defer_curtime, 18, 0);
 	hpa_shard_do_deferred_work(tsdn, shard);
 	expect_zu_eq(0, ndefer_hugify_calls, "Hugified too early");
@@ -741,8 +748,8 @@ main(void) {
 	    test_alloc_dalloc_batch,
 	    test_defer_time,
 	    test_purge_no_infinite_loop,
-	    test_no_experimental_strict_min_purge_interval,
-	    test_experimental_strict_min_purge_interval,
+	    test_no_min_purge_interval,
+	    test_min_purge_interval,
 	    test_purge,
 	    test_experimental_max_purge_nhp);
 }
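
The added `nstime_init2(&defer_curtime, ...)` calls advance the tests' fake clock so the 5 s minimum purge interval and the 10 s hugification delay actually elapse between `hpa_shard_do_deferred_work` calls: at t = 12 s both thresholds have passed, hence the new "Expect hugification" assertions. A hedged sketch of that mocked-clock pattern (hook shape inferred from the `hooks.ms_since` call in the hpa.c hunk; internal headers assumed):

```c
#include "jemalloc/internal/nstime.h"

/* Global fake "now"; tests fast-forward it via nstime_init2(&t, sec, ns). */
static nstime_t defer_curtime;

/* ms_since-style hook: milliseconds between the fake now and *past. */
static uint64_t
fake_ms_since(nstime_t *past) {
	nstime_t elapsed = defer_curtime;    /* copy the mocked clock */
	nstime_subtract(&elapsed, past);     /* elapsed -= *past */
	return nstime_ns(&elapsed) / (1000 * 1000);
}
```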

View File

@@ -292,8 +292,6 @@ TEST_BEGIN(test_mallctl_opt) {
 	TEST_MALLCTL_OPT(size_t, hpa_sec_max_bytes, always);
 	TEST_MALLCTL_OPT(size_t, hpa_sec_bytes_after_flush, always);
 	TEST_MALLCTL_OPT(size_t, hpa_sec_batch_fill_extra, always);
-	TEST_MALLCTL_OPT(bool, experimental_hpa_strict_min_purge_interval,
-	    always);
 	TEST_MALLCTL_OPT(ssize_t, experimental_hpa_max_purge_nhp, always);
 	TEST_MALLCTL_OPT(unsigned, narenas, always);
 	TEST_MALLCTL_OPT(const char *, percpu_arena, always);
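
For completeness, a sketch of roughly what the deleted `TEST_MALLCTL_OPT` line asserted (the macro's expansion is not shown in this diff, so this equivalent is an assumption built on the suite's expect helpers):

```c
#include <stdbool.h>
#include <stddef.h>
#include "test/jemalloc_test.h"

/* "always" meant the option had to be readable in every configuration;
 * with the ctl node removed, this read now fails with ENOENT, so the
 * assertion had to be deleted along with the option. */
static void
check_strict_opt_readable(void) {
	bool oldval;
	size_t sz = sizeof(oldval);
	expect_d_eq(je_mallctl(
	    "opt.experimental_hpa_strict_min_purge_interval",
	    &oldval, &sz, NULL, 0), 0, "Unexpected mallctl() failure");
}
```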