Expose `psset` state stats

When evaluating changes in HPA logic, it is useful to know the internal
`hpa_shard` state. A great deal of this state lives in the `psset`. Some of
the `psset` stats were already available, but only in disaggregated form,
which is not very convenient. This commit exposes the `psset` counters via
`mallctl` and in malloc stats dumps.

An example of what the malloc stats dump looks like after the change:

HPA shard stats:
  Pageslabs: 14899 (4354 huge, 10545 nonhuge)
  Active pages: 6708166 (2228917 huge, 4479249 nonhuge)
  Dirty pages: 233816 (331 huge, 233485 nonhuge)
  Retained pages: 686306
  Purge passes: 8730 (10 / sec)
  Purges: 127501 (146 / sec)
  Hugeifies: 4358 (5 / sec)
  Dehugifies: 4 (0 / sec)

The Pageslabs, Active pages, Dirty pages, and Retained pages rows are added
by this change.
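
With the counters wired into `mallctl`, the merged numbers can also be read
programmatically. Below is a minimal sketch (not part of this commit), assuming
jemalloc is built with stats enabled, the HPA is active (`opt.hpa`), and arena 0
is the arena of interest; the stat names come from the `ctl.c` changes in this
commit:

#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Advance the epoch so the stats snapshot is refreshed. */
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	mallctl("epoch", &epoch, &esz, &epoch, esz);

	size_t npageslabs, nactive, ndirty;
	size_t sz = sizeof(size_t);
	mallctl("stats.arenas.0.hpa_shard.npageslabs", &npageslabs, &sz, NULL, 0);
	mallctl("stats.arenas.0.hpa_shard.nactive", &nactive, &sz, NULL, 0);
	mallctl("stats.arenas.0.hpa_shard.ndirty", &ndirty, &sz, NULL, 0);

	printf("pageslabs: %zu, active: %zu, dirty: %zu\n",
	    npageslabs, nactive, ndirty);
	return 0;
}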
Dmitry Ilvokhin 2024-11-14 10:52:50 -08:00 committed by stanjo74
parent 3820e38dc1
commit 6092c980a6
6 changed files with 458 additions and 58 deletions

include/jemalloc/internal/psset.h

@ -21,6 +21,12 @@
*/
#define PSSET_NPSIZES 64
/*
* We store non-hugified and hugified pageslab metadata separately.
* [0] corresponds to non-hugified and [1] to hugified pageslabs.
*/
#define PSSET_NHUGE 2
/*
* We keep two purge lists per page size class; one for hugified hpdatas (at
* index 2*pszind), and one for the non-hugified hpdatas (at index 2*pszind +
@ -44,21 +50,36 @@ struct psset_bin_stats_s {
typedef struct psset_stats_s psset_stats_t;
struct psset_stats_s {
/*
* Merged stats for all pageslabs in psset. This lets us quickly
* answer queries for the number of dirty and active pages in the
* entire set.
*/
psset_bin_stats_t merged;
/*
* Below are the same stats, but aggregated by different
* properties of pageslabs: hugeness or fullness.
*/
/* Non-huge and huge slabs. */
psset_bin_stats_t slabs[PSSET_NHUGE];
/*
* The second index is huge stats; nonfull_slabs[pszind][0] contains
* stats for the non-huge slabs in bucket pszind, while
* nonfull_slabs[pszind][1] contains stats for the huge slabs.
*/
psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES][2];
psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES][PSSET_NHUGE];
/*
* Full slabs don't live in any edata heap, but we still track their
* stats.
*/
psset_bin_stats_t full_slabs[2];
psset_bin_stats_t full_slabs[PSSET_NHUGE];
/* Empty slabs are similar. */
psset_bin_stats_t empty_slabs[2];
psset_bin_stats_t empty_slabs[PSSET_NHUGE];
};
typedef struct psset_s psset_t;
@ -70,12 +91,6 @@ struct psset_s {
hpdata_age_heap_t pageslabs[PSSET_NPSIZES];
/* Bitmap for which set bits correspond to non-empty heaps. */
fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)];
/*
* The sum of all bin stats in stats. This lets us quickly answer
* queries for the number of dirty, active, and retained pages in the
* entire set.
*/
psset_bin_stats_t merged_stats;
psset_stats_t stats;
/*
* Slabs with no active allocations, but which are allowed to serve new
@ -116,17 +131,17 @@ void psset_remove(psset_t *psset, hpdata_t *ps);
static inline size_t
psset_npageslabs(psset_t *psset) {
return psset->merged_stats.npageslabs;
return psset->stats.merged.npageslabs;
}
static inline size_t
psset_nactive(psset_t *psset) {
return psset->merged_stats.nactive;
return psset->stats.merged.nactive;
}
static inline size_t
psset_ndirty(psset_t *psset) {
return psset->merged_stats.ndirty;
return psset->stats.merged.ndirty;
}
#endif /* JEMALLOC_INTERNAL_PSSET_H */
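
One way to read the new layout: when stats are enabled, the merged counters are
simply the sum of the per-hugeness buckets, which in turn decompose into full,
empty, and nonfull slabs; the config_debug path in src/psset.c verifies exactly
this. A small illustrative sketch of the invariant (a hypothetical helper, not
part of the commit, using the field names from the struct above):

static void
psset_stats_check_merged(const psset_stats_t *stats) {
	size_t npageslabs = 0;
	size_t nactive = 0;
	size_t ndirty = 0;
	for (int huge = 0; huge < PSSET_NHUGE; huge++) {
		/*
		 * stats->slabs[huge] already aggregates the full, empty, and
		 * nonfull slabs of the given hugeness.
		 */
		npageslabs += stats->slabs[huge].npageslabs;
		nactive += stats->slabs[huge].nactive;
		ndirty += stats->slabs[huge].ndirty;
	}
	assert(npageslabs == stats->merged.npageslabs);
	assert(nactive == stats->merged.nactive);
	assert(ndirty == stats->merged.ndirty);
}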

src/ctl.c

@ -261,13 +261,27 @@ CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
INDEX_PROTO(stats_arenas_i_extents_j)
/* Merged set of stats for HPA shard. */
CTL_PROTO(stats_arenas_i_hpa_shard_npageslabs)
CTL_PROTO(stats_arenas_i_hpa_shard_nactive)
CTL_PROTO(stats_arenas_i_hpa_shard_ndirty)
CTL_PROTO(stats_arenas_i_hpa_shard_npurge_passes)
CTL_PROTO(stats_arenas_i_hpa_shard_npurges)
CTL_PROTO(stats_arenas_i_hpa_shard_nhugifies)
CTL_PROTO(stats_arenas_i_hpa_shard_nhugify_failures)
CTL_PROTO(stats_arenas_i_hpa_shard_ndehugifies)
/* We have a set of stats for full slabs. */
/* Set of stats for non-hugified and hugified slabs. */
CTL_PROTO(stats_arenas_i_hpa_shard_slabs_npageslabs_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_slabs_npageslabs_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_slabs_nactive_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_slabs_nactive_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_slabs_ndirty_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_slabs_ndirty_huge)
/* A parallel set of stats for full slabs. */
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)
@ -295,6 +309,7 @@ CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)
INDEX_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
@ -771,6 +786,21 @@ MUTEX_PROF_ARENA_MUTEXES
#undef OP
};
static const ctl_named_node_t stats_arenas_i_hpa_shard_slabs_node[] = {
{NAME("npageslabs_nonhuge"),
CTL(stats_arenas_i_hpa_shard_slabs_npageslabs_nonhuge)},
{NAME("npageslabs_huge"),
CTL(stats_arenas_i_hpa_shard_slabs_npageslabs_huge)},
{NAME("nactive_nonhuge"),
CTL(stats_arenas_i_hpa_shard_slabs_nactive_nonhuge)},
{NAME("nactive_huge"),
CTL(stats_arenas_i_hpa_shard_slabs_nactive_huge)},
{NAME("ndirty_nonhuge"),
CTL(stats_arenas_i_hpa_shard_slabs_ndirty_nonhuge)},
{NAME("ndirty_huge"),
CTL(stats_arenas_i_hpa_shard_slabs_ndirty_huge)}
};
static const ctl_named_node_t stats_arenas_i_hpa_shard_full_slabs_node[] = {
{NAME("npageslabs_nonhuge"),
CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)},
@ -827,19 +857,25 @@ static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] =
};
static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
{NAME("full_slabs"), CHILD(named,
stats_arenas_i_hpa_shard_full_slabs)},
{NAME("empty_slabs"), CHILD(named,
stats_arenas_i_hpa_shard_empty_slabs)},
{NAME("nonfull_slabs"), CHILD(indexed,
stats_arenas_i_hpa_shard_nonfull_slabs)},
{NAME("npageslabs"), CTL(stats_arenas_i_hpa_shard_npageslabs)},
{NAME("nactive"), CTL(stats_arenas_i_hpa_shard_nactive)},
{NAME("ndirty"), CTL(stats_arenas_i_hpa_shard_ndirty)},
{NAME("slabs"), CHILD(named, stats_arenas_i_hpa_shard_slabs)},
{NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)},
{NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)},
{NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)},
{NAME("nhugify_failures"),
CTL(stats_arenas_i_hpa_shard_nhugify_failures)},
{NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)}
{NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)},
{NAME("full_slabs"), CHILD(named,
stats_arenas_i_hpa_shard_full_slabs)},
{NAME("empty_slabs"), CHILD(named,
stats_arenas_i_hpa_shard_empty_slabs)},
{NAME("nonfull_slabs"), CHILD(indexed,
stats_arenas_i_hpa_shard_nonfull_slabs)}
};
static const ctl_named_node_t stats_arenas_i_node[] = {
@ -4061,6 +4097,29 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
return super_stats_arenas_i_extents_j_node;
}
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npageslabs,
arenas_i(mib[2])->astats->hpastats.psset_stats.merged.npageslabs, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nactive,
arenas_i(mib[2])->astats->hpastats.psset_stats.merged.nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndirty,
arenas_i(mib[2])->astats->hpastats.psset_stats.merged.ndirty, size_t);
/* Nonhuge slabs */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_slabs_npageslabs_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.slabs[0].npageslabs, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_slabs_nactive_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.slabs[0].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_slabs_ndirty_nonhuge,
arenas_i(mib[2])->astats->hpastats.psset_stats.slabs[0].ndirty, size_t);
/* Huge slabs */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_slabs_npageslabs_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.slabs[1].npageslabs, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_slabs_nactive_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.slabs[1].nactive, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_slabs_ndirty_huge,
arenas_i(mib[2])->astats->hpastats.psset_stats.slabs[1].ndirty, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes,
arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges,

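Because the arena index sits in the middle of these mallctl names, a caller
interested in a particular arena can translate the name once and patch the
index into the MIB. A rough sketch (error handling omitted; `arena_ind` is a
hypothetical variable holding the arena of interest):

size_t mib[8];
size_t miblen = sizeof(mib) / sizeof(mib[0]);
size_t npageslabs_huge;
size_t sz = sizeof(npageslabs_huge);

/* Resolve the name once; MIB component 2 is the arena index. */
mallctlnametomib("stats.arenas.0.hpa_shard.slabs.npageslabs_huge",
    mib, &miblen);
mib[2] = (size_t)arena_ind;
mallctlbymib(mib, miblen, &npageslabs_huge, &sz, NULL, 0);
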
src/psset.c

@ -11,7 +11,6 @@ psset_init(psset_t *psset) {
hpdata_age_heap_new(&psset->pageslabs[i]);
}
fb_init(psset->pageslab_bitmap, PSSET_NPSIZES);
memset(&psset->merged_stats, 0, sizeof(psset->merged_stats));
memset(&psset->stats, 0, sizeof(psset->stats));
hpdata_empty_list_init(&psset->empty);
for (int i = 0; i < PSSET_NPURGE_LISTS; i++) {
@ -30,10 +29,14 @@ psset_bin_stats_accum(psset_bin_stats_t *dst, psset_bin_stats_t *src) {
void
psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) {
psset_bin_stats_accum(&dst->full_slabs[0], &src->full_slabs[0]);
psset_bin_stats_accum(&dst->full_slabs[1], &src->full_slabs[1]);
psset_bin_stats_accum(&dst->empty_slabs[0], &src->empty_slabs[0]);
psset_bin_stats_accum(&dst->empty_slabs[1], &src->empty_slabs[1]);
psset_bin_stats_accum(&dst->merged, &src->merged);
for (int huge = 0; huge < PSSET_NHUGE; huge++) {
psset_bin_stats_accum(&dst->slabs[huge], &src->slabs[huge]);
psset_bin_stats_accum(&dst->full_slabs[huge],
&src->full_slabs[huge]);
psset_bin_stats_accum(&dst->empty_slabs[huge],
&src->empty_slabs[huge]);
}
for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
psset_bin_stats_accum(&dst->nonfull_slabs[i][0],
&src->nonfull_slabs[i][0]);
@ -48,48 +51,76 @@ psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) {
* bin) when we call psset_update_end.
*/
JEMALLOC_ALWAYS_INLINE void
psset_bin_stats_insert_remove(psset_t *psset, psset_bin_stats_t *binstats,
hpdata_t *ps, bool insert) {
psset_slab_stats_insert_remove(psset_stats_t *stats,
psset_bin_stats_t *binstats, hpdata_t *ps, bool insert) {
size_t mul = insert ? (size_t)1 : (size_t)-1;
size_t nactive = hpdata_nactive_get(ps);
size_t ndirty = hpdata_ndirty_get(ps);
stats->merged.npageslabs += mul * 1;
stats->merged.nactive += mul * nactive;
stats->merged.ndirty += mul * ndirty;
/*
* The stats above are necessary for the purging logic to work; everything
* below only improves observability and hence is optional, so we don't
* update it when stats are disabled.
*/
if (!config_stats) {
return;
}
size_t huge_idx = (size_t)hpdata_huge_get(ps);
binstats[huge_idx].npageslabs += mul * 1;
binstats[huge_idx].nactive += mul * hpdata_nactive_get(ps);
binstats[huge_idx].ndirty += mul * hpdata_ndirty_get(ps);
stats->slabs[huge_idx].npageslabs += mul * 1;
stats->slabs[huge_idx].nactive += mul * nactive;
stats->slabs[huge_idx].ndirty += mul * ndirty;
psset->merged_stats.npageslabs += mul * 1;
psset->merged_stats.nactive += mul * hpdata_nactive_get(ps);
psset->merged_stats.ndirty += mul * hpdata_ndirty_get(ps);
binstats[huge_idx].npageslabs += mul * 1;
binstats[huge_idx].nactive += mul * nactive;
binstats[huge_idx].ndirty += mul * ndirty;
if (config_debug) {
psset_bin_stats_t check_stats = {0};
for (size_t huge = 0; huge <= 1; huge++) {
psset_bin_stats_accum(&check_stats,
&psset->stats.full_slabs[huge]);
psset_bin_stats_accum(&check_stats,
&psset->stats.empty_slabs[huge]);
psset_bin_stats_t check_stats[PSSET_NHUGE] = {{0}};
for (int huge = 0; huge < PSSET_NHUGE; huge++) {
psset_bin_stats_accum(&check_stats[huge],
&stats->full_slabs[huge]);
psset_bin_stats_accum(&check_stats[huge],
&stats->empty_slabs[huge]);
for (pszind_t pind = 0; pind < PSSET_NPSIZES; pind++) {
psset_bin_stats_accum(&check_stats,
&psset->stats.nonfull_slabs[pind][huge]);
psset_bin_stats_accum(&check_stats[huge],
&stats->nonfull_slabs[pind][huge]);
}
}
assert(psset->merged_stats.npageslabs
== check_stats.npageslabs);
assert(psset->merged_stats.nactive == check_stats.nactive);
assert(psset->merged_stats.ndirty == check_stats.ndirty);
assert(stats->merged.npageslabs
== check_stats[0].npageslabs + check_stats[1].npageslabs);
assert(stats->merged.nactive
== check_stats[0].nactive + check_stats[1].nactive);
assert(stats->merged.ndirty
== check_stats[0].ndirty + check_stats[1].ndirty);
for (int huge = 0; huge < PSSET_NHUGE; huge++) {
assert(stats->slabs[huge].npageslabs
== check_stats[huge].npageslabs);
assert(stats->slabs[huge].nactive
== check_stats[huge].nactive);
assert(stats->slabs[huge].ndirty
== check_stats[huge].ndirty);
}
}
}
static void
psset_bin_stats_insert(psset_t *psset, psset_bin_stats_t *binstats,
psset_slab_stats_insert(psset_stats_t *stats, psset_bin_stats_t *binstats,
hpdata_t *ps) {
psset_bin_stats_insert_remove(psset, binstats, ps, true);
psset_slab_stats_insert_remove(stats, binstats, ps, true);
}
static void
psset_bin_stats_remove(psset_t *psset, psset_bin_stats_t *binstats,
psset_slab_stats_remove(psset_stats_t *stats, psset_bin_stats_t *binstats,
hpdata_t *ps) {
psset_bin_stats_insert_remove(psset, binstats, ps, false);
psset_slab_stats_insert_remove(stats, binstats, ps, false);
}
static pszind_t
@ -122,27 +153,29 @@ psset_hpdata_heap_insert(psset_t *psset, hpdata_t *ps) {
}
static void
psset_stats_insert(psset_t* psset, hpdata_t *ps) {
psset_stats_insert(psset_t *psset, hpdata_t *ps) {
psset_stats_t *stats = &psset->stats;
if (hpdata_empty(ps)) {
psset_bin_stats_insert(psset, psset->stats.empty_slabs, ps);
psset_slab_stats_insert(stats, psset->stats.empty_slabs, ps);
} else if (hpdata_full(ps)) {
psset_bin_stats_insert(psset, psset->stats.full_slabs, ps);
psset_slab_stats_insert(stats, psset->stats.full_slabs, ps);
} else {
pszind_t pind = psset_hpdata_heap_index(ps);
psset_bin_stats_insert(psset, psset->stats.nonfull_slabs[pind],
psset_slab_stats_insert(stats, psset->stats.nonfull_slabs[pind],
ps);
}
}
static void
psset_stats_remove(psset_t *psset, hpdata_t *ps) {
psset_stats_t *stats = &psset->stats;
if (hpdata_empty(ps)) {
psset_bin_stats_remove(psset, psset->stats.empty_slabs, ps);
psset_slab_stats_remove(stats, psset->stats.empty_slabs, ps);
} else if (hpdata_full(ps)) {
psset_bin_stats_remove(psset, psset->stats.full_slabs, ps);
psset_slab_stats_remove(stats, psset->stats.full_slabs, ps);
} else {
pszind_t pind = psset_hpdata_heap_index(ps);
psset_bin_stats_remove(psset, psset->stats.nonfull_slabs[pind],
psset_slab_stats_remove(stats, psset->stats.nonfull_slabs[pind],
ps);
}
}

src/stats.c

@ -841,12 +841,48 @@ stats_arena_hpa_shard_sec_print(emitter_t *emitter, unsigned i) {
static void
stats_arena_hpa_shard_counters_print(emitter_t *emitter, unsigned i,
uint64_t uptime) {
size_t npageslabs;
size_t nactive;
size_t ndirty;
size_t npageslabs_nonhuge;
size_t nactive_nonhuge;
size_t ndirty_nonhuge;
size_t nretained_nonhuge;
size_t npageslabs_huge;
size_t nactive_huge;
size_t ndirty_huge;
uint64_t npurge_passes;
uint64_t npurges;
uint64_t nhugifies;
uint64_t nhugify_failures;
uint64_t ndehugifies;
CTL_M2_GET("stats.arenas.0.hpa_shard.npageslabs",
i, &npageslabs, size_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.nactive",
i, &nactive, size_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.ndirty",
i, &ndirty, size_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.npageslabs_nonhuge",
i, &npageslabs_nonhuge, size_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.nactive_nonhuge",
i, &nactive_nonhuge, size_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.ndirty_nonhuge",
i, &ndirty_nonhuge, size_t);
nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
- nactive_nonhuge - ndirty_nonhuge;
CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.npageslabs_huge",
i, &npageslabs_huge, size_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.nactive_huge",
i, &nactive_huge, size_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.slabs.ndirty_huge",
i, &ndirty_huge, size_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.npurge_passes",
i, &npurge_passes, uint64_t);
CTL_M2_GET("stats.arenas.0.hpa_shard.npurges",
@ -860,18 +896,33 @@ stats_arena_hpa_shard_counters_print(emitter_t *emitter, unsigned i,
emitter_table_printf(emitter,
"HPA shard stats:\n"
" Pageslabs: %zu (%zu huge, %zu nonhuge)\n"
" Active pages: %zu (%zu huge, %zu nonhuge)\n"
" Dirty pages: %zu (%zu huge, %zu nonhuge)\n"
" Retained pages: %zu\n"
" Purge passes: %" FMTu64 " (%" FMTu64 " / sec)\n"
" Purges: %" FMTu64 " (%" FMTu64 " / sec)\n"
" Hugeifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
" Hugify failures: %" FMTu64 " (%" FMTu64 " / sec)\n"
" Dehugifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
"\n",
npageslabs, npageslabs_huge, npageslabs_nonhuge,
nactive, nactive_huge, nactive_nonhuge,
ndirty, ndirty_huge, ndirty_nonhuge,
nretained_nonhuge,
npurge_passes, rate_per_second(npurge_passes, uptime),
npurges, rate_per_second(npurges, uptime),
nhugifies, rate_per_second(nhugifies, uptime),
nhugify_failures, rate_per_second(nhugify_failures, uptime),
ndehugifies, rate_per_second(ndehugifies, uptime));
emitter_json_kv(emitter, "npageslabs", emitter_type_size,
&npageslabs);
emitter_json_kv(emitter, "nactive", emitter_type_size,
&nactive);
emitter_json_kv(emitter, "ndirty", emitter_type_size,
&ndirty);
emitter_json_kv(emitter, "npurge_passes", emitter_type_uint64,
&npurge_passes);
emitter_json_kv(emitter, "npurges", emitter_type_uint64,
@ -882,6 +933,24 @@ stats_arena_hpa_shard_counters_print(emitter_t *emitter, unsigned i,
&nhugify_failures);
emitter_json_kv(emitter, "ndehugifies", emitter_type_uint64,
&ndehugifies);
emitter_json_object_kv_begin(emitter, "slabs");
emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
&npageslabs_nonhuge);
emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
&nactive_nonhuge);
emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
&ndirty_nonhuge);
emitter_json_kv(emitter, "nretained_nonhuge", emitter_type_size,
&nretained_nonhuge);
emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
&npageslabs_huge);
emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
&nactive_huge);
emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
&ndirty_huge);
emitter_json_object_end(emitter); /* End "slabs" */
}
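
As a sanity check against the dump in the commit message: assuming 4 KiB pages
and 2 MiB hugepages (so HUGEPAGE_PAGES is 512), the retained-pages row follows
from the nonhuge formula above:

  nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
                          - nactive_nonhuge - ndirty_nonhuge
                    = 10545 * 512 - 4479249 - 233485
                    = 686306

which matches the "Retained pages: 686306" line in the example output.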
static void

test/unit/mallctl.c

@ -1002,6 +1002,63 @@ TEST_BEGIN(test_stats_arenas) {
}
TEST_END
TEST_BEGIN(test_stats_arenas_hpa_shard_counters) {
test_skip_if(!config_stats);
#define TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(t, name) do { \
t name; \
size_t sz = sizeof(t); \
expect_d_eq(mallctl("stats.arenas.0.hpa_shard."#name, \
(void *)&name, &sz, \
NULL, 0), 0, "Unexpected mallctl() failure"); \
} while (0)
TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(size_t, npageslabs);
TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(size_t, nactive);
TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(size_t, ndirty);
TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(uint64_t, npurge_passes);
TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(uint64_t, npurges);
TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(uint64_t, nhugifies);
TEST_STATS_ARENAS_HPA_SHARD_COUNTERS(uint64_t, ndehugifies);
#undef TEST_STATS_ARENAS_HPA_SHARD_COUNTERS
}
TEST_END
TEST_BEGIN(test_stats_arenas_hpa_shard_slabs) {
test_skip_if(!config_stats);
#define TEST_STATS_ARENAS_HPA_SHARD_SLABS_GEN(t, slab, name) do { \
t slab##_##name; \
size_t sz = sizeof(t); \
expect_d_eq(mallctl("stats.arenas.0.hpa_shard."#slab"."#name, \
(void *)&slab##_##name, &sz, \
NULL, 0), 0, "Unexpected mallctl() failure"); \
} while (0)
#define TEST_STATS_ARENAS_HPA_SHARD_SLABS(t, slab, name) do { \
TEST_STATS_ARENAS_HPA_SHARD_SLABS_GEN(t, slab, \
name##_##nonhuge); \
TEST_STATS_ARENAS_HPA_SHARD_SLABS_GEN(t, slab, name##_##huge); \
} while (0)
TEST_STATS_ARENAS_HPA_SHARD_SLABS(size_t, slabs, npageslabs);
TEST_STATS_ARENAS_HPA_SHARD_SLABS(size_t, slabs, nactive);
TEST_STATS_ARENAS_HPA_SHARD_SLABS(size_t, slabs, ndirty);
TEST_STATS_ARENAS_HPA_SHARD_SLABS(size_t, full_slabs, npageslabs);
TEST_STATS_ARENAS_HPA_SHARD_SLABS(size_t, full_slabs, nactive);
TEST_STATS_ARENAS_HPA_SHARD_SLABS(size_t, full_slabs, ndirty);
TEST_STATS_ARENAS_HPA_SHARD_SLABS(size_t, empty_slabs, npageslabs);
TEST_STATS_ARENAS_HPA_SHARD_SLABS(size_t, empty_slabs, nactive);
TEST_STATS_ARENAS_HPA_SHARD_SLABS(size_t, empty_slabs, ndirty);
#undef TEST_STATS_ARENAS_HPA_SHARD_SLABS
#undef TEST_STATS_ARENAS_HPA_SHARD_SLABS_GEN
}
TEST_END
static void
alloc_hook(void *extra, UNUSED hook_alloc_t type, UNUSED void *result,
UNUSED uintptr_t result_raw, UNUSED uintptr_t args_raw[3]) {
@ -1321,6 +1378,8 @@ main(void) {
test_arenas_lookup,
test_prof_active,
test_stats_arenas,
test_stats_arenas_hpa_shard_counters,
test_stats_arenas_hpa_shard_slabs,
test_hooks,
test_hooks_exhaustion,
test_thread_idle,

test/unit/psset.c

@ -64,6 +64,24 @@ test_psset_alloc_reuse(psset_t *psset, edata_t *r_edata, size_t size) {
return false;
}
static hpdata_t *
test_psset_hugify(psset_t *psset, edata_t *edata) {
hpdata_t *ps = edata_ps_get(edata);
psset_update_begin(psset, ps);
hpdata_hugify(ps);
psset_update_end(psset, ps);
return ps;
}
static hpdata_t *
test_psset_dehugify(psset_t *psset, edata_t *edata) {
hpdata_t *ps = edata_ps_get(edata);
psset_update_begin(psset, ps);
hpdata_dehugify(ps);
psset_update_end(psset, ps);
return ps;
}
static hpdata_t *
test_psset_dalloc(psset_t *psset, edata_t *edata) {
hpdata_t *ps = edata_ps_get(edata);
@ -339,6 +357,149 @@ TEST_BEGIN(test_multi_pageslab) {
}
TEST_END
TEST_BEGIN(test_stats_merged) {
hpdata_t pageslab;
hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
edata_t alloc[HUGEPAGE_PAGES];
psset_t psset;
psset_init(&psset);
expect_zu_eq(0, psset.stats.merged.npageslabs, "");
expect_zu_eq(0, psset.stats.merged.nactive, "");
expect_zu_eq(0, psset.stats.merged.ndirty, "");
edata_init_test(&alloc[0]);
test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
expect_zu_eq(1, psset.stats.merged.npageslabs, "");
expect_zu_eq(i, psset.stats.merged.nactive, "");
expect_zu_eq(0, psset.stats.merged.ndirty, "");
edata_init_test(&alloc[i]);
bool err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
expect_false(err, "Nonempty psset failed page allocation.");
}
expect_zu_eq(1, psset.stats.merged.npageslabs, "");
expect_zu_eq(HUGEPAGE_PAGES, psset.stats.merged.nactive, "");
expect_zu_eq(0, psset.stats.merged.ndirty, "");
for (ssize_t i = HUGEPAGE_PAGES - 1; i > 0; i--) {
test_psset_dalloc(&psset, &alloc[i]);
expect_zu_eq(1, psset.stats.merged.npageslabs, "");
expect_zu_eq(i, psset.stats.merged.nactive, "");
expect_zu_eq(HUGEPAGE_PAGES - i, psset.stats.merged.ndirty, "");
}
/* No allocations are left. */
test_psset_dalloc(&psset, &alloc[0]);
expect_zu_eq(0, psset.stats.merged.npageslabs, "");
expect_zu_eq(0, psset.stats.merged.nactive, "");
/*
* The last test_psset_dalloc call removed the empty pageslab from the psset,
* so nothing is left there, not even dirty pages.
*/
expect_zu_eq(0, psset.stats.merged.ndirty, "");
test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
expect_zu_eq(1, psset.stats.merged.npageslabs, "");
expect_zu_eq(1, psset.stats.merged.nactive, "");
expect_zu_eq(0, psset.stats.merged.ndirty, "");
psset_update_begin(&psset, &pageslab);
expect_zu_eq(0, psset.stats.merged.npageslabs, "");
expect_zu_eq(0, psset.stats.merged.nactive, "");
expect_zu_eq(0, psset.stats.merged.ndirty, "");
psset_update_end(&psset, &pageslab);
expect_zu_eq(1, psset.stats.merged.npageslabs, "");
expect_zu_eq(1, psset.stats.merged.nactive, "");
expect_zu_eq(0, psset.stats.merged.ndirty, "");
}
TEST_END
TEST_BEGIN(test_stats_huge) {
test_skip_if(!config_stats);
hpdata_t pageslab;
hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
edata_t alloc[HUGEPAGE_PAGES];
psset_t psset;
psset_init(&psset);
for (int huge = 0; huge < PSSET_NHUGE; ++huge) {
expect_zu_eq(0, psset.stats.slabs[huge].npageslabs, "");
expect_zu_eq(0, psset.stats.slabs[huge].nactive, "");
expect_zu_eq(0, psset.stats.slabs[huge].ndirty, "");
}
edata_init_test(&alloc[0]);
test_psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
expect_zu_eq(1, psset.stats.slabs[0].npageslabs, "");
expect_zu_eq(i, psset.stats.slabs[0].nactive, "");
expect_zu_eq(0, psset.stats.slabs[0].ndirty, "");
expect_zu_eq(0, psset.stats.slabs[1].npageslabs, "");
expect_zu_eq(0, psset.stats.slabs[1].nactive, "");
expect_zu_eq(0, psset.stats.slabs[1].ndirty, "");
edata_init_test(&alloc[i]);
bool err = test_psset_alloc_reuse(&psset, &alloc[i], PAGE);
expect_false(err, "Nonempty psset failed page allocation.");
}
expect_zu_eq(1, psset.stats.slabs[0].npageslabs, "");
expect_zu_eq(HUGEPAGE_PAGES, psset.stats.slabs[0].nactive, "");
expect_zu_eq(0, psset.stats.slabs[0].ndirty, "");
expect_zu_eq(0, psset.stats.slabs[1].npageslabs, "");
expect_zu_eq(0, psset.stats.slabs[1].nactive, "");
expect_zu_eq(0, psset.stats.slabs[1].ndirty, "");
test_psset_hugify(&psset, &alloc[0]);
/* All stats should have been moved from nonhuge to huge. */
expect_zu_eq(0, psset.stats.slabs[0].npageslabs, "");
expect_zu_eq(0, psset.stats.slabs[0].nactive, "");
expect_zu_eq(0, psset.stats.slabs[0].ndirty, "");
expect_zu_eq(1, psset.stats.slabs[1].npageslabs, "");
expect_zu_eq(HUGEPAGE_PAGES, psset.stats.slabs[1].nactive, "");
expect_zu_eq(0, psset.stats.slabs[1].ndirty, "");
test_psset_dehugify(&psset, &alloc[0]);
/* And back from huge to nonhuge after dehugification. */
expect_zu_eq(1, psset.stats.slabs[0].npageslabs, "");
expect_zu_eq(HUGEPAGE_PAGES, psset.stats.slabs[0].nactive, "");
expect_zu_eq(0, psset.stats.slabs[0].ndirty, "");
expect_zu_eq(0, psset.stats.slabs[1].npageslabs, "");
expect_zu_eq(0, psset.stats.slabs[1].nactive, "");
expect_zu_eq(0, psset.stats.slabs[1].ndirty, "");
for (ssize_t i = HUGEPAGE_PAGES - 1; i > 0; i--) {
test_psset_dalloc(&psset, &alloc[i]);
expect_zu_eq(1, psset.stats.slabs[0].npageslabs, "");
expect_zu_eq(i, psset.stats.slabs[0].nactive, "");
expect_zu_eq(HUGEPAGE_PAGES - i, psset.stats.slabs[0].ndirty, "");
expect_zu_eq(0, psset.stats.slabs[1].npageslabs, "");
expect_zu_eq(0, psset.stats.slabs[1].nactive, "");
expect_zu_eq(0, psset.stats.slabs[1].ndirty, "");
}
test_psset_dalloc(&psset, &alloc[0]);
for (int huge = 0; huge < PSSET_NHUGE; huge++) {
expect_zu_eq(0, psset.stats.slabs[huge].npageslabs, "");
expect_zu_eq(0, psset.stats.slabs[huge].nactive, "");
expect_zu_eq(0, psset.stats.slabs[huge].ndirty, "");
}
}
TEST_END
static void
stats_expect_empty(psset_bin_stats_t *stats) {
assert_zu_eq(0, stats->npageslabs,
@ -379,7 +540,9 @@ stats_expect(psset_t *psset, size_t nactive) {
expect_zu_eq(nactive, psset_nactive(psset), "");
}
TEST_BEGIN(test_stats) {
TEST_BEGIN(test_stats_fullness) {
test_skip_if(!config_stats);
bool err;
hpdata_t pageslab;
@ -739,7 +902,9 @@ main(void) {
test_reuse,
test_evict,
test_multi_pageslab,
test_stats,
test_stats_merged,
test_stats_huge,
test_stats_fullness,
test_oldest_fit,
test_insert_remove,
test_purge_prefers_nonhuge,