Fix the VM over-reservation on aarch64 w/ larger pages.

HUGEPAGE can be larger on some platforms (e.g. 512M on aarch64 w/ 64K pages),
in which case it would cause grow_retained / exp_grow to over-reserve virtual
memory.

Similarly, make sure the base allocator uses a constant 2M alignment.
Qi Wang 2024-03-28 14:43:17 -07:00 committed by Qi Wang
parent baa5a90cc6
commit cd05b19f10
3 changed files with 36 additions and 10 deletions
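
The underlying issue: base allocation sizes are rounded up to a multiple of
HUGEPAGE, which is 2M on x86_64 but can be 512M on aarch64 w/ 64K pages. A
standalone sketch of that rounding (not part of the patch; ALIGNMENT_CEILING
here mirrors jemalloc's macro of the same name, and the 300K request is an
arbitrary example):

#include <stdio.h>
#include <stddef.h>

/* Round s up to a multiple of the power-of-two align. */
#define ALIGNMENT_CEILING(s, align) (((s) + ((align) - 1)) & ~((align) - 1))

int main(void) {
	size_t request = (size_t)300 << 10;       /* 300K of base metadata */
	size_t hugepage_2m = (size_t)2 << 20;     /* x86_64 default */
	size_t hugepage_512m = (size_t)512 << 20; /* aarch64 w/ 64K pages */

	/* 2M huge pages: 300K rounds up to 2M -- tolerable slack. */
	printf("%zu MB\n", ALIGNMENT_CEILING(request, hugepage_2m) >> 20);
	/* 512M huge pages: the same 300K reserves 512M of VM. */
	printf("%zu MB\n", ALIGNMENT_CEILING(request, hugepage_512m) >> 20);
	return 0;
}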

include/jemalloc/internal/base.h

@@ -6,6 +6,12 @@
 #include "jemalloc/internal/ehooks.h"
 #include "jemalloc/internal/mutex.h"
 
+/*
+ * Alignment when THP is not enabled.  Set to constant 2M in case the HUGEPAGE
+ * value is unexpectedly high (which would cause VM over-reservation).
+ */
+#define BASE_BLOCK_MIN_ALIGN ((size_t)2 << 20)
+
 enum metadata_thp_mode_e {
 	metadata_thp_disabled = 0,
 	/*
@@ -26,7 +32,6 @@ typedef enum metadata_thp_mode_e metadata_thp_mode_t;
 extern metadata_thp_mode_t opt_metadata_thp;
 extern const char *const metadata_thp_mode_names[];
 
-
 /* Embedded at the beginning of every block of base-managed virtual memory. */
 typedef struct base_block_s base_block_t;
 struct base_block_s {
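
A quick sanity check on the new constant (standalone sketch, not from the
patch): (size_t)2 << 20 is exactly 2 MiB, and a power of two, which the
mask-based alignment math relies on.

#include <assert.h>
#include <stddef.h>

#define BASE_BLOCK_MIN_ALIGN ((size_t)2 << 20)

int main(void) {
	/* 2 << 20 == 2 * 1024 * 1024 == 2 MiB. */
	assert(BASE_BLOCK_MIN_ALIGN == (size_t)2 * 1024 * 1024);
	/* A power of two, as required for mask-based alignment. */
	assert((BASE_BLOCK_MIN_ALIGN & (BASE_BLOCK_MIN_ALIGN - 1)) == 0);
	return 0;
}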

src/base.c

@@ -42,9 +42,17 @@ base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
 	bool zero = true;
 	bool commit = true;
 
-	/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
-	assert(size == HUGEPAGE_CEILING(size));
-	size_t alignment = HUGEPAGE;
+	/*
+	 * Use huge page sizes and alignment when opt_metadata_thp is enabled
+	 * or auto.
+	 */
+	size_t alignment;
+	if (opt_metadata_thp == metadata_thp_disabled) {
+		alignment = BASE_BLOCK_MIN_ALIGN;
+	} else {
+		assert(size == HUGEPAGE_CEILING(size));
+		alignment = HUGEPAGE;
+	}
 	if (ehooks_are_default(ehooks)) {
 		addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
 		if (have_madvise_huge && addr) {
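
Why a large alignment translates into VM over-reservation: the usual portable
way to get an aligned mapping is to over-map by the alignment and trim the
excess. A hedged sketch of that generic technique (this is not jemalloc's
extent_alloc_mmap; map_aligned is a hypothetical helper):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Hypothetical helper: obtain a size-byte mapping aligned to a
 * power-of-two alignment by over-mapping and trimming the excess. */
static void *
map_aligned(size_t size, size_t alignment) {
	size_t over = size + alignment; /* worst-case padding up front */
	char *raw = mmap(NULL, over, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED) {
		return NULL;
	}
	char *ret = (char *)(((uintptr_t)raw + alignment - 1) &
	    ~(uintptr_t)(alignment - 1));
	/* Unmap the unaligned head, then the unused tail. */
	if (ret != raw) {
		munmap(raw, (size_t)(ret - raw));
	}
	size_t tail = over - (size_t)(ret - raw) - size;
	if (tail != 0) {
		munmap(ret + size, tail);
	}
	return ret;
}

With alignment == HUGEPAGE == 512M, every such call transiently reserves more
than half a gigabyte of address space; capping the no-THP case at
BASE_BLOCK_MIN_ALIGN keeps the padding at 2M.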
@@ -277,6 +285,13 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, edata_t *edata, size_t size,
 	return ret;
 }
 
+static size_t
+base_block_size_ceil(size_t block_size) {
+	return opt_metadata_thp == metadata_thp_disabled ?
+	    ALIGNMENT_CEILING(block_size, BASE_BLOCK_MIN_ALIGN) :
+	    HUGEPAGE_CEILING(block_size);
+}
+
 /*
  * Allocate a block of virtual memory that is large enough to start with a
  * base_block_t header, followed by an object of specified size and alignment.
@@ -295,14 +310,14 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
 	 * Create increasingly larger blocks in order to limit the total number
 	 * of disjoint virtual memory ranges.  Choose the next size in the page
 	 * size class series (skipping size classes that are not a multiple of
-	 * HUGEPAGE), or a size large enough to satisfy the requested size and
-	 * alignment, whichever is larger.
+	 * HUGEPAGE when using metadata_thp), or a size large enough to satisfy
+	 * the requested size and alignment, whichever is larger.
	 */
-	size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
-	    + usize));
+	size_t min_block_size = base_block_size_ceil(sz_psz2u(header_size +
+	    gap_size + usize));
 	pszind_t pind_next = (*pind_last + 1 < sz_psz2ind(SC_LARGE_MAXCLASS)) ?
 	    *pind_last + 1 : *pind_last;
-	size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
+	size_t next_block_size = base_block_size_ceil(sz_pind2sz(pind_next));
 	size_t block_size = (min_block_size > next_block_size) ? min_block_size
 	    : next_block_size;
 	base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind,
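
To see what the new base_block_size_ceil changes, a standalone sketch (not
jemalloc source: opt_metadata_thp is reduced to a bool, and a 512M HUGEPAGE is
assumed, as on aarch64 w/ 64K pages):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ALIGNMENT_CEILING(s, align) (((s) + ((align) - 1)) & ~((align) - 1))
#define BASE_BLOCK_MIN_ALIGN ((size_t)2 << 20)

static const size_t hugepage = (size_t)512 << 20; /* assumed HUGEPAGE */

/* Mirrors the shape of the patch's base_block_size_ceil. */
static size_t
block_size_ceil(size_t block_size, bool thp_disabled) {
	return thp_disabled ?
	    ALIGNMENT_CEILING(block_size, BASE_BLOCK_MIN_ALIGN) :
	    ALIGNMENT_CEILING(block_size, hugepage);
}

int main(void) {
	size_t need = (size_t)5 << 20; /* header + object: 5M, say */
	/* THP disabled: round to the 2M floor -> 6M block. */
	printf("%zu MB\n", block_size_ceil(need, true) >> 20);
	/* THP in use: round to HUGEPAGE -> 512M block. */
	printf("%zu MB\n", block_size_ceil(need, false) >> 20);
	return 0;
}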

src/exp_grow.c

@@ -3,6 +3,12 @@
 
 void
 exp_grow_init(exp_grow_t *exp_grow) {
-	exp_grow->next = sz_psz2ind(HUGEPAGE);
+	/*
+	 * Enforce a minimum grow of 2M, which is convenient for the huge page
+	 * use cases.  Avoid using HUGEPAGE as the value though, because on some
+	 * platforms it can be very large (e.g. 512M on aarch64 w/ 64K pages).
+	 */
+	const size_t min_grow = (size_t)2 << 20;
+	exp_grow->next = sz_psz2ind(min_grow);
 	exp_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
 }
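
The boundary effect of this last change, in a standalone sketch (the
sz_psz2ind mapping through jemalloc's page size classes is elided; only the
starting grow sizes are compared):

#include <stddef.h>
#include <stdio.h>

int main(void) {
	const size_t hugepage = (size_t)512 << 20; /* aarch64 w/ 64K pages */
	const size_t min_grow = (size_t)2 << 20;

	/* Before: the first retained-extent grow started at HUGEPAGE. */
	printf("first grow, old: %zu MB\n", hugepage >> 20); /* 512 MB */
	/* After: it starts at a constant 2M regardless of HUGEPAGE. */
	printf("first grow, new: %zu MB\n", min_grow >> 20); /* 2 MB */
	return 0;
}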