Changeset View
Changeset View
Standalone View
Standalone View
head/contrib/jemalloc/src/jemalloc.c
#define JEMALLOC_C_ | #define JEMALLOC_C_ | ||||
#include "jemalloc/internal/jemalloc_preamble.h" | #include "jemalloc/internal/jemalloc_preamble.h" | ||||
#include "jemalloc/internal/jemalloc_internal_includes.h" | #include "jemalloc/internal/jemalloc_internal_includes.h" | ||||
#include "jemalloc/internal/assert.h" | #include "jemalloc/internal/assert.h" | ||||
#include "jemalloc/internal/atomic.h" | #include "jemalloc/internal/atomic.h" | ||||
#include "jemalloc/internal/ctl.h" | #include "jemalloc/internal/ctl.h" | ||||
#include "jemalloc/internal/extent_dss.h" | #include "jemalloc/internal/extent_dss.h" | ||||
#include "jemalloc/internal/extent_mmap.h" | #include "jemalloc/internal/extent_mmap.h" | ||||
#include "jemalloc/internal/hook.h" | |||||
#include "jemalloc/internal/jemalloc_internal_types.h" | #include "jemalloc/internal/jemalloc_internal_types.h" | ||||
#include "jemalloc/internal/log.h" | #include "jemalloc/internal/log.h" | ||||
#include "jemalloc/internal/malloc_io.h" | #include "jemalloc/internal/malloc_io.h" | ||||
#include "jemalloc/internal/mutex.h" | #include "jemalloc/internal/mutex.h" | ||||
#include "jemalloc/internal/rtree.h" | #include "jemalloc/internal/rtree.h" | ||||
#include "jemalloc/internal/size_classes.h" | #include "jemalloc/internal/safety_check.h" | ||||
#include "jemalloc/internal/sc.h" | |||||
#include "jemalloc/internal/spin.h" | #include "jemalloc/internal/spin.h" | ||||
#include "jemalloc/internal/sz.h" | #include "jemalloc/internal/sz.h" | ||||
#include "jemalloc/internal/ticker.h" | #include "jemalloc/internal/ticker.h" | ||||
#include "jemalloc/internal/util.h" | #include "jemalloc/internal/util.h" | ||||
/******************************************************************************/ | /******************************************************************************/ | ||||
/* Data. */ | /* Data. */ | ||||
Show All 16 Lines | #endif | ||||
; | ; | ||||
bool opt_abort_conf = | bool opt_abort_conf = | ||||
#ifdef JEMALLOC_DEBUG | #ifdef JEMALLOC_DEBUG | ||||
true | true | ||||
#else | #else | ||||
false | false | ||||
#endif | #endif | ||||
; | ; | ||||
/* Intentionally default off, even with debug builds. */ | |||||
bool opt_confirm_conf = false; | |||||
const char *opt_junk = | const char *opt_junk = | ||||
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) | #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) | ||||
"true" | "true" | ||||
#else | #else | ||||
"false" | "false" | ||||
#endif | #endif | ||||
; | ; | ||||
bool opt_junk_alloc = | bool opt_junk_alloc = | ||||
Show All 28 Lines | |||||
* arenas. arenas[narenas_auto..narenas_total) are only used if the application | * arenas. arenas[narenas_auto..narenas_total) are only used if the application | ||||
* takes some action to create them and allocate from them. | * takes some action to create them and allocate from them. | ||||
* | * | ||||
* Points to an arena_t. | * Points to an arena_t. | ||||
*/ | */ | ||||
JEMALLOC_ALIGNED(CACHELINE) | JEMALLOC_ALIGNED(CACHELINE) | ||||
atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; | atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; | ||||
static atomic_u_t narenas_total; /* Use narenas_total_*(). */ | static atomic_u_t narenas_total; /* Use narenas_total_*(). */ | ||||
static arena_t *a0; /* arenas[0]; read-only after initialization. */ | /* Below three are read-only after initialization. */ | ||||
unsigned narenas_auto; /* Read-only after initialization. */ | static arena_t *a0; /* arenas[0]. */ | ||||
unsigned narenas_auto; | |||||
unsigned manual_arena_base; | |||||
typedef enum { | typedef enum { | ||||
malloc_init_uninitialized = 3, | malloc_init_uninitialized = 3, | ||||
malloc_init_a0_initialized = 2, | malloc_init_a0_initialized = 2, | ||||
malloc_init_recursible = 1, | malloc_init_recursible = 1, | ||||
malloc_init_initialized = 0 /* Common case --> jnz. */ | malloc_init_initialized = 0 /* Common case --> jnz. */ | ||||
} malloc_init_t; | } malloc_init_t; | ||||
static malloc_init_t malloc_init_state = malloc_init_uninitialized; | static malloc_init_t malloc_init_state = malloc_init_uninitialized; | ||||
▲ Show 20 Lines • Show All 223 Lines • ▼ Show 20 Lines | arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { | ||||
} | } | ||||
/* | /* | ||||
* Another thread may have already initialized arenas[ind] if it's an | * Another thread may have already initialized arenas[ind] if it's an | ||||
* auto arena. | * auto arena. | ||||
*/ | */ | ||||
arena = arena_get(tsdn, ind, false); | arena = arena_get(tsdn, ind, false); | ||||
if (arena != NULL) { | if (arena != NULL) { | ||||
assert(ind < narenas_auto); | assert(arena_is_auto(arena)); | ||||
return arena; | return arena; | ||||
} | } | ||||
/* Actually initialize the arena. */ | /* Actually initialize the arena. */ | ||||
arena = arena_new(tsdn, ind, extent_hooks); | arena = arena_new(tsdn, ind, extent_hooks); | ||||
return arena; | return arena; | ||||
} | } | ||||
static void | static void | ||||
arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { | arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { | ||||
if (ind == 0) { | if (ind == 0) { | ||||
return; | return; | ||||
} | } | ||||
if (have_background_thread) { | /* | ||||
bool err; | * Avoid creating a new background thread just for the huge arena, which | ||||
malloc_mutex_lock(tsdn, &background_thread_lock); | * purges eagerly by default. | ||||
err = background_thread_create(tsdn_tsd(tsdn), ind); | */ | ||||
malloc_mutex_unlock(tsdn, &background_thread_lock); | if (have_background_thread && !arena_is_huge(ind)) { | ||||
if (err) { | if (background_thread_create(tsdn_tsd(tsdn), ind)) { | ||||
malloc_printf("<jemalloc>: error in background thread " | malloc_printf("<jemalloc>: error in background thread " | ||||
"creation for arena %u. Abort.\n", ind); | "creation for arena %u. Abort.\n", ind); | ||||
abort(); | abort(); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
arena_t * | arena_t * | ||||
Show All 13 Lines | |||||
arena_bind(tsd_t *tsd, unsigned ind, bool internal) { | arena_bind(tsd_t *tsd, unsigned ind, bool internal) { | ||||
arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); | arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); | ||||
arena_nthreads_inc(arena, internal); | arena_nthreads_inc(arena, internal); | ||||
if (internal) { | if (internal) { | ||||
tsd_iarena_set(tsd, arena); | tsd_iarena_set(tsd, arena); | ||||
} else { | } else { | ||||
tsd_arena_set(tsd, arena); | tsd_arena_set(tsd, arena); | ||||
unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1, | |||||
ATOMIC_RELAXED); | |||||
tsd_binshards_t *bins = tsd_binshardsp_get(tsd); | |||||
for (unsigned i = 0; i < SC_NBINS; i++) { | |||||
assert(bin_infos[i].n_shards > 0 && | |||||
bin_infos[i].n_shards <= BIN_SHARDS_MAX); | |||||
bins->binshard[i] = shard % bin_infos[i].n_shards; | |||||
} | } | ||||
} | } | ||||
} | |||||
void | void | ||||
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { | arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { | ||||
arena_t *oldarena, *newarena; | arena_t *oldarena, *newarena; | ||||
oldarena = arena_get(tsd_tsdn(tsd), oldind, false); | oldarena = arena_get(tsd_tsdn(tsd), oldind, false); | ||||
newarena = arena_get(tsd_tsdn(tsd), newind, false); | newarena = arena_get(tsd_tsdn(tsd), newind, false); | ||||
arena_nthreads_dec(oldarena, false); | arena_nthreads_dec(oldarena, false); | ||||
▲ Show 20 Lines • Show All 367 Lines • ▼ Show 20 Lines | #undef OPTION | ||||
opt_stats_print_opts[opts_len++] = v[i]; | opt_stats_print_opts[opts_len++] = v[i]; | ||||
opt_stats_print_opts[opts_len] = '\0'; | opt_stats_print_opts[opts_len] = '\0'; | ||||
assert(opts_len <= stats_print_tot_num_options); | assert(opts_len <= stats_print_tot_num_options); | ||||
} | } | ||||
assert(opts_len == strlen(opt_stats_print_opts)); | assert(opts_len == strlen(opt_stats_print_opts)); | ||||
} | } | ||||
/* Reads the next size pair in a multi-sized option. */ | |||||
static bool | static bool | ||||
malloc_conf_multi_sizes_next(const char **slab_size_segment_cur, | |||||
size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) { | |||||
const char *cur = *slab_size_segment_cur; | |||||
char *end; | |||||
uintmax_t um; | |||||
set_errno(0); | |||||
/* First number, then '-' */ | |||||
um = malloc_strtoumax(cur, &end, 0); | |||||
if (get_errno() != 0 || *end != '-') { | |||||
return true; | |||||
} | |||||
*slab_start = (size_t)um; | |||||
cur = end + 1; | |||||
/* Second number, then ':' */ | |||||
um = malloc_strtoumax(cur, &end, 0); | |||||
if (get_errno() != 0 || *end != ':') { | |||||
return true; | |||||
} | |||||
*slab_end = (size_t)um; | |||||
cur = end + 1; | |||||
/* Last number */ | |||||
um = malloc_strtoumax(cur, &end, 0); | |||||
if (get_errno() != 0) { | |||||
return true; | |||||
} | |||||
*new_size = (size_t)um; | |||||
/* Consume the separator if there is one. */ | |||||
if (*end == '|') { | |||||
end++; | |||||
} | |||||
*vlen_left -= end - *slab_size_segment_cur; | |||||
*slab_size_segment_cur = end; | |||||
return false; | |||||
} | |||||
static bool | |||||
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, | malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, | ||||
char const **v_p, size_t *vlen_p) { | char const **v_p, size_t *vlen_p) { | ||||
bool accept; | bool accept; | ||||
const char *opts = *opts_p; | const char *opts = *opts_p; | ||||
*k_p = opts; | *k_p = opts; | ||||
for (accept = false; !accept;) { | for (accept = false; !accept;) { | ||||
▲ Show 20 Lines • Show All 72 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
static void | static void | ||||
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, | malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, | ||||
size_t vlen) { | size_t vlen) { | ||||
malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, | malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, | ||||
(int)vlen, v); | (int)vlen, v); | ||||
/* If abort_conf is set, error out after processing all options. */ | /* If abort_conf is set, error out after processing all options. */ | ||||
const char *experimental = "experimental_"; | |||||
if (strncmp(k, experimental, strlen(experimental)) == 0) { | |||||
/* However, tolerate experimental features. */ | |||||
return; | |||||
} | |||||
had_conf_error = true; | had_conf_error = true; | ||||
} | } | ||||
static void | static void | ||||
malloc_slow_flag_init(void) { | malloc_slow_flag_init(void) { | ||||
/* | /* | ||||
* Combine the runtime options into malloc_slow for fast path. Called | * Combine the runtime options into malloc_slow for fast path. Called | ||||
* after processing all the options. | * after processing all the options. | ||||
*/ | */ | ||||
malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) | malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) | ||||
| (opt_junk_free ? flag_opt_junk_free : 0) | | (opt_junk_free ? flag_opt_junk_free : 0) | ||||
| (opt_zero ? flag_opt_zero : 0) | | (opt_zero ? flag_opt_zero : 0) | ||||
| (opt_utrace ? flag_opt_utrace : 0) | | (opt_utrace ? flag_opt_utrace : 0) | ||||
| (opt_xmalloc ? flag_opt_xmalloc : 0); | | (opt_xmalloc ? flag_opt_xmalloc : 0); | ||||
malloc_slow = (malloc_slow_flags != 0); | malloc_slow = (malloc_slow_flags != 0); | ||||
} | } | ||||
static void | /* Number of sources for initializing malloc_conf */ | ||||
malloc_conf_init(void) { | #define MALLOC_CONF_NSOURCES 4 | ||||
unsigned i; | |||||
char buf[PATH_MAX + 1]; | |||||
const char *opts, *k, *v; | |||||
size_t klen, vlen; | |||||
for (i = 0; i < 4; i++) { | static const char * | ||||
/* Get runtime configuration. */ | obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) { | ||||
switch (i) { | if (config_debug) { | ||||
static unsigned read_source = 0; | |||||
/* | |||||
* Each source should only be read once, to minimize # of | |||||
* syscalls on init. | |||||
*/ | |||||
assert(read_source++ == which_source); | |||||
} | |||||
assert(which_source < MALLOC_CONF_NSOURCES); | |||||
const char *ret; | |||||
switch (which_source) { | |||||
case 0: | case 0: | ||||
opts = config_malloc_conf; | ret = config_malloc_conf; | ||||
break; | break; | ||||
case 1: | case 1: | ||||
if (je_malloc_conf != NULL) { | if (je_malloc_conf != NULL) { | ||||
/* | /* Use options that were compiled into the program. */ | ||||
* Use options that were compiled into the | ret = je_malloc_conf; | ||||
* program. | |||||
*/ | |||||
opts = je_malloc_conf; | |||||
} else { | } else { | ||||
/* No configuration specified. */ | /* No configuration specified. */ | ||||
buf[0] = '\0'; | ret = NULL; | ||||
opts = buf; | |||||
} | } | ||||
break; | break; | ||||
case 2: { | case 2: { | ||||
ssize_t linklen = 0; | ssize_t linklen = 0; | ||||
#ifndef _WIN32 | #ifndef _WIN32 | ||||
int saved_errno = errno; | int saved_errno = errno; | ||||
const char *linkname = | const char *linkname = | ||||
# ifdef JEMALLOC_PREFIX | # ifdef JEMALLOC_PREFIX | ||||
"/etc/"JEMALLOC_PREFIX"malloc.conf" | "/etc/"JEMALLOC_PREFIX"malloc.conf" | ||||
# else | # else | ||||
"/etc/malloc.conf" | "/etc/malloc.conf" | ||||
# endif | # endif | ||||
; | ; | ||||
/* | /* | ||||
* Try to use the contents of the "/etc/malloc.conf" | * Try to use the contents of the "/etc/malloc.conf" symbolic | ||||
* symbolic link's name. | * link's name. | ||||
*/ | */ | ||||
linklen = readlink(linkname, buf, sizeof(buf) - 1); | #ifndef JEMALLOC_READLINKAT | ||||
linklen = readlink(linkname, buf, PATH_MAX); | |||||
#else | |||||
linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX); | |||||
#endif | |||||
if (linklen == -1) { | if (linklen == -1) { | ||||
/* No configuration specified. */ | /* No configuration specified. */ | ||||
linklen = 0; | linklen = 0; | ||||
/* Restore errno. */ | /* Restore errno. */ | ||||
set_errno(saved_errno); | set_errno(saved_errno); | ||||
} | } | ||||
#endif | #endif | ||||
buf[linklen] = '\0'; | buf[linklen] = '\0'; | ||||
opts = buf; | ret = buf; | ||||
break; | break; | ||||
} case 3: { | } case 3: { | ||||
const char *envname = | const char *envname = | ||||
#ifdef JEMALLOC_PREFIX | #ifdef JEMALLOC_PREFIX | ||||
JEMALLOC_CPREFIX"MALLOC_CONF" | JEMALLOC_CPREFIX"MALLOC_CONF" | ||||
#else | #else | ||||
"MALLOC_CONF" | "MALLOC_CONF" | ||||
#endif | #endif | ||||
; | ; | ||||
if ((opts = jemalloc_secure_getenv(envname)) != NULL) { | if ((ret = jemalloc_secure_getenv(envname)) != NULL) { | ||||
/* | /* | ||||
* Do nothing; opts is already initialized to | * Do nothing; opts is already initialized to the value | ||||
* the value of the MALLOC_CONF environment | * of the MALLOC_CONF environment variable. | ||||
* variable. | |||||
*/ | */ | ||||
} else { | } else { | ||||
/* No configuration specified. */ | /* No configuration specified. */ | ||||
buf[0] = '\0'; | ret = NULL; | ||||
opts = buf; | |||||
} | } | ||||
break; | break; | ||||
} default: | } default: | ||||
not_reached(); | not_reached(); | ||||
buf[0] = '\0'; | ret = NULL; | ||||
opts = buf; | |||||
} | } | ||||
return ret; | |||||
} | |||||
static void | |||||
malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS], | |||||
bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES], | |||||
char buf[PATH_MAX + 1]) { | |||||
static const char *opts_explain[MALLOC_CONF_NSOURCES] = { | |||||
"string specified via --with-malloc-conf", | |||||
"string pointed to by the global variable malloc_conf", | |||||
"\"name\" of the file referenced by the symbolic link named " | |||||
"/etc/malloc.conf", | |||||
"value of the environment variable MALLOC_CONF" | |||||
}; | |||||
unsigned i; | |||||
const char *opts, *k, *v; | |||||
size_t klen, vlen; | |||||
for (i = 0; i < MALLOC_CONF_NSOURCES; i++) { | |||||
/* Get runtime configuration. */ | |||||
if (initial_call) { | |||||
opts_cache[i] = obtain_malloc_conf(i, buf); | |||||
} | |||||
opts = opts_cache[i]; | |||||
if (!initial_call && opt_confirm_conf) { | |||||
malloc_printf( | |||||
"<jemalloc>: malloc_conf #%u (%s): \"%s\"\n", | |||||
i + 1, opts_explain[i], opts != NULL ? opts : ""); | |||||
} | |||||
if (opts == NULL) { | |||||
continue; | |||||
} | |||||
while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, | while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, | ||||
&vlen)) { | &vlen)) { | ||||
#define CONF_ERROR(msg, k, klen, v, vlen) \ | |||||
if (!initial_call) { \ | |||||
malloc_conf_error( \ | |||||
msg, k, klen, v, vlen); \ | |||||
cur_opt_valid = false; \ | |||||
} | |||||
#define CONF_CONTINUE { \ | |||||
if (!initial_call && opt_confirm_conf \ | |||||
&& cur_opt_valid) { \ | |||||
malloc_printf("<jemalloc>: -- " \ | |||||
"Set conf value: %.*s:%.*s" \ | |||||
"\n", (int)klen, k, \ | |||||
(int)vlen, v); \ | |||||
} \ | |||||
continue; \ | |||||
} | |||||
#define CONF_MATCH(n) \ | #define CONF_MATCH(n) \ | ||||
(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) | (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) | ||||
#define CONF_MATCH_VALUE(n) \ | #define CONF_MATCH_VALUE(n) \ | ||||
(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) | (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) | ||||
#define CONF_HANDLE_BOOL(o, n) \ | #define CONF_HANDLE_BOOL(o, n) \ | ||||
if (CONF_MATCH(n)) { \ | if (CONF_MATCH(n)) { \ | ||||
if (CONF_MATCH_VALUE("true")) { \ | if (CONF_MATCH_VALUE("true")) { \ | ||||
o = true; \ | o = true; \ | ||||
} else if (CONF_MATCH_VALUE("false")) { \ | } else if (CONF_MATCH_VALUE("false")) { \ | ||||
o = false; \ | o = false; \ | ||||
} else { \ | } else { \ | ||||
malloc_conf_error( \ | CONF_ERROR("Invalid conf value",\ | ||||
"Invalid conf value", \ | |||||
k, klen, v, vlen); \ | k, klen, v, vlen); \ | ||||
} \ | } \ | ||||
continue; \ | CONF_CONTINUE; \ | ||||
} | } | ||||
#define CONF_MIN_no(um, min) false | /* | ||||
#define CONF_MIN_yes(um, min) ((um) < (min)) | * One of the CONF_MIN macros below expands, in one of the use points, | ||||
#define CONF_MAX_no(um, max) false | * to "unsigned integer < 0", which is always false, triggering the | ||||
#define CONF_MAX_yes(um, max) ((um) > (max)) | * GCC -Wtype-limits warning, which we disable here and re-enable below. | ||||
*/ | |||||
JEMALLOC_DIAGNOSTIC_PUSH | |||||
JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS | |||||
#define CONF_DONT_CHECK_MIN(um, min) false | |||||
#define CONF_CHECK_MIN(um, min) ((um) < (min)) | |||||
#define CONF_DONT_CHECK_MAX(um, max) false | |||||
#define CONF_CHECK_MAX(um, max) ((um) > (max)) | |||||
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ | #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ | ||||
if (CONF_MATCH(n)) { \ | if (CONF_MATCH(n)) { \ | ||||
uintmax_t um; \ | uintmax_t um; \ | ||||
char *end; \ | char *end; \ | ||||
\ | \ | ||||
set_errno(0); \ | set_errno(0); \ | ||||
um = malloc_strtoumax(v, &end, 0); \ | um = malloc_strtoumax(v, &end, 0); \ | ||||
if (get_errno() != 0 || (uintptr_t)end -\ | if (get_errno() != 0 || (uintptr_t)end -\ | ||||
(uintptr_t)v != vlen) { \ | (uintptr_t)v != vlen) { \ | ||||
malloc_conf_error( \ | CONF_ERROR("Invalid conf value",\ | ||||
"Invalid conf value", \ | |||||
k, klen, v, vlen); \ | k, klen, v, vlen); \ | ||||
} else if (clip) { \ | } else if (clip) { \ | ||||
if (CONF_MIN_##check_min(um, \ | if (check_min(um, (t)(min))) { \ | ||||
(t)(min))) { \ | |||||
o = (t)(min); \ | o = (t)(min); \ | ||||
} else if ( \ | } else if ( \ | ||||
CONF_MAX_##check_max(um, \ | check_max(um, (t)(max))) { \ | ||||
(t)(max))) { \ | |||||
o = (t)(max); \ | o = (t)(max); \ | ||||
} else { \ | } else { \ | ||||
o = (t)um; \ | o = (t)um; \ | ||||
} \ | } \ | ||||
} else { \ | } else { \ | ||||
if (CONF_MIN_##check_min(um, \ | if (check_min(um, (t)(min)) || \ | ||||
(t)(min)) || \ | check_max(um, (t)(max))) { \ | ||||
CONF_MAX_##check_max(um, \ | CONF_ERROR( \ | ||||
(t)(max))) { \ | |||||
malloc_conf_error( \ | |||||
"Out-of-range " \ | "Out-of-range " \ | ||||
"conf value", \ | "conf value", \ | ||||
k, klen, v, vlen); \ | k, klen, v, vlen); \ | ||||
} else { \ | } else { \ | ||||
o = (t)um; \ | o = (t)um; \ | ||||
} \ | } \ | ||||
} \ | } \ | ||||
continue; \ | CONF_CONTINUE; \ | ||||
} | } | ||||
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ | #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ | ||||
clip) \ | clip) \ | ||||
CONF_HANDLE_T_U(unsigned, o, n, min, max, \ | CONF_HANDLE_T_U(unsigned, o, n, min, max, \ | ||||
check_min, check_max, clip) | check_min, check_max, clip) | ||||
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ | #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ | ||||
CONF_HANDLE_T_U(size_t, o, n, min, max, \ | CONF_HANDLE_T_U(size_t, o, n, min, max, \ | ||||
check_min, check_max, clip) | check_min, check_max, clip) | ||||
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ | #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ | ||||
if (CONF_MATCH(n)) { \ | if (CONF_MATCH(n)) { \ | ||||
long l; \ | long l; \ | ||||
char *end; \ | char *end; \ | ||||
\ | \ | ||||
set_errno(0); \ | set_errno(0); \ | ||||
l = strtol(v, &end, 0); \ | l = strtol(v, &end, 0); \ | ||||
if (get_errno() != 0 || (uintptr_t)end -\ | if (get_errno() != 0 || (uintptr_t)end -\ | ||||
(uintptr_t)v != vlen) { \ | (uintptr_t)v != vlen) { \ | ||||
malloc_conf_error( \ | CONF_ERROR("Invalid conf value",\ | ||||
"Invalid conf value", \ | |||||
k, klen, v, vlen); \ | k, klen, v, vlen); \ | ||||
} else if (l < (ssize_t)(min) || l > \ | } else if (l < (ssize_t)(min) || l > \ | ||||
(ssize_t)(max)) { \ | (ssize_t)(max)) { \ | ||||
malloc_conf_error( \ | CONF_ERROR( \ | ||||
"Out-of-range conf value", \ | "Out-of-range conf value", \ | ||||
k, klen, v, vlen); \ | k, klen, v, vlen); \ | ||||
} else { \ | } else { \ | ||||
o = l; \ | o = l; \ | ||||
} \ | } \ | ||||
continue; \ | CONF_CONTINUE; \ | ||||
} | } | ||||
#define CONF_HANDLE_CHAR_P(o, n, d) \ | #define CONF_HANDLE_CHAR_P(o, n, d) \ | ||||
if (CONF_MATCH(n)) { \ | if (CONF_MATCH(n)) { \ | ||||
size_t cpylen = (vlen <= \ | size_t cpylen = (vlen <= \ | ||||
sizeof(o)-1) ? vlen : \ | sizeof(o)-1) ? vlen : \ | ||||
sizeof(o)-1; \ | sizeof(o)-1; \ | ||||
strncpy(o, v, cpylen); \ | strncpy(o, v, cpylen); \ | ||||
o[cpylen] = '\0'; \ | o[cpylen] = '\0'; \ | ||||
continue; \ | CONF_CONTINUE; \ | ||||
} | } | ||||
bool cur_opt_valid = true; | |||||
CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf") | |||||
if (initial_call) { | |||||
continue; | |||||
} | |||||
CONF_HANDLE_BOOL(opt_abort, "abort") | CONF_HANDLE_BOOL(opt_abort, "abort") | ||||
CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") | CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") | ||||
if (strncmp("metadata_thp", k, klen) == 0) { | if (strncmp("metadata_thp", k, klen) == 0) { | ||||
int i; | int i; | ||||
bool match = false; | bool match = false; | ||||
for (i = 0; i < metadata_thp_mode_limit; i++) { | for (i = 0; i < metadata_thp_mode_limit; i++) { | ||||
if (strncmp(metadata_thp_mode_names[i], | if (strncmp(metadata_thp_mode_names[i], | ||||
v, vlen) == 0) { | v, vlen) == 0) { | ||||
opt_metadata_thp = i; | opt_metadata_thp = i; | ||||
match = true; | match = true; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
if (!match) { | if (!match) { | ||||
malloc_conf_error("Invalid conf value", | CONF_ERROR("Invalid conf value", | ||||
k, klen, v, vlen); | k, klen, v, vlen); | ||||
} | } | ||||
continue; | CONF_CONTINUE; | ||||
} | } | ||||
CONF_HANDLE_BOOL(opt_retain, "retain") | CONF_HANDLE_BOOL(opt_retain, "retain") | ||||
if (strncmp("dss", k, klen) == 0) { | if (strncmp("dss", k, klen) == 0) { | ||||
int i; | int i; | ||||
bool match = false; | bool match = false; | ||||
for (i = 0; i < dss_prec_limit; i++) { | for (i = 0; i < dss_prec_limit; i++) { | ||||
if (strncmp(dss_prec_names[i], v, vlen) | if (strncmp(dss_prec_names[i], v, vlen) | ||||
== 0) { | == 0) { | ||||
if (extent_dss_prec_set(i)) { | if (extent_dss_prec_set(i)) { | ||||
malloc_conf_error( | CONF_ERROR( | ||||
"Error setting dss", | "Error setting dss", | ||||
k, klen, v, vlen); | k, klen, v, vlen); | ||||
} else { | } else { | ||||
opt_dss = | opt_dss = | ||||
dss_prec_names[i]; | dss_prec_names[i]; | ||||
match = true; | match = true; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
if (!match) { | if (!match) { | ||||
malloc_conf_error("Invalid conf value", | CONF_ERROR("Invalid conf value", | ||||
k, klen, v, vlen); | k, klen, v, vlen); | ||||
} | } | ||||
continue; | CONF_CONTINUE; | ||||
} | } | ||||
CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, | CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, | ||||
UINT_MAX, yes, no, false) | UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, | ||||
false) | |||||
if (CONF_MATCH("bin_shards")) { | |||||
const char *bin_shards_segment_cur = v; | |||||
size_t vlen_left = vlen; | |||||
do { | |||||
size_t size_start; | |||||
size_t size_end; | |||||
size_t nshards; | |||||
bool err = malloc_conf_multi_sizes_next( | |||||
&bin_shards_segment_cur, &vlen_left, | |||||
&size_start, &size_end, &nshards); | |||||
if (err || bin_update_shard_size( | |||||
bin_shard_sizes, size_start, | |||||
size_end, nshards)) { | |||||
CONF_ERROR( | |||||
"Invalid settings for " | |||||
"bin_shards", k, klen, v, | |||||
vlen); | |||||
break; | |||||
} | |||||
} while (vlen_left > 0); | |||||
CONF_CONTINUE; | |||||
} | |||||
CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, | CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, | ||||
"dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < | "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < | ||||
QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : | QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : | ||||
SSIZE_MAX); | SSIZE_MAX); | ||||
CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, | CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, | ||||
"muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < | "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < | ||||
QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : | QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : | ||||
SSIZE_MAX); | SSIZE_MAX); | ||||
CONF_HANDLE_BOOL(opt_stats_print, "stats_print") | CONF_HANDLE_BOOL(opt_stats_print, "stats_print") | ||||
if (CONF_MATCH("stats_print_opts")) { | if (CONF_MATCH("stats_print_opts")) { | ||||
init_opt_stats_print_opts(v, vlen); | init_opt_stats_print_opts(v, vlen); | ||||
continue; | CONF_CONTINUE; | ||||
} | } | ||||
if (config_fill) { | if (config_fill) { | ||||
if (CONF_MATCH("junk")) { | if (CONF_MATCH("junk")) { | ||||
if (CONF_MATCH_VALUE("true")) { | if (CONF_MATCH_VALUE("true")) { | ||||
opt_junk = "true"; | opt_junk = "true"; | ||||
opt_junk_alloc = opt_junk_free = | opt_junk_alloc = opt_junk_free = | ||||
true; | true; | ||||
} else if (CONF_MATCH_VALUE("false")) { | } else if (CONF_MATCH_VALUE("false")) { | ||||
opt_junk = "false"; | opt_junk = "false"; | ||||
opt_junk_alloc = opt_junk_free = | opt_junk_alloc = opt_junk_free = | ||||
false; | false; | ||||
} else if (CONF_MATCH_VALUE("alloc")) { | } else if (CONF_MATCH_VALUE("alloc")) { | ||||
opt_junk = "alloc"; | opt_junk = "alloc"; | ||||
opt_junk_alloc = true; | opt_junk_alloc = true; | ||||
opt_junk_free = false; | opt_junk_free = false; | ||||
} else if (CONF_MATCH_VALUE("free")) { | } else if (CONF_MATCH_VALUE("free")) { | ||||
opt_junk = "free"; | opt_junk = "free"; | ||||
opt_junk_alloc = false; | opt_junk_alloc = false; | ||||
opt_junk_free = true; | opt_junk_free = true; | ||||
} else { | } else { | ||||
malloc_conf_error( | CONF_ERROR( | ||||
"Invalid conf value", k, | "Invalid conf value", | ||||
klen, v, vlen); | k, klen, v, vlen); | ||||
} | } | ||||
continue; | CONF_CONTINUE; | ||||
} | } | ||||
CONF_HANDLE_BOOL(opt_zero, "zero") | CONF_HANDLE_BOOL(opt_zero, "zero") | ||||
} | } | ||||
if (config_utrace) { | if (config_utrace) { | ||||
CONF_HANDLE_BOOL(opt_utrace, "utrace") | CONF_HANDLE_BOOL(opt_utrace, "utrace") | ||||
} | } | ||||
if (config_xmalloc) { | if (config_xmalloc) { | ||||
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") | CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") | ||||
} | } | ||||
CONF_HANDLE_BOOL(opt_tcache, "tcache") | CONF_HANDLE_BOOL(opt_tcache, "tcache") | ||||
CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, | |||||
"lg_extent_max_active_fit", 0, | |||||
(sizeof(size_t) << 3), yes, yes, false) | |||||
CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", | CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", | ||||
-1, (sizeof(size_t) << 3) - 1) | -1, (sizeof(size_t) << 3) - 1) | ||||
/* | |||||
* The runtime option of oversize_threshold remains | |||||
* undocumented. It may be tweaked in the next major | |||||
* release (6.0). The default value 8M is rather | |||||
* conservative / safe. Tuning it further down may | |||||
* improve fragmentation a bit more, but may also cause | |||||
* contention on the huge arena. | |||||
*/ | |||||
CONF_HANDLE_SIZE_T(opt_oversize_threshold, | |||||
"oversize_threshold", 0, SC_LARGE_MAXCLASS, | |||||
CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, false) | |||||
CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, | |||||
"lg_extent_max_active_fit", 0, | |||||
(sizeof(size_t) << 3), CONF_DONT_CHECK_MIN, | |||||
CONF_CHECK_MAX, false) | |||||
if (strncmp("percpu_arena", k, klen) == 0) { | if (strncmp("percpu_arena", k, klen) == 0) { | ||||
bool match = false; | bool match = false; | ||||
for (int i = percpu_arena_mode_names_base; i < | for (int i = percpu_arena_mode_names_base; i < | ||||
percpu_arena_mode_names_limit; i++) { | percpu_arena_mode_names_limit; i++) { | ||||
if (strncmp(percpu_arena_mode_names[i], | if (strncmp(percpu_arena_mode_names[i], | ||||
v, vlen) == 0) { | v, vlen) == 0) { | ||||
if (!have_percpu_arena) { | if (!have_percpu_arena) { | ||||
malloc_conf_error( | CONF_ERROR( | ||||
"No getcpu support", | "No getcpu support", | ||||
k, klen, v, vlen); | k, klen, v, vlen); | ||||
} | } | ||||
opt_percpu_arena = i; | opt_percpu_arena = i; | ||||
match = true; | match = true; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
if (!match) { | if (!match) { | ||||
malloc_conf_error("Invalid conf value", | CONF_ERROR("Invalid conf value", | ||||
k, klen, v, vlen); | k, klen, v, vlen); | ||||
} | } | ||||
continue; | CONF_CONTINUE; | ||||
} | } | ||||
CONF_HANDLE_BOOL(opt_background_thread, | CONF_HANDLE_BOOL(opt_background_thread, | ||||
"background_thread"); | "background_thread"); | ||||
CONF_HANDLE_SIZE_T(opt_max_background_threads, | CONF_HANDLE_SIZE_T(opt_max_background_threads, | ||||
"max_background_threads", 1, | "max_background_threads", 1, | ||||
opt_max_background_threads, yes, yes, | opt_max_background_threads, | ||||
CONF_CHECK_MIN, CONF_CHECK_MAX, | |||||
true); | true); | ||||
if (CONF_MATCH("slab_sizes")) { | |||||
bool err; | |||||
const char *slab_size_segment_cur = v; | |||||
size_t vlen_left = vlen; | |||||
do { | |||||
size_t slab_start; | |||||
size_t slab_end; | |||||
size_t pgs; | |||||
err = malloc_conf_multi_sizes_next( | |||||
&slab_size_segment_cur, | |||||
&vlen_left, &slab_start, &slab_end, | |||||
&pgs); | |||||
if (!err) { | |||||
sc_data_update_slab_size( | |||||
sc_data, slab_start, | |||||
slab_end, (int)pgs); | |||||
} else { | |||||
CONF_ERROR("Invalid settings " | |||||
"for slab_sizes", | |||||
k, klen, v, vlen); | |||||
} | |||||
} while (!err && vlen_left > 0); | |||||
CONF_CONTINUE; | |||||
} | |||||
if (config_prof) { | if (config_prof) { | ||||
CONF_HANDLE_BOOL(opt_prof, "prof") | CONF_HANDLE_BOOL(opt_prof, "prof") | ||||
CONF_HANDLE_CHAR_P(opt_prof_prefix, | CONF_HANDLE_CHAR_P(opt_prof_prefix, | ||||
"prof_prefix", "jeprof") | "prof_prefix", "jeprof") | ||||
CONF_HANDLE_BOOL(opt_prof_active, "prof_active") | CONF_HANDLE_BOOL(opt_prof_active, "prof_active") | ||||
CONF_HANDLE_BOOL(opt_prof_thread_active_init, | CONF_HANDLE_BOOL(opt_prof_thread_active_init, | ||||
"prof_thread_active_init") | "prof_thread_active_init") | ||||
CONF_HANDLE_SIZE_T(opt_lg_prof_sample, | CONF_HANDLE_SIZE_T(opt_lg_prof_sample, | ||||
"lg_prof_sample", 0, (sizeof(uint64_t) << 3) | "lg_prof_sample", 0, (sizeof(uint64_t) << 3) | ||||
- 1, no, yes, true) | - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, | ||||
true) | |||||
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") | CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") | ||||
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, | CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, | ||||
"lg_prof_interval", -1, | "lg_prof_interval", -1, | ||||
(sizeof(uint64_t) << 3) - 1) | (sizeof(uint64_t) << 3) - 1) | ||||
CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") | CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") | ||||
CONF_HANDLE_BOOL(opt_prof_final, "prof_final") | CONF_HANDLE_BOOL(opt_prof_final, "prof_final") | ||||
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") | CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") | ||||
CONF_HANDLE_BOOL(opt_prof_log, "prof_log") | |||||
} | } | ||||
if (config_log) { | if (config_log) { | ||||
if (CONF_MATCH("log")) { | if (CONF_MATCH("log")) { | ||||
size_t cpylen = ( | size_t cpylen = ( | ||||
vlen <= sizeof(log_var_names) ? | vlen <= sizeof(log_var_names) ? | ||||
vlen : sizeof(log_var_names) - 1); | vlen : sizeof(log_var_names) - 1); | ||||
strncpy(log_var_names, v, cpylen); | strncpy(log_var_names, v, cpylen); | ||||
log_var_names[cpylen] = '\0'; | log_var_names[cpylen] = '\0'; | ||||
continue; | CONF_CONTINUE; | ||||
} | } | ||||
} | } | ||||
if (CONF_MATCH("thp")) { | if (CONF_MATCH("thp")) { | ||||
bool match = false; | bool match = false; | ||||
for (int i = 0; i < thp_mode_names_limit; i++) { | for (int i = 0; i < thp_mode_names_limit; i++) { | ||||
if (strncmp(thp_mode_names[i],v, vlen) | if (strncmp(thp_mode_names[i],v, vlen) | ||||
== 0) { | == 0) { | ||||
if (!have_madvise_huge) { | if (!have_madvise_huge) { | ||||
malloc_conf_error( | CONF_ERROR( | ||||
"No THP support", | "No THP support", | ||||
k, klen, v, vlen); | k, klen, v, vlen); | ||||
} | } | ||||
opt_thp = i; | opt_thp = i; | ||||
match = true; | match = true; | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
if (!match) { | if (!match) { | ||||
malloc_conf_error("Invalid conf value", | CONF_ERROR("Invalid conf value", | ||||
k, klen, v, vlen); | k, klen, v, vlen); | ||||
} | } | ||||
continue; | CONF_CONTINUE; | ||||
} | } | ||||
malloc_conf_error("Invalid conf pair", k, klen, v, | CONF_ERROR("Invalid conf pair", k, klen, v, vlen); | ||||
vlen); | #undef CONF_ERROR | ||||
#undef CONF_CONTINUE | |||||
#undef CONF_MATCH | #undef CONF_MATCH | ||||
#undef CONF_MATCH_VALUE | #undef CONF_MATCH_VALUE | ||||
#undef CONF_HANDLE_BOOL | #undef CONF_HANDLE_BOOL | ||||
#undef CONF_MIN_no | #undef CONF_DONT_CHECK_MIN | ||||
#undef CONF_MIN_yes | #undef CONF_CHECK_MIN | ||||
#undef CONF_MAX_no | #undef CONF_DONT_CHECK_MAX | ||||
#undef CONF_MAX_yes | #undef CONF_CHECK_MAX | ||||
#undef CONF_HANDLE_T_U | #undef CONF_HANDLE_T_U | ||||
#undef CONF_HANDLE_UNSIGNED | #undef CONF_HANDLE_UNSIGNED | ||||
#undef CONF_HANDLE_SIZE_T | #undef CONF_HANDLE_SIZE_T | ||||
#undef CONF_HANDLE_SSIZE_T | #undef CONF_HANDLE_SSIZE_T | ||||
#undef CONF_HANDLE_CHAR_P | #undef CONF_HANDLE_CHAR_P | ||||
/* Re-enable diagnostic "-Wtype-limits" */ | |||||
JEMALLOC_DIAGNOSTIC_POP | |||||
} | } | ||||
if (opt_abort_conf && had_conf_error) { | if (opt_abort_conf && had_conf_error) { | ||||
malloc_abort_invalid_conf(); | malloc_abort_invalid_conf(); | ||||
} | } | ||||
} | } | ||||
atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); | atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); | ||||
} | } | ||||
static void | |||||
malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) { | |||||
const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL}; | |||||
char buf[PATH_MAX + 1]; | |||||
/* The first call only set the confirm_conf option and opts_cache */ | |||||
malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf); | |||||
malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache, | |||||
NULL); | |||||
} | |||||
#undef MALLOC_CONF_NSOURCES | |||||
static bool | static bool | ||||
malloc_init_hard_needed(void) { | malloc_init_hard_needed(void) { | ||||
if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == | if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == | ||||
malloc_init_recursible)) { | malloc_init_recursible)) { | ||||
/* | /* | ||||
* Another thread initialized the allocator before this one | * Another thread initialized the allocator before this one | ||||
* acquired init_lock, or this thread is the initializing | * acquired init_lock, or this thread is the initializing | ||||
* thread, and it is recursively allocating. | * thread, and it is recursively allocating. | ||||
Show All 14 Lines | |||||
#endif | #endif | ||||
return true; | return true; | ||||
} | } | ||||
static bool | static bool | ||||
malloc_init_hard_a0_locked() { | malloc_init_hard_a0_locked() { | ||||
malloc_initializer = INITIALIZER; | malloc_initializer = INITIALIZER; | ||||
JEMALLOC_DIAGNOSTIC_PUSH | |||||
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS | |||||
sc_data_t sc_data = {0}; | |||||
JEMALLOC_DIAGNOSTIC_POP | |||||
/* | |||||
* Ordering here is somewhat tricky; we need sc_boot() first, since that | |||||
* determines what the size classes will be, and then | |||||
* malloc_conf_init(), since any slab size tweaking will need to be done | |||||
* before sz_boot and bin_boot, which assume that the values they read | |||||
* out of sc_data_global are final. | |||||
*/ | |||||
sc_boot(&sc_data); | |||||
unsigned bin_shard_sizes[SC_NBINS]; | |||||
bin_shard_sizes_boot(bin_shard_sizes); | |||||
/* | |||||
* prof_boot0 only initializes opt_prof_prefix. We need to do it before | |||||
* we parse malloc_conf options, in case malloc_conf parsing overwrites | |||||
* it. | |||||
*/ | |||||
if (config_prof) { | if (config_prof) { | ||||
prof_boot0(); | prof_boot0(); | ||||
} | } | ||||
malloc_conf_init(); | malloc_conf_init(&sc_data, bin_shard_sizes); | ||||
sz_boot(&sc_data); | |||||
bin_boot(&sc_data, bin_shard_sizes); | |||||
if (opt_stats_print) { | if (opt_stats_print) { | ||||
/* Print statistics at exit. */ | /* Print statistics at exit. */ | ||||
if (atexit(stats_print_atexit) != 0) { | if (atexit(stats_print_atexit) != 0) { | ||||
malloc_write("<jemalloc>: Error in atexit()\n"); | malloc_write("<jemalloc>: Error in atexit()\n"); | ||||
if (opt_abort) { | if (opt_abort) { | ||||
abort(); | abort(); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
if (pages_boot()) { | if (pages_boot()) { | ||||
return true; | return true; | ||||
} | } | ||||
if (base_boot(TSDN_NULL)) { | if (base_boot(TSDN_NULL)) { | ||||
return true; | return true; | ||||
} | } | ||||
if (extent_boot()) { | if (extent_boot()) { | ||||
return true; | return true; | ||||
} | } | ||||
if (ctl_boot()) { | if (ctl_boot()) { | ||||
return true; | return true; | ||||
} | } | ||||
if (config_prof) { | if (config_prof) { | ||||
prof_boot1(); | prof_boot1(); | ||||
} | } | ||||
arena_boot(); | arena_boot(&sc_data); | ||||
if (tcache_boot(TSDN_NULL)) { | if (tcache_boot(TSDN_NULL)) { | ||||
return true; | return true; | ||||
} | } | ||||
if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, | if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, | ||||
malloc_mutex_rank_exclusive)) { | malloc_mutex_rank_exclusive)) { | ||||
return true; | return true; | ||||
} | } | ||||
hook_boot(); | |||||
/* | /* | ||||
* Create enough scaffolding to allow recursive allocation in | * Create enough scaffolding to allow recursive allocation in | ||||
* malloc_ncpus(). | * malloc_ncpus(). | ||||
*/ | */ | ||||
narenas_auto = 1; | narenas_auto = 1; | ||||
manual_arena_base = narenas_auto + 1; | |||||
memset(arenas, 0, sizeof(arena_t *) * narenas_auto); | memset(arenas, 0, sizeof(arena_t *) * narenas_auto); | ||||
/* | /* | ||||
* Initialize one arena here. The rest are lazily created in | * Initialize one arena here. The rest are lazily created in | ||||
* arena_choose_hard(). | * arena_choose_hard(). | ||||
*/ | */ | ||||
if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) | if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) | ||||
== NULL) { | == NULL) { | ||||
return true; | return true; | ||||
▲ Show 20 Lines • Show All 131 Lines • ▼ Show 20 Lines | malloc_init_narenas(void) { | ||||
* Limit the number of arenas to the indexing range of MALLOCX_ARENA(). | * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). | ||||
*/ | */ | ||||
if (narenas_auto >= MALLOCX_ARENA_LIMIT) { | if (narenas_auto >= MALLOCX_ARENA_LIMIT) { | ||||
narenas_auto = MALLOCX_ARENA_LIMIT - 1; | narenas_auto = MALLOCX_ARENA_LIMIT - 1; | ||||
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", | malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", | ||||
narenas_auto); | narenas_auto); | ||||
} | } | ||||
narenas_total_set(narenas_auto); | narenas_total_set(narenas_auto); | ||||
if (arena_init_huge()) { | |||||
narenas_total_inc(); | |||||
} | |||||
manual_arena_base = narenas_total_get(); | |||||
return false; | return false; | ||||
} | } | ||||
static void | static void | ||||
malloc_init_percpu(void) { | malloc_init_percpu(void) { | ||||
opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); | opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 81 Lines • ▼ Show 20 Lines | #define UNLOCK_RETURN(tsdn, ret, reentrancy) \ | ||||
if (opt_background_thread) { | if (opt_background_thread) { | ||||
assert(have_background_thread); | assert(have_background_thread); | ||||
/* | /* | ||||
* Need to finish init & unlock first before creating background | * Need to finish init & unlock first before creating background | ||||
* threads (pthread_create depends on malloc). ctl_init (which | * threads (pthread_create depends on malloc). ctl_init (which | ||||
* sets isthreaded) needs to be called without holding any lock. | * sets isthreaded) needs to be called without holding any lock. | ||||
*/ | */ | ||||
background_thread_ctl_init(tsd_tsdn(tsd)); | background_thread_ctl_init(tsd_tsdn(tsd)); | ||||
if (background_thread_create(tsd, 0)) { | |||||
malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); | |||||
bool err = background_thread_create(tsd, 0); | |||||
malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); | |||||
if (err) { | |||||
return true; | return true; | ||||
} | } | ||||
} | } | ||||
#undef UNLOCK_RETURN | #undef UNLOCK_RETURN | ||||
return false; | return false; | ||||
} | } | ||||
/* | /* | ||||
* End initialization functions. | * End initialization functions. | ||||
*/ | */ | ||||
/******************************************************************************/ | /******************************************************************************/ | ||||
/* | /* | ||||
* Begin allocation-path internal functions and data structures. | * Begin allocation-path internal functions and data structures. | ||||
*/ | */ | ||||
/* | /* | ||||
* Settings determined by the documented behavior of the allocation functions. | * Settings determined by the documented behavior of the allocation functions. | ||||
*/ | */ | ||||
typedef struct static_opts_s static_opts_t; | typedef struct static_opts_s static_opts_t; | ||||
struct static_opts_s { | struct static_opts_s { | ||||
/* Whether or not allocation size may overflow. */ | /* Whether or not allocation size may overflow. */ | ||||
bool may_overflow; | bool may_overflow; | ||||
/* Whether or not allocations of size 0 should be treated as size 1. */ | |||||
bool bump_empty_alloc; | |||||
/* | /* | ||||
* Whether or not allocations (with alignment) of size 0 should be | |||||
* treated as size 1. | |||||
*/ | |||||
bool bump_empty_aligned_alloc; | |||||
/* | |||||
* Whether to assert that allocations are not of size 0 (after any | * Whether to assert that allocations are not of size 0 (after any | ||||
* bumping). | * bumping). | ||||
*/ | */ | ||||
bool assert_nonempty_alloc; | bool assert_nonempty_alloc; | ||||
/* | /* | ||||
* Whether or not to modify the 'result' argument to malloc in case of | * Whether or not to modify the 'result' argument to malloc in case of | ||||
* error. | * error. | ||||
Show All 15 Lines | struct static_opts_s { | ||||
/* | /* | ||||
* False if we're configured to skip some time-consuming operations. | * False if we're configured to skip some time-consuming operations. | ||||
* | * | ||||
* This isn't really a malloc "behavior", but it acts as a useful | * This isn't really a malloc "behavior", but it acts as a useful | ||||
* summary of several other static (or at least, static after program | * summary of several other static (or at least, static after program | ||||
* initialization) options. | * initialization) options. | ||||
*/ | */ | ||||
bool slow; | bool slow; | ||||
/* | |||||
* Return size. | |||||
*/ | |||||
bool usize; | |||||
}; | }; | ||||
JEMALLOC_ALWAYS_INLINE void | JEMALLOC_ALWAYS_INLINE void | ||||
static_opts_init(static_opts_t *static_opts) { | static_opts_init(static_opts_t *static_opts) { | ||||
static_opts->may_overflow = false; | static_opts->may_overflow = false; | ||||
static_opts->bump_empty_alloc = false; | static_opts->bump_empty_aligned_alloc = false; | ||||
static_opts->assert_nonempty_alloc = false; | static_opts->assert_nonempty_alloc = false; | ||||
static_opts->null_out_result_on_error = false; | static_opts->null_out_result_on_error = false; | ||||
static_opts->set_errno_on_error = false; | static_opts->set_errno_on_error = false; | ||||
static_opts->min_alignment = 0; | static_opts->min_alignment = 0; | ||||
static_opts->oom_string = ""; | static_opts->oom_string = ""; | ||||
static_opts->invalid_alignment_string = ""; | static_opts->invalid_alignment_string = ""; | ||||
static_opts->slow = false; | static_opts->slow = false; | ||||
static_opts->usize = false; | |||||
} | } | ||||
/* | /* | ||||
* These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we | * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we | ||||
* should have one constant here per magic value there. Note however that the | * should have one constant here per magic value there. Note however that the | ||||
* representations need not be related. | * representations need not be related. | ||||
*/ | */ | ||||
#define TCACHE_IND_NONE ((unsigned)-1) | #define TCACHE_IND_NONE ((unsigned)-1) | ||||
#define TCACHE_IND_AUTOMATIC ((unsigned)-2) | #define TCACHE_IND_AUTOMATIC ((unsigned)-2) | ||||
#define ARENA_IND_AUTOMATIC ((unsigned)-1) | #define ARENA_IND_AUTOMATIC ((unsigned)-1) | ||||
typedef struct dynamic_opts_s dynamic_opts_t; | typedef struct dynamic_opts_s dynamic_opts_t; | ||||
struct dynamic_opts_s { | struct dynamic_opts_s { | ||||
void **result; | void **result; | ||||
size_t usize; | |||||
size_t num_items; | size_t num_items; | ||||
size_t item_size; | size_t item_size; | ||||
size_t alignment; | size_t alignment; | ||||
bool zero; | bool zero; | ||||
unsigned tcache_ind; | unsigned tcache_ind; | ||||
unsigned arena_ind; | unsigned arena_ind; | ||||
}; | }; | ||||
JEMALLOC_ALWAYS_INLINE void | JEMALLOC_ALWAYS_INLINE void | ||||
dynamic_opts_init(dynamic_opts_t *dynamic_opts) { | dynamic_opts_init(dynamic_opts_t *dynamic_opts) { | ||||
dynamic_opts->result = NULL; | dynamic_opts->result = NULL; | ||||
dynamic_opts->usize = 0; | |||||
dynamic_opts->num_items = 0; | dynamic_opts->num_items = 0; | ||||
dynamic_opts->item_size = 0; | dynamic_opts->item_size = 0; | ||||
dynamic_opts->alignment = 0; | dynamic_opts->alignment = 0; | ||||
dynamic_opts->zero = false; | dynamic_opts->zero = false; | ||||
dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; | dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; | ||||
dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; | dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 47 Lines • ▼ Show 20 Lines | imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, | ||||
/* | /* | ||||
* For small allocations, sampling bumps the usize. If so, we allocate | * For small allocations, sampling bumps the usize. If so, we allocate | ||||
* from the ind_large bucket. | * from the ind_large bucket. | ||||
*/ | */ | ||||
szind_t ind_large; | szind_t ind_large; | ||||
size_t bumped_usize = usize; | size_t bumped_usize = usize; | ||||
if (usize <= SMALL_MAXCLASS) { | if (usize <= SC_SMALL_MAXCLASS) { | ||||
assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) : | assert(((dopts->alignment == 0) ? | ||||
sz_sa2u(LARGE_MINCLASS, dopts->alignment)) | sz_s2u(SC_LARGE_MINCLASS) : | ||||
== LARGE_MINCLASS); | sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment)) | ||||
ind_large = sz_size2index(LARGE_MINCLASS); | == SC_LARGE_MINCLASS); | ||||
bumped_usize = sz_s2u(LARGE_MINCLASS); | ind_large = sz_size2index(SC_LARGE_MINCLASS); | ||||
bumped_usize = sz_s2u(SC_LARGE_MINCLASS); | |||||
ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, | ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, | ||||
bumped_usize, ind_large); | bumped_usize, ind_large); | ||||
if (unlikely(ret == NULL)) { | if (unlikely(ret == NULL)) { | ||||
return NULL; | return NULL; | ||||
} | } | ||||
arena_prof_promote(tsd_tsdn(tsd), ret, usize); | arena_prof_promote(tsd_tsdn(tsd), ret, usize); | ||||
} else { | } else { | ||||
ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); | ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); | ||||
▲ Show 20 Lines • Show All 66 Lines • ▼ Show 20 Lines | imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { | ||||
int8_t reentrancy_level; | int8_t reentrancy_level; | ||||
/* Compute the amount of memory the user wants. */ | /* Compute the amount of memory the user wants. */ | ||||
if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, | if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, | ||||
&size))) { | &size))) { | ||||
goto label_oom; | goto label_oom; | ||||
} | } | ||||
/* Validate the user input. */ | |||||
if (sopts->bump_empty_alloc) { | |||||
if (unlikely(size == 0)) { | |||||
size = 1; | |||||
} | |||||
} | |||||
if (sopts->assert_nonempty_alloc) { | |||||
assert (size != 0); | |||||
} | |||||
if (unlikely(dopts->alignment < sopts->min_alignment | if (unlikely(dopts->alignment < sopts->min_alignment | ||||
|| (dopts->alignment & (dopts->alignment - 1)) != 0)) { | || (dopts->alignment & (dopts->alignment - 1)) != 0)) { | ||||
goto label_invalid_alignment; | goto label_invalid_alignment; | ||||
} | } | ||||
/* This is the beginning of the "core" algorithm. */ | /* This is the beginning of the "core" algorithm. */ | ||||
if (dopts->alignment == 0) { | if (dopts->alignment == 0) { | ||||
ind = sz_size2index(size); | ind = sz_size2index(size); | ||||
if (unlikely(ind >= NSIZES)) { | if (unlikely(ind >= SC_NSIZES)) { | ||||
goto label_oom; | goto label_oom; | ||||
} | } | ||||
if (config_stats || (config_prof && opt_prof)) { | if (config_stats || (config_prof && opt_prof) || sopts->usize) { | ||||
usize = sz_index2size(ind); | usize = sz_index2size(ind); | ||||
assert(usize > 0 && usize <= LARGE_MAXCLASS); | dopts->usize = usize; | ||||
assert(usize > 0 && usize | |||||
<= SC_LARGE_MAXCLASS); | |||||
} | } | ||||
} else { | } else { | ||||
if (sopts->bump_empty_aligned_alloc) { | |||||
if (unlikely(size == 0)) { | |||||
size = 1; | |||||
} | |||||
} | |||||
usize = sz_sa2u(size, dopts->alignment); | usize = sz_sa2u(size, dopts->alignment); | ||||
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { | dopts->usize = usize; | ||||
if (unlikely(usize == 0 | |||||
|| usize > SC_LARGE_MAXCLASS)) { | |||||
goto label_oom; | goto label_oom; | ||||
} | } | ||||
} | } | ||||
/* Validate the user input. */ | |||||
if (sopts->assert_nonempty_alloc) { | |||||
assert (size != 0); | |||||
} | |||||
check_entry_exit_locking(tsd_tsdn(tsd)); | check_entry_exit_locking(tsd_tsdn(tsd)); | ||||
/* | /* | ||||
* If we need to handle reentrancy, we can do it out of a | * If we need to handle reentrancy, we can do it out of a | ||||
* known-initialized arena (i.e. arena 0). | * known-initialized arena (i.e. arena 0). | ||||
*/ | */ | ||||
reentrancy_level = tsd_reentrancy_level_get(tsd); | reentrancy_level = tsd_reentrancy_level_get(tsd); | ||||
Show All 16 Lines | if (config_prof && opt_prof) { | ||||
* Note that if we're going down this path, usize must have been | * Note that if we're going down this path, usize must have been | ||||
* initialized in the previous if statement. | * initialized in the previous if statement. | ||||
*/ | */ | ||||
prof_tctx_t *tctx = prof_alloc_prep( | prof_tctx_t *tctx = prof_alloc_prep( | ||||
tsd, usize, prof_active_get_unlocked(), true); | tsd, usize, prof_active_get_unlocked(), true); | ||||
alloc_ctx_t alloc_ctx; | alloc_ctx_t alloc_ctx; | ||||
if (likely((uintptr_t)tctx == (uintptr_t)1U)) { | if (likely((uintptr_t)tctx == (uintptr_t)1U)) { | ||||
alloc_ctx.slab = (usize <= SMALL_MAXCLASS); | alloc_ctx.slab = (usize | ||||
<= SC_SMALL_MAXCLASS); | |||||
allocation = imalloc_no_sample( | allocation = imalloc_no_sample( | ||||
sopts, dopts, tsd, usize, usize, ind); | sopts, dopts, tsd, usize, usize, ind); | ||||
} else if ((uintptr_t)tctx > (uintptr_t)1U) { | } else if ((uintptr_t)tctx > (uintptr_t)1U) { | ||||
/* | /* | ||||
* Note that ind might still be 0 here. This is fine; | * Note that ind might still be 0 here. This is fine; | ||||
* imalloc_sample ignores ind if dopts->alignment > 0. | * imalloc_sample ignores ind if dopts->alignment > 0. | ||||
*/ | */ | ||||
allocation = imalloc_sample( | allocation = imalloc_sample( | ||||
▲ Show 20 Lines • Show All 88 Lines • ▼ Show 20 Lines | label_invalid_alignment: | ||||
if (sopts->null_out_result_on_error) { | if (sopts->null_out_result_on_error) { | ||||
*dopts->result = NULL; | *dopts->result = NULL; | ||||
} | } | ||||
return EINVAL; | return EINVAL; | ||||
} | } | ||||
/* Returns the errno-style error code of the allocation. */ | JEMALLOC_ALWAYS_INLINE bool | ||||
JEMALLOC_ALWAYS_INLINE int | imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) { | ||||
imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { | |||||
if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { | if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { | ||||
if (config_xmalloc && unlikely(opt_xmalloc)) { | if (config_xmalloc && unlikely(opt_xmalloc)) { | ||||
malloc_write(sopts->oom_string); | malloc_write(sopts->oom_string); | ||||
abort(); | abort(); | ||||
} | } | ||||
UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); | UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); | ||||
set_errno(ENOMEM); | set_errno(ENOMEM); | ||||
*dopts->result = NULL; | *dopts->result = NULL; | ||||
return false; | |||||
} | |||||
return true; | |||||
} | |||||
/* Returns the errno-style error code of the allocation. */ | |||||
JEMALLOC_ALWAYS_INLINE int | |||||
imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { | |||||
if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { | |||||
return ENOMEM; | return ENOMEM; | ||||
} | } | ||||
/* We always need the tsd. Let's grab it right away. */ | /* We always need the tsd. Let's grab it right away. */ | ||||
tsd_t *tsd = tsd_fetch(); | tsd_t *tsd = tsd_fetch(); | ||||
assert(tsd); | assert(tsd); | ||||
if (likely(tsd_fast(tsd))) { | if (likely(tsd_fast(tsd))) { | ||||
/* Fast and common path. */ | /* Fast and common path. */ | ||||
tsd_assert_fast(tsd); | tsd_assert_fast(tsd); | ||||
sopts->slow = false; | sopts->slow = false; | ||||
return imalloc_body(sopts, dopts, tsd); | return imalloc_body(sopts, dopts, tsd); | ||||
} else { | } else { | ||||
if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) { | |||||
return ENOMEM; | |||||
} | |||||
sopts->slow = true; | sopts->slow = true; | ||||
return imalloc_body(sopts, dopts, tsd); | return imalloc_body(sopts, dopts, tsd); | ||||
} | } | ||||
} | } | ||||
/******************************************************************************/ | |||||
/* | |||||
* Begin malloc(3)-compatible functions. | |||||
*/ | |||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | JEMALLOC_NOINLINE | ||||
void JEMALLOC_NOTHROW * | void * | ||||
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) | malloc_default(size_t size) { | ||||
je_malloc(size_t size) { | |||||
void *ret; | void *ret; | ||||
static_opts_t sopts; | static_opts_t sopts; | ||||
dynamic_opts_t dopts; | dynamic_opts_t dopts; | ||||
LOG("core.malloc.entry", "size: %zu", size); | LOG("core.malloc.entry", "size: %zu", size); | ||||
static_opts_init(&sopts); | static_opts_init(&sopts); | ||||
dynamic_opts_init(&dopts); | dynamic_opts_init(&dopts); | ||||
sopts.bump_empty_alloc = true; | |||||
sopts.null_out_result_on_error = true; | sopts.null_out_result_on_error = true; | ||||
sopts.set_errno_on_error = true; | sopts.set_errno_on_error = true; | ||||
sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; | sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; | ||||
dopts.result = &ret; | dopts.result = &ret; | ||||
dopts.num_items = 1; | dopts.num_items = 1; | ||||
dopts.item_size = size; | dopts.item_size = size; | ||||
imalloc(&sopts, &dopts); | imalloc(&sopts, &dopts); | ||||
/* | |||||
* Note that this branch gets optimized away -- it immediately follows | |||||
* the check on tsd_fast that sets sopts.slow. | |||||
*/ | |||||
if (sopts.slow) { | |||||
uintptr_t args[3] = {size}; | |||||
hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args); | |||||
} | |||||
LOG("core.malloc.exit", "result: %p", ret); | LOG("core.malloc.exit", "result: %p", ret); | ||||
return ret; | return ret; | ||||
} | } | ||||
/******************************************************************************/ | |||||
/* | |||||
* Begin malloc(3)-compatible functions. | |||||
*/ | |||||
/* | |||||
* malloc() fastpath. | |||||
* | |||||
* Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit | |||||
* tcache. If either of these is false, we tail-call to the slowpath, | |||||
* malloc_default(). Tail-calling is used to avoid any caller-saved | |||||
* registers. | |||||
* | |||||
* fastpath supports ticker and profiling, both of which will also | |||||
* tail-call to the slowpath if they fire. | |||||
*/ | |||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | |||||
void JEMALLOC_NOTHROW * | |||||
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) | |||||
je_malloc(size_t size) { | |||||
LOG("core.malloc.entry", "size: %zu", size); | |||||
if (tsd_get_allocates() && unlikely(!malloc_initialized())) { | |||||
return malloc_default(size); | |||||
} | |||||
tsd_t *tsd = tsd_get(false); | |||||
if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) { | |||||
return malloc_default(size); | |||||
} | |||||
tcache_t *tcache = tsd_tcachep_get(tsd); | |||||
if (unlikely(ticker_trytick(&tcache->gc_ticker))) { | |||||
return malloc_default(size); | |||||
} | |||||
szind_t ind = sz_size2index_lookup(size); | |||||
size_t usize; | |||||
if (config_stats || config_prof) { | |||||
usize = sz_index2size(ind); | |||||
} | |||||
/* Fast path relies on size being a bin. I.e. SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS */ | |||||
assert(ind < SC_NBINS); | |||||
assert(size <= SC_SMALL_MAXCLASS); | |||||
if (config_prof) { | |||||
int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd); | |||||
bytes_until_sample -= usize; | |||||
tsd_bytes_until_sample_set(tsd, bytes_until_sample); | |||||
if (unlikely(bytes_until_sample < 0)) { | |||||
/* | |||||
* Avoid a prof_active check on the fastpath. | |||||
* If prof_active is false, set bytes_until_sample to | |||||
* a large value. If prof_active is set to true, | |||||
* bytes_until_sample will be reset. | |||||
*/ | |||||
if (!prof_active) { | |||||
tsd_bytes_until_sample_set(tsd, SSIZE_MAX); | |||||
} | |||||
return malloc_default(size); | |||||
} | |||||
} | |||||
cache_bin_t *bin = tcache_small_bin_get(tcache, ind); | |||||
bool tcache_success; | |||||
void* ret = cache_bin_alloc_easy(bin, &tcache_success); | |||||
if (tcache_success) { | |||||
if (config_stats) { | |||||
*tsd_thread_allocatedp_get(tsd) += usize; | |||||
bin->tstats.nrequests++; | |||||
} | |||||
if (config_prof) { | |||||
tcache->prof_accumbytes += usize; | |||||
} | |||||
LOG("core.malloc.exit", "result: %p", ret); | |||||
/* Fastpath success */ | |||||
return ret; | |||||
} | |||||
return malloc_default(size); | |||||
} | |||||
JEMALLOC_EXPORT int JEMALLOC_NOTHROW | JEMALLOC_EXPORT int JEMALLOC_NOTHROW | ||||
JEMALLOC_ATTR(nonnull(1)) | JEMALLOC_ATTR(nonnull(1)) | ||||
je_posix_memalign(void **memptr, size_t alignment, size_t size) { | je_posix_memalign(void **memptr, size_t alignment, size_t size) { | ||||
int ret; | int ret; | ||||
static_opts_t sopts; | static_opts_t sopts; | ||||
dynamic_opts_t dopts; | dynamic_opts_t dopts; | ||||
LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, " | LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, " | ||||
"size: %zu", memptr, alignment, size); | "size: %zu", memptr, alignment, size); | ||||
static_opts_init(&sopts); | static_opts_init(&sopts); | ||||
dynamic_opts_init(&dopts); | dynamic_opts_init(&dopts); | ||||
sopts.bump_empty_alloc = true; | sopts.bump_empty_aligned_alloc = true; | ||||
sopts.min_alignment = sizeof(void *); | sopts.min_alignment = sizeof(void *); | ||||
sopts.oom_string = | sopts.oom_string = | ||||
"<jemalloc>: Error allocating aligned memory: out of memory\n"; | "<jemalloc>: Error allocating aligned memory: out of memory\n"; | ||||
sopts.invalid_alignment_string = | sopts.invalid_alignment_string = | ||||
"<jemalloc>: Error allocating aligned memory: invalid alignment\n"; | "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; | ||||
dopts.result = memptr; | dopts.result = memptr; | ||||
dopts.num_items = 1; | dopts.num_items = 1; | ||||
dopts.item_size = size; | dopts.item_size = size; | ||||
dopts.alignment = alignment; | dopts.alignment = alignment; | ||||
ret = imalloc(&sopts, &dopts); | ret = imalloc(&sopts, &dopts); | ||||
if (sopts.slow) { | |||||
uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment, | |||||
(uintptr_t)size}; | |||||
hook_invoke_alloc(hook_alloc_posix_memalign, *memptr, | |||||
(uintptr_t)ret, args); | |||||
} | |||||
LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, | LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, | ||||
*memptr); | *memptr); | ||||
return ret; | return ret; | ||||
} | } | ||||
/*
 * aligned_alloc(3): like posix_memalign but returns the pointer
 * directly, setting errno on failure.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size) {
	void *ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
	    alignment, size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.bump_empty_aligned_alloc = true;
	sopts.null_out_result_on_error = true;
	sopts.set_errno_on_error = true;
	/* Any non-zero power-of-two alignment is acceptable here. */
	sopts.min_alignment = 1;
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";

	dopts.result = &ret;
	dopts.num_items = 1;
	dopts.item_size = size;
	dopts.alignment = alignment;

	imalloc(&sopts, &dopts);
	if (sopts.slow) {
		uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size};
		hook_invoke_alloc(hook_alloc_aligned_alloc, ret,
		    (uintptr_t)ret, args);
	}

	LOG("core.aligned_alloc.exit", "result: %p", ret);

	return ret;
}
/*
 * calloc(3): overflow-checked num * size allocation, zero-filled.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size) {
	void *ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	/* num * size may overflow; imalloc checks for that. */
	sopts.may_overflow = true;
	sopts.null_out_result_on_error = true;
	sopts.set_errno_on_error = true;
	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";

	dopts.result = &ret;
	dopts.num_items = num;
	dopts.item_size = size;
	dopts.zero = true;

	imalloc(&sopts, &dopts);
	if (sopts.slow) {
		uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size};
		hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args);
	}

	LOG("core.calloc.exit", "result: %p", ret);

	return ret;
}
static void * | static void * | ||||
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, | irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, | ||||
prof_tctx_t *tctx) { | prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { | ||||
void *p; | void *p; | ||||
if (tctx == NULL) { | if (tctx == NULL) { | ||||
return NULL; | return NULL; | ||||
} | } | ||||
if (usize <= SMALL_MAXCLASS) { | if (usize <= SC_SMALL_MAXCLASS) { | ||||
p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); | p = iralloc(tsd, old_ptr, old_usize, | ||||
SC_LARGE_MINCLASS, 0, false, hook_args); | |||||
if (p == NULL) { | if (p == NULL) { | ||||
return NULL; | return NULL; | ||||
} | } | ||||
arena_prof_promote(tsd_tsdn(tsd), p, usize); | arena_prof_promote(tsd_tsdn(tsd), p, usize); | ||||
} else { | } else { | ||||
p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); | p = iralloc(tsd, old_ptr, old_usize, usize, 0, false, | ||||
hook_args); | |||||
} | } | ||||
return p; | return p; | ||||
} | } | ||||
JEMALLOC_ALWAYS_INLINE void * | JEMALLOC_ALWAYS_INLINE void * | ||||
irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, | irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, | ||||
alloc_ctx_t *alloc_ctx) { | alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { | ||||
void *p; | void *p; | ||||
bool prof_active; | bool prof_active; | ||||
prof_tctx_t *old_tctx, *tctx; | prof_tctx_t *old_tctx, *tctx; | ||||
prof_active = prof_active_get_unlocked(); | prof_active = prof_active_get_unlocked(); | ||||
old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); | old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); | ||||
tctx = prof_alloc_prep(tsd, usize, prof_active, true); | tctx = prof_alloc_prep(tsd, usize, prof_active, true); | ||||
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { | ||||
p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); | p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx, | ||||
hook_args); | |||||
} else { | } else { | ||||
p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); | p = iralloc(tsd, old_ptr, old_usize, usize, 0, false, | ||||
hook_args); | |||||
} | } | ||||
if (unlikely(p == NULL)) { | if (unlikely(p == NULL)) { | ||||
prof_alloc_rollback(tsd, tctx, true); | prof_alloc_rollback(tsd, tctx, true); | ||||
return NULL; | return NULL; | ||||
} | } | ||||
prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, | prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, | ||||
old_tctx); | old_tctx); | ||||
Show All 12 Lines | ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { | ||||
assert(ptr != NULL); | assert(ptr != NULL); | ||||
assert(malloc_initialized() || IS_INITIALIZER); | assert(malloc_initialized() || IS_INITIALIZER); | ||||
alloc_ctx_t alloc_ctx; | alloc_ctx_t alloc_ctx; | ||||
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); | rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); | ||||
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, | rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, | ||||
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); | (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); | ||||
assert(alloc_ctx.szind != NSIZES); | assert(alloc_ctx.szind != SC_NSIZES); | ||||
size_t usize; | size_t usize; | ||||
if (config_prof && opt_prof) { | if (config_prof && opt_prof) { | ||||
usize = sz_index2size(alloc_ctx.szind); | usize = sz_index2size(alloc_ctx.szind); | ||||
prof_free(tsd, ptr, usize, &alloc_ctx); | prof_free(tsd, ptr, usize, &alloc_ctx); | ||||
} else if (config_stats) { | } else if (config_stats) { | ||||
usize = sz_index2size(alloc_ctx.szind); | usize = sz_index2size(alloc_ctx.szind); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 64 Lines • ▼ Show 20 Lines | isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { | ||||
} else { | } else { | ||||
isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); | isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); | ||||
} | } | ||||
} | } | ||||
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | ||||
void JEMALLOC_NOTHROW * | void JEMALLOC_NOTHROW * | ||||
JEMALLOC_ALLOC_SIZE(2) | JEMALLOC_ALLOC_SIZE(2) | ||||
je_realloc(void *ptr, size_t size) { | je_realloc(void *ptr, size_t arg_size) { | ||||
void *ret; | void *ret; | ||||
tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); | tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); | ||||
size_t usize JEMALLOC_CC_SILENCE_INIT(0); | size_t usize JEMALLOC_CC_SILENCE_INIT(0); | ||||
size_t old_usize = 0; | size_t old_usize = 0; | ||||
size_t size = arg_size; | |||||
LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); | LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); | ||||
if (unlikely(size == 0)) { | if (unlikely(size == 0)) { | ||||
size = 1; | size = 1; | ||||
} | } | ||||
if (likely(ptr != NULL)) { | if (likely(ptr != NULL)) { | ||||
assert(malloc_initialized() || IS_INITIALIZER); | assert(malloc_initialized() || IS_INITIALIZER); | ||||
tsd_t *tsd = tsd_fetch(); | tsd_t *tsd = tsd_fetch(); | ||||
check_entry_exit_locking(tsd_tsdn(tsd)); | check_entry_exit_locking(tsd_tsdn(tsd)); | ||||
hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr, | |||||
(uintptr_t)arg_size, 0, 0}}; | |||||
alloc_ctx_t alloc_ctx; | alloc_ctx_t alloc_ctx; | ||||
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); | rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); | ||||
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, | rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, | ||||
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); | (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); | ||||
assert(alloc_ctx.szind != NSIZES); | assert(alloc_ctx.szind != SC_NSIZES); | ||||
old_usize = sz_index2size(alloc_ctx.szind); | old_usize = sz_index2size(alloc_ctx.szind); | ||||
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); | assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); | ||||
if (config_prof && opt_prof) { | if (config_prof && opt_prof) { | ||||
usize = sz_s2u(size); | usize = sz_s2u(size); | ||||
ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? | if (unlikely(usize == 0 | ||||
NULL : irealloc_prof(tsd, ptr, old_usize, usize, | || usize > SC_LARGE_MAXCLASS)) { | ||||
&alloc_ctx); | ret = NULL; | ||||
} else { | } else { | ||||
ret = irealloc_prof(tsd, ptr, old_usize, usize, | |||||
&alloc_ctx, &hook_args); | |||||
} | |||||
} else { | |||||
if (config_stats) { | if (config_stats) { | ||||
usize = sz_s2u(size); | usize = sz_s2u(size); | ||||
} | } | ||||
ret = iralloc(tsd, ptr, old_usize, size, 0, false); | ret = iralloc(tsd, ptr, old_usize, size, 0, false, | ||||
&hook_args); | |||||
} | } | ||||
tsdn = tsd_tsdn(tsd); | tsdn = tsd_tsdn(tsd); | ||||
} else { | } else { | ||||
/* realloc(NULL, size) is equivalent to malloc(size). */ | /* realloc(NULL, size) is equivalent to malloc(size). */ | ||||
void *ret = je_malloc(size); | static_opts_t sopts; | ||||
LOG("core.realloc.exit", "result: %p", ret); | dynamic_opts_t dopts; | ||||
static_opts_init(&sopts); | |||||
dynamic_opts_init(&dopts); | |||||
sopts.null_out_result_on_error = true; | |||||
sopts.set_errno_on_error = true; | |||||
sopts.oom_string = | |||||
"<jemalloc>: Error in realloc(): out of memory\n"; | |||||
dopts.result = &ret; | |||||
dopts.num_items = 1; | |||||
dopts.item_size = size; | |||||
imalloc(&sopts, &dopts); | |||||
if (sopts.slow) { | |||||
uintptr_t args[3] = {(uintptr_t)ptr, arg_size}; | |||||
hook_invoke_alloc(hook_alloc_realloc, ret, | |||||
(uintptr_t)ret, args); | |||||
} | |||||
return ret; | return ret; | ||||
} | } | ||||
if (unlikely(ret == NULL)) { | if (unlikely(ret == NULL)) { | ||||
if (config_xmalloc && unlikely(opt_xmalloc)) { | if (config_xmalloc && unlikely(opt_xmalloc)) { | ||||
malloc_write("<jemalloc>: Error in realloc(): " | malloc_write("<jemalloc>: Error in realloc(): " | ||||
"out of memory\n"); | "out of memory\n"); | ||||
abort(); | abort(); | ||||
Show All 10 Lines | je_realloc(void *ptr, size_t arg_size) { | ||||
} | } | ||||
UTRACE(ptr, size, ret); | UTRACE(ptr, size, ret); | ||||
check_entry_exit_locking(tsdn); | check_entry_exit_locking(tsdn); | ||||
LOG("core.realloc.exit", "result: %p", ret); | LOG("core.realloc.exit", "result: %p", ret); | ||||
return ret; | return ret; | ||||
} | } | ||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW | JEMALLOC_NOINLINE | ||||
je_free(void *ptr) { | void | ||||
LOG("core.free.entry", "ptr: %p", ptr); | free_default(void *ptr) { | ||||
UTRACE(ptr, 0, 0); | UTRACE(ptr, 0, 0); | ||||
if (likely(ptr != NULL)) { | if (likely(ptr != NULL)) { | ||||
/* | /* | ||||
* We avoid setting up tsd fully (e.g. tcache, arena binding) | * We avoid setting up tsd fully (e.g. tcache, arena binding) | ||||
* based on only free() calls -- other activities trigger the | * based on only free() calls -- other activities trigger the | ||||
* minimal to full transition. This is because free() may | * minimal to full transition. This is because free() may | ||||
* happen during thread shutdown after tls deallocation: if a | * happen during thread shutdown after tls deallocation: if a | ||||
* thread never had any malloc activities until then, a | * thread never had any malloc activities until then, a | ||||
Show All 9 Lines | if (likely(tsd_fast(tsd))) { | ||||
tcache = tsd_tcachep_get(tsd); | tcache = tsd_tcachep_get(tsd); | ||||
ifree(tsd, ptr, tcache, false); | ifree(tsd, ptr, tcache, false); | ||||
} else { | } else { | ||||
if (likely(tsd_reentrancy_level_get(tsd) == 0)) { | if (likely(tsd_reentrancy_level_get(tsd) == 0)) { | ||||
tcache = tcache_get(tsd); | tcache = tcache_get(tsd); | ||||
} else { | } else { | ||||
tcache = NULL; | tcache = NULL; | ||||
} | } | ||||
uintptr_t args_raw[3] = {(uintptr_t)ptr}; | |||||
hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw); | |||||
ifree(tsd, ptr, tcache, true); | ifree(tsd, ptr, tcache, true); | ||||
} | } | ||||
check_entry_exit_locking(tsd_tsdn(tsd)); | check_entry_exit_locking(tsd_tsdn(tsd)); | ||||
} | } | ||||
} | |||||
/*
 * Attempt to free ptr entirely on the fast path: no full tsd setup, no
 * arena involvement -- just push the pointer back into the thread cache.
 * Returns true if the deallocation was completed here; false means the
 * caller must fall back to the slow path (free_default()).
 *
 * When size_hint is true, `size` is the caller-provided allocation size
 * (e.g. from sdallocx) and may be used to classify the object without an
 * rtree lookup.
 */
JEMALLOC_ALWAYS_INLINE
bool free_fastpath(void *ptr, size_t size, bool size_hint) {
	/* Only proceed if tsd is already initialized and in the fast state. */
	tsd_t *tsd = tsd_get(false);
	if (unlikely(!tsd || !tsd_fast(tsd))) {
		return false;
	}

	tcache_t *tcache = tsd_tcachep_get(tsd);

	alloc_ctx_t alloc_ctx;
	/*
	 * If !config_cache_oblivious, we can check PAGE alignment to
	 * detect sampled objects. Otherwise addresses are
	 * randomized, and we have to look it up in the rtree anyway.
	 * See also isfree().
	 */
	if (!size_hint || config_cache_oblivious) {
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree,
						      rtree_ctx, (uintptr_t)ptr,
						      &alloc_ctx.szind, &alloc_ctx.slab);

		/* Note: profiled objects will have alloc_ctx.slab set */
		if (!res || !alloc_ctx.slab) {
			return false;
		}
		assert(alloc_ctx.szind != SC_NSIZES);
	} else {
		/*
		 * Check for both sizes that are too large, and for sampled objects.
		 * Sampled objects are always page-aligned. The sampled object check
		 * will also check for null ptr.
		 */
		if (size > SC_LOOKUP_MAXCLASS || (((uintptr_t)ptr & PAGE_MASK) == 0)) {
			return false;
		}
		alloc_ctx.szind = sz_size2index_lookup(size);
	}

	/*
	 * Defer to the slow path when the tcache GC ticker fires, so that
	 * periodic tcache maintenance still happens.
	 */
	if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
		return false;
	}

	cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind);
	cache_bin_info_t *bin_info = &tcache_bin_info[alloc_ctx.szind];
	/* Fails (returns false) when the bin cannot accept the pointer. */
	if (!cache_bin_dalloc_easy(bin, bin_info, ptr)) {
		return false;
	}

	if (config_stats) {
		size_t usize = sz_index2size(alloc_ctx.szind);
		*tsd_thread_deallocatedp_get(tsd) += usize;
	}

	return true;
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW | |||||
je_free(void *ptr) { | |||||
LOG("core.free.entry", "ptr: %p", ptr); | |||||
if (!free_fastpath(ptr, 0, false)) { | |||||
free_default(ptr); | |||||
} | |||||
LOG("core.free.exit", ""); | LOG("core.free.exit", ""); | ||||
} | } | ||||
/* | /* | ||||
* End malloc(3)-compatible functions. | * End malloc(3)-compatible functions. | ||||
*/ | */ | ||||
/******************************************************************************/ | /******************************************************************************/ | ||||
/* | /* | ||||
Show All 10 Lines | je_memalign(size_t alignment, size_t size) { | ||||
dynamic_opts_t dopts; | dynamic_opts_t dopts; | ||||
LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, | LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, | ||||
size); | size); | ||||
static_opts_init(&sopts); | static_opts_init(&sopts); | ||||
dynamic_opts_init(&dopts); | dynamic_opts_init(&dopts); | ||||
sopts.bump_empty_alloc = true; | |||||
sopts.min_alignment = 1; | sopts.min_alignment = 1; | ||||
sopts.oom_string = | sopts.oom_string = | ||||
"<jemalloc>: Error allocating aligned memory: out of memory\n"; | "<jemalloc>: Error allocating aligned memory: out of memory\n"; | ||||
sopts.invalid_alignment_string = | sopts.invalid_alignment_string = | ||||
"<jemalloc>: Error allocating aligned memory: invalid alignment\n"; | "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; | ||||
sopts.null_out_result_on_error = true; | sopts.null_out_result_on_error = true; | ||||
dopts.result = &ret; | dopts.result = &ret; | ||||
dopts.num_items = 1; | dopts.num_items = 1; | ||||
dopts.item_size = size; | dopts.item_size = size; | ||||
dopts.alignment = alignment; | dopts.alignment = alignment; | ||||
imalloc(&sopts, &dopts); | imalloc(&sopts, &dopts); | ||||
if (sopts.slow) { | |||||
uintptr_t args[3] = {alignment, size}; | |||||
hook_invoke_alloc(hook_alloc_memalign, ret, (uintptr_t)ret, | |||||
args); | |||||
} | |||||
LOG("core.memalign.exit", "result: %p", ret); | LOG("core.memalign.exit", "result: %p", ret); | ||||
return ret; | return ret; | ||||
} | } | ||||
#endif | #endif | ||||
#ifdef JEMALLOC_OVERRIDE_VALLOC
/*
 * valloc(3)-compatible entry point: allocate size bytes aligned to a page
 * boundary.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size) {
	void *p;
	static_opts_t static_opts;
	dynamic_opts_t dyn_opts;

	LOG("core.valloc.entry", "size: %zu\n", size);

	static_opts_init(&static_opts);
	dynamic_opts_init(&dyn_opts);

	/* Both the minimum and the requested alignment are one page. */
	static_opts.min_alignment = PAGE;
	static_opts.null_out_result_on_error = true;
	static_opts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	static_opts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";

	dyn_opts.result = &p;
	dyn_opts.num_items = 1;
	dyn_opts.item_size = size;
	dyn_opts.alignment = PAGE;

	imalloc(&static_opts, &dyn_opts);
	if (static_opts.slow) {
		uintptr_t hook_args[3] = {size};
		hook_invoke_alloc(hook_alloc_valloc, p, (uintptr_t)p,
		    hook_args);
	}

	LOG("core.valloc.exit", "result: %p\n", p);
	return p;
}
#endif
#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) | #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 51 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
* End non-standard override functions. | * End non-standard override functions. | ||||
*/ | */ | ||||
/******************************************************************************/ | /******************************************************************************/ | ||||
/* | /* | ||||
* Begin non-standard functions. | * Begin non-standard functions. | ||||
*/ | */ | ||||
#ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API

#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y
#define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y)	\
  JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y)

/* Result pair for smallocx: the pointer and its actual usable size. */
typedef struct {
	void *ptr;
	size_t size;
} smallocx_return_t;

/*
 * Experimental smallocx API: behaves like mallocx(), but additionally
 * returns the usable size of the allocation, so callers may write up to
 * smallocx_return_t::size bytes.  The symbol name is suffixed with the
 * version GID so that incompatible builds cannot be linked accidentally.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
smallocx_return_t JEMALLOC_NOTHROW
/*
 * The attribute JEMALLOC_ATTR(malloc) cannot be used due to:
 *  - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488
 */
JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT)
  (size_t size, int flags) {
	/*
	 * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be
	 * used here because it makes writing beyond the `size`
	 * of the `ptr` undefined behavior, but the objective
	 * of this function is to allow writing beyond `size`
	 * up to `smallocx_return_t::size`.
	 */
	smallocx_return_t ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.assert_nonempty_alloc = true;
	sopts.null_out_result_on_error = true;
	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
	/* Ask imalloc() to report the usable size back via dopts.usize. */
	sopts.usize = true;

	dopts.result = &ret.ptr;
	dopts.num_items = 1;
	dopts.item_size = size;
	if (unlikely(flags != 0)) {
		/* Decode MALLOCX_* request flags into dynamic options. */
		if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
			dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
		}

		dopts.zero = MALLOCX_ZERO_GET(flags);

		if ((flags & MALLOCX_TCACHE_MASK) != 0) {
			if ((flags & MALLOCX_TCACHE_MASK)
			    == MALLOCX_TCACHE_NONE) {
				dopts.tcache_ind = TCACHE_IND_NONE;
			} else {
				dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
			}
		} else {
			dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
		}

		/* Braced for consistency with the surrounding conditionals. */
		if ((flags & MALLOCX_ARENA_MASK) != 0) {
			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
		}
	}

	imalloc(&sopts, &dopts);
	assert(dopts.usize == je_nallocx(size, flags));
	ret.size = dopts.usize;

	LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size);
	return ret;
}
#undef JEMALLOC_SMALLOCX_CONCAT_HELPER
#undef JEMALLOC_SMALLOCX_CONCAT_HELPER2
#endif
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN | |||||
void JEMALLOC_NOTHROW * | void JEMALLOC_NOTHROW * | ||||
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) | JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) | ||||
je_mallocx(size_t size, int flags) { | je_mallocx(size_t size, int flags) { | ||||
void *ret; | void *ret; | ||||
static_opts_t sopts; | static_opts_t sopts; | ||||
dynamic_opts_t dopts; | dynamic_opts_t dopts; | ||||
LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); | LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); | ||||
Show All 26 Lines | if ((flags & MALLOCX_TCACHE_MASK) != 0) { | ||||
dopts.tcache_ind = TCACHE_IND_AUTOMATIC; | dopts.tcache_ind = TCACHE_IND_AUTOMATIC; | ||||
} | } | ||||
if ((flags & MALLOCX_ARENA_MASK) != 0) | if ((flags & MALLOCX_ARENA_MASK) != 0) | ||||
dopts.arena_ind = MALLOCX_ARENA_GET(flags); | dopts.arena_ind = MALLOCX_ARENA_GET(flags); | ||||
} | } | ||||
imalloc(&sopts, &dopts); | imalloc(&sopts, &dopts); | ||||
if (sopts.slow) { | |||||
uintptr_t args[3] = {size, flags}; | |||||
hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret, | |||||
args); | |||||
} | |||||
LOG("core.mallocx.exit", "result: %p", ret); | LOG("core.mallocx.exit", "result: %p", ret); | ||||
return ret; | return ret; | ||||
} | } | ||||
static void * | static void * | ||||
irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, | irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, | ||||
size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, | size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, | ||||
prof_tctx_t *tctx) { | prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) { | ||||
void *p; | void *p; | ||||
if (tctx == NULL) { | if (tctx == NULL) { | ||||
return NULL; | return NULL; | ||||
} | } | ||||
if (usize <= SMALL_MAXCLASS) { | if (usize <= SC_SMALL_MAXCLASS) { | ||||
p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS, | p = iralloct(tsdn, old_ptr, old_usize, | ||||
alignment, zero, tcache, arena); | SC_LARGE_MINCLASS, alignment, zero, tcache, | ||||
arena, hook_args); | |||||
if (p == NULL) { | if (p == NULL) { | ||||
return NULL; | return NULL; | ||||
} | } | ||||
arena_prof_promote(tsdn, p, usize); | arena_prof_promote(tsdn, p, usize); | ||||
} else { | } else { | ||||
p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, | p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, | ||||
tcache, arena); | tcache, arena, hook_args); | ||||
} | } | ||||
return p; | return p; | ||||
} | } | ||||
JEMALLOC_ALWAYS_INLINE void * | JEMALLOC_ALWAYS_INLINE void * | ||||
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, | irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, | ||||
size_t alignment, size_t *usize, bool zero, tcache_t *tcache, | size_t alignment, size_t *usize, bool zero, tcache_t *tcache, | ||||
arena_t *arena, alloc_ctx_t *alloc_ctx) { | arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) { | ||||
void *p; | void *p; | ||||
bool prof_active; | bool prof_active; | ||||
prof_tctx_t *old_tctx, *tctx; | prof_tctx_t *old_tctx, *tctx; | ||||
prof_active = prof_active_get_unlocked(); | prof_active = prof_active_get_unlocked(); | ||||
old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); | old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); | ||||
tctx = prof_alloc_prep(tsd, *usize, prof_active, false); | tctx = prof_alloc_prep(tsd, *usize, prof_active, false); | ||||
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { | ||||
p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, | p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, | ||||
*usize, alignment, zero, tcache, arena, tctx); | *usize, alignment, zero, tcache, arena, tctx, hook_args); | ||||
} else { | } else { | ||||
p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, | p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, | ||||
zero, tcache, arena); | zero, tcache, arena, hook_args); | ||||
} | } | ||||
if (unlikely(p == NULL)) { | if (unlikely(p == NULL)) { | ||||
prof_alloc_rollback(tsd, tctx, false); | prof_alloc_rollback(tsd, tctx, false); | ||||
return NULL; | return NULL; | ||||
} | } | ||||
if (p == old_ptr && alignment != 0) { | if (p == old_ptr && alignment != 0) { | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 54 Lines • ▼ Show 20 Lines | je_rallocx(void *ptr, size_t size, int flags) { | ||||
} else { | } else { | ||||
tcache = tcache_get(tsd); | tcache = tcache_get(tsd); | ||||
} | } | ||||
alloc_ctx_t alloc_ctx; | alloc_ctx_t alloc_ctx; | ||||
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); | rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); | ||||
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, | rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, | ||||
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); | (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); | ||||
assert(alloc_ctx.szind != NSIZES); | assert(alloc_ctx.szind != SC_NSIZES); | ||||
old_usize = sz_index2size(alloc_ctx.szind); | old_usize = sz_index2size(alloc_ctx.szind); | ||||
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); | assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); | ||||
hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags, | |||||
0}}; | |||||
if (config_prof && opt_prof) { | if (config_prof && opt_prof) { | ||||
usize = (alignment == 0) ? | usize = (alignment == 0) ? | ||||
sz_s2u(size) : sz_sa2u(size, alignment); | sz_s2u(size) : sz_sa2u(size, alignment); | ||||
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { | if (unlikely(usize == 0 | ||||
|| usize > SC_LARGE_MAXCLASS)) { | |||||
goto label_oom; | goto label_oom; | ||||
} | } | ||||
p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, | p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, | ||||
zero, tcache, arena, &alloc_ctx); | zero, tcache, arena, &alloc_ctx, &hook_args); | ||||
if (unlikely(p == NULL)) { | if (unlikely(p == NULL)) { | ||||
goto label_oom; | goto label_oom; | ||||
} | } | ||||
} else { | } else { | ||||
p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, | p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, | ||||
zero, tcache, arena); | zero, tcache, arena, &hook_args); | ||||
if (unlikely(p == NULL)) { | if (unlikely(p == NULL)) { | ||||
goto label_oom; | goto label_oom; | ||||
} | } | ||||
if (config_stats) { | if (config_stats) { | ||||
usize = isalloc(tsd_tsdn(tsd), p); | usize = isalloc(tsd_tsdn(tsd), p); | ||||
} | } | ||||
} | } | ||||
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); | assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); | ||||
Show All 17 Lines | label_oom: | ||||
LOG("core.rallocx.exit", "result: %p", NULL); | LOG("core.rallocx.exit", "result: %p", NULL); | ||||
return NULL; | return NULL; | ||||
} | } | ||||
JEMALLOC_ALWAYS_INLINE size_t | JEMALLOC_ALWAYS_INLINE size_t | ||||
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, | ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, | ||||
size_t extra, size_t alignment, bool zero) { | size_t extra, size_t alignment, bool zero) { | ||||
size_t usize; | size_t newsize; | ||||
if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) { | if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero, | ||||
&newsize)) { | |||||
return old_usize; | return old_usize; | ||||
} | } | ||||
usize = isalloc(tsdn, ptr); | |||||
return usize; | return newsize; | ||||
} | } | ||||
static size_t | static size_t | ||||
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, | ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, | ||||
size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { | size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { | ||||
size_t usize; | size_t usize; | ||||
if (tctx == NULL) { | if (tctx == NULL) { | ||||
Show All 17 Lines | ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, | ||||
/* | /* | ||||
* usize isn't knowable before ixalloc() returns when extra is non-zero. | * usize isn't knowable before ixalloc() returns when extra is non-zero. | ||||
* Therefore, compute its maximum possible value and use that in | * Therefore, compute its maximum possible value and use that in | ||||
* prof_alloc_prep() to decide whether to capture a backtrace. | * prof_alloc_prep() to decide whether to capture a backtrace. | ||||
* prof_realloc() will use the actual usize to decide whether to sample. | * prof_realloc() will use the actual usize to decide whether to sample. | ||||
*/ | */ | ||||
if (alignment == 0) { | if (alignment == 0) { | ||||
usize_max = sz_s2u(size+extra); | usize_max = sz_s2u(size+extra); | ||||
assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS); | assert(usize_max > 0 | ||||
&& usize_max <= SC_LARGE_MAXCLASS); | |||||
} else { | } else { | ||||
usize_max = sz_sa2u(size+extra, alignment); | usize_max = sz_sa2u(size+extra, alignment); | ||||
if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) { | if (unlikely(usize_max == 0 | ||||
|| usize_max > SC_LARGE_MAXCLASS)) { | |||||
/* | /* | ||||
* usize_max is out of range, and chances are that | * usize_max is out of range, and chances are that | ||||
* allocation will fail, but use the maximum possible | * allocation will fail, but use the maximum possible | ||||
* value and carry on with prof_alloc_prep(), just in | * value and carry on with prof_alloc_prep(), just in | ||||
* case allocation succeeds. | * case allocation succeeds. | ||||
*/ | */ | ||||
usize_max = LARGE_MAXCLASS; | usize_max = SC_LARGE_MAXCLASS; | ||||
} | } | ||||
} | } | ||||
tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); | tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); | ||||
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { | if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { | ||||
usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, | usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, | ||||
size, extra, alignment, zero, tctx); | size, extra, alignment, zero, tctx); | ||||
} else { | } else { | ||||
Show All 26 Lines | je_xallocx(void *ptr, size_t size, size_t extra, int flags) { | ||||
assert(malloc_initialized() || IS_INITIALIZER); | assert(malloc_initialized() || IS_INITIALIZER); | ||||
tsd = tsd_fetch(); | tsd = tsd_fetch(); | ||||
check_entry_exit_locking(tsd_tsdn(tsd)); | check_entry_exit_locking(tsd_tsdn(tsd)); | ||||
alloc_ctx_t alloc_ctx; | alloc_ctx_t alloc_ctx; | ||||
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); | rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); | ||||
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, | rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, | ||||
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); | (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); | ||||
assert(alloc_ctx.szind != NSIZES); | assert(alloc_ctx.szind != SC_NSIZES); | ||||
old_usize = sz_index2size(alloc_ctx.szind); | old_usize = sz_index2size(alloc_ctx.szind); | ||||
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); | assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); | ||||
/* | /* | ||||
* The API explicitly absolves itself of protecting against (size + | * The API explicitly absolves itself of protecting against (size + | ||||
* extra) numerical overflow, but we may need to clamp extra to avoid | * extra) numerical overflow, but we may need to clamp extra to avoid | ||||
* exceeding LARGE_MAXCLASS. | * exceeding SC_LARGE_MAXCLASS. | ||||
* | * | ||||
* Ordinarily, size limit checking is handled deeper down, but here we | * Ordinarily, size limit checking is handled deeper down, but here we | ||||
* have to check as part of (size + extra) clamping, since we need the | * have to check as part of (size + extra) clamping, since we need the | ||||
* clamped value in the above helper functions. | * clamped value in the above helper functions. | ||||
*/ | */ | ||||
if (unlikely(size > LARGE_MAXCLASS)) { | if (unlikely(size > SC_LARGE_MAXCLASS)) { | ||||
usize = old_usize; | usize = old_usize; | ||||
goto label_not_resized; | goto label_not_resized; | ||||
} | } | ||||
if (unlikely(LARGE_MAXCLASS - size < extra)) { | if (unlikely(SC_LARGE_MAXCLASS - size < extra)) { | ||||
extra = LARGE_MAXCLASS - size; | extra = SC_LARGE_MAXCLASS - size; | ||||
} | } | ||||
if (config_prof && opt_prof) { | if (config_prof && opt_prof) { | ||||
usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, | usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, | ||||
alignment, zero, &alloc_ctx); | alignment, zero, &alloc_ctx); | ||||
} else { | } else { | ||||
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, | usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, | ||||
extra, alignment, zero); | extra, alignment, zero); | ||||
} | } | ||||
if (unlikely(usize == old_usize)) { | if (unlikely(usize == old_usize)) { | ||||
goto label_not_resized; | goto label_not_resized; | ||||
} | } | ||||
if (config_stats) { | if (config_stats) { | ||||
*tsd_thread_allocatedp_get(tsd) += usize; | *tsd_thread_allocatedp_get(tsd) += usize; | ||||
*tsd_thread_deallocatedp_get(tsd) += old_usize; | *tsd_thread_deallocatedp_get(tsd) += old_usize; | ||||
} | } | ||||
label_not_resized: | label_not_resized: | ||||
if (unlikely(!tsd_fast(tsd))) { | |||||
uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags}; | |||||
hook_invoke_expand(hook_expand_xallocx, ptr, old_usize, | |||||
usize, (uintptr_t)usize, args); | |||||
} | |||||
UTRACE(ptr, size, ptr); | UTRACE(ptr, size, ptr); | ||||
check_entry_exit_locking(tsd_tsdn(tsd)); | check_entry_exit_locking(tsd_tsdn(tsd)); | ||||
LOG("core.xallocx.exit", "result: %zu", usize); | LOG("core.xallocx.exit", "result: %zu", usize); | ||||
return usize; | return usize; | ||||
} | } | ||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW | ||||
JEMALLOC_ATTR(pure) | JEMALLOC_ATTR(pure) | ||||
je_sallocx(const void *ptr, UNUSED int flags) { | je_sallocx(const void *ptr, int flags) { | ||||
size_t usize; | size_t usize; | ||||
tsdn_t *tsdn; | tsdn_t *tsdn; | ||||
LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); | LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); | ||||
assert(malloc_initialized() || IS_INITIALIZER); | assert(malloc_initialized() || IS_INITIALIZER); | ||||
assert(ptr != NULL); | assert(ptr != NULL); | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { | ||||
} | } | ||||
} | } | ||||
UTRACE(ptr, 0, 0); | UTRACE(ptr, 0, 0); | ||||
if (likely(fast)) { | if (likely(fast)) { | ||||
tsd_assert_fast(tsd); | tsd_assert_fast(tsd); | ||||
ifree(tsd, ptr, tcache, false); | ifree(tsd, ptr, tcache, false); | ||||
} else { | } else { | ||||
uintptr_t args_raw[3] = {(uintptr_t)ptr, flags}; | |||||
hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw); | |||||
ifree(tsd, ptr, tcache, true); | ifree(tsd, ptr, tcache, true); | ||||
} | } | ||||
check_entry_exit_locking(tsd_tsdn(tsd)); | check_entry_exit_locking(tsd_tsdn(tsd)); | ||||
LOG("core.dallocx.exit", ""); | LOG("core.dallocx.exit", ""); | ||||
} | } | ||||
JEMALLOC_ALWAYS_INLINE size_t | JEMALLOC_ALWAYS_INLINE size_t | ||||
inallocx(tsdn_t *tsdn, size_t size, int flags) { | inallocx(tsdn_t *tsdn, size_t size, int flags) { | ||||
check_entry_exit_locking(tsdn); | check_entry_exit_locking(tsdn); | ||||
size_t usize; | size_t usize; | ||||
if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { | if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { | ||||
usize = sz_s2u(size); | usize = sz_s2u(size); | ||||
} else { | } else { | ||||
usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); | usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); | ||||
} | } | ||||
check_entry_exit_locking(tsdn); | check_entry_exit_locking(tsdn); | ||||
return usize; | return usize; | ||||
} | } | ||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW | JEMALLOC_NOINLINE void | ||||
je_sdallocx(void *ptr, size_t size, int flags) { | sdallocx_default(void *ptr, size_t size, int flags) { | ||||
assert(ptr != NULL); | assert(ptr != NULL); | ||||
assert(malloc_initialized() || IS_INITIALIZER); | assert(malloc_initialized() || IS_INITIALIZER); | ||||
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, | |||||
size, flags); | |||||
tsd_t *tsd = tsd_fetch(); | tsd_t *tsd = tsd_fetch(); | ||||
bool fast = tsd_fast(tsd); | bool fast = tsd_fast(tsd); | ||||
size_t usize = inallocx(tsd_tsdn(tsd), size, flags); | size_t usize = inallocx(tsd_tsdn(tsd), size, flags); | ||||
assert(usize == isalloc(tsd_tsdn(tsd), ptr)); | assert(usize == isalloc(tsd_tsdn(tsd), ptr)); | ||||
check_entry_exit_locking(tsd_tsdn(tsd)); | check_entry_exit_locking(tsd_tsdn(tsd)); | ||||
tcache_t *tcache; | tcache_t *tcache; | ||||
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { | if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { | ||||
Show All 17 Lines | if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { | ||||
} | } | ||||
} | } | ||||
UTRACE(ptr, 0, 0); | UTRACE(ptr, 0, 0); | ||||
if (likely(fast)) { | if (likely(fast)) { | ||||
tsd_assert_fast(tsd); | tsd_assert_fast(tsd); | ||||
isfree(tsd, ptr, usize, tcache, false); | isfree(tsd, ptr, usize, tcache, false); | ||||
} else { | } else { | ||||
uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags}; | |||||
hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw); | |||||
isfree(tsd, ptr, usize, tcache, true); | isfree(tsd, ptr, usize, tcache, true); | ||||
} | } | ||||
check_entry_exit_locking(tsd_tsdn(tsd)); | check_entry_exit_locking(tsd_tsdn(tsd)); | ||||
} | |||||
JEMALLOC_EXPORT void JEMALLOC_NOTHROW | |||||
je_sdallocx(void *ptr, size_t size, int flags) { | |||||
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, | |||||
size, flags); | |||||
if (flags !=0 || !free_fastpath(ptr, size, true)) { | |||||
sdallocx_default(ptr, size, flags); | |||||
} | |||||
LOG("core.sdallocx.exit", ""); | LOG("core.sdallocx.exit", ""); | ||||
} | } | ||||
void JEMALLOC_NOTHROW | |||||
je_sdallocx_noflags(void *ptr, size_t size) { | |||||
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr, | |||||
size); | |||||
if (!free_fastpath(ptr, size, true)) { | |||||
sdallocx_default(ptr, size, 0); | |||||
} | |||||
LOG("core.sdallocx.exit", ""); | |||||
} | |||||
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW | JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW | ||||
JEMALLOC_ATTR(pure) | JEMALLOC_ATTR(pure) | ||||
je_nallocx(size_t size, int flags) { | je_nallocx(size_t size, int flags) { | ||||
size_t usize; | size_t usize; | ||||
tsdn_t *tsdn; | tsdn_t *tsdn; | ||||
assert(size != 0); | assert(size != 0); | ||||
if (unlikely(malloc_init())) { | if (unlikely(malloc_init())) { | ||||
LOG("core.nallocx.exit", "result: %zu", ZU(0)); | LOG("core.nallocx.exit", "result: %zu", ZU(0)); | ||||
return 0; | return 0; | ||||
} | } | ||||
tsdn = tsdn_fetch(); | tsdn = tsdn_fetch(); | ||||
check_entry_exit_locking(tsdn); | check_entry_exit_locking(tsdn); | ||||
usize = inallocx(tsdn, size, flags); | usize = inallocx(tsdn, size, flags); | ||||
if (unlikely(usize > LARGE_MAXCLASS)) { | if (unlikely(usize > SC_LARGE_MAXCLASS)) { | ||||
LOG("core.nallocx.exit", "result: %zu", ZU(0)); | LOG("core.nallocx.exit", "result: %zu", ZU(0)); | ||||
return 0; | return 0; | ||||
} | } | ||||
check_entry_exit_locking(tsdn); | check_entry_exit_locking(tsdn); | ||||
LOG("core.nallocx.exit", "result: %zu", usize); | LOG("core.nallocx.exit", "result: %zu", usize); | ||||
return usize; | return usize; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 296 Lines • ▼ Show 20 Lines | for (j = 0; j < narenas; j++) { | ||||
arena_prefork7(tsd_tsdn(tsd), arena); | arena_prefork7(tsd_tsdn(tsd), arena); | ||||
break; | break; | ||||
default: not_reached(); | default: not_reached(); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
} | } | ||||
prof_prefork1(tsd_tsdn(tsd)); | prof_prefork1(tsd_tsdn(tsd)); | ||||
tsd_prefork(tsd); | |||||
} | } | ||||
#ifndef JEMALLOC_MUTEX_INIT_CB | #ifndef JEMALLOC_MUTEX_INIT_CB | ||||
void | void | ||||
jemalloc_postfork_parent(void) | jemalloc_postfork_parent(void) | ||||
#else | #else | ||||
JEMALLOC_EXPORT void | JEMALLOC_EXPORT void | ||||
_malloc_postfork(void) | _malloc_postfork(void) | ||||
#endif | #endif | ||||
{ | { | ||||
tsd_t *tsd; | tsd_t *tsd; | ||||
unsigned i, narenas; | unsigned i, narenas; | ||||
#ifdef JEMALLOC_MUTEX_INIT_CB | #ifdef JEMALLOC_MUTEX_INIT_CB | ||||
if (!malloc_initialized()) { | if (!malloc_initialized()) { | ||||
return; | return; | ||||
} | } | ||||
#endif | #endif | ||||
assert(malloc_initialized()); | assert(malloc_initialized()); | ||||
tsd = tsd_fetch(); | tsd = tsd_fetch(); | ||||
tsd_postfork_parent(tsd); | |||||
witness_postfork_parent(tsd_witness_tsdp_get(tsd)); | witness_postfork_parent(tsd_witness_tsdp_get(tsd)); | ||||
/* Release all mutexes, now that fork() has completed. */ | /* Release all mutexes, now that fork() has completed. */ | ||||
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { | for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { | ||||
arena_t *arena; | arena_t *arena; | ||||
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { | if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { | ||||
arena_postfork_parent(tsd_tsdn(tsd), arena); | arena_postfork_parent(tsd_tsdn(tsd), arena); | ||||
} | } | ||||
Show All 10 Lines | |||||
void | void | ||||
jemalloc_postfork_child(void) { | jemalloc_postfork_child(void) { | ||||
tsd_t *tsd; | tsd_t *tsd; | ||||
unsigned i, narenas; | unsigned i, narenas; | ||||
assert(malloc_initialized()); | assert(malloc_initialized()); | ||||
tsd = tsd_fetch(); | tsd = tsd_fetch(); | ||||
tsd_postfork_child(tsd); | |||||
witness_postfork_child(tsd_witness_tsdp_get(tsd)); | witness_postfork_child(tsd_witness_tsdp_get(tsd)); | ||||
/* Release all mutexes, now that fork() has completed. */ | /* Release all mutexes, now that fork() has completed. */ | ||||
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { | for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { | ||||
arena_t *arena; | arena_t *arena; | ||||
if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { | if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { | ||||
arena_postfork_child(tsd_tsdn(tsd), arena); | arena_postfork_child(tsd_tsdn(tsd), arena); | ||||
Show All 19 Lines |