static void _malloc_initialize(void) {
    MALLOC_LOCK();
    if (!_malloc_is_initialized) {
        unsigned n;
        malloc_zone_t *zone;

        _malloc_is_initialized = TRUE;
        set_flags_from_environment(); // will only set flags up to two times
        n = malloc_num_zones;
typedef struct _malloc_zone_t {
    /* Only zone implementors should depend on the layout of this structure;
       Regular callers should use the access functions below */
    void *reserved1;  /* RESERVED FOR CFAllocator DO NOT USE */
    void *reserved2;  /* RESERVED FOR CFAllocator DO NOT USE */
    size_t (*size)(struct _malloc_zone_t *zone, const void *ptr); /* returns the size of a block or 0 if not in this zone; must be fast, especially for negative answers */
    void *(*malloc)(struct _malloc_zone_t *zone, size_t size);
    void *(*calloc)(struct _malloc_zone_t *zone, size_t num_items, size_t size); /* same as malloc, but block returned is set to zero */
    void *(*valloc)(struct _malloc_zone_t *zone, size_t size); /* same as malloc, but block returned is set to zero and is guaranteed to be page aligned */
    void (*free)(struct _malloc_zone_t *zone, void *ptr);
    void *(*realloc)(struct _malloc_zone_t *zone, void *ptr, size_t size);
    void (*destroy)(struct _malloc_zone_t *zone); /* zone is destroyed and all memory reclaimed */
    const char *zone_name;

    /* Optional batch callbacks; these may be NULL */
    unsigned (*batch_malloc)(struct _malloc_zone_t *zone, size_t size, void **results, unsigned num_requested); /* given a size, returns pointers capable of holding that size; returns the number of pointers allocated (maybe 0 or less than num_requested) */
    void (*batch_free)(struct _malloc_zone_t *zone, void **to_be_freed, unsigned num_to_be_freed); /* frees all the pointers in to_be_freed; note that to_be_freed may be overwritten during the process */

    struct malloc_introspection_t *introspect;
    unsigned version;

    /* aligned memory allocation. The callback may be NULL. Present in version >= 5. */
    void *(*memalign)(struct _malloc_zone_t *zone, size_t alignment, size_t size);

    /* free a pointer known to be in zone and known to have the given size. The callback may be NULL. Present in version >= 6. */
    void (*free_definite_size)(struct _malloc_zone_t *zone, void *ptr, size_t size);

    /* Empty out caches in the face of memory pressure. The callback may be NULL. Present in version >= 8. */
    size_t (*pressure_relief)(struct _malloc_zone_t *zone, size_t goal);
} malloc_zone_t;
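For orientation, here is a minimal sketch (not libmalloc code) of how these callbacks are consumed: a caller holding a malloc_zone_t * simply dispatches through the function pointers, which is essentially what the public malloc_zone_malloc()/malloc_zone_free() wrappers in <malloc/malloc.h> do.

#include <malloc/malloc.h>
#include <stdio.h>

int main(void) {
    malloc_zone_t *zone = malloc_default_zone();  // whatever zone is currently the default

    void *p = zone->malloc(zone, 64);             // dispatch through the zone's malloc callback
    size_t usable = zone->size(zone, p);          // size() answers "is this block mine, and how big?"
    printf("got %zu usable bytes\n", usable);
    zone->free(zone, p);                          // blocks must be freed into their owning zone
    return 0;
}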
typedef struct szone_s {          // vm_allocate()'d, so page-aligned to begin with.
    malloc_zone_t basic_zone;     // first page will be given read-only protection
    uint8_t pad[PAGE_MAX_SIZE - sizeof(malloc_zone_t)];

    unsigned long cpu_id_key;     // unused
    // remainder of structure is R/W (contains no function pointers)
    unsigned debug_flags;
    void *log_address;

    /* Regions for tiny objects */
    _malloc_lock_s tiny_regions_lock CACHE_ALIGN;
    size_t num_tiny_regions;
    size_t num_tiny_regions_dealloc;
    region_hash_generation_t *tiny_region_generation;
    region_hash_generation_t trg[2];

    int num_tiny_magazines;
    unsigned num_tiny_magazines_mask;
    int num_tiny_magazines_mask_shift;
    magazine_t *tiny_magazines;   // array of per-processor magazines

    uintptr_t last_tiny_advise;

    /* Regions for small objects */
    _malloc_lock_s small_regions_lock CACHE_ALIGN;
    size_t num_small_regions;
    size_t num_small_regions_dealloc;
    region_hash_generation_t *small_region_generation;
    region_hash_generation_t srg[2];

    unsigned num_small_slots;     // determined by physmem size

    int num_small_magazines;
    unsigned num_small_magazines_mask;
    int num_small_magazines_mask_shift;
    magazine_t *small_magazines;  // array of per-processor magazines

    uintptr_t last_small_advise;

    /* large objects: all the rest */
    _malloc_lock_s large_szone_lock CACHE_ALIGN;  // One customer at a time for large
    unsigned num_large_objects_in_use;
    unsigned num_large_entries;
    large_entry_t *large_entries; // hashed by location; null entries don't count
    size_t num_bytes_in_large_objects;

#if LARGE_CACHE
    int large_entry_cache_oldest;
    int large_entry_cache_newest;
    large_entry_t large_entry_cache[LARGE_ENTRY_CACHE_SIZE]; // "death row" for large malloc/free
    boolean_t large_legacy_reset_mprotect;
    size_t large_entry_cache_reserve_bytes;
    size_t large_entry_cache_reserve_limit;
    size_t large_entry_cache_bytes;  // total size of death row, bytes
#endif

    /* flag and limits pertaining to altered malloc behavior for systems with
       large amounts of physical memory */
    unsigned is_largemem;
    unsigned large_threshold;
    unsigned vm_copy_threshold;

    /* security cookie */
    uintptr_t cookie;

    /* Initial region list */
    region_t initial_tiny_regions[INITIAL_NUM_REGIONS];
    region_t initial_small_regions[INITIAL_NUM_REGIONS];
    /* The purgeable zone constructed by create_purgeable_zone() would like to hand off tiny and small
     * allocations to the default scalable zone. Record the latter as the "helper" zone here. */
    struct szone_s *helper_zone;
} szone_t;
typedef struct nanozone_s {       // vm_allocate()'d, so page-aligned to begin with.
    malloc_zone_t basic_zone;     // first page will be given read-only protection
    uint8_t pad[PAGE_MAX_SIZE - sizeof(malloc_zone_t)];

    // remainder of structure is R/W (contains no function pointers)
    // page-aligned
    struct nano_meta_s meta_data[NANO_MAG_SIZE][NANO_SLOT_SIZE]; // max: NANO_MAG_SIZE cores x NANO_SLOT_SIZE slots for nano blocks {16 .. 256}
    _malloc_lock_s band_resupply_lock[NANO_MAG_SIZE];
    uintptr_t band_max_mapped_baseaddr[NANO_MAG_SIZE];
    size_t core_mapped_size[NANO_MAG_SIZE];

    /*
     * The nano zone constructed by create_nano_zone() would like to hand off tiny, small, and large
     * allocations to the default scalable zone. Record the latter as the "helper" zone here.
     */
    malloc_zone_t *helper_zone;
} nanozone_t;
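The helper_zone field captures the nano zone's fallback path: nano only serves blocks in the {16 .. 256} byte slot range noted above, and anything larger is forwarded to the scalable zone recorded here. A minimal sketch of that delegation (nano_malloc_from_slots() is a hypothetical stand-in for the real per-core slot allocation path, not a libmalloc function):

extern void *nano_malloc_from_slots(nanozone_t *nanozone, size_t size); // hypothetical stand-in

static void *sketch_nano_malloc(nanozone_t *nanozone, size_t size) {
    if (size <= 256) {                                  // within the nano slot range {16 .. 256}
        return nano_malloc_from_slots(nanozone, size);  // served from the per-core slot LIFOs
    }
    // Too big for nano: hand the request to the scalable "helper" zone recorded at creation time.
    malloc_zone_t *zone = nanozone->helper_zone;
    return zone->malloc(zone, size);
}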
/*
 * The shared kernel/user "comm page(s)":
 *
 * The last several pages of every address space are reserved for the kernel/user
 * "comm area". During system initialization, the kernel populates the comm pages with
 * code customized for the particular processor and platform.
 *
 * Because Mach VM cannot map the last page of an address space, we don't use it.
 */
The comm pages are a shared memory region set up at system initialization time that holds various pieces of system data. libmalloc reads system properties from the comm pages in quite a few places.
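The values libmalloc pulls from the comm page in the code below (physical memory size, CPU count) have portable public equivalents. A minimal sketch of reading the same information through sysctl/sysconf, which is what the non-x86/ARM fallback paths do:

#include <sys/sysctl.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    // libmalloc reads these straight off the comm page (_COMM_PAGE_MEMORY_SIZE,
    // _COMM_PAGE_NCPUS) to avoid a syscall; the public equivalents are:
    uint64_t hw_memsize = 0;
    size_t len = sizeof(hw_memsize);
    sysctlbyname("hw.memsize", &hw_memsize, &len, NULL, 0);  // physical memory size in bytes

    long nproc = sysconf(_SC_NPROCESSORS_CONF);              // configured CPU count

    printf("hw.memsize=%llu bytes, ncpus=%ld\n",
           (unsigned long long)hw_memsize, nproc);
    return 0;
}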
    int num_tiny_magazines;
    unsigned num_tiny_magazines_mask;
    int num_tiny_magazines_mask_shift;
    magazine_t *tiny_magazines;   // array of per-processor magazines
// After initialization this is exactly the same as tiny; only the variable names differ.
/*
 * Initialize variables that size the free list for SMALL allocations based
 * upon the amount of memory in the system. Switch to a larger number of
 * free list entries at 1GB.
 */
// These variables are used when computing the SMALL free-list sizes from the
// amount of memory in the system.

// ---------- Initialization differs depending on whether "largemem" behavior is needed ---------- start ----------
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
    if ((hw_memsize = *(uint64_t *)(uintptr_t)_COMM_PAGE_MEMORY_SIZE) >= (1ULL << 30)) // 1 GB = 1073741824 bytes
#else
    size_t uint64_t_size = sizeof(hw_memsize);
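The excerpt breaks off just before the branch body. For the shape of what follows, here is a hedged sketch (the field names come from the szone_s struct above; the *_LARGEMEM and default constant names are assumptions for illustration, not verified against the source):

// Hedged sketch of how the largemem decision continues (not a verbatim quote).
if (hw_memsize >= (1ULL << 30)) {                          // >= 1 GB of physical memory
    szone->is_largemem = 1;
    szone->num_small_slots = NUM_SMALL_SLOTS_LARGEMEM;     // more SMALL free-list slots
    szone->large_threshold = LARGE_THRESHOLD_LARGEMEM;     // hand off to "large" at a higher size
    szone->vm_copy_threshold = VM_COPY_THRESHOLD_LARGEMEM;
} else {
    szone->is_largemem = 0;
    szone->num_small_slots = NUM_SMALL_SLOTS;
    szone->large_threshold = LARGE_THRESHOLD;
    szone->vm_copy_threshold = VM_COPY_THRESHOLD;
}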
    szone->basic_zone.reserved1 = 0; /* Set to zero once and for all as required by CFAllocator. */
    szone->basic_zone.reserved2 = 0; /* Set to zero once and for all as required by CFAllocator. */
    mprotect(szone, sizeof(szone->basic_zone), PROT_READ); /* Prevent overwriting the function pointers in basic_zone. */
    szone->debug_flags = debug_flags;
    _malloc_lock_init(&szone->large_szone_lock);
    // --------- set up the externally visible callbacks ---------- end ----------
#if defined(__ppc__) || defined(__ppc64__)
    /*
     * In the interest of compatibility for PPC applications executing via Rosetta,
     * arrange to zero-fill allocations as occurred by side effect in Leopard and earlier.
     */
    zeroify_scalable_zone((malloc_zone_t *)szone);
#endif
szone->cpu_id_key = -1UL; // Unused.
    // Query the number of configured processors.
    // Uniprocessor case gets just one tiny and one small magazine (whose index is zero). This gives
    // the same behavior as the original scalable malloc. MP gets per-CPU magazines
    // that scale (way) better.
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
    int nproc = *(uint8_t *)(uintptr_t)_COMM_PAGE_NCPUS;
#else
    int nproc = sysconf(_SC_NPROCESSORS_CONF);
#endif

    // ------ Allocate a tiny magazine per CPU and initialize the related fields ------ start
    // Compute how many tiny magazines are needed from the number of CPUs.
    szone->num_tiny_magazines = (nproc > 1) ? MIN(nproc, TINY_MAX_MAGAZINES) : 1;
    // FIXME vm_allocate() based on number of configured CPUs
    // Allocate the backing pages for the magazines.
    magazine_t *tiny_magazines = allocate_pages(NULL, TINY_MAGAZINE_PAGED_SIZE, 0,
                                                SCALABLE_MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC);
    if (NULL == tiny_magazines)
        return NULL;

    // Record the allocation in the relevant szone fields.
    szone->tiny_magazines = &(tiny_magazines[1]); // szone->tiny_magazines[-1] is the Depot
    // The magazines are indexed in [0 .. (num_tiny_magazines - 1)]
    // Find the smallest power of 2 that exceeds (num_tiny_magazines - 1)
    // i.e. after the loop, num_tiny_magazines_mask_shift is the smallest s with 2^s > num_tiny_magazines - 1
    // (a worked example follows below)
    szone->num_tiny_magazines_mask_shift = 0;
    int i = 1;
    while (i <= (szone->num_tiny_magazines - 1)) {
        szone->num_tiny_magazines_mask_shift++;
        i <<= 1;
    }
    // Now if i <= TINY_MAX_MAGAZINES we'll never access tiny_magazines[] out of bounds.
    if (i > TINY_MAX_MAGAZINES) {
        malloc_printf("*** FATAL ERROR - magazine mask exceeds allocated magazines.\n");
        exit(-1);
    }

    // Reduce i by 1 to obtain a mask covering [0 .. (num_tiny_magazines - 1)]
    szone->num_tiny_magazines_mask = i - 1; // A mask used for hashing to a magazine index (and a safety aid)
    szone->last_tiny_advise = 0;
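To make the arithmetic concrete (a small sketch, not libmalloc's exact hashing code): with nproc = 6, num_tiny_magazines = 6, the loop runs three times, so mask_shift = 3, i = 8, and the mask is 7. Any CPU-derived value can then be reduced to a magazine index with a single AND; the result lies in [0 .. mask], which may exceed num_tiny_magazines - 1, and that is exactly why the code above insists that i not exceed TINY_MAX_MAGAZINES (the magazine array is sized for that many).

#include <stdio.h>

// Sketch of the power-of-two mask computation and how such a mask hashes a
// CPU number to a magazine index (illustrative only).
int main(void) {
    int num_magazines = 6;            // e.g. a 6-core machine, below TINY_MAX_MAGAZINES

    int shift = 0, i = 1;
    while (i <= num_magazines - 1) {  // smallest power of two exceeding (num_magazines - 1)
        shift++;
        i <<= 1;
    }
    unsigned mask = i - 1;            // 6 magazines -> shift = 3, i = 8, mask = 0x7

    for (unsigned cpu = 0; cpu < 12; cpu++) {
        printf("cpu %2u -> magazine %u\n", cpu, cpu & mask); // always in [0 .. mask]
    }
    printf("shift=%d, i=%d, mask=0x%x\n", shift, i, mask);
    return 0;
}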
    // Init the tiny_magazine locks
    _malloc_lock_init(&szone->tiny_regions_lock);
    _malloc_lock_init(&szone->tiny_magazines[DEPOT_MAGAZINE_INDEX].magazine_lock);
    for (i = 0; i < szone->num_tiny_magazines; ++i) {
        _malloc_lock_init(&szone->tiny_magazines[i].magazine_lock);
    }
    // ------ Allocate a tiny magazine per CPU and initialize the related fields ------ end

    // ------ Allocate a small magazine per CPU and initialize the related fields ------ start
    szone->num_small_magazines = (nproc > 1) ? MIN(nproc, SMALL_MAX_MAGAZINES) : 1;
    // FIXME vm_allocate() based on number of configured CPUs
    magazine_t *small_magazines = allocate_pages(NULL, SMALL_MAGAZINE_PAGED_SIZE, 0,
                                                 SCALABLE_MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC);
    if (NULL == small_magazines)
        return NULL;
szone->small_magazines = &(small_magazines[1]); // szone->small_magazines[-1] is the Depot
    // The magazines are indexed in [0 .. (num_small_magazines - 1)]
    // Find the smallest power of 2 that exceeds (num_small_magazines - 1)
    szone->num_small_magazines_mask_shift = 0;
    while (i <= (szone->num_small_magazines - 1)) {
        szone->num_small_magazines_mask_shift++;
        i <<= 1;
    }

    // Now if i <= SMALL_MAX_MAGAZINES we'll never access small_magazines[] out of bounds.
    if (i > SMALL_MAX_MAGAZINES) {
        malloc_printf("*** FATAL ERROR - magazine mask exceeds allocated magazines.\n");
        exit(-1);
    }

    // Reduce i by 1 to obtain a mask covering [0 .. (num_small_magazines - 1)]
    szone->num_small_magazines_mask = i - 1; // A mask used for hashing to a magazine index (and a safety aid)
    szone->last_small_advise = 0;

    // Init the small_magazine locks
    _malloc_lock_init(&szone->small_regions_lock);
    _malloc_lock_init(&szone->small_magazines[DEPOT_MAGAZINE_INDEX].magazine_lock);
    for (i = 0; i < szone->num_small_magazines; ++i) {
        _malloc_lock_init(&szone->small_magazines[i].magazine_lock);
    }
__attribute__((visibility("hidden")))
malloc_zone_t *
create_nano_zone(size_t initial_size, malloc_zone_t *helper_zone, unsigned debug_flags)
{
    nanozone_t *nanozone;
    int i, j;
    if (!_malloc_engaged_nano)
        return NULL;
    // Check the comm page version.
#if defined(__x86_64__)
    if (_COMM_PAGE_VERSION_REQD > (*((uint16_t *)_COMM_PAGE_VERSION))) {
        malloc_printf("*** FATAL ERROR - comm page version mismatch.\n");
        exit(-1);
    }
#endif
    /* get memory for the zone. */
    // Allocate memory the size of the nanozone structure.
    nanozone = allocate_pages(NULL, SZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC);
    if (!nanozone)
        return NULL;
    /* set up the basic_zone portion of the nanozone structure */
    // Install the externally visible callbacks.
    nanozone->basic_zone.version = 8;
    nanozone->basic_zone.size = (void *)nano_size;
    nanozone->basic_zone.malloc = (debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) ? (void *)nano_malloc_scribble : (void *)nano_malloc;
    nanozone->basic_zone.calloc = (void *)nano_calloc;
    nanozone->basic_zone.valloc = (void *)nano_valloc;
    nanozone->basic_zone.free = (debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) ? (void *)nano_free_scribble : (void *)nano_free;
    nanozone->basic_zone.realloc = (void *)nano_realloc;
    nanozone->basic_zone.destroy = (void *)nano_destroy;
    nanozone->basic_zone.batch_malloc = (void *)nano_batch_malloc;
    nanozone->basic_zone.batch_free = (void *)nano_batch_free;
    nanozone->basic_zone.introspect = (struct malloc_introspection_t *)&nano_introspect;
    nanozone->basic_zone.memalign = (void *)nano_memalign;
    nanozone->basic_zone.free_definite_size = (debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) ?
                                              (void *)nano_free_definite_size_scribble : (void *)nano_free_definite_size;
    nanozone->basic_zone.pressure_relief = (void *)nano_pressure_relief;

    nanozone->basic_zone.reserved1 = 0; /* Set to zero once and for all as required by CFAllocator. */
    nanozone->basic_zone.reserved2 = 0; /* Set to zero once and for all as required by CFAllocator. */

    mprotect(nanozone, sizeof(nanozone->basic_zone), PROT_READ); /* Prevent overwriting the function pointers in basic_zone. */

    /* set up the remainder of the nanozone structure */
    nanozone->debug_flags = debug_flags;
    nanozone->our_signature = NANOZONE_SIGNATURE;

    /* Query the number of configured processors. */
#if defined(__x86_64__)
    nanozone->phys_ncpus = *(uint8_t *)(uintptr_t)_COMM_PAGE_PHYSICAL_CPUS;
    nanozone->logical_ncpus = *(uint8_t *)(uintptr_t)_COMM_PAGE_LOGICAL_CPUS;
#else
#error Unknown architecture
#endif

    // Initialize meta_data according to the CPU configuration.
    if (nanozone->phys_ncpus > sizeof(nanozone->core_mapped_size) / sizeof(nanozone->core_mapped_size[0])) {
        _malloc_printf(ASL_LEVEL_NOTICE, "nano zone abandoned because NCPUS mismatch.\n");
        return NULL;
    }

    if (0 != (nanozone->logical_ncpus % nanozone->phys_ncpus)) {
        malloc_printf("*** FATAL ERROR - logical_ncpus % phys_ncpus != 0.\n");
        exit(-1);
    }

    switch (nanozone->logical_ncpus / nanozone->phys_ncpus) {
    case 1:
        nanozone->hyper_shift = 0;
        break;
    case 2:
        nanozone->hyper_shift = 1;
        break;
    case 4:
        nanozone->hyper_shift = 2;
        break;
    default:
        malloc_printf("*** FATAL ERROR - logical_ncpus / phys_ncpus not 1, 2, or 4.\n");
        exit(-1);
    }

    /* Initialize slot queue heads and resupply locks. */
    // #define OS_ATOMIC_QUEUE_INIT { NULL, 0 }
    OSQueueHead q0 = OS_ATOMIC_QUEUE_INIT; // { NULL, 0 }
    for (i = 0; i < nanozone->phys_ncpus; ++i) {
        _malloc_lock_init(&nanozone->band_resupply_lock[i]);
        for (j = 0; j < NANO_SLOT_SIZE; ++j) {
            nanozone->meta_data[i][j].slot_LIFO = q0;
        }
    }

    // Account for address space layout randomization (ASLR).
    /* Initialize the security token. */
    if (0 == _dyld_get_image_slide((const struct mach_header *)_NSGetMachExecuteHeader())) {
        // zero slide when ASLR has been disabled by boot-arg. Eliminate cloaking.
        malloc_entropy[0] = 0;
        malloc_entropy[1] = 0;
    }
    nanozone->cookie = (uintptr_t)malloc_entropy[0] & 0x0000ffffffff0000ULL; // scramble central 32bits with this cookie

    /* Nano zone does not support SCALABLE_MALLOC_ADD_GUARD_PAGES. */
    if (nanozone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
        _malloc_printf(ASL_LEVEL_INFO, "nano zone does not support guard pages\n");
        nanozone->debug_flags &= ~SCALABLE_MALLOC_ADD_GUARD_PAGES;
    }

    nanozone->helper_zone = helper_zone;

    return (malloc_zone_t *)nanozone;
}
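Once a zone like this has been created, it still has to be made visible to the malloc subsystem. A minimal sketch using the public zone API from <malloc/malloc.h> (create_nano_zone() itself is private to libmalloc, and whether _malloc_initialize registers the zone via this call or an internal locked variant is not shown in this excerpt):

#include <malloc/malloc.h>

// Sketch: registering a zone and allocating from it explicitly.
void demo_register(malloc_zone_t *zone) {
    malloc_zone_register(zone);               // make the zone known to the malloc subsystem
    void *p = malloc_zone_malloc(zone, 32);   // allocate explicitly from that zone
    malloc_zone_free(zone, p);                // and free back into it
}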