typedef struct nanozone_s {                // vm_allocate()'d, so page-aligned to begin with.
    malloc_zone_t       basic_zone;        // first page will be given read-only protection
    uint8_t             pad[PAGE_MAX_SIZE - sizeof(malloc_zone_t)];

    // remainder of structure is R/W (contains no function pointers)
    // page-aligned
    struct nano_meta_s  meta_data[NANO_MAG_SIZE][NANO_SLOT_SIZE]; // max: NANO_MAG_SIZE cores x NANO_SLOT_SIZE slots for nano blocks {16 .. 256}
    _malloc_lock_s      band_resupply_lock[NANO_MAG_SIZE];
    uintptr_t           band_max_mapped_baseaddr[NANO_MAG_SIZE];
    size_t              core_mapped_size[NANO_MAG_SIZE];

    /*
     * The nano zone constructed by create_nano_zone() would like to hand off tiny, small, and large
     * allocations to the default scalable zone. Record the latter as the "helper" zone here.
     */
    malloc_zone_t       *helper_zone;
} nanozone_t;
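The helper_zone field is what makes that split visible at malloc() time: requests the nano allocator can serve (at most 256 bytes) stay in this zone, everything else is forwarded. A minimal sketch of that dispatch, building on the nanozone_t definition above and assuming NANO_MAX_SIZE is 256; nano_try_allocate() is a hypothetical stand-in for libmalloc's internal slot-allocation path, not its real API:

#include <malloc/malloc.h>

// Hypothetical nano-path allocator; returns NULL when the nano slots are exhausted.
static void *nano_try_allocate(nanozone_t *nanozone, size_t size);

static void *
nano_malloc_sketch(nanozone_t *nanozone, size_t size)
{
    if (size <= 256 /* NANO_MAX_SIZE */) {
        void *ptr = nano_try_allocate(nanozone, size);
        if (ptr) {
            return ptr;
        }
        // Nano slots exhausted: fall through to the helper zone.
    }
    // Tiny, small, and large requests (and nano exhaustion) go to the scalable helper zone.
    malloc_zone_t *zone = nanozone->helper_zone;
    return zone->malloc(zone, size);
}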
if (0 == size) size = NANO_REGIME_QUANTA_SIZE; // Historical behavior
k = (size + NANO_REGIME_QUANTA_SIZE - 1) >> SHIFT_NANO_QUANTUM; // round up and shift for number of quanta
slot_bytes = k << SHIFT_NANO_QUANTUM;                           // multiply by power of two quanta size
*pKey = k - 1;                                                  // Zero-based!
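With NANO_REGIME_QUANTA_SIZE = 16 and SHIFT_NANO_QUANTUM = 4, a 37-byte request gives k = (37 + 15) >> 4 = 3, so slot_bytes = 48 and *pKey = 2; a 48-byte request lands in the same slot. A small standalone sketch of this rounding (the constants are assumed from nano_zone.h, and nano_slot_bytes() is a hypothetical helper, not libmalloc API):

#include <stdio.h>
#include <stddef.h>

// Assumed values matching libmalloc's nano regime: 16-byte quanta.
#define SHIFT_NANO_QUANTUM      4
#define NANO_REGIME_QUANTA_SIZE (1 << SHIFT_NANO_QUANTUM)

// Sketch of the size-to-slot mapping shown above.
static size_t nano_slot_bytes(size_t size, unsigned int *pKey)
{
    size_t k;

    if (0 == size) {
        size = NANO_REGIME_QUANTA_SIZE;                             // Historical behavior
    }
    k = (size + NANO_REGIME_QUANTA_SIZE - 1) >> SHIFT_NANO_QUANTUM; // number of 16-byte quanta, rounded up
    *pKey = (unsigned int)k - 1;                                    // zero-based slot index
    return k << SHIFT_NANO_QUANTUM;                                 // rounded-up allocation size
}

int main(void)
{
    unsigned int key;
    for (size_t size = 1; size <= 256; size += 37) {
        size_t bytes = nano_slot_bytes(size, &key);
        printf("size %3zu -> slot %2u, %3zu bytes\n", size, key, bytes);
    }
    return 0;
}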
static INLINE void *
segregated_next_block(nanozone_t *nanozone, nano_meta_admin_t pMeta, unsigned int slot_bytes, unsigned int mag_index)
{
    while (1) {
        // Capture the slot limit that bounds slot_bump_addr right now.
        // pMeta->slot_limit_addr is the end address of the memory currently usable by this pMeta.
        uintptr_t theLimit = pMeta->slot_limit_addr;
        // Atomically advance pMeta->slot_bump_addr by slot_bytes, bumping it to the next address.
        uintptr_t b = OSAtomicAdd64Barrier(slot_bytes, (volatile int64_t *)&(pMeta->slot_bump_addr));
        // The atomic op returned the addr of the *next* free block. Subtract to get the addr for *this* allocation.
        b -= slot_bytes;

        if (b < theLimit) { // Did we stay within the bound of the present slot allocation?
            // The address is still in range, so the slot_bump_addr this thread incremented is good to go.
            // todo: if b is not just below theLimit but far below it, we could be handed an address that
            // already points into some other memory block.
            return (void *)b;
        } else {
            if (pMeta->slot_exhausted) { // Exhausted all the bands available for this slot?
                return 0; // We're toast
            } else {
                // One thread will grow the heap; the others will see it has been grown and retry the allocation.
                _malloc_lock_lock(&nanozone->band_resupply_lock[mag_index]);
                // Re-check state now that we've taken the lock.
                if (pMeta->slot_exhausted) {
                    _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]);
                    return 0; // Toast
                } else if (b < pMeta->slot_limit_addr) {
                    _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]);
                    continue; // ... the slot was successfully grown by the first-taker (not us). Now try again.
                } else if (segregated_band_grow(nanozone, pMeta, slot_bytes, mag_index)) {
                    _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]);
                    continue; // ... the slot has been successfully grown by us. Now try again.
                } else {
                    pMeta->slot_exhausted = TRUE;
                    _malloc_lock_unlock(&nanozone->band_resupply_lock[mag_index]);
                    return 0;
                }
            }
        }
    }
}
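Note that the fast path here is a single 64-bit atomic add with no lock: band_resupply_lock is taken only once the bump pointer crosses slot_limit_addr, and even then the first check after acquiring the lock detects whether another thread has already grown the slot, so the common allocation never serializes.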
u.fields.nano_signature = NANOZONE_SIGNATURE;
u.fields.nano_mag_index = mag_index;
u.fields.nano_band = 0;
u.fields.nano_slot = 0;
u.fields.nano_offset = 0;
s = u.addr; // Base for this core.
// Set the high water mark for this CPU's entire magazine, if this resupply raised it.
watermark = nanozone->core_mapped_size[mag_index];
hiwater = MAX(watermark, p - s + SLOT_IN_BAND_SIZE);
nanozone->core_mapped_size[mag_index] = hiwater;
struct nano_blk_addr_s {
    uint64_t nano_offset:NANO_OFFSET_BITS,          // locates the block
             nano_slot:NANO_SLOT_BITS,              // bucket of homogenous quanta-multiple blocks
             nano_band:NANO_BAND_BITS,
             nano_mag_index:NANO_MAG_BITS,          // the core that allocated this block
             nano_signature:NANO_SIGNATURE_BITS;    // 0x00006nnnnnnnnnnn - the address range devoted to us.
};
#endif
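Because this struct is overlaid on a raw 64-bit address (libmalloc unions it with a plain uint64_t, which is what the u.fields/u.addr usage above relies on), the same encoding that builds a core's base address can be reversed at free time to recover which magazine, band, and slot a pointer belongs to. A self-contained sketch with assumed bit widths; the authoritative values live in nano_zone.h, and the widths below are only illustrative (they must sum to 64):

#include <stdint.h>
#include <stdio.h>

// Illustrative bit widths only; see libmalloc's nano_zone.h for the real values.
#define NANO_OFFSET_BITS    17
#define NANO_SLOT_BITS      4
#define NANO_BAND_BITS      18
#define NANO_MAG_BITS       5
#define NANO_SIGNATURE_BITS 20
#define NANOZONE_SIGNATURE  0x00006ULL

struct nano_blk_addr_s {
    uint64_t nano_offset:NANO_OFFSET_BITS,
             nano_slot:NANO_SLOT_BITS,
             nano_band:NANO_BAND_BITS,
             nano_mag_index:NANO_MAG_BITS,
             nano_signature:NANO_SIGNATURE_BITS;
};

// Same 64 bits viewed either as an address or as the fields above.
// (Bit-field layout is compiler-dependent; like libmalloc, this assumes the Apple toolchain's ordering.)
union nano_blk_addr {
    uint64_t addr;
    struct nano_blk_addr_s fields;
};

int main(void)
{
    union nano_blk_addr u = {0};

    // Compose a base address for magazine 3, as in the snippet above.
    u.fields.nano_signature = NANOZONE_SIGNATURE;
    u.fields.nano_mag_index = 3;
    printf("base for core 3: 0x%llx\n", (unsigned long long)u.addr);

    // Decompose an arbitrary pointer in that range back into its fields: band 2, slot 5, offset 0x40.
    u.addr += (2ULL << (NANO_OFFSET_BITS + NANO_SLOT_BITS)) + (5ULL << NANO_OFFSET_BITS) + 0x40;
    printf("mag %u band %u slot %u offset 0x%x\n",
           (unsigned)u.fields.nano_mag_index, (unsigned)u.fields.nano_band,
           (unsigned)u.fields.nano_slot, (unsigned)u.fields.nano_offset);
    return 0;
}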