/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache. The flags are only relevant
 * if the cache has no available objects.
 */
void *kmem_cache_alloc(kmem_cache_t *cachep, int flags)
{
    return __cache_alloc(cachep, flags);
}
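Before going into the internals, it may help to see how a caller normally reaches this entry point. The sketch below assumes the 2.6-era slab API; the cache name, struct my_obj, and the init/use functions are hypothetical examples, only kmem_cache_create()/kmem_cache_alloc()/kmem_cache_free() and GFP_KERNEL are the real kernel interfaces.

#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical object type and cache, for illustration only. */
struct my_obj {
    int id;
    char name[32];
};

static kmem_cache_t *my_cache;

static int my_init(void)
{
    /* Create a cache of my_obj objects (2.6-era six-argument signature). */
    my_cache = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
                                 0, 0, NULL, NULL);
    if (!my_cache)
        return -ENOMEM;
    return 0;
}

static void my_use(void)
{
    /* GFP_KERNEL: the allocation may sleep, so process context only. */
    struct my_obj *obj = kmem_cache_alloc(my_cache, GFP_KERNEL);

    if (obj)
        kmem_cache_free(my_cache, obj);
}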
static void *cache_alloc_refill(kmem_cache_t *cachep, int flags)
{
    int batchcount;
    struct kmem_list3 *l3;
    struct array_cache *ac;
    check_irq_off();
    ac = ac_data(cachep);
retry:
    batchcount = ac->batchcount;
    if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
        /* if there was little recent activity on this
         * cache, then perform only a partial refill.
         * Otherwise we could generate refill bouncing.
         */
        batchcount = BATCHREFILL_LIMIT;
    }
    l3 = list3_data(cachep);
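In other words, a cache that has not been touched since the last refill is only topped up with BATCHREFILL_LIMIT objects, so idle caches do not keep moving large batches back and forth between the per-CPU array and the shared lists. A stand-alone sketch of just that decision (the limit of 16 is my reading of 2.6 mm/slab.c, not taken from the excerpt above):

/* Stand-alone illustration of the partial-refill decision: if the
 * per-CPU array was not touched recently, refill at most
 * BATCHREFILL_LIMIT objects instead of a full batch. */
#include <stdio.h>

#define BATCHREFILL_LIMIT 16   /* value assumed from 2.6 mm/slab.c */

static int refill_count(int batchcount, int touched)
{
    if (!touched && batchcount > BATCHREFILL_LIMIT)
        return BATCHREFILL_LIMIT;  /* partial refill for an idle cache */
    return batchcount;             /* normal full-batch refill */
}

int main(void)
{
    printf("idle cache:   refill %d objects\n", refill_count(60, 0));
    printf("active cache: refill %d objects\n", refill_count(60, 1));
    return 0;
}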
    // This code runs during initialization and in a few rare cases,
    // e.g. after the jump to must_grow above.
    if (unlikely(!ac->avail)) {
        int x;
        // At this point the cache really has to be grown: new pages are
        // taken from the buddy system. cache_grow() is analyzed below.
        x = cache_grow(cachep, flags, -1);

        // cache_grow can reenable interrupts, then ac could change.
        ac = ac_data(cachep);
        if (!x && ac->avail == 0)   // no objects in sight? abort
            return NULL;
/*
 * Grow (by 1) the number of slabs within a cache. This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
static int cache_grow(kmem_cache_t *cachep, int flags, int nodeid)
{
    struct slab *slabp;
    void *objp;
    size_t offset;
    int local_flags;
    unsigned long ctor_flags;
    /* Be lazy and only check for valid flags here,
     * keeping it out of the critical path in kmem_cache_alloc().
     */
    if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
        BUG();
    if (flags & SLAB_NO_GROW)
        return 0;
    ctor_flags = SLAB_CTOR_CONSTRUCTOR;
    local_flags = (flags & SLAB_LEVEL_MASK);
    if (!(local_flags & __GFP_WAIT))
        /*
         * Not allowed to sleep. Need to tell a constructor about
         * this - it might need to know...
         */
        ctor_flags |= SLAB_CTOR_ATOMIC;
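Because the constructor is told about the atomic case through SLAB_CTOR_ATOMIC, a constructor that might otherwise sleep has to check this flag. A minimal sketch, assuming the 2.6-era three-argument constructor signature; struct my_obj and the constructor body are hypothetical:

#include <linux/slab.h>
#include <linux/string.h>

struct my_obj { int id; };   /* hypothetical object type */

/* Hypothetical constructor: only do work that can sleep when the slab
 * layer did not request an atomic construction. */
static void my_obj_ctor(void *objp, kmem_cache_t *cachep, unsigned long flags)
{
    struct my_obj *obj = objp;

    memset(obj, 0, sizeof(*obj));

    if (!(flags & SLAB_CTOR_ATOMIC)) {
        /* Safe to sleep here, e.g. GFP_KERNEL allocations for
         * per-object auxiliary data. */
    }
}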
    /* About to mess with non-constant members - lock. */
    check_irq_off();
    spin_lock(&cachep->spinlock);
    /* Get colour for the slab, and calculate the next value. */
    // Compute how far this slab's objects are offset; the offset is
    // (colour * cachep->colour_off).
    offset = cachep->colour_next;
    cachep->colour_next++;
    if (cachep->colour_next >= cachep->colour)
        cachep->colour_next = 0;
    offset *= cachep->colour_off;
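For a concrete feel of the colouring: if a cache had colour = 4 and colour_off = 32 (both values made up here for illustration), consecutive slabs would place their objects at offsets 0, 32, 64, 96 and then wrap back to 0, so objects in different slabs land on different cache lines. A stand-alone sketch of the same cycling logic:

/* Stand-alone illustration of slab colour cycling: offsets step by
 * colour_off and wrap around after `colour` slabs. */
#include <stdio.h>

int main(void)
{
    unsigned int colour = 4;        /* assumed number of colours */
    unsigned int colour_off = 32;   /* assumed offset unit (cache line) */
    unsigned int colour_next = 0;
    int i;

    for (i = 0; i < 6; i++) {
        unsigned int offset = colour_next * colour_off;

        if (++colour_next >= colour)
            colour_next = 0;
        printf("slab %d starts its objects at offset %u\n", i, offset);
    }
    return 0;
}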
spin_unlock(&cachep->spinlock);
    if (local_flags & __GFP_WAIT)
        local_irq_enable();
    /*
     * The test for missing atomic flag is performed here, rather than
     * the more obvious place, simply to reduce the critical path length
     * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
     * will eventually be caught here (where it matters).
     */
    kmem_flagcheck(cachep, flags);
/* Get the memory for a slab management obj. */
static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
                                   int colour_off, int local_flags)
{
    struct slab *slabp;