static inline void *kmalloc(size_t size, int flags)
{
        if (__builtin_constant_p(size)) {
                int i = 0;
#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include "kmalloc_sizes.h"
#undef CACHE
                {
                        extern void __you_cannot_kmalloc_that_much(void);
                        __you_cannot_kmalloc_that_much();
                }
found:
                return kmem_cache_alloc((flags & GFP_DMA) ?
                        malloc_sizes[i].cs_dmacachep :
                        malloc_sizes[i].cs_cachep, flags);
        }
        return __kmalloc(size, flags);
}
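The #define CACHE(x) / #include / #undef pattern turns kmalloc_sizes.h, which is nothing but a list of CACHE(n) invocations, into an if/else chain. Because size is a compile-time constant here (guaranteed by __builtin_constant_p), the compiler folds the whole chain into a single constant index, and the deliberately undefined __you_cannot_kmalloc_that_much() turns an oversized constant request into a link-time error. A minimal userspace sketch of the same trick (the size list and all names below are illustrative, not the kernel's):

#include <stdio.h>

/* Stand-in for kmalloc_sizes.h: in the kernel the list lives in its own
 * header so it can be expanded with different CACHE() definitions. */
#define FOREACH_CACHE_SIZE \
        CACHE(32) CACHE(64) CACHE(128) CACHE(256)

static int size_to_index(size_t size)
{
        int i = 0;
#define CACHE(x) if (size <= (x)) goto found; else i++;
        FOREACH_CACHE_SIZE
#undef CACHE
        return -1;              /* larger than every cache size */
found:
        return i;
}

int main(void)
{
        printf("%d\n", size_to_index(100));     /* prints 2: 100 <= 128 */
        return 0;
}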
/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  Use inside interrupt handlers.
 *
 * Additionally, the %GFP_DMA flag may be set to indicate the memory
 * must be suitable for DMA.  This can mean different things on different
 * platforms.  For example, on i386, it means that the memory must come
 * from the first 16MB.
 */
void *__kmalloc(size_t size, int flags)
{
        struct cache_sizes *csizep = malloc_sizes;
        for (; csizep->cs_size; csizep++) {
                if (size > csizep->cs_size)
                        continue;
#if DEBUG
                /* This happens if someone tries to call
                 * kmem_cache_create(), or kmalloc(), before
                 * the generic caches are initialized.
                 */
                BUG_ON(csizep->cs_cachep == NULL);
#endif
                return __cache_alloc(flags & GFP_DMA ?
                        csizep->cs_dmacachep : csizep->cs_cachep, flags);
        }
        return NULL;
}
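A brief usage sketch of the flag combinations the doc-comment above describes. This is a hypothetical kernel-context caller; struct foo, example_alloc, and the error handling are made up for illustration:

static int example_alloc(size_t len)
{
        struct foo *f;
        void *dma_buf;

        f = kmalloc(sizeof(*f), GFP_KERNEL);            /* may sleep */
        if (!f)
                return -ENOMEM;

        dma_buf = kmalloc(len, GFP_KERNEL | GFP_DMA);   /* DMA-suitable memory */
        if (!dma_buf) {
                kfree(f);
                return -ENOMEM;
        }

        /* ... in an interrupt handler, GFP_ATOMIC would be used instead ... */

        kfree(dma_buf);
        kfree(f);
        return 0;
}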
/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
        kmem_cache_t *c;
        unsigned long flags;
        if (!objp)
                return;
        local_irq_save(flags);
        kfree_debugcheck(objp);
        /* Look up the cache descriptor from objp's page.  If objp is a
         * random pointer, we get back a bogus cache descriptor, and
         * calling __cache_free() on it will go wrong. */
        c = GET_PAGE_CACHE(virt_to_page(objp));
        __cache_free(c, (void *)objp);
        local_irq_restore(flags);
}
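GET_PAGE_CACHE works because the slab layer stamps every page it owns with a back-pointer to its cache, reusing the page descriptor's lru field. Roughly, the 2.6-era macros in mm/slab.c look like this (an approximation, not a verbatim copy):

/* Stored when pages are handed to a slab, read back in kfree(). */
#define SET_PAGE_CACHE(pg, x)   ((pg)->lru.next = (struct list_head *)(x))
#define GET_PAGE_CACHE(pg)      ((kmem_cache_t *)(pg)->lru.next)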
/*
 * __cache_free
 * Release an obj back to its cache.  If the obj has a constructed
 * state, it must be in this state _before_ it is released.
 *
 * Called with disabled ints.
 */
static inline void __cache_free(kmem_cache_t *cachep, void *objp)
{
        struct array_cache *ac = ac_data(cachep);
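The excerpt cuts the function off here. For context, what follows in 2.6-era mm/slab.c is approximately the fast/slow path below (a sketch, not a verbatim copy):

        if (likely(ac->avail < ac->limit)) {
                /* fast path: park the object in the per-CPU array */
                ac_entry(ac)[ac->avail++] = objp;
                return;
        }
        /* slow path: push part of the array back to the slab lists first */
        cache_flusharray(cachep, ac);
        ac_entry(ac)[ac->avail++] = objp;
}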
/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(kmem_cache_t *cachep, void *objp)
{
        unsigned long flags;
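The body is truncated here; the remainder is essentially a thin wrapper that disables interrupts around __cache_free(), approximately:

        local_irq_save(flags);
        __cache_free(cachep, objp);
        local_irq_restore(flags);
}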
/* Destroy all the objs in a slab, and release the mem back to the system.
 * Before calling the slab must have been unlinked from the cache.
 * The cache-lock is not held/needed.
 */
static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
{
        /* slabp->colouroff covers the colour offset plus the slab
         * descriptor and the object descriptors. */
        void *addr = slabp->s_mem - slabp->colouroff;
#if DEBUG
#else
        if (cachep->dtor) {
                int i;
                for (i = 0; i < cachep->num; i++) {
                        void *objp = slabp->s_mem + cachep->objsize * i;
                        (cachep->dtor)(objp, cachep, 0);
                }
        }
#endif
        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
                struct slab_rcu *slab_rcu;
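The excerpt stops inside the RCU branch. In the full source the slab is either queued for RCU-deferred freeing or its pages are released immediately; approximately (a sketch of the 2.6-era code):

                slab_rcu = (struct slab_rcu *)slabp;
                slab_rcu->cachep = cachep;
                slab_rcu->addr = addr;
                call_rcu(&slab_rcu->head, kmem_rcu_free);
        } else {
                kmem_freepages(cachep, addr);
                /* an off-slab descriptor lives in its own cache */
                if (OFF_SLAB(cachep))
                        kmem_cache_free(cachep->slabp_cache, slabp);
        }
}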
/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(kmem_cache_t *cachep)
{
        if (!cachep || in_interrupt())
                BUG();
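The rest of the function is a one-liner that delegates to the internal helper, approximately:

        return __cache_shrink(cachep);
}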
/**
 * kmem_cache_destroy - delete a cache
 * @cachep: the cache to destroy
 *
 * Remove a kmem_cache_t object from the slab cache.
 * Returns 0 on success.
 *
 * It is expected this function will be called by a module when it is
 * unloaded.  This will remove the cache completely, and avoid a duplicate
 * cache being allocated each time a module is loaded and unloaded, if the
 * module doesn't have persistent in-kernel storage across loads and unloads.
 *
 * The cache must be empty before calling this function.
 *
 * The caller must guarantee that no one will allocate memory from the cache
 * during the kmem_cache_destroy().
 */
int kmem_cache_destroy(kmem_cache_t *cachep)
{
        int i;
        if (!cachep || in_interrupt())
                BUG();
        /* Don't let CPUs come and go */
        lock_cpu_hotplug();
        /* Find the cache in the chain of caches. */
        down(&cache_chain_sem);
        /*
         * the chain is never empty, cache_cache is never destroyed
         */
        list_del(&cachep->next);
        up(&cache_chain_sem);
        /* __cache_shrink() destroys all the free slabs; if any full or
         * partial slabs remain, the shrink fails and we report an error.
         * (Slabs migrate between the full/partial/free lists as their
         * objects are released, in free_block().) */
        if (__cache_shrink(cachep)) {
                slab_error(cachep, "Can't free all objects");
                down(&cache_chain_sem);
                list_add(&cachep->next, &cache_chain);
                up(&cache_chain_sem);
                unlock_cpu_hotplug();
                return 1;
        }
        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
                synchronize_kernel();
        /* no cpu_online check required here since we clear the percpu
         * array on cpu offline and set this to NULL.
         */
        /* The steps below tear down the cache's descriptors: the per-CPU
         * array caches, the shared array cache, and the cache itself. */
        for (i = 0; i < NR_CPUS; i++)
                kfree(cachep->array[i]);
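The excerpt ends mid-function; in the full source the remaining steps free the shared array cache and finally the cache descriptor itself before returning 0. Putting the API together, here is a minimal module-lifecycle sketch of the create/destroy pairing that the kmem_cache_destroy() comment describes (struct foo, foo_cache, and the function names are hypothetical):

static kmem_cache_t *foo_cache;

static int __init foo_init(void)
{
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
                                      0, 0, NULL, NULL);
        if (!foo_cache)
                return -ENOMEM;
        return 0;
}

static void __exit foo_exit(void)
{
        /* every foo object must already be back in the cache */
        kmem_cache_destroy(foo_cache);
}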