/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
        unsigned long flags;

        // Check that the object objp being freed really comes from the
        // kmem_cache cache cachep
        cachep = cache_from_obj(cachep, objp);
        if (!cachep)
                return;

        // Interrupts are disabled around the actual free; __cache_free() is
        // analyzed further below
        local_irq_save(flags);
        debug_check_no_locks_freed(objp, cachep->object_size);
        if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(objp, cachep->object_size);
        __cache_free(cachep, objp, _RET_IP_);
        local_irq_restore(flags);

        trace_kmem_cache_free(_RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);
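For context, a minimal, hypothetical caller of this API (struct foo, foo_cache and foo_init() are invented for illustration and are not part of the kernel code quoted above):

#include <linux/init.h>
#include <linux/slab.h>

struct foo {
        int a;
        int b;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
        struct foo *obj;

        /* Create a dedicated cache for struct foo objects. */
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
                                      0, SLAB_HWCACHE_ALIGN, NULL);
        if (!foo_cache)
                return -ENOMEM;

        /* Allocate one object and give it back through the path walked here. */
        obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
        if (obj)
                kmem_cache_free(foo_cache, obj);

        kmem_cache_destroy(foo_cache);
        return 0;
}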
// #############################################################################
// cache_from_obj()
// #############################################################################
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;
        struct page *page;

        /*
         * When kmemcg is not being used, both assignments should return the
         * same value. but we don't want to pay the assignment price in that
         * case. If it is not compiled in, the compiler should be smart enough
         * to not do even the assignment. In that case, slab_equal_or_root
         * will also be a constant.
         */
        if (!memcg_kmem_enabled() &&
            !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
                return s;

        // ------------------------------------------------------------ (1)
        page = virt_to_head_page(x);
        cachep = page->slab_cache;
        if (slab_equal_or_root(cachep, s))
                return cachep;
pr_err("%s: Wrong slab cache. %s but object is from %s\n", __func__, s->name, cachep->name); WARN_ON_ONCE(1); return s; }
// arm64 definitions (arch/arm64/include/asm/memory.h)
#define __pa(x)                 __virt_to_phys((unsigned long)(x))

#define __virt_to_phys(x)       __virt_to_phys_nodebug(x)

#define __virt_to_phys_nodebug(x) ({                            \
        phys_addr_t __x = (phys_addr_t)(x);                     \
        __is_lm_address(__x) ? __lm_to_phys(__x) :              \
                               __kimg_to_phys(__x);             \
})

/*
 * The linear kernel range starts in the middle of the virtual address
 * space. Testing the top bit for the start of the region is a
 * sufficient check.
 */
#define __is_lm_address(addr)   (!!((addr) & BIT(VA_BITS - 1)))

#define __lm_to_phys(addr)      (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)    ((addr) - kimage_voffset)
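A worked example makes the bit test and the linear-map translation concrete; all numbers below are hypothetical (VA_BITS = 48, PHYS_OFFSET chosen arbitrarily):

/*
 * Hypothetical values, assuming VA_BITS = 48:
 *   PAGE_OFFSET = 0xffff800000000000   start of the linear map
 *   PHYS_OFFSET = 0x0000000040000000   start of RAM (board specific)
 *
 *   addr = 0xffff800000100000          slab object address in the linear map
 *   __is_lm_address(addr)              bit 47 (VA_BITS - 1) is set -> linear map
 *   __lm_to_phys(addr) = (addr & ~PAGE_OFFSET) + PHYS_OFFSET
 *                      = 0x0000000000100000 + 0x0000000040000000
 *                      = 0x0000000040100000
 *
 * A kernel-image address (bit 47 clear) would instead go through
 * __kimg_to_phys(), i.e. subtract kimage_voffset. virt_to_head_page() in
 * cache_from_obj() relies on this kind of virtual-to-physical translation to
 * get from the object's address to its struct page (and page->slab_cache).
 */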
// #############################################################################
// __cache_free()
// #############################################################################
/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released. Called with disabled ints.
 */
static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
                                         unsigned long caller)
{
        /* Put the object into the quarantine, don't touch it for now. */
        // With KASAN enabled the object may be parked in the quarantine (to
        // catch use-after-free) and the real free is deferred; without KASAN
        // this hook is a no-op and we always fall through.
        if (kasan_slab_free(cachep, objp, _RET_IP_))
                return;

        ___cache_free(cachep, objp, caller);
}
/*
 * ___cache_free() does the real work; __cache_free() above is only a thin
 * wrapper around it.
 */
void ___cache_free(struct kmem_cache *cachep, void *objp,
                   unsigned long caller)
{
        struct array_cache *ac = cpu_cache_get(cachep);

        /*
         * Skip calling cache_free_alien() when the platform is not numa.
         * This will avoid cache misses that happen while accessing slabp (which
         * is per page memory reference) to get nodeid. Instead use a global
         * variable to skip the call, which is mostly likely to be present in
         * the cache.
         */
        if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
                return;

        // case 1: ac has not reached its limit yet, so the object can simply
        // be released back into ac
        if (ac->avail < ac->limit) {
                STATS_INC_FREEHIT(cachep);
        // case 2: ac is full, so it has to be flushed first (cache_flusharray)
        } else {
                STATS_INC_FREEMISS(cachep);
                cache_flusharray(cachep, ac);
        }

        // Net-related handling is not discussed here; sk_memalloc_socks()
        // returns 0 in that case
        if (sk_memalloc_socks()) {
                struct page *page = virt_to_head_page(objp);
                if (unlikely(PageSlabPfmemalloc(page))) {
                        cache_free_pfmemalloc(cachep, page, objp);
                        return;
                }
        }

        // Finally, hand the object back to the per-CPU array; it is only
        // returned to its slab later, via cache_flusharray()/free_block()
        ac->entry[ac->avail++] = objp;
}
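The per-CPU ac used above is a struct array_cache. A rough sketch of its layout in mm/slab.c, shown for orientation (the field comments are added here and are not from the kernel source):

struct array_cache {
        unsigned int avail;      /* objects currently held in entry[] */
        unsigned int limit;      /* threshold checked in case 1 / case 2 above */
        unsigned int batchcount; /* how many objects cache_flusharray() drains at once */
        unsigned int touched;    /* "recently used" hint for the cache reaper */
        void *entry[];           /* the cached object pointers; freed objects land here */
};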
// #############################################################################
// free_block()
// #############################################################################
/*
 * Caller needs to acquire correct kmem_cache_node's list_lock
 * @list: List of detached free slabs should be freed by caller
 */
static void free_block(struct kmem_cache *cachep, void **objpp,
                       int nr_objects, int node, struct list_head *list)
{
        int i;
        struct kmem_cache_node *n = get_node(cachep, node);
        struct page *page;
        // For each object handed in via objpp, free_block() returns it to its
        // slab and then puts the slab back on the appropriate list: a now
        // fully free slab goes to slabs_free, otherwise to slabs_partial
        /* fixup slab chains */
        if (page->active == 0) {
                list_add(&page->lru, &n->slabs_free);
                n->free_slabs++;
        } else {
                /* Unconditionally move a slab to the end of the
                 * partial list on free - maximum time for the
                 * other objects to be freed, too.
                 */
                list_add_tail(&page->lru, &n->slabs_partial);
        }
}
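How the detached @list is meant to be used can be seen from free_block()'s main caller, cache_flusharray(). The sketch below is simplified (shared-array handling and statistics dropped, function name invented), so treat it as illustrative rather than the exact kernel code:

static void flush_example(struct kmem_cache *cachep, struct array_cache *ac)
{
        int node = numa_mem_id();
        struct kmem_cache_node *n = get_node(cachep, node);
        LIST_HEAD(list);                 /* receives fully free slabs */

        spin_lock(&n->list_lock);        /* free_block() requires list_lock */
        free_block(cachep, ac->entry, ac->batchcount, node, &list);
        spin_unlock(&n->list_lock);

        /* Slabs detached onto @list are released back to the page allocator. */
        slabs_destroy(cachep, &list);

        /* Shift the surviving entries to the front of the per-CPU array. */
        ac->avail -= ac->batchcount;
        memmove(ac->entry, &ac->entry[ac->batchcount],
                sizeof(void *) * ac->avail);
}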