Allocating an object from a cache always follows the rules below (a condensed sketch of this fallback order is given right after the list):
1. If the local (per-CPU) array cache has free objects, take one from there; an object allocated this way is the "hottest" one.
2. If the local array cache is empty, look for free objects in the slab lists of kmem_list3, refill the local array cache with them, and then allocate.
3. If none of the slabs has any free objects left, a new slab has to be created first, and the allocation is then retried.
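To make the cascade easier to follow before diving into the real kernel code, here is a minimal user-space sketch of the three levels. The helper names (take_from_local_array_cache and so on) are invented for this sketch and do not exist in the kernel; the real implementations, ____cache_alloc(), cache_alloc_refill() and cache_grow(), are analyzed below.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins for the three levels; these names are made up. */
static void *take_from_local_array_cache(void) { return NULL; /* pretend the per-CPU cache is empty */ }
static void *refill_from_slab_lists(void)      { return NULL; /* pretend every slab is full */ }
static void *grow_cache_and_alloc(void)        { static char obj[32]; return obj; }

/* The fallback order described in steps 1-3 above. */
static void *alloc_object(void)
{
    void *obj = take_from_local_array_cache();  /* step 1: per-CPU fast path */
    if (!obj)
        obj = refill_from_slab_lists();         /* step 2: refill from kmem_list3 */
    if (!obj)
        obj = grow_cache_and_alloc();           /* step 3: create a new slab */
    return obj;
}

int main(void)
{
    printf("got object at %p\n", alloc_object());
    return 0;
}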
The function kmem_cache_alloc obtains an object from a specific cache, while kmalloc obtains an object from one of the general-purpose caches; their execution flows are shown in the figure below.
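As a quick reminder of how these two interfaces are used from kernel code, here is a minimal kernel-module-style example; the cache name "demo_cache" and struct demo are made up for illustration.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo {                       /* made-up object type for the example */
    int id;
    char name[16];
};

static struct kmem_cache *demo_cachep;

static int __init demo_init(void)
{
    struct demo *d;
    char *buf;

    /* A dedicated cache: objects come out of this cache's own slabs. */
    demo_cachep = kmem_cache_create("demo_cache", sizeof(struct demo),
                                    0, SLAB_HWCACHE_ALIGN, NULL);
    if (!demo_cachep)
        return -ENOMEM;

    d = kmem_cache_alloc(demo_cachep, GFP_KERNEL); /* object from the specific cache */
    buf = kmalloc(64, GFP_KERNEL);                 /* object from a general-purpose cache */

    if (d)
        kmem_cache_free(demo_cachep, d);
    kfree(buf);                                    /* kfree(NULL) is a no-op */
    return 0;
}

static void __exit demo_exit(void)
{
    kmem_cache_destroy(demo_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");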
The real work starts in ____cache_alloc(), so we take this function as the entry point of the analysis.
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
    void *objp;
    struct array_cache *ac;

    check_irq_off();
    /* Get the cache's local (per-CPU) array cache descriptor, struct array_cache */
    ac = cpu_cache_get(cachep);
    /* If the local array cache still has free objects, allocate from it */
    if (likely(ac->avail)) {
        STATS_INC_ALLOCHIT(cachep);
        ac->touched = 1;
        /* Decrement avail first, so the object taken is the "hottest" one,
         * i.e. the most recently freed, and therefore the most likely to
         * still be resident in the CPU hardware cache */
        objp = ac->entry[--ac->avail];
    } else { /* otherwise the local array cache has to be refilled */
        STATS_INC_ALLOCMISS(cachep);
        objp = cache_alloc_refill(cachep, flags);
    }
    /*
     * To avoid a false negative, if an object that is in one of the
     * per-CPU caches is leaked, we need to make sure kmemleak doesn't
     * treat the array pointers as a reference to the object.
     */
    kmemleak_erase(&ac->entry[ac->avail]);
    return objp;
}
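The key point of the fast path is that ac->entry[] is used as a LIFO stack: frees push object pointers at index avail, and allocations pop from avail - 1, so the most recently freed (and therefore cache-hottest) object is handed out first. The following user-space model only mimics that behaviour; struct toy_array_cache is a simplified stand-in, not the kernel's struct array_cache.

#include <stdio.h>
#include <stddef.h>

#define NR_ENTRIES 8

/* Simplified stand-in for the kernel's struct array_cache. */
struct toy_array_cache {
    unsigned int avail;          /* number of free objects currently stacked */
    unsigned int limit;          /* capacity of entry[] */
    void *entry[NR_ENTRIES];     /* LIFO stack of free object pointers */
};

/* Free path: push the object on top of the stack. */
static void toy_free(struct toy_array_cache *ac, void *obj)
{
    if (ac->avail < ac->limit)
        ac->entry[ac->avail++] = obj;
}

/* Fast allocation path: pop the most recently freed object. */
static void *toy_alloc(struct toy_array_cache *ac)
{
    if (ac->avail)
        return ac->entry[--ac->avail];  /* same idea as objp = ac->entry[--ac->avail] */
    return NULL;                        /* would fall back to cache_alloc_refill() */
}

int main(void)
{
    struct toy_array_cache ac = { .avail = 0, .limit = NR_ENTRIES };
    int a, b;

    toy_free(&ac, &a);
    toy_free(&ac, &b);                  /* b is freed last, so it is the "hottest" */
    printf("first alloc returns b? %s\n", toy_alloc(&ac) == &b ? "yes" : "no");
    printf("second alloc returns a? %s\n", toy_alloc(&ac) == &a ? "yes" : "no");
    return 0;
}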
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
    int batchcount;
    struct kmem_list3 *l3;
    struct array_cache *ac;
    int node;

retry:
    check_irq_off();
    node = numa_node_id();
    ac = cpu_cache_get(cachep);
    batchcount = ac->batchcount;    /* number of objects to transfer in one batch */
    if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
        /*
         * If there was little recent activity on this cache, then
         * perform only a partial refill.  Otherwise we could generate
         * refill bouncing.
         */
        batchcount = BATCHREFILL_LIMIT;
    }
    /* Get this node's kmem_list3 */
    l3 = cachep->nodelists[node];

    BUG_ON(ac->avail > 0 || !l3);
    spin_lock(&l3->list_lock);

    /* See if we can refill from the shared array */
    /* If a shared array cache exists, refill from it first */
    if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
        goto alloc_done;

    while (batchcount > 0) {
        struct list_head *entry;
        struct slab *slabp;
        /* Get slab alloc is to come from. */
        /* Scan the slab lists: start with the partial list; if the whole
         * partial list cannot supply batchcount free objects, fall back
         * to the free list */
        entry = l3->slabs_partial.next;
        /* entry back at the list head means the partial list has been
         * exhausted, so start scanning the free list */
        if (entry == &l3->slabs_partial) {
            l3->free_touched = 1;
            entry = l3->slabs_free.next;
            if (entry == &l3->slabs_free)
                goto must_grow;
        }

        /* Get the slab descriptor from the list entry */
        slabp = list_entry(entry, struct slab, list);
        check_slabp(cachep, slabp);
        check_spinlock_acquired(cachep);

        /*
         * The slab was either on partial or free list so
         * there must be at least one object available for
         * allocation.
         */
        BUG_ON(slabp->inuse >= cachep->num);

        /* While this slab still has free objects and the local array
         * cache still needs more objects */
        while (slabp->inuse < cachep->num && batchcount--) {
            STATS_INC_ALLOCED(cachep);
            STATS_INC_ACTIVE(cachep);
            STATS_SET_HIGH(cachep);

            /* Refilling simply means pointing the next void * slot in
             * ac->entry[] at a free object */
            ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, node);
        }
        check_slabp(cachep, slabp);

        /* move slabp to correct slabp list: */
        /* Since objects were handed out from this slab, it may have to be
         * moved to another list */
        list_del(&slabp->list);
        /* free == BUFCTL_END means the slab's free objects are exhausted,
         * so insert it into the full list */
        if (slabp->free == BUFCTL_END)
            list_add(&slabp->list, &l3->slabs_full);
        else    /* otherwise it must go on the partial list */
            list_add(&slabp->list, &l3->slabs_partial);
    }

must_grow:
    l3->free_objects -= ac->avail;  /* update the free-object count in kmem_list3 */
alloc_done:
    spin_unlock(&l3->list_lock);

    /* avail == 0 means all slabs in kmem_list3 are full (or there are no
     * slabs at all), so new slabs must be allocated for the cache */
    if (unlikely(!ac->avail)) {
        int x;
        x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);

        /* cache_grow can reenable interrupts, then ac could change. */
        ac = cpu_cache_get(cachep);
        if (!x && ac->avail == 0)   /* no objects in sight? abort */
            return NULL;

        if (!ac->avail)             /* objects refilled by interrupt? */
            goto retry;
    }
    ac->touched = 1;
    /* Return the last object, i.e. the one at the end of the array */
    return ac->entry[--ac->avail];
}
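The core of the refill is the inner while loop: up to batchcount object pointers are pulled from one slab after another into ac->entry[], and only then is a single object popped and returned. The following user-space model reproduces just that transfer step with toy structures; in particular it simply uses inuse as an object index, whereas the real kernel walks the slab's bufctl free list via slabp->free in slab_get_obj().

#include <stdio.h>
#include <stddef.h>

#define OBJS_PER_SLAB 4
#define AC_LIMIT      16

/* Toy stand-ins for struct slab and struct array_cache. */
struct toy_slab {
    unsigned int inuse;             /* objects already handed out */
    void *objs[OBJS_PER_SLAB];      /* the slab's objects */
};

struct toy_ac {
    unsigned int avail;
    unsigned int batchcount;
    void *entry[AC_LIMIT];
};

/* Counterpart of the inner loop in cache_alloc_refill(): move free objects
 * from one slab into the array cache until the slab is exhausted or the
 * batch is complete.  Returns how many objects were transferred. */
static int toy_refill_from_slab(struct toy_ac *ac, struct toy_slab *slabp, int *batchcount)
{
    int moved = 0;

    while (slabp->inuse < OBJS_PER_SLAB && (*batchcount)--) {
        ac->entry[ac->avail++] = slabp->objs[slabp->inuse++];
        moved++;
    }
    return moved;
}

int main(void)
{
    static int storage[OBJS_PER_SLAB];
    struct toy_slab slab = { .inuse = 0 };
    struct toy_ac ac = { .avail = 0, .batchcount = 3 };
    int batch = ac.batchcount;
    int i;

    for (i = 0; i < OBJS_PER_SLAB; i++)
        slab.objs[i] = &storage[i];

    toy_refill_from_slab(&ac, &slab, &batch);
    printf("refilled %u objects, slab now has %u in use\n", ac.avail, slab.inuse);

    /* As at the end of cache_alloc_refill(): return the last object pushed. */
    printf("allocated object: %p\n", ac.entry[--ac.avail]);
    return 0;
}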
When none of the slabs has any free objects left, cache_grow() has to be called to increase the cache's capacity; that function will be covered later, when slab allocation itself is analyzed.