The slab allocator creates a new slab for a cache when the following two conditions are met:
1. An object allocation is requested, but the local (per-CPU) cache has no free objects left and must be refilled.
2. The lists maintained by kmem_list3 contain no slab at all, or every slab is sitting on the full list.
When this happens, cache_grow() is called to create a new slab and enlarge the cache's capacity; a simplified sketch of this decision path follows.
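To make the two trigger conditions concrete, here is a minimal user-space sketch, not the actual kernel code: the names must_grow, toy_array_cache and toy_kmem_list3 are illustrative stand-ins for the logic in cache_alloc_refill(), which falls back to cache_grow() only when the local cache is empty and neither the partial nor the free slab list can supply an object.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the state cache_alloc_refill() inspects; the field and
 * function names are illustrative, not the kernel's. */
struct toy_array_cache { unsigned int avail; };            /* local (per-CPU) cache */
struct toy_kmem_list3  { int partial_slabs; int free_slabs; };

/* A new slab is needed only when the local cache is empty AND no slab on
 * this node has free objects (everything is full or no slab exists). */
static bool must_grow(const struct toy_array_cache *ac,
                      const struct toy_kmem_list3 *l3)
{
    if (ac->avail > 0)
        return false;              /* condition 1 not met: no refill needed */
    return l3->partial_slabs == 0 && l3->free_slabs == 0;  /* condition 2 */
}

int main(void)
{
    struct toy_array_cache ac = { .avail = 0 };
    struct toy_kmem_list3  l3 = { .partial_slabs = 0, .free_slabs = 0 };

    printf("need cache_grow()? %s\n", must_grow(&ac, &l3) ? "yes" : "no");
    return 0;
}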
Related reading:
Linux Slab Allocator (Part 1) -- Overview
Linux Slab Allocator (Part 2) -- Initialization
Linux Slab Allocator (Part 3) -- Creating a Cache
Linux Slab Allocator (Part 4) -- Allocating Objects
The code flow of cache_grow() is shown below:
static int cache_grow(struct kmem_cache *cachep,
        gfp_t flags, int nodeid, void *objp)
{
    struct slab *slabp;
    size_t offset;
    gfp_t local_flags;
    struct kmem_list3 *l3;

    /*
     * Be lazy and only check for valid flags here, keeping it out of the
     * critical path in kmem_cache_alloc().
     */
    BUG_ON(flags & GFP_SLAB_BUG_MASK);
    local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

    /* Take the l3 list lock to change the colour_next on this node */
    check_irq_off();
    l3 = cachep->nodelists[nodeid];
    spin_lock(&l3->list_lock);

    /* Get colour for the slab, and calc the next value. */
    /* determine the colour index of the slab about to be created */
    offset = l3->colour_next;
    /* advance the colour index for the next slab */
    l3->colour_next++;
    /* the colour index must stay below the number of colours */
    if (l3->colour_next >= cachep->colour)
        l3->colour_next = 0;
    spin_unlock(&l3->list_lock);

    /* convert the colour index into the byte offset (colour) of the new slab */
    offset *= cachep->colour_off;

    if (local_flags & __GFP_WAIT)
        local_irq_enable();

    /*
     * The test for missing atomic flag is performed here, rather than
     * the more obvious place, simply to reduce the critical path length
     * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
     * will eventually be caught here (where it matters).
     */
    kmem_flagcheck(cachep, flags);

    /*
     * Get mem for the objs. Attempt to allocate a physical page from
     * 'nodeid'.
     */
    if (!objp)
        /* allocate page frames from the buddy system; this is the interface
         * between the slab allocator and the buddy system */
        objp = kmem_getpages(cachep, local_flags, nodeid);
    if (!objp)
        goto failed;

    /* Get slab management. */
    /* allocate the slab management area */
    slabp = alloc_slabmgmt(cachep, objp, offset,
            local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
    if (!slabp)
        goto opps1;

    /* map the pages to the slab and the cache, so that the slab descriptor
     * and cache descriptor can be located quickly from an object address */
    slab_map_pages(cachep, slabp, objp);

    /* initialize the objects in the slab */
    cache_init_objs(cachep, slabp);

    if (local_flags & __GFP_WAIT)
        local_irq_disable();
    check_irq_off();
    spin_lock(&l3->list_lock);

    /* Make slab active. */
    /* add the newly created slab to the slabs_free list */
    list_add_tail(&slabp->list, &(l3->slabs_free));
    STATS_INC_GROWN(cachep);
    l3->free_objects += cachep->num;
    spin_unlock(&l3->list_lock);
    return 1;
opps1:
    kmem_freepages(cachep, objp);
failed:
    if (local_flags & __GFP_WAIT)
        local_irq_disable();
    return 0;
}
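The colour handling at the top of cache_grow() is plain modular arithmetic: each new slab gets offset = colour_next * colour_off, and colour_next wraps back to 0 once it reaches cachep->colour, so consecutive slabs start their objects at different cache-line-aligned offsets. The toy program below reproduces that arithmetic outside the kernel; the struct and function names are assumptions made for illustration, only the fields colour, colour_off and colour_next mirror the listing above.

#include <stdio.h>

/* Minimal stand-in for the colour-related fields of kmem_cache/kmem_list3. */
struct toy_cache {
    unsigned int colour;      /* number of distinct colours */
    unsigned int colour_off;  /* byte granularity of one colour (e.g. cache line size) */
    unsigned int colour_next; /* colour index for the next slab on this node */
};

/* Same arithmetic as cache_grow(): take the current colour index, advance it,
 * wrap it, then scale by colour_off to get the byte offset inside the slab. */
static unsigned int next_colour_offset(struct toy_cache *c)
{
    unsigned int offset = c->colour_next++;
    if (c->colour_next >= c->colour)
        c->colour_next = 0;
    return offset * c->colour_off;
}

int main(void)
{
    struct toy_cache c = { .colour = 4, .colour_off = 64, .colour_next = 0 };

    /* Successive slabs start at 0, 64, 128, 192 bytes, then wrap back to 0. */
    for (int i = 0; i < 6; i++)
        printf("slab %d: colour offset = %u bytes\n", i, next_colour_offset(&c));
    return 0;
}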
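slab_map_pages() exists so that, given only an object address, the allocator can find the owning slab and cache without walking any lists: each page frame backing the slab records a pointer to its kmem_cache and its slab descriptor. The sketch below models that idea in user space with an explicit per-page table; how the kernel actually stores the two pointers in struct page varies by version, so every name here (toy_map_pages, page_table, addr_to_pfn) is a hypothetical stand-in, not a kernel API.

#include <stdio.h>
#include <stdint.h>

#define TOY_PAGE_SIZE 4096
#define TOY_NPAGES    16

struct toy_cache { const char *name; };
struct toy_slab  { void *s_mem; };

/* Toy analogue of per-page bookkeeping: one entry per page frame,
 * filled in by the equivalent of slab_map_pages(). */
static struct { struct toy_cache *cache; struct toy_slab *slab; } page_table[TOY_NPAGES];

static size_t addr_to_pfn(const void *base, const void *addr)
{
    return ((uintptr_t)addr - (uintptr_t)base) / TOY_PAGE_SIZE;
}

/* Record, for every page of the slab, which cache and slab own it. */
static void toy_map_pages(struct toy_cache *c, struct toy_slab *s,
                          const void *base, const void *mem, size_t npages)
{
    size_t first = addr_to_pfn(base, mem);
    for (size_t i = 0; i < npages; i++) {
        page_table[first + i].cache = c;
        page_table[first + i].slab  = s;
    }
}

int main(void)
{
    static char memory[TOY_NPAGES * TOY_PAGE_SIZE];   /* pretend physical memory */
    struct toy_cache cache = { .name = "toy_cache" };
    struct toy_slab  slab  = { .s_mem = &memory[2 * TOY_PAGE_SIZE] };

    toy_map_pages(&cache, &slab, memory, slab.s_mem, 2);

    /* Given an arbitrary object address, find its owning cache in O(1). */
    void *obj = &memory[2 * TOY_PAGE_SIZE + 100];
    printf("object belongs to cache '%s'\n",
           page_table[addr_to_pfn(memory, obj)].cache->name);
    return 0;
}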