A new cache must be created with the kmem_cache_create() function, whose prototype is as follows:
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *))

- name: the name of the new cache
- size: the size of the objects the cache will allocate
- align: the alignment required for the objects
- flags: flags controlling how the cache is created
- ctor: the constructor invoked when objects are created
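Before walking through the implementation, here is a minimal usage sketch of this API as seen from a kernel module. The module, struct foo, foo_cache and the function names are all made up for illustration; only the slab calls themselves are the real API:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

struct foo {				/* made-up object type */
	int a;
	char b[16];
};

static struct kmem_cache *foo_cachep;

static void foo_ctor(void *obj)
{
	memset(obj, 0, sizeof(struct foo));	/* runs once per newly built object */
}

static int __init foo_init(void)
{
	struct foo *f;

	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, foo_ctor);
	if (!foo_cachep)
		return -ENOMEM;

	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);	/* take an object out */
	if (f)
		kmem_cache_free(foo_cachep, f);		/* and put it back */
	return 0;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cachep);	/* the cache must be empty here */
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

Passing SLAB_HWCACHE_ALIGN here selects the hardware-cache-line alignment branch discussed in the listing below.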
The actual work of kmem_cache_create() is to allocate a cache descriptor, the array_cache descriptors and the kmem_list3 descriptors for the new cache, and to initialize the fields of these three structures from the parameters it receives. A newly created cache is empty: it contains no slabs.
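As a reminder from the earlier parts of this series, these are the descriptor fields the function fills in below. This is an abridged sketch of struct kmem_cache for this kernel generation, keeping only the fields that appear in the listing:

/* Abridged: only the fields touched in the listing below. */
struct kmem_cache {
	struct array_cache *array[NR_CPUS];	/* per-CPU object caches */
	unsigned int buffer_size;		/* object size incl. padding */
	u32 reciprocal_buffer_size;		/* for fast division by buffer_size */
	unsigned int flags;			/* constant flags */
	unsigned int num;			/* objects per slab */
	unsigned int gfporder;			/* pages per slab = 2^gfporder */
	gfp_t gfpflags;				/* forced GFP flags, e.g. GFP_DMA */
	size_t colour;				/* number of slab colours */
	unsigned int colour_off;		/* colour offset unit */
	struct kmem_cache *slabp_cache;		/* cache for off-slab management areas */
	unsigned int slab_size;			/* size of the slab management area */
	void (*ctor)(void *obj);		/* object constructor */
	const char *name;
	struct list_head next;			/* link in cache_chain */
	struct kmem_list3 *nodelists[MAX_NUMNODES]; /* per-node slab lists */
};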
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
	unsigned long flags, void (*ctor)(void *))
{
	size_t left_over, slab_size, ralign;
	struct kmem_cache *cachep = NULL, *pc;
	gfp_t gfp;

	/*
	 * Sanity checks... these are all serious usage bugs.
	 */
	/* Reject the clearly invalid cases:
	 * 1. the cache name is NULL
	 * 2. we are running in interrupt context
	 * 3. the object size is smaller than the processor word size
	 * 4. the object size exceeds the largest general-cache size */
	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
	    size > KMALLOC_MAX_SIZE) {
		printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
				name);
		BUG();
	}

	/*
	 * We use cache_chain_mutex to ensure a consistent view of
	 * cpu_online_mask as well. Please see cpuup_callback
	 */
	if (slab_is_available()) {
		get_online_cpus();
		mutex_lock(&cache_chain_mutex);
	}

	list_for_each_entry(pc, &cache_chain, next) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module. Print a warning.
		 */
		res = probe_kernel_address(pc->name, tmp);
		if (res) {
			printk(KERN_ERR
			       "SLAB: cache with size %d has lost its name\n",
			       pc->buffer_size);
			continue;
		}

		if (!strcmp(pc->name, name)) {
			printk(KERN_ERR
			       "kmem_cache_create: duplicate cache %s\n", name);
			dump_stack();
			goto oops;
		}
	}

#if DEBUG
	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
#if FORCED_DEBUG
	/*
	 * Enable redzoning and last user accounting, except for caches with
	 * large objects, if the increased size would increase the object size
	 * above the next power of two: caches with object sizes just above a
	 * power of two have a significant amount of internal fragmentation.
	 */
	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
						2 * sizeof(unsigned long long)))
		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
	if (!(flags & SLAB_DESTROY_BY_RCU))
		flags |= SLAB_POISON;
#endif
	if (flags & SLAB_DESTROY_BY_RCU)
		BUG_ON(flags & SLAB_POISON);
#endif
	/*
	 * Always checks flags, a caller might be expecting debug support which
	 * isn't available.
	 */
	BUG_ON(flags & ~CREATE_MASK);

	/*
	 * Check that size is in terms of words. This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab bufctl's are also correctly aligned.
	 */
	/* If the object size is not a multiple of the word size, round it
	 * up to the processor word size. */
	if (size & (BYTES_PER_WORD - 1)) {
		size += (BYTES_PER_WORD - 1);
		size &= ~(BYTES_PER_WORD - 1);
	}
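	/*
	 * Worked example (annotation, not in the kernel source): on a
	 * 32-bit machine, BYTES_PER_WORD == 4, so a 13-byte object becomes
	 * (13 + 3) & ~3 == 16 bytes -- the classic mask-based round-up idiom.
	 */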

	/* calculate the final buffer alignment: */

	/* 1) arch recommendation: can be overridden for debug */
	/* alignment to the hardware cache line was requested */
	if (flags & SLAB_HWCACHE_ALIGN) {
		/*
		 * Default alignment: as specified by the arch code. Except if
		 * an object is really small, then squeeze multiple objects into
		 * one cacheline.
		 */
		ralign = cache_line_size();	/* start from the L1 cache line size */
		/* while the object is small enough, keep halving the
		 * alignment so that several objects fit into one line */
		while (size <= ralign / 2)
			ralign /= 2;
	} else {
		ralign = BYTES_PER_WORD;	/* default: the processor word size */
	}
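	/*
	 * Worked example (annotation): with a 64-byte L1 line and a 20-byte
	 * object, the loop halves ralign 64 -> 32 (since 20 <= 32) and then
	 * stops (20 > 16), so two objects share each cache line.
	 */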

	/*
	 * Redzoning and user store require word alignment or possibly larger.
	 * Note this will be overridden by architecture or caller mandated
	 * alignment if either is greater than BYTES_PER_WORD.
	 */
	/* with debugging enabled, adjust the alignment as the debug
	 * options require */
	if (flags & SLAB_STORE_USER)
		ralign = BYTES_PER_WORD;

	if (flags & SLAB_RED_ZONE) {
		ralign = REDZONE_ALIGN;
		/* If redzoning, ensure that the second redzone is suitably
		 * aligned, by adjusting the object size accordingly. */
		size += REDZONE_ALIGN - 1;
		size &= ~(REDZONE_ALIGN - 1);
	}
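	/*
	 * Example (annotation): where REDZONE_ALIGN is 8 (its typical value),
	 * a 20-byte object is padded to 24 bytes so the red zone that follows
	 * the object stays 64-bit aligned.
	 */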

	/* 2) arch mandated alignment */
	if (ralign < ARCH_SLAB_MINALIGN) {
		ralign = ARCH_SLAB_MINALIGN;
	}
	/* 3) caller mandated alignment */
	if (ralign < align) {
		ralign = align;
	}
	/* disable debug if necessary */
	if (ralign > __alignof__(unsigned long long))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	/*
	 * 4) Store it.
	 */
	align = ralign;

	if (slab_is_available())
		gfp = GFP_KERNEL;
	else
		gfp = GFP_NOWAIT;

	/* Get cache's description obj. */
	/* allocate a zeroed cache descriptor from cache_cache, the cache
	 * that holds the kmem_cache descriptors themselves */
	cachep = kmem_cache_zalloc(&cache_cache, gfp);
	if (!cachep)
		goto oops;

#if DEBUG
	cachep->obj_size = size;

	/*
	 * Both debugging options require word-alignment which is calculated
	 * into align above.
	 */
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
		cachep->obj_offset += sizeof(unsigned long long);
		size += 2 * sizeof(unsigned long long);
	}
	if (flags & SLAB_STORE_USER) {
		/* user store requires one word storage behind the end of
		 * the real object. But if the second red zone needs to be
		 * aligned to 64 bits, we must allow that much space.
		 */
		if (flags & SLAB_RED_ZONE)
			size += REDZONE_ALIGN;
		else
			size += BYTES_PER_WORD;
	}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
	    && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
		size = PAGE_SIZE;
	}
#endif
#endif

	/*
	 * Determine if the slab management is 'on' or 'off' slab.
	 * (bootstrapping cannot cope with offslab caches so don't do
	 * it too early on.)
	 */
	/* If the object size is at least 1/8 of the page size and we are
	 * past early slab initialization, keep the slab descriptor outside
	 * the slab to leave more room for the objects. */
	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
		/*
		 * Size is large, assume best to place the slab management obj
		 * off-slab (should allow better packing of objs).
		 */
		flags |= CFLGS_OFF_SLAB;
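	/*
	 * Example (annotation): with 4 KB pages the threshold is 512 bytes,
	 * so e.g. a cache of 1024-byte objects keeps its struct slab and
	 * bufctl array outside the slab's own pages.
	 */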

	/* round the object size up to the alignment chosen above */
	size = ALIGN(size, align);

	/* compute the number of objects per slab and the page order of a
	 * slab; the return value is the leftover space, i.e. the
	 * fragmentation */
	left_over = calculate_slab_order(cachep, size, align, flags);

	if (!cachep->num) {
		printk(KERN_ERR
		       "kmem_cache_create: couldn't create cache %s.\n", name);
		kmem_cache_free(&cache_cache, cachep);
		cachep = NULL;
		goto oops;
	}
	/* round the size of the slab management area up to the alignment */
	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
			  + sizeof(struct slab), align);
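	/*
	 * Example (annotation, sizes assumed): for num == 16 objects and a
	 * 4-byte kmem_bufctl_t, the management area is 16*4 bytes of bufctl
	 * entries plus sizeof(struct slab), rounded up to this alignment.
	 */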

	/*
	 * If the slab has been placed off-slab, and we have enough space then
	 * move it on-slab. This is at the expense of any extra colouring.
	 */
	/* If we chose off-slab placement above but the leftover space can
	 * hold the management area, switch back to on-slab placement: this
	 * saves the external allocation at the cost of fewer colours. */
	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
		flags &= ~CFLGS_OFF_SLAB;
		left_over -= slab_size;
	}

	/* If the management area really stays off-slab, it does not have to
	 * follow this cache's alignment, so recompute slab_size without the
	 * rounding. */
	if (flags & CFLGS_OFF_SLAB) {
		/* really off slab. No need for manual alignment */
		slab_size =
		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);

#ifdef CONFIG_PAGE_POISONING
		/* If we're going to use the generic kernel_map_pages()
		 * poisoning, then it's going to smash the contents of
		 * the redzone and userword anyhow, so switch them off.
		 */
		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
	}

	/* the colour offset unit is the L1 cache line size */
	cachep->colour_off = cache_line_size();
	/* Offset must be a multiple of the alignment. */
	if (cachep->colour_off < align)	/* but never smaller than align */
		cachep->colour_off = align;
	/* number of available colours */
	cachep->colour = left_over / cachep->colour_off;
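	/*
	 * Example (annotation): 192 leftover bytes with a 64-byte colour_off
	 * give colour == 3, so successive slabs start their objects at
	 * offsets 0, 64 and 128, spreading them across cache lines.
	 */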
	cachep->slab_size = slab_size;
	cachep->flags = flags;
	cachep->gfpflags = 0;
	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
		cachep->gfpflags |= GFP_DMA;
	cachep->buffer_size = size;
	cachep->reciprocal_buffer_size = reciprocal_value(size);
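	/*
	 * Annotation: reciprocal_value() precomputes a multiplier so that
	 * later divisions by buffer_size (turning an offset within a slab
	 * into an object index) become a multiply and a shift instead of a
	 * hardware division.
	 */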

	if (flags & CFLGS_OFF_SLAB) {
		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
		/*
		 * This is a possibility for one of the malloc_sizes caches.
		 * But since we go off slab only for object size greater than
		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
		 * this should not happen at all.
		 * But leave a BUG_ON for some lucky dude.
		 */
		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
	}
	cachep->ctor = ctor;
	cachep->name = name;

	if (setup_cpu_cache(cachep, gfp)) {
		__kmem_cache_destroy(cachep);
		cachep = NULL;
		goto oops;
	}

	/* cache setup completed, link it into the list */
	/* add the new cache descriptor to cache_chain */
	list_add(&cachep->next, &cache_chain);
oops:
	if (!cachep && (flags & SLAB_PANIC))
		panic("kmem_cache_create(): failed to create slab `%s'\n",
		      name);
	if (slab_is_available()) {
		mutex_unlock(&cache_chain_mutex);
		put_online_cpus();
	}
	return cachep;
}
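To make the slab geometry above concrete, here is a small user-space sketch in the spirit of cache_estimate(), the helper that calculate_slab_order() calls for each page order. The constants (page size, sizeof(kmem_bufctl_t), sizeof(struct slab)) are assumptions for illustration, not values taken from any particular build:

#include <stdio.h>

#define SLAB_BYTES  4096UL		/* one page per slab, assumed 4 KB */
#define BUFCTL_SIZE 4UL			/* assumed sizeof(kmem_bufctl_t) */
#define SLAB_DESC   32UL		/* assumed sizeof(struct slab) */

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* On-slab estimate: find the largest n such that the aligned management
 * area (struct slab plus n bufctl entries) and n objects still fit, and
 * report what is left over for colouring. */
static unsigned long estimate(unsigned long size, unsigned long align,
			      unsigned long *num)
{
	unsigned long n = 0;

	while (ALIGN_UP(SLAB_DESC + (n + 1) * BUFCTL_SIZE, align) +
	       (n + 1) * size <= SLAB_BYTES)
		n++;
	*num = n;
	return SLAB_BYTES - ALIGN_UP(SLAB_DESC + n * BUFCTL_SIZE, align)
			  - n * size;
}

int main(void)
{
	unsigned long num, left;

	left = estimate(256, 32, &num);		/* 15 objects, 160 bytes over */
	printf("256-byte objects: %lu per slab, %lu bytes for colouring\n",
	       num, left);

	left = estimate(100, 4, &num);		/* 39 objects, 8 bytes over */
	printf("100-byte objects: %lu per slab, %lu bytes for colouring\n",
	       num, left);
	return 0;
}

The leftover bytes reported here are exactly the left_over that kmem_cache_create() divides by colour_off to obtain the number of colours.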