From 837d678dc264c797c16f81cf56f615f7544891c1 Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Thu, 16 Aug 2012 00:02:40 +0900
Subject: slub: remove one code path and reduce lock contention in __slab_free()

When we try to free an object, there are cases where we need to take the
node lock. That is a necessary step to prevent a race. After taking the
lock we call cmpxchg_double_slab(), but there is a scenario in which
cmpxchg_double_slab() fails even though we hold the lock. The following
example illustrates it.

	CPU A			CPU B
	need lock
	...			need lock
	...			lock!!
	lock..but spin		free success
	spin...			unlock
	lock!!
	free fail

In this case CPU A retries while still holding the lock. For CPU A,
"release the lock first, and re-take it if necessary" is the preferable
policy, for two reasons.

First, it makes __slab_free()'s logic simpler. With this patch the
'was_frozen = 1' case is always handled without taking the lock, so one
code path can be removed.

Second, it may reduce lock contention. By the time we retry, the state of
the slab has already changed, so in almost every case we no longer need
the lock at all; releasing it first and re-taking it only when necessary
helps here.

Signed-off-by: Joonsoo Kim
Acked-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 34 ++++++++++++++--------------------
 1 file changed, 14 insertions(+), 20 deletions(-)

(limited to 'mm')

diff --git a/mm/slub.c b/mm/slub.c
index a0d698467f70..e7aec2001ae5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2459,7 +2459,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	void *prior;
 	void **object = (void *)x;
 	int was_frozen;
-	int inuse;
 	struct page new;
 	unsigned long counters;
 	struct kmem_cache_node *n = NULL;
@@ -2472,13 +2471,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 
 	do {
+		if (unlikely(n)) {
+			spin_unlock_irqrestore(&n->list_lock, flags);
+			n = NULL;
+		}
 		prior = page->freelist;
 		counters = page->counters;
 		set_freepointer(s, object, prior);
 		new.counters = counters;
 		was_frozen = new.frozen;
 		new.inuse--;
-		if ((!new.inuse || !prior) && !was_frozen && !n) {
+		if ((!new.inuse || !prior) && !was_frozen) {

 			if (!kmem_cache_debug(s) && !prior)

@@ -2503,7 +2506,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			}
 		}
-		inuse = new.inuse;

 	} while (!cmpxchg_double_slab(s, page,
 		prior, counters,
@@ -2529,25 +2531,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 	}

+	if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+		goto slab_empty;
+
 	/*
-	 * was_frozen may have been set after we acquired the list_lock in
-	 * an earlier loop. So we need to check it here again.
+	 * Objects left in the slab. If it was not on the partial list before
+	 * then add it.
 	 */
-	if (was_frozen)
-		stat(s, FREE_FROZEN);
-	else {
-		if (unlikely(!inuse && n->nr_partial > s->min_partial))
-			goto slab_empty;
-
-		/*
-		 * Objects left in the slab. If it was not on the partial list before
-		 * then add it.
- */ - if (unlikely(!prior)) { - remove_full(s, page); - add_partial(n, page, DEACTIVATE_TO_TAIL); - stat(s, FREE_ADD_PARTIAL); - } + if (kmem_cache_debug(s) && unlikely(!prior)) { + remove_full(s, page); + add_partial(n, page, DEACTIVATE_TO_TAIL); + stat(s, FREE_ADD_PARTIAL); } spin_unlock_irqrestore(&n->list_lock, flags); return; -- cgit v1.2.3 From b7454ad3cfc3043c5264729a6204f049fe1f34b1 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Fri, 19 Oct 2012 18:20:25 +0400 Subject: mm/sl[au]b: Move slabinfo processing to slab_common.c This patch moves all the common machinery to slabinfo processing to slab_common.c. We can do better by noticing that the output is heavily common, and having the allocators to just provide finished information about this. But after this first step, this can be done easier. Signed-off-by: Glauber Costa Acked-by: Christoph Lameter CC: David Rientjes Signed-off-by: Pekka Enberg --- mm/slab.c | 72 ++++++++++---------------------------------------------- mm/slab.h | 8 +++++++ mm/slab_common.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ mm/slub.c | 51 ++++----------------------------------- 4 files changed, 96 insertions(+), 105 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index 33d3363658df..a6e045c13b8d 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4277,7 +4277,7 @@ out: #ifdef CONFIG_SLABINFO -static void print_slabinfo_header(struct seq_file *m) +void print_slabinfo_header(struct seq_file *m) { /* * Output format version, so at least we can change it @@ -4300,28 +4300,7 @@ static void print_slabinfo_header(struct seq_file *m) seq_putc(m, '\n'); } -static void *s_start(struct seq_file *m, loff_t *pos) -{ - loff_t n = *pos; - - mutex_lock(&slab_mutex); - if (!n) - print_slabinfo_header(m); - - return seq_list_start(&slab_caches, *pos); -} - -static void *s_next(struct seq_file *m, void *p, loff_t *pos) -{ - return seq_list_next(p, &slab_caches, pos); -} - -static void s_stop(struct seq_file *m, void *p) -{ - mutex_unlock(&slab_mutex); -} - -static int s_show(struct seq_file *m, void *p) +int slabinfo_show(struct seq_file *m, void *p) { struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); struct slab *slabp; @@ -4418,27 +4397,6 @@ static int s_show(struct seq_file *m, void *p) return 0; } -/* - * slabinfo_op - iterator that generates /proc/slabinfo - * - * Output layout: - * cache-name - * num-active-objs - * total-objs - * object size - * num-active-slabs - * total-slabs - * num-pages-per-slab - * + further values on SMP and with statistics enabled - */ - -static const struct seq_operations slabinfo_op = { - .start = s_start, - .next = s_next, - .stop = s_stop, - .show = s_show, -}; - #define MAX_SLABINFO_WRITE 128 /** * slabinfo_write - Tuning for the slab allocator @@ -4447,7 +4405,7 @@ static const struct seq_operations slabinfo_op = { * @count: data length * @ppos: unused */ -static ssize_t slabinfo_write(struct file *file, const char __user *buffer, +ssize_t slabinfo_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; @@ -4490,19 +4448,6 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer, return res; } -static int slabinfo_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &slabinfo_op); -} - -static const struct file_operations proc_slabinfo_operations = { - .open = slabinfo_open, - .read = seq_read, - .write = slabinfo_write, - .llseek = seq_lseek, - .release = seq_release, -}; - #ifdef 
CONFIG_DEBUG_SLAB_LEAK static void *leaks_start(struct seq_file *m, loff_t *pos) @@ -4631,6 +4576,16 @@ static int leaks_show(struct seq_file *m, void *p) return 0; } +static void *s_next(struct seq_file *m, void *p, loff_t *pos) +{ + return seq_list_next(p, &slab_caches, pos); +} + +static void s_stop(struct seq_file *m, void *p) +{ + mutex_unlock(&slab_mutex); +} + static const struct seq_operations slabstats_op = { .start = leaks_start, .next = s_next, @@ -4665,7 +4620,6 @@ static const struct file_operations proc_slabstats_operations = { static int __init slab_proc_init(void) { - proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations); #ifdef CONFIG_DEBUG_SLAB_LEAK proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations); #endif diff --git a/mm/slab.h b/mm/slab.h index 7deeb449a301..dc78101962a1 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -47,4 +47,12 @@ static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t siz int __kmem_cache_shutdown(struct kmem_cache *); +struct seq_file; +struct file; +void print_slabinfo_header(struct seq_file *m); + +int slabinfo_show(struct seq_file *m, void *p); + +ssize_t slabinfo_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos); #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index 069a24e64403..2e4b4c6d89e2 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include #include #include @@ -192,3 +194,71 @@ int slab_is_available(void) { return slab_state >= UP; } + +#ifdef CONFIG_SLABINFO +static void *s_start(struct seq_file *m, loff_t *pos) +{ + loff_t n = *pos; + + mutex_lock(&slab_mutex); + if (!n) + print_slabinfo_header(m); + + return seq_list_start(&slab_caches, *pos); +} + +static void *s_next(struct seq_file *m, void *p, loff_t *pos) +{ + return seq_list_next(p, &slab_caches, pos); +} + +static void s_stop(struct seq_file *m, void *p) +{ + mutex_unlock(&slab_mutex); +} + +static int s_show(struct seq_file *m, void *p) +{ + return slabinfo_show(m, p); +} + +/* + * slabinfo_op - iterator that generates /proc/slabinfo + * + * Output layout: + * cache-name + * num-active-objs + * total-objs + * object size + * num-active-slabs + * total-slabs + * num-pages-per-slab + * + further values on SMP and with statistics enabled + */ +static const struct seq_operations slabinfo_op = { + .start = s_start, + .next = s_next, + .stop = s_stop, + .show = s_show, +}; + +static int slabinfo_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &slabinfo_op); +} + +static const struct file_operations proc_slabinfo_operations = { + .open = slabinfo_open, + .read = seq_read, + .write = slabinfo_write, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init slab_proc_init(void) +{ + proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations); + return 0; +} +module_init(slab_proc_init); +#endif /* CONFIG_SLABINFO */ diff --git a/mm/slub.c b/mm/slub.c index a0d698467f70..77a0c8a9fc75 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5405,7 +5405,7 @@ __initcall(slab_sysfs_init); * The /proc/slabinfo ABI */ #ifdef CONFIG_SLABINFO -static void print_slabinfo_header(struct seq_file *m) +void print_slabinfo_header(struct seq_file *m) { seq_puts(m, "slabinfo - version: 2.1\n"); seq_puts(m, "# name " @@ -5415,28 +5415,7 @@ static void print_slabinfo_header(struct seq_file *m) seq_putc(m, '\n'); } -static void *s_start(struct seq_file *m, loff_t *pos) -{ - loff_t n = *pos; - - 
mutex_lock(&slab_mutex); - if (!n) - print_slabinfo_header(m); - - return seq_list_start(&slab_caches, *pos); -} - -static void *s_next(struct seq_file *m, void *p, loff_t *pos) -{ - return seq_list_next(p, &slab_caches, pos); -} - -static void s_stop(struct seq_file *m, void *p) -{ - mutex_unlock(&slab_mutex); -} - -static int s_show(struct seq_file *m, void *p) +int slabinfo_show(struct seq_file *m, void *p) { unsigned long nr_partials = 0; unsigned long nr_slabs = 0; @@ -5472,29 +5451,9 @@ static int s_show(struct seq_file *m, void *p) return 0; } -static const struct seq_operations slabinfo_op = { - .start = s_start, - .next = s_next, - .stop = s_stop, - .show = s_show, -}; - -static int slabinfo_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &slabinfo_op); -} - -static const struct file_operations proc_slabinfo_operations = { - .open = slabinfo_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static int __init slab_proc_init(void) +ssize_t slabinfo_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) { - proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations); - return 0; + return -EIO; } -module_init(slab_proc_init); #endif /* CONFIG_SLABINFO */ -- cgit v1.2.3 From bcee6e2a13d580f6c21d748fcd7239ccc66cb4b8 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Fri, 19 Oct 2012 18:20:26 +0400 Subject: mm/sl[au]b: Move print_slabinfo_header to slab_common.c The header format is highly similar between slab and slub. The main difference lays in the fact that slab may optionally have statistics added here in case of CONFIG_SLAB_DEBUG, while the slub will stick them somewhere else. By making sure that information conditionally lives inside a globally-visible CONFIG_DEBUG_SLAB switch, we can move the header printing to a common location. Signed-off-by: Glauber Costa Acked-by: Christoph Lameter CC: David Rientjes Signed-off-by: Pekka Enberg --- mm/slab.c | 24 ------------------------ mm/slab.h | 2 -- mm/slab_common.c | 23 +++++++++++++++++++++++ mm/slub.c | 10 ---------- 4 files changed, 23 insertions(+), 36 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index a6e045c13b8d..73811ca0ae29 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4276,30 +4276,6 @@ out: } #ifdef CONFIG_SLABINFO - -void print_slabinfo_header(struct seq_file *m) -{ - /* - * Output format version, so at least we can change it - * without _too_ many complaints. 
- */ -#if STATS - seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); -#else - seq_puts(m, "slabinfo - version: 2.1\n"); -#endif - seq_puts(m, "# name " - " "); - seq_puts(m, " : tunables "); - seq_puts(m, " : slabdata "); -#if STATS - seq_puts(m, " : globalstat " - " "); - seq_puts(m, " : cpustat "); -#endif - seq_putc(m, '\n'); -} - int slabinfo_show(struct seq_file *m, void *p) { struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); diff --git a/mm/slab.h b/mm/slab.h index dc78101962a1..3442eb83ee1e 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -49,8 +49,6 @@ int __kmem_cache_shutdown(struct kmem_cache *); struct seq_file; struct file; -void print_slabinfo_header(struct seq_file *m); - int slabinfo_show(struct seq_file *m, void *p); ssize_t slabinfo_write(struct file *file, const char __user *buffer, diff --git a/mm/slab_common.c b/mm/slab_common.c index 2e4b4c6d89e2..c64a0438c1f3 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -196,6 +196,29 @@ int slab_is_available(void) } #ifdef CONFIG_SLABINFO +static void print_slabinfo_header(struct seq_file *m) +{ + /* + * Output format version, so at least we can change it + * without _too_ many complaints. + */ +#ifdef CONFIG_DEBUG_SLAB + seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); +#else + seq_puts(m, "slabinfo - version: 2.1\n"); +#endif + seq_puts(m, "# name " + " "); + seq_puts(m, " : tunables "); + seq_puts(m, " : slabdata "); +#ifdef CONFIG_DEBUG_SLAB + seq_puts(m, " : globalstat " + " "); + seq_puts(m, " : cpustat "); +#endif + seq_putc(m, '\n'); +} + static void *s_start(struct seq_file *m, loff_t *pos) { loff_t n = *pos; diff --git a/mm/slub.c b/mm/slub.c index 77a0c8a9fc75..6b5ee3472e18 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5405,16 +5405,6 @@ __initcall(slab_sysfs_init); * The /proc/slabinfo ABI */ #ifdef CONFIG_SLABINFO -void print_slabinfo_header(struct seq_file *m) -{ - seq_puts(m, "slabinfo - version: 2.1\n"); - seq_puts(m, "# name " - " "); - seq_puts(m, " : tunables "); - seq_puts(m, " : slabdata "); - seq_putc(m, '\n'); -} - int slabinfo_show(struct seq_file *m, void *p) { unsigned long nr_partials = 0; -- cgit v1.2.3 From 0d7561c61d76690ed84bd1016acc0fcbff063205 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Fri, 19 Oct 2012 18:20:27 +0400 Subject: sl[au]b: Process slabinfo_show in common code With all the infrastructure in place, we can now have slabinfo_show done from slab_common.c. A cache-specific function is called to grab information about the cache itself, since that is still heavily dependent on the implementation. But with the values produced by it, all the printing and handling is done from common code. 
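The split described above is easy to model outside the kernel: the allocator-specific side only fills in a plain statistics structure, while one common routine owns the /proc-style formatting. The standalone C sketch below shows that shape only; the model_* names and the numbers in it are invented for illustration and are not the kernel's API.

#include <stdio.h>

struct slabinfo_model {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

/* allocator-specific part: only gathers numbers for one cache */
static void model_get_slabinfo(const char *name, struct slabinfo_model *si)
{
        (void)name;                 /* a real allocator would look the cache up */
        si->active_objs = 120;
        si->num_objs = 128;
        si->objects_per_slab = 32;
        si->cache_order = 1;
}

/* common part: owns the output format, so every allocator prints alike */
static void model_show(const char *name)
{
        struct slabinfo_model si = { 0 };

        model_get_slabinfo(name, &si);
        printf("%-17s %6lu %6lu %4u %4d\n", name, si.active_objs,
               si.num_objs, si.objects_per_slab, 1 << si.cache_order);
}

int main(void)
{
        model_show("dentry");
        return 0;
}

Keeping the format string in exactly one place is what later lets the header and the per-line output stay consistent across slab and slub.
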
Signed-off-by: Glauber Costa CC: Christoph Lameter CC: David Rientjes Signed-off-by: Pekka Enberg --- mm/slab.c | 26 +++++++++++++++----------- mm/slab.h | 16 +++++++++++++++- mm/slab_common.c | 18 +++++++++++++++++- mm/slub.c | 24 ++++++++++-------------- 4 files changed, 57 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index 73811ca0ae29..6d5c83c6ddd5 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4276,9 +4276,8 @@ out: } #ifdef CONFIG_SLABINFO -int slabinfo_show(struct seq_file *m, void *p) +void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) { - struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); struct slab *slabp; unsigned long active_objs; unsigned long num_objs; @@ -4333,13 +4332,20 @@ int slabinfo_show(struct seq_file *m, void *p) if (error) printk(KERN_ERR "slab: cache %s error: %s\n", name, error); - seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", - name, active_objs, num_objs, cachep->size, - cachep->num, (1 << cachep->gfporder)); - seq_printf(m, " : tunables %4u %4u %4u", - cachep->limit, cachep->batchcount, cachep->shared); - seq_printf(m, " : slabdata %6lu %6lu %6lu", - active_slabs, num_slabs, shared_avail); + sinfo->active_objs = active_objs; + sinfo->num_objs = num_objs; + sinfo->active_slabs = active_slabs; + sinfo->num_slabs = num_slabs; + sinfo->shared_avail = shared_avail; + sinfo->limit = cachep->limit; + sinfo->batchcount = cachep->batchcount; + sinfo->shared = cachep->shared; + sinfo->objects_per_slab = cachep->num; + sinfo->cache_order = cachep->gfporder; +} + +void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) +{ #if STATS { /* list3 stats */ unsigned long high = cachep->high_mark; @@ -4369,8 +4375,6 @@ int slabinfo_show(struct seq_file *m, void *p) allochit, allocmiss, freehit, freemiss); } #endif - seq_putc(m, '\n'); - return 0; } #define MAX_SLABINFO_WRITE 128 diff --git a/mm/slab.h b/mm/slab.h index 3442eb83ee1e..5a43c2f13621 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -49,8 +49,22 @@ int __kmem_cache_shutdown(struct kmem_cache *); struct seq_file; struct file; -int slabinfo_show(struct seq_file *m, void *p); +struct slabinfo { + unsigned long active_objs; + unsigned long num_objs; + unsigned long active_slabs; + unsigned long num_slabs; + unsigned long shared_avail; + unsigned int limit; + unsigned int batchcount; + unsigned int shared; + unsigned int objects_per_slab; + unsigned int cache_order; +}; + +void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo); +void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s); ssize_t slabinfo_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos); #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index c64a0438c1f3..5fb753da6cf0 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -242,7 +242,23 @@ static void s_stop(struct seq_file *m, void *p) static int s_show(struct seq_file *m, void *p) { - return slabinfo_show(m, p); + struct kmem_cache *s = list_entry(p, struct kmem_cache, list); + struct slabinfo sinfo; + + memset(&sinfo, 0, sizeof(sinfo)); + get_slabinfo(s, &sinfo); + + seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", + s->name, sinfo.active_objs, sinfo.num_objs, s->size, + sinfo.objects_per_slab, (1 << sinfo.cache_order)); + + seq_printf(m, " : tunables %4u %4u %4u", + sinfo.limit, sinfo.batchcount, sinfo.shared); + seq_printf(m, " : slabdata %6lu %6lu %6lu", + sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail); + slabinfo_show_stats(m, s); + seq_putc(m, '\n'); + return 0; 
} /* diff --git a/mm/slub.c b/mm/slub.c index 6b5ee3472e18..472e739278b4 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5405,18 +5405,14 @@ __initcall(slab_sysfs_init); * The /proc/slabinfo ABI */ #ifdef CONFIG_SLABINFO -int slabinfo_show(struct seq_file *m, void *p) +void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) { unsigned long nr_partials = 0; unsigned long nr_slabs = 0; - unsigned long nr_inuse = 0; unsigned long nr_objs = 0; unsigned long nr_free = 0; - struct kmem_cache *s; int node; - s = list_entry(p, struct kmem_cache, list); - for_each_online_node(node) { struct kmem_cache_node *n = get_node(s, node); @@ -5429,16 +5425,16 @@ int slabinfo_show(struct seq_file *m, void *p) nr_free += count_partial(n, count_free); } - nr_inuse = nr_objs - nr_free; + sinfo->active_objs = nr_objs - nr_free; + sinfo->num_objs = nr_objs; + sinfo->active_slabs = nr_slabs; + sinfo->num_slabs = nr_slabs; + sinfo->objects_per_slab = oo_objects(s->oo); + sinfo->cache_order = oo_order(s->oo); +} - seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse, - nr_objs, s->size, oo_objects(s->oo), - (1 << oo_order(s->oo))); - seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0); - seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs, - 0UL); - seq_putc(m, '\n'); - return 0; +void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) +{ } ssize_t slabinfo_write(struct file *file, const char __user *buffer, -- cgit v1.2.3 From 1b4f59e356cc94929305bd107b7f38eec62715ad Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 22 Oct 2012 18:05:36 +0400 Subject: slub: Commonize slab_cache field in struct page Right now, slab and slub have fields in struct page to derive which cache a page belongs to, but they do it slightly differently. slab uses a field called slab_cache, that lives in the third double word. slub, uses a field called "slab", living outside of the doublewords area. Ideally, we could use the same field for this. Since slub heavily makes use of the doubleword region, there isn't really much room to move slub's slab_cache field around. Since slab does not have such strict placement restrictions, we can move it outside the doubleword area. The naming used by slab, "slab_cache", is less confusing, and it is preferred over slub's generic "slab". 
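To see why the field can move, it helps to picture the layout constraint: the members updated together by cmpxchg_double() must sit in one contiguous, suitably aligned "double word" region, while a plain back-pointer to the owning cache has no such restriction and can live outside it under a single shared name. The userspace sketch below models only that idea; model_page and model_cache are hypothetical stand-ins, not the real struct page.

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

struct model_cache;     /* stand-in for struct kmem_cache, used only as a pointer */

struct model_page {
        /* double-word region: freelist and counters must change together,
         * so they stay contiguous and aligned for a double-width cmpxchg */
        alignas(2 * sizeof(void *)) struct {
                void *freelist;
                unsigned long counters;
        } dw;

        /* outside the double-word area: safe spot for the back-pointer */
        struct model_cache *slab_cache;
};

int main(void)
{
        printf("dw at offset %zu, slab_cache at offset %zu, total %zu bytes\n",
               offsetof(struct model_page, dw),
               offsetof(struct model_page, slab_cache),
               sizeof(struct model_page));
        return 0;
}
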
Signed-off-by: Glauber Costa Acked-by: Christoph Lameter CC: David Rientjes Signed-off-by: Pekka Enberg --- include/linux/mm_types.h | 7 ++----- mm/slub.c | 24 ++++++++++++------------ 2 files changed, 14 insertions(+), 17 deletions(-) (limited to 'mm') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 31f8a3af7d94..2fef4e720e79 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -128,10 +128,7 @@ struct page { }; struct list_head list; /* slobs list of pages */ - struct { /* slab fields */ - struct kmem_cache *slab_cache; - struct slab *slab_page; - }; + struct slab *slab_page; /* slab fields */ }; /* Remainder is not double word aligned */ @@ -146,7 +143,7 @@ struct page { #if USE_SPLIT_PTLOCKS spinlock_t ptl; #endif - struct kmem_cache *slab; /* SLUB: Pointer to slab */ + struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ struct page *first_page; /* Compound tail pages */ }; diff --git a/mm/slub.c b/mm/slub.c index 16274b273c61..35483e0ab6bc 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1092,11 +1092,11 @@ static noinline struct kmem_cache_node *free_debug_processing( if (!check_object(s, page, object, SLUB_RED_ACTIVE)) goto out; - if (unlikely(s != page->slab)) { + if (unlikely(s != page->slab_cache)) { if (!PageSlab(page)) { slab_err(s, page, "Attempt to free object(0x%p) " "outside of slab", object); - } else if (!page->slab) { + } else if (!page->slab_cache) { printk(KERN_ERR "SLUB : no slab for object 0x%p.\n", object); @@ -1357,7 +1357,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) goto out; inc_slabs_node(s, page_to_nid(page), page->objects); - page->slab = s; + page->slab_cache = s; __SetPageSlab(page); if (page->pfmemalloc) SetPageSlabPfmemalloc(page); @@ -1424,7 +1424,7 @@ static void rcu_free_slab(struct rcu_head *h) else page = container_of((struct list_head *)h, struct page, lru); - __free_slab(page->slab, page); + __free_slab(page->slab_cache, page); } static void free_slab(struct kmem_cache *s, struct page *page) @@ -2617,9 +2617,9 @@ void kmem_cache_free(struct kmem_cache *s, void *x) page = virt_to_head_page(x); - if (kmem_cache_debug(s) && page->slab != s) { + if (kmem_cache_debug(s) && page->slab_cache != s) { pr_err("kmem_cache_free: Wrong slab cache. 
%s but object" - " is from %s\n", page->slab->name, s->name); + " is from %s\n", page->slab_cache->name, s->name); WARN_ON_ONCE(1); return; } @@ -3418,7 +3418,7 @@ size_t ksize(const void *object) return PAGE_SIZE << compound_order(page); } - return slab_ksize(page->slab); + return slab_ksize(page->slab_cache); } EXPORT_SYMBOL(ksize); @@ -3443,8 +3443,8 @@ bool verify_mem_not_deleted(const void *x) } slab_lock(page); - if (on_freelist(page->slab, page, object)) { - object_err(page->slab, page, object, "Object is on free-list"); + if (on_freelist(page->slab_cache, page, object)) { + object_err(page->slab_cache, page, object, "Object is on free-list"); rv = false; } else { rv = true; @@ -3475,7 +3475,7 @@ void kfree(const void *x) __free_pages(page, compound_order(page)); return; } - slab_free(page->slab, page, object, _RET_IP_); + slab_free(page->slab_cache, page, object, _RET_IP_); } EXPORT_SYMBOL(kfree); @@ -3686,11 +3686,11 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) if (n) { list_for_each_entry(p, &n->partial, lru) - p->slab = s; + p->slab_cache = s; #ifdef CONFIG_SLUB_DEBUG list_for_each_entry(p, &n->full, lru) - p->slab = s; + p->slab_cache = s; #endif } } -- cgit v1.2.3 From 999d8795d438d396936811b185428d70b7b7de6d Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Fri, 19 Oct 2012 09:33:10 -0300 Subject: mm/slob: Drop usage of page->private for storing page-sized allocations This field was being used to store size allocation so it could be retrieved by ksize(). However, it is a bad practice to not mark a page as a slab page and then use fields for special purposes. There is no need to store the allocated size and ksize() can simply return PAGE_SIZE << compound_order(page). Cc: Pekka Enberg Cc: Matt Mackall Acked-by: Christoph Lameter Signed-off-by: Ezequiel Garcia Signed-off-by: Pekka Enberg --- mm/slob.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) (limited to 'mm') diff --git a/mm/slob.c b/mm/slob.c index a08e4681fd0d..06a5ec70e728 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -28,9 +28,8 @@ * from kmalloc are prepended with a 4-byte header with the kmalloc size. * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls * alloc_pages() directly, allocating compound pages so the page order - * does not have to be separately tracked, and also stores the exact - * allocation size in page->private so that it can be used to accurately - * provide ksize(). These objects are detected in kfree() because slob_page() + * does not have to be separately tracked. + * These objects are detected in kfree() because PageSlab() * is false for them. 
* * SLAB is emulated on top of SLOB by simply calling constructors and @@ -455,11 +454,6 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) if (likely(order)) gfp |= __GFP_COMP; ret = slob_new_pages(gfp, order, node); - if (ret) { - struct page *page; - page = virt_to_page(ret); - page->private = size; - } trace_kmalloc_node(caller, ret, size, PAGE_SIZE << order, gfp, node); @@ -514,18 +508,20 @@ EXPORT_SYMBOL(kfree); size_t ksize(const void *block) { struct page *sp; + int align; + unsigned int *m; BUG_ON(!block); if (unlikely(block == ZERO_SIZE_PTR)) return 0; sp = virt_to_page(block); - if (PageSlab(sp)) { - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); - unsigned int *m = (unsigned int *)(block - align); - return SLOB_UNITS(*m) * SLOB_UNIT; - } else - return sp->private; + if (unlikely(!PageSlab(sp))) + return PAGE_SIZE << compound_order(sp); + + align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); + m = (unsigned int *)(block - align); + return SLOB_UNITS(*m) * SLOB_UNIT; } EXPORT_SYMBOL(ksize); -- cgit v1.2.3 From fe74fe2bf293d061826f0d7afc2ca8456bdbb40e Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Fri, 19 Oct 2012 09:33:11 -0300 Subject: mm/slob: Use object_size field in kmem_cache_size() Fields object_size and size are not the same: the latter might include slab metadata. Return object_size field in kmem_cache_size(). Also, improve trace accuracy by correctly tracing reported size. Cc: Pekka Enberg Cc: Matt Mackall Acked-by: Christoph Lameter Acked-by: David Rientjes Signed-off-by: Ezequiel Garcia Signed-off-by: Pekka Enberg --- mm/slob.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/slob.c b/mm/slob.c index 06a5ec70e728..287a88aa4a61 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -554,12 +554,12 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) if (c->size < PAGE_SIZE) { b = slob_alloc(c->size, flags, c->align, node); - trace_kmem_cache_alloc_node(_RET_IP_, b, c->size, + trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, SLOB_UNITS(c->size) * SLOB_UNIT, flags, node); } else { b = slob_new_pages(flags, get_order(c->size), node); - trace_kmem_cache_alloc_node(_RET_IP_, b, c->size, + trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size, PAGE_SIZE << get_order(c->size), flags, node); } @@ -606,7 +606,7 @@ EXPORT_SYMBOL(kmem_cache_free); unsigned int kmem_cache_size(struct kmem_cache *c) { - return c->size; + return c->object_size; } EXPORT_SYMBOL(kmem_cache_size); -- cgit v1.2.3 From 242860a47a75b933a79a30f6a40bf4858f4a3ecc Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Fri, 19 Oct 2012 09:33:12 -0300 Subject: mm/sl[aou]b: Move common kmem_cache_size() to slab.h This function is identically defined in all three allocators and it's trivial to move it to slab.h Since now it's static, inline, header-defined function this patch also drops the EXPORT_SYMBOL tag. 
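The pattern being applied here is the usual one for identical trivial accessors: define the function once as a static inline in the shared header so each allocator no longer needs its own exported copy. A minimal standalone model follows, with invented model_* names rather than the kernel's types.

#include <stdio.h>

struct model_cache {
        unsigned int object_size;       /* usable payload size */
        unsigned int size;              /* payload plus per-object metadata */
};

/* shared-header version: one definition, inlined into every caller,
 * no exported symbol required */
static inline unsigned int model_cache_size(const struct model_cache *c)
{
        return c->object_size;
}

int main(void)
{
        struct model_cache c = { .object_size = 192, .size = 256 };

        printf("usable object size: %u\n", model_cache_size(&c));
        return 0;
}
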
Cc: Pekka Enberg Cc: Matt Mackall Acked-by: Christoph Lameter Signed-off-by: Ezequiel Garcia Signed-off-by: Pekka Enberg --- include/linux/slab.h | 9 ++++++++- mm/slab.c | 6 ------ mm/slob.c | 6 ------ mm/slub.c | 9 --------- 4 files changed, 8 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/include/linux/slab.h b/include/linux/slab.h index 83d1a1454b7e..743a10415122 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -128,7 +128,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); -unsigned int kmem_cache_size(struct kmem_cache *); /* * Please use this macro to create slab caches. Simply specify the @@ -388,6 +387,14 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) return kmalloc_node(size, flags | __GFP_ZERO, node); } +/* + * Determine the size of a slab object + */ +static inline unsigned int kmem_cache_size(struct kmem_cache *s) +{ + return s->object_size; +} + void __init kmem_cache_init_late(void); #endif /* _LINUX_SLAB_H */ diff --git a/mm/slab.c b/mm/slab.c index 6d5c83c6ddd5..1f7fd5f51f87 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3969,12 +3969,6 @@ void kfree(const void *objp) } EXPORT_SYMBOL(kfree); -unsigned int kmem_cache_size(struct kmem_cache *cachep) -{ - return cachep->object_size; -} -EXPORT_SYMBOL(kmem_cache_size); - /* * This initializes kmem_list3 or resizes various caches for all nodes. */ diff --git a/mm/slob.c b/mm/slob.c index 287a88aa4a61..fffbc820774d 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -604,12 +604,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b) } EXPORT_SYMBOL(kmem_cache_free); -unsigned int kmem_cache_size(struct kmem_cache *c) -{ - return c->object_size; -} -EXPORT_SYMBOL(kmem_cache_size); - int __kmem_cache_shutdown(struct kmem_cache *c) { /* No way to check for remaining objects */ diff --git a/mm/slub.c b/mm/slub.c index 35483e0ab6bc..deee7c754a7d 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3121,15 +3121,6 @@ error: return -EINVAL; } -/* - * Determine the size of a slab object - */ -unsigned int kmem_cache_size(struct kmem_cache *s) -{ - return s->object_size; -} -EXPORT_SYMBOL(kmem_cache_size); - static void list_slab_objects(struct kmem_cache *s, struct page *page, const char *text) { -- cgit v1.2.3 From 8cf9864b1382851d90c7c505f8441c8928f1469e Mon Sep 17 00:00:00 2001 From: Ezequiel Garcia Date: Mon, 22 Oct 2012 09:04:31 -0300 Subject: mm/slob: Use free_page instead of put_page for page-size kmalloc allocations When freeing objects, the slob allocator currently free empty pages calling __free_pages(). However, page-size kmallocs are disposed using put_page() instead. It makes no sense to call put_page() for kernel pages that are provided by the object allocator, so we shouldn't be doing this ourselves. 
This is based on: commit d9b7f22623b5fa9cc189581dcdfb2ac605933bf4 Author: Glauber Costa slub: use free_page instead of put_page for freeing kmalloc allocation Cc: Christoph Lameter Cc: Pekka Enberg Cc: Matt Mackall Acked-by: Glauber Costa Signed-off-by: Ezequiel Garcia Signed-off-by: Pekka Enberg --- mm/slob.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/slob.c b/mm/slob.c index fffbc820774d..e7d790127e4b 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -500,7 +500,7 @@ void kfree(const void *block) unsigned int *m = (unsigned int *)(block - align); slob_free(m, *m + align); } else - put_page(sp); + __free_pages(sp, compound_order(sp)); } EXPORT_SYMBOL(kfree); -- cgit v1.2.3 From d8843922fba49e887874aa1f9e748d620c5092af Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Wed, 17 Oct 2012 15:36:51 +0400 Subject: slab: Ignore internal flags in cache creation Some flags are used internally by the allocators for management purposes. One example of that is the CFLGS_OFF_SLAB flag that slab uses to mark that the metadata for that cache is stored outside of the slab. No cache should ever pass those as a creation flags. We can just ignore this bit if it happens to be passed (such as when duplicating a cache in the kmem memcg patches). Because such flags can vary from allocator to allocator, we allow them to make their own decisions on that, defining SLAB_AVAILABLE_FLAGS with all flags that are valid at creation time. Allocators that doesn't have any specific flag requirement should define that to mean all flags. Common code will mask out all flags not belonging to that set. Acked-by: Christoph Lameter Acked-by: David Rientjes Signed-off-by: Glauber Costa Signed-off-by: Pekka Enberg --- mm/slab.c | 22 ---------------------- mm/slab.h | 25 +++++++++++++++++++++++++ mm/slab_common.c | 7 +++++++ mm/slub.c | 3 --- 4 files changed, 32 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index 1f7fd5f51f87..6ebb9515a3e9 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -162,23 +162,6 @@ */ static bool pfmemalloc_active __read_mostly; -/* Legal flag mask for kmem_cache_create(). */ -#if DEBUG -# define CREATE_MASK (SLAB_RED_ZONE | \ - SLAB_POISON | SLAB_HWCACHE_ALIGN | \ - SLAB_CACHE_DMA | \ - SLAB_STORE_USER | \ - SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ - SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ - SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK) -#else -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ - SLAB_CACHE_DMA | \ - SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ - SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ - SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK) -#endif - /* * kmem_bufctl_t: * @@ -2378,11 +2361,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) if (flags & SLAB_DESTROY_BY_RCU) BUG_ON(flags & SLAB_POISON); #endif - /* - * Always checks flags, a caller might be expecting debug support which - * isn't available. - */ - BUG_ON(flags & ~CREATE_MASK); /* * Check that size is in terms of words. 
This is needed to avoid diff --git a/mm/slab.h b/mm/slab.h index 5a43c2f13621..66a62d3536c6 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -45,6 +45,31 @@ static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t siz #endif +/* Legal flag mask for kmem_cache_create(), for various configurations */ +#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \ + SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS ) + +#if defined(CONFIG_DEBUG_SLAB) +#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER) +#elif defined(CONFIG_SLUB_DEBUG) +#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ + SLAB_TRACE | SLAB_DEBUG_FREE) +#else +#define SLAB_DEBUG_FLAGS (0) +#endif + +#if defined(CONFIG_SLAB) +#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \ + SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK) +#elif defined(CONFIG_SLUB) +#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \ + SLAB_TEMPORARY | SLAB_NOTRACK) +#else +#define SLAB_CACHE_FLAGS (0) +#endif + +#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS) + int __kmem_cache_shutdown(struct kmem_cache *); struct seq_file; diff --git a/mm/slab_common.c b/mm/slab_common.c index 5fb753da6cf0..b705be7faa48 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -109,6 +109,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align if (!kmem_cache_sanity_check(name, size) == 0) goto out_locked; + /* + * Some allocators will constraint the set of valid flags to a subset + * of all flags. We expect them to define CACHE_CREATE_MASK in this + * case, and we'll just provide them with a sanitized version of the + * passed flags. + */ + flags &= CACHE_CREATE_MASK; s = __kmem_cache_alias(name, size, align, flags, ctor); if (s) diff --git a/mm/slub.c b/mm/slub.c index deee7c754a7d..b2ada3db4225 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -112,9 +112,6 @@ * the fast path and disables lockless freelists. */ -#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ - SLAB_TRACE | SLAB_DEBUG_FREE) - static inline int kmem_cache_debug(struct kmem_cache *s) { #ifdef CONFIG_SLUB_DEBUG -- cgit v1.2.3 From 789306e5ad6b3051c263ac2478875efa8bc07462 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 5 Oct 2012 16:55:20 +0200 Subject: mm/slob: use min_t() to compare ARCH_SLAB_MINALIGN The definition of ARCH_SLAB_MINALIGN is architecture dependent and can be either of type size_t or int. Comparing that value with ARCH_KMALLOC_MINALIGN can cause harmless warnings on platforms where they are different. Since both are always small positive integer numbers, using the size_t type to compare them is safe and gets rid of the warning. 
Without this patch, building ARM collie_defconfig results in: mm/slob.c: In function '__kmalloc_node': mm/slob.c:431:152: warning: comparison of distinct pointer types lacks a cast [enabled by default] mm/slob.c: In function 'kfree': mm/slob.c:484:153: warning: comparison of distinct pointer types lacks a cast [enabled by default] mm/slob.c: In function 'ksize': mm/slob.c:503:153: warning: comparison of distinct pointer types lacks a cast [enabled by default] Acked-by: Christoph Lameter Signed-off-by: Arnd Bergmann [ penberg@kernel.org: updates for master ] Signed-off-by: Pekka Enberg --- mm/slob.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/slob.c b/mm/slob.c index e7d790127e4b..87e16c4d9143 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -428,7 +428,7 @@ static __always_inline void * __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) { unsigned int *m; - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); + int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); void *ret; gfp &= gfp_allowed_mask; @@ -496,7 +496,7 @@ void kfree(const void *block) sp = virt_to_page(block); if (PageSlab(sp)) { - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); + int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); unsigned int *m = (unsigned int *)(block - align); slob_free(m, *m + align); } else @@ -519,7 +519,7 @@ size_t ksize(const void *block) if (unlikely(!PageSlab(sp))) return PAGE_SIZE << compound_order(sp); - align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); + align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); m = (unsigned int *)(block - align); return SLOB_UNITS(*m) * SLOB_UNIT; } -- cgit v1.2.3 From a755b76ab4cefe3d3aa046f3abc62b7e087336b3 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 6 Nov 2012 17:10:10 -0800 Subject: mm: fix slab.c kernel-doc warnings Fix new kernel-doc warnings in mm/slab.c: Warning(mm/slab.c:2358): No description found for parameter 'cachep' Warning(mm/slab.c:2358): Excess function parameter 'name' description in '__kmem_cache_create' Warning(mm/slab.c:2358): Excess function parameter 'size' description in '__kmem_cache_create' Warning(mm/slab.c:2358): Excess function parameter 'align' description in '__kmem_cache_create' Warning(mm/slab.c:2358): Excess function parameter 'ctor' description in '__kmem_cache_create' Signed-off-by: Randy Dunlap Cc: Christoph Lameter Cc: Pekka Enberg Cc: Matt Mackall Signed-off-by: Pekka Enberg --- mm/slab.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index 6ebb9515a3e9..e26bff5ed1a6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2314,11 +2314,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) /** * __kmem_cache_create - Create a cache. - * @name: A string which is used in /proc/slabinfo to identify this cache. - * @size: The size of objects to be created in this cache. - * @align: The required alignment for the objects. + * @cachep: cache management descriptor * @flags: SLAB flags - * @ctor: A constructor for the objects. * * Returns a ptr to the cache on success, NULL on failure. * Cannot be called within a int, but can be interrupted. 
-- cgit v1.2.3 From 59a09917c95e5209135b4f1a87f1263d6ef40fdb Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 28 Nov 2012 16:23:00 +0000 Subject: slub: Use correct cpu_slab on dead cpu Pass a kmem_cache_cpu pointer into unfreeze partials so that a different kmem_cache_cpu structure than the local one can be specified. Acked-by: David Rientjes Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index b2ada3db4225..33576b0cfc41 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1869,12 +1869,14 @@ redo: /* * Unfreeze all the cpu partial slabs. * - * This function must be called with interrupt disabled. + * This function must be called with interrupts disabled + * for the cpu using c (or some other guarantee must be there + * to guarantee no concurrent accesses). */ -static void unfreeze_partials(struct kmem_cache *s) +static void unfreeze_partials(struct kmem_cache *s, + struct kmem_cache_cpu *c) { struct kmem_cache_node *n = NULL, *n2 = NULL; - struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab); struct page *page, *discard_page = NULL; while ((page = c->partial)) { @@ -1960,7 +1962,7 @@ static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) * set to the per node partial list. */ local_irq_save(flags); - unfreeze_partials(s); + unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); local_irq_restore(flags); oldpage = NULL; pobjects = 0; @@ -2003,7 +2005,7 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) if (c->page) flush_slab(s, c); - unfreeze_partials(s); + unfreeze_partials(s, c); } } -- cgit v1.2.3 From 3c58346525d82625e68e24f071804c2dc057b6f4 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 28 Nov 2012 16:23:01 +0000 Subject: slab: Simplify bootstrap The nodelists field in kmem_cache is pointing to the first unused object in the array field when bootstrap is complete. A problem with the current approach is that the statically sized kmem_cache structure use on boot can only contain NR_CPUS entries. If the number of nodes plus the number of cpus is greater then we would overwrite memory following the kmem_cache_boot definition. Increase the size of the array field to ensure that also the node pointers fit into the array field. Once we do that we no longer need the kmem_cache_nodelists array and we can then also use that structure elsewhere. Acked-by: Glauber Costa Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- include/linux/slab_def.h | 6 +++++- mm/slab.c | 21 +++++++++++++-------- 2 files changed, 18 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index cc290f0bdb34..45c0356fdc8c 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -89,9 +89,13 @@ struct kmem_cache { * (see kmem_cache_init()) * We still use [NR_CPUS] and not [1] or [0] because cache_cache * is statically defined, so we reserve the max number of cpus. + * + * We also need to guarantee that the list is able to accomodate a + * pointer for each node since "nodelists" uses the remainder of + * available pointers. 
*/ struct kmem_list3 **nodelists; - struct array_cache *array[NR_CPUS]; + struct array_cache *array[NR_CPUS + MAX_NUMNODES]; /* * Do not add fields after array[] */ diff --git a/mm/slab.c b/mm/slab.c index e26bff5ed1a6..c7ea5234c4e9 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -553,9 +553,7 @@ static struct arraycache_init initarray_generic = { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; /* internal cache of cache description objs */ -static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES]; static struct kmem_cache kmem_cache_boot = { - .nodelists = kmem_cache_nodelists, .batchcount = 1, .limit = BOOT_CPUCACHE_ENTRIES, .shared = 1, @@ -1559,6 +1557,15 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index) } } +/* + * The memory after the last cpu cache pointer is used for the + * the nodelists pointer. + */ +static void setup_nodelists_pointer(struct kmem_cache *cachep) +{ + cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; +} + /* * Initialisation. Called after the page allocator have been initialised and * before smp_init(). @@ -1573,15 +1580,14 @@ void __init kmem_cache_init(void) int node; kmem_cache = &kmem_cache_boot; + setup_nodelists_pointer(kmem_cache); if (num_possible_nodes() == 1) use_alien_caches = 0; - for (i = 0; i < NUM_INIT_LISTS; i++) { + for (i = 0; i < NUM_INIT_LISTS; i++) kmem_list3_init(&initkmem_list3[i]); - if (i < MAX_NUMNODES) - kmem_cache->nodelists[i] = NULL; - } + set_up_list3s(kmem_cache, CACHE_CACHE); /* @@ -1619,7 +1625,6 @@ void __init kmem_cache_init(void) list_add(&kmem_cache->list, &slab_caches); kmem_cache->colour_off = cache_line_size(); kmem_cache->array[smp_processor_id()] = &initarray_cache.cache; - kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node]; /* * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids @@ -2422,7 +2427,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) else gfp = GFP_NOWAIT; - cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids]; + setup_nodelists_pointer(cachep); #if DEBUG /* -- cgit v1.2.3 From 45530c4474d258b822e2639c786606d8257aad8b Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 28 Nov 2012 16:23:07 +0000 Subject: mm, sl[au]b: create common functions for boot slab creation Use a special function to create kmalloc caches and use that function in SLAB and SLUB. Acked-by: Joonsoo Kim Reviewed-by: Glauber Costa Acked-by: David Rientjes Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 48 ++++++++++++++---------------------------------- mm/slab.h | 5 +++++ mm/slab_common.c | 36 ++++++++++++++++++++++++++++++++++++ mm/slub.c | 37 +++++-------------------------------- 4 files changed, 60 insertions(+), 66 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index c7ea5234c4e9..e351acea6026 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1659,23 +1659,13 @@ void __init kmem_cache_init(void) * bug. 
*/ - sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name; - sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size; - sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size; - sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN; - __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); - list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches); - - if (INDEX_AC != INDEX_L3) { - sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name; - sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size; - sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size; - sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN; - __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); - list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches); - } + sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name, + sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS); + + if (INDEX_AC != INDEX_L3) + sizes[INDEX_L3].cs_cachep = + create_kmalloc_cache(names[INDEX_L3].name, + sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS); slab_early_init = 0; @@ -1687,24 +1677,14 @@ void __init kmem_cache_init(void) * Note for systems short on memory removing the alignment will * allow tighter packing of the smaller caches. */ - if (!sizes->cs_cachep) { - sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - sizes->cs_cachep->name = names->name; - sizes->cs_cachep->size = sizes->cs_size; - sizes->cs_cachep->object_size = sizes->cs_size; - sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN; - __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC); - list_add(&sizes->cs_cachep->list, &slab_caches); - } + if (!sizes->cs_cachep) + sizes->cs_cachep = create_kmalloc_cache(names->name, + sizes->cs_size, ARCH_KMALLOC_FLAGS); + #ifdef CONFIG_ZONE_DMA - sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - sizes->cs_dmacachep->name = names->name_dma; - sizes->cs_dmacachep->size = sizes->cs_size; - sizes->cs_dmacachep->object_size = sizes->cs_size; - sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN; - __kmem_cache_create(sizes->cs_dmacachep, - ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC); - list_add(&sizes->cs_dmacachep->list, &slab_caches); + sizes->cs_dmacachep = create_kmalloc_cache( + names->name_dma, sizes->cs_size, + SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS); #endif sizes++; names++; diff --git a/mm/slab.h b/mm/slab.h index 66a62d3536c6..492eafa0b538 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -35,6 +35,11 @@ extern struct kmem_cache *kmem_cache; /* Functions provided by the slab allocators */ extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); +extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size, + unsigned long flags); +extern void create_boot_cache(struct kmem_cache *, const char *name, + size_t size, unsigned long flags); + #ifdef CONFIG_SLUB struct kmem_cache *__kmem_cache_alias(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)); diff --git a/mm/slab_common.c b/mm/slab_common.c index b705be7faa48..497b45c25bae 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -202,6 +202,42 @@ int slab_is_available(void) return slab_state >= UP; } +#ifndef CONFIG_SLOB +/* Create a cache during boot when no slab services are available yet */ +void __init create_boot_cache(struct kmem_cache *s, const char *name, 
size_t size, + unsigned long flags) +{ + int err; + + s->name = name; + s->size = s->object_size = size; + s->align = ARCH_KMALLOC_MINALIGN; + err = __kmem_cache_create(s, flags); + + if (err) + panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n", + name, size, err); + + s->refcount = -1; /* Exempt from merging for now */ +} + +struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, + unsigned long flags) +{ + struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); + + if (!s) + panic("Out of memory when creating slab %s\n", name); + + create_boot_cache(s, name, size, flags); + list_add(&s->list, &slab_caches); + s->refcount = 1; + return s; +} + +#endif /* !CONFIG_SLOB */ + + #ifdef CONFIG_SLABINFO static void print_slabinfo_header(struct seq_file *m) { diff --git a/mm/slub.c b/mm/slub.c index 33576b0cfc41..1be172c157c3 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3245,32 +3245,6 @@ static int __init setup_slub_nomerge(char *str) __setup("slub_nomerge", setup_slub_nomerge); -static struct kmem_cache *__init create_kmalloc_cache(const char *name, - int size, unsigned int flags) -{ - struct kmem_cache *s; - - s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - - s->name = name; - s->size = s->object_size = size; - s->align = ARCH_KMALLOC_MINALIGN; - - /* - * This function is called with IRQs disabled during early-boot on - * single CPU so there's no need to take slab_mutex here. - */ - if (kmem_cache_open(s, flags)) - goto panic; - - list_add(&s->list, &slab_caches); - return s; - -panic: - panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); - return NULL; -} - /* * Conversion table for small slabs sizes / 8 to the index in the * kmalloc array. This is necessary for slabs < 192 since we have non power @@ -3948,6 +3922,10 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) if (err) return err; + /* Mutex is not taken during early boot */ + if (slab_state <= UP) + return 0; + mutex_unlock(&slab_mutex); err = sysfs_slab_add(s); mutex_lock(&slab_mutex); @@ -5249,13 +5227,8 @@ static int sysfs_slab_add(struct kmem_cache *s) { int err; const char *name; - int unmergeable; - - if (slab_state < FULL) - /* Defer until later */ - return 0; + int unmergeable = slab_unmergeable(s); - unmergeable = slab_unmergeable(s); if (unmergeable) { /* * Slabcache can never be merged so we can use the name proper. -- cgit v1.2.3 From dffb4d605c23110e3ad54b8c9f244a8235c013c2 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 28 Nov 2012 16:23:07 +0000 Subject: slub: Use statically allocated kmem_cache boot structure for bootstrap Simplify bootstrap by statically allocated two kmem_cache structures. These are freed after bootup is complete. Allows us to no longer worry about calculations of sizes of kmem_cache structures during bootstrap. 
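The bootstrap trick itself is generic: start from a statically allocated descriptor so the allocator can run at all, then, once allocation works, copy it into properly allocated storage and use the copy from then on. Below is a small userspace sketch of that sequence under invented names; it illustrates the idea and is not the kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct model_cache {
        const char *name;
        unsigned int object_size;
};

static struct model_cache *bootstrap(const struct model_cache *static_cache)
{
        /* "the allocator" works now, so make the permanent copy */
        struct model_cache *s = malloc(sizeof(*s));

        if (!s)
                abort();
        memcpy(s, static_cache, sizeof(*s));
        return s;
}

int main(void)
{
        /* static placeholder usable before any dynamic allocation exists */
        static struct model_cache boot_cache = {
                .name = "model_cache", .object_size = 64,
        };
        struct model_cache *cache = bootstrap(&boot_cache);

        printf("final cache %s lives at %p\n", cache->name, (void *)cache);
        free(cache);
        return 0;
}
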
Reviewed-by: Glauber Costa Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slub.c | 67 +++++++++++++++++++-------------------------------------------- 1 file changed, 20 insertions(+), 47 deletions(-) (limited to 'mm') diff --git a/mm/slub.c b/mm/slub.c index 1be172c157c3..c82453ac812a 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -176,8 +176,6 @@ static inline int kmem_cache_debug(struct kmem_cache *s) #define __OBJECT_POISON 0x80000000UL /* Poison object */ #define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */ -static int kmem_size = sizeof(struct kmem_cache); - #ifdef CONFIG_SMP static struct notifier_block slab_notifier; #endif @@ -3634,15 +3632,16 @@ static int slab_memory_callback(struct notifier_block *self, /* * Used for early kmem_cache structures that were allocated using - * the page allocator + * the page allocator. Allocate them properly then fix up the pointers + * that may be pointing to the wrong kmem_cache structure. */ -static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) +static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache) { int node; + struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); - list_add(&s->list, &slab_caches); - s->refcount = -1; + memcpy(s, static_cache, kmem_cache->object_size); for_each_node_state(node, N_NORMAL_MEMORY) { struct kmem_cache_node *n = get_node(s, node); @@ -3658,70 +3657,44 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s) #endif } } + list_add(&s->list, &slab_caches); + return s; } void __init kmem_cache_init(void) { + static __initdata struct kmem_cache boot_kmem_cache, + boot_kmem_cache_node; int i; - int caches = 0; - struct kmem_cache *temp_kmem_cache; - int order; - struct kmem_cache *temp_kmem_cache_node; - unsigned long kmalloc_size; + int caches = 2; if (debug_guardpage_minorder()) slub_max_order = 0; - kmem_size = offsetof(struct kmem_cache, node) + - nr_node_ids * sizeof(struct kmem_cache_node *); - - /* Allocate two kmem_caches from the page allocator */ - kmalloc_size = ALIGN(kmem_size, cache_line_size()); - order = get_order(2 * kmalloc_size); - kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order); + kmem_cache_node = &boot_kmem_cache_node; + kmem_cache = &boot_kmem_cache; - /* - * Must first have the slab cache available for the allocations of the - * struct kmem_cache_node's. There is special bootstrap code in - * kmem_cache_open for slab_state == DOWN. - */ - kmem_cache_node = (void *)kmem_cache + kmalloc_size; - - kmem_cache_node->name = "kmem_cache_node"; - kmem_cache_node->size = kmem_cache_node->object_size = - sizeof(struct kmem_cache_node); - kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + create_boot_cache(kmem_cache_node, "kmem_cache_node", + sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN); hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); /* Able to allocate the per node structures */ slab_state = PARTIAL; - temp_kmem_cache = kmem_cache; - kmem_cache->name = "kmem_cache"; - kmem_cache->size = kmem_cache->object_size = kmem_size; - kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC); + create_boot_cache(kmem_cache, "kmem_cache", + offsetof(struct kmem_cache, node) + + nr_node_ids * sizeof(struct kmem_cache_node *), + SLAB_HWCACHE_ALIGN); - kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); - memcpy(kmem_cache, temp_kmem_cache, kmem_size); + kmem_cache = bootstrap(&boot_kmem_cache); /* * Allocate kmem_cache_node properly from the kmem_cache slab. 
* kmem_cache_node is separately allocated so no need to * update any list pointers. */ - temp_kmem_cache_node = kmem_cache_node; - - kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT); - memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size); - - kmem_cache_bootstrap_fixup(kmem_cache_node); - - caches++; - kmem_cache_bootstrap_fixup(kmem_cache); - caches++; - /* Free temporary boot structure */ - free_pages((unsigned long)temp_kmem_cache, order); + kmem_cache_node = bootstrap(&boot_kmem_cache_node); /* Now we can use the kmem_cache to allocate kmalloc slabs */ -- cgit v1.2.3 From 2f9baa9fcf8d0a204ca129a671d6086cc100faab Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 28 Nov 2012 16:23:09 +0000 Subject: slab: Use the new create_boot_cache function to simplify bootstrap Simplify setup and reduce code in kmem_cache_init(). This allows us to get rid of initarray_cache as well as the manual setup code for the kmem_cache and kmem_cache_node arrays during bootstrap. We introduce a new bootstrap state "PARTIAL" for slab that signals the creation of a kmem_cache boot cache. Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 49 ++++++++++++++++--------------------------------- 1 file changed, 16 insertions(+), 33 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index e351acea6026..e1790e56fd86 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -547,8 +547,6 @@ static struct cache_names __initdata cache_names[] = { #undef CACHE }; -static struct arraycache_init initarray_cache __initdata = - { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; static struct arraycache_init initarray_generic = { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; @@ -1572,12 +1570,9 @@ static void setup_nodelists_pointer(struct kmem_cache *cachep) */ void __init kmem_cache_init(void) { - size_t left_over; struct cache_sizes *sizes; struct cache_names *names; int i; - int order; - int node; kmem_cache = &kmem_cache_boot; setup_nodelists_pointer(kmem_cache); @@ -1618,36 +1613,16 @@ void __init kmem_cache_init(void) * 6) Resize the head arrays of the kmalloc caches to their final sizes. 
*/ - node = numa_mem_id(); - /* 1) create the kmem_cache */ - INIT_LIST_HEAD(&slab_caches); - list_add(&kmem_cache->list, &slab_caches); - kmem_cache->colour_off = cache_line_size(); - kmem_cache->array[smp_processor_id()] = &initarray_cache.cache; /* * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids */ - kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) + - nr_node_ids * sizeof(struct kmem_list3 *); - kmem_cache->object_size = kmem_cache->size; - kmem_cache->size = ALIGN(kmem_cache->object_size, - cache_line_size()); - kmem_cache->reciprocal_buffer_size = - reciprocal_value(kmem_cache->size); - - for (order = 0; order < MAX_ORDER; order++) { - cache_estimate(order, kmem_cache->size, - cache_line_size(), 0, &left_over, &kmem_cache->num); - if (kmem_cache->num) - break; - } - BUG_ON(!kmem_cache->num); - kmem_cache->gfporder = order; - kmem_cache->colour = left_over / kmem_cache->colour_off; - kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) + - sizeof(struct slab), cache_line_size()); + create_boot_cache(kmem_cache, "kmem_cache", + offsetof(struct kmem_cache, array[nr_cpu_ids]) + + nr_node_ids * sizeof(struct kmem_list3 *), + SLAB_HWCACHE_ALIGN); + list_add(&kmem_cache->list, &slab_caches); /* 2+3) create the kmalloc caches */ sizes = malloc_sizes; @@ -1695,7 +1670,6 @@ void __init kmem_cache_init(void) ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); - BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache); memcpy(ptr, cpu_cache_get(kmem_cache), sizeof(struct arraycache_init)); /* @@ -2250,7 +2224,15 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) if (slab_state == DOWN) { /* - * Note: the first kmem_cache_create must create the cache + * Note: Creation of first cache (kmem_cache). + * The setup_list3s is taken care + * of by the caller of __kmem_cache_create + */ + cachep->array[smp_processor_id()] = &initarray_generic.cache; + slab_state = PARTIAL; + } else if (slab_state == PARTIAL) { + /* + * Note: the second kmem_cache_create must create the cache * that's used by kmalloc(24), otherwise the creation of * further caches will BUG(). */ @@ -2258,7 +2240,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) /* * If the cache that's used by kmalloc(sizeof(kmem_list3)) is - * the first cache, then we need to set up all its list3s, + * the second cache, then we need to set up all its list3s, * otherwise the creation of further caches will BUG(). */ set_up_list3s(cachep, SIZE_AC); @@ -2267,6 +2249,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) else slab_state = PARTIAL_ARRAYCACHE; } else { + /* Remaining boot caches */ cachep->array[smp_processor_id()] = kmalloc(sizeof(struct arraycache_init), gfp); -- cgit v1.2.3 From 4590685546a374fb0f60682ce0e3a6fd48911d46 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 28 Nov 2012 16:23:16 +0000 Subject: mm/sl[aou]b: Common alignment code Extract the code to do object alignment from the allocators. Do the alignment calculations in slab_common so that the __kmem_cache_create functions of the allocators do not have to deal with alignment. 
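The alignment rule being centralized here is small enough to demonstrate on its own. The sketch below mirrors the calculate_alignment() helper added to slab_common.c in the diff that follows; MODEL_CACHE_LINE_SIZE and MODEL_ARCH_SLAB_MINALIGN are assumed stand-ins for the architecture-provided cache_line_size() and ARCH_SLAB_MINALIGN. The rule: hardware-cache alignment is honored only once an object is big enough to make it worthwhile, a caller-requested alignment is never reduced, and the result is always a multiple of sizeof(void *).

/*
 * Userspace model of the common slab alignment calculation
 * (constants are assumed values, not taken from any real arch).
 */
#include <stdio.h>

#define MODEL_SLAB_HWCACHE_ALIGN	0x1UL
#define MODEL_CACHE_LINE_SIZE		64UL	/* stand-in for cache_line_size() */
#define MODEL_ARCH_SLAB_MINALIGN	8UL	/* stand-in for ARCH_SLAB_MINALIGN */

#define MODEL_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

static unsigned long calculate_alignment(unsigned long flags,
					 unsigned long align,
					 unsigned long size)
{
	if (flags & MODEL_SLAB_HWCACHE_ALIGN) {
		/* Halve the cache line until the object would fill it. */
		unsigned long ralign = MODEL_CACHE_LINE_SIZE;

		while (size <= ralign / 2)
			ralign /= 2;
		align = max_ul(align, ralign);
	}

	if (align < MODEL_ARCH_SLAB_MINALIGN)
		align = MODEL_ARCH_SLAB_MINALIGN;

	/* Always at least pointer aligned. */
	return MODEL_ALIGN(align, sizeof(void *));
}

int main(void)
{
	/* Small object: cache-line alignment relaxes from 64 to 32 bytes. */
	printf("size  24, HWCACHE_ALIGN: %lu\n",
	       calculate_alignment(MODEL_SLAB_HWCACHE_ALIGN, 0, 24));
	/* Large object: keeps the full 64-byte cache line. */
	printf("size 200, HWCACHE_ALIGN: %lu\n",
	       calculate_alignment(MODEL_SLAB_HWCACHE_ALIGN, 0, 200));
	/* No flag: only the minimum alignment applies. */
	printf("size  24, no flag:       %lu\n",
	       calculate_alignment(0, 0, 24));
	return 0;
}

With these assumed constants the output is 32, 64 and 8 respectively, matching the SLUB calculate_alignment() that this patch lifts into slab_common.c.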
Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg --- mm/slab.c | 20 -------------------- mm/slab.h | 3 +++ mm/slab_common.c | 32 ++++++++++++++++++++++++++++++-- mm/slob.c | 10 ---------- mm/slub.c | 38 +------------------------------------- 5 files changed, 34 insertions(+), 69 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index e1790e56fd86..2c3a2e0394db 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2337,22 +2337,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) size &= ~(BYTES_PER_WORD - 1); } - /* calculate the final buffer alignment: */ - - /* 1) arch recommendation: can be overridden for debug */ - if (flags & SLAB_HWCACHE_ALIGN) { - /* - * Default alignment: as specified by the arch code. Except if - * an object is really small, then squeeze multiple objects into - * one cacheline. - */ - ralign = cache_line_size(); - while (size <= ralign / 2) - ralign /= 2; - } else { - ralign = BYTES_PER_WORD; - } - /* * Redzoning and user store require word alignment or possibly larger. * Note this will be overridden by architecture or caller mandated @@ -2369,10 +2353,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) size &= ~(REDZONE_ALIGN - 1); } - /* 2) arch mandated alignment */ - if (ralign < ARCH_SLAB_MINALIGN) { - ralign = ARCH_SLAB_MINALIGN; - } /* 3) caller mandated alignment */ if (ralign < cachep->align) { ralign = cachep->align; diff --git a/mm/slab.h b/mm/slab.h index 492eafa0b538..1cb9c9ee0e6f 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -32,6 +32,9 @@ extern struct list_head slab_caches; /* The slab cache that manages slab cache information */ extern struct kmem_cache *kmem_cache; +unsigned long calculate_alignment(unsigned long flags, + unsigned long align, unsigned long size); + /* Functions provided by the slab allocators */ extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); diff --git a/mm/slab_common.c b/mm/slab_common.c index 497b45c25bae..a8e76d79ee65 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -72,6 +72,34 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size) } #endif +/* + * Figure out what the alignment of the objects will be given a set of + * flags, a user specified alignment and the size of the objects. + */ +unsigned long calculate_alignment(unsigned long flags, + unsigned long align, unsigned long size) +{ + /* + * If the user wants hardware cache aligned objects then follow that + * suggestion if the object is sufficiently large. + * + * The hardware cache alignment cannot override the specified + * alignment though. If that is greater then use it. + */ + if (flags & SLAB_HWCACHE_ALIGN) { + unsigned long ralign = cache_line_size(); + while (size <= ralign / 2) + ralign /= 2; + align = max(align, ralign); + } + + if (align < ARCH_SLAB_MINALIGN) + align = ARCH_SLAB_MINALIGN; + + return ALIGN(align, sizeof(void *)); +} + + /* * kmem_cache_create - Create a cache. * @name: A string which is used in /proc/slabinfo to identify this cache. 
@@ -124,7 +152,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); if (s) { s->object_size = s->size = size; - s->align = align; + s->align = calculate_alignment(flags, align, size); s->ctor = ctor; s->name = kstrdup(name, GFP_KERNEL); if (!s->name) { @@ -211,7 +239,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz s->name = name; s->size = s->object_size = size; - s->align = ARCH_KMALLOC_MINALIGN; + s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size); err = __kmem_cache_create(s, flags); if (err) diff --git a/mm/slob.c b/mm/slob.c index 87e16c4d9143..795bab7d391d 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -123,7 +123,6 @@ static inline void clear_slob_page_free(struct page *sp) #define SLOB_UNIT sizeof(slob_t) #define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT) -#define SLOB_ALIGN L1_CACHE_BYTES /* * struct slob_rcu is inserted at the tail of allocated slob blocks, which @@ -527,20 +526,11 @@ EXPORT_SYMBOL(ksize); int __kmem_cache_create(struct kmem_cache *c, unsigned long flags) { - size_t align = c->size; - if (flags & SLAB_DESTROY_BY_RCU) { /* leave room for rcu footer at the end of object */ c->size += sizeof(struct slob_rcu); } c->flags = flags; - /* ignore alignment unless it's forced */ - c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0; - if (c->align < ARCH_SLAB_MINALIGN) - c->align = ARCH_SLAB_MINALIGN; - if (c->align < align) - c->align = align; - return 0; } diff --git a/mm/slub.c b/mm/slub.c index c82453ac812a..9640edd2cc78 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2760,32 +2760,6 @@ static inline int calculate_order(int size, int reserved) return -ENOSYS; } -/* - * Figure out what the alignment of the objects will be. - */ -static unsigned long calculate_alignment(unsigned long flags, - unsigned long align, unsigned long size) -{ - /* - * If the user wants hardware cache aligned objects then follow that - * suggestion if the object is sufficiently large. - * - * The hardware cache alignment cannot override the specified - * alignment though. If that is greater then use it. - */ - if (flags & SLAB_HWCACHE_ALIGN) { - unsigned long ralign = cache_line_size(); - while (size <= ralign / 2) - ralign /= 2; - align = max(align, ralign); - } - - if (align < ARCH_SLAB_MINALIGN) - align = ARCH_SLAB_MINALIGN; - - return ALIGN(align, sizeof(void *)); -} - static void init_kmem_cache_node(struct kmem_cache_node *n) { @@ -2919,7 +2893,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) { unsigned long flags = s->flags; unsigned long size = s->object_size; - unsigned long align = s->align; int order; /* @@ -2990,20 +2963,12 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) size += sizeof(void *); #endif - /* - * Determine the alignment based on various parameters that the - * user specified and the dynamic determination of cache line size - * on bootup. - */ - align = calculate_alignment(flags, align, s->object_size); - s->align = align; - /* * SLUB stores one object immediately after another beginning from * offset 0. In order to align the objects we have to simply size * each object to conform to the alignment. 
*/ - size = ALIGN(size, align); + size = ALIGN(size, s->align); s->size = size; if (forced_order >= 0) order = forced_order; @@ -3032,7 +2997,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) s->max = s->oo; return !!oo_objects(s->oo); - } static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) -- cgit v1.2.3
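To see why it is enough for SLUB to round the object size up to the alignment, consider this standalone sketch (sizes chosen purely for illustration): once size = ALIGN(size, align), every slot offset i * size inside an aligned slab is itself a multiple of align, so packing objects back to back from offset 0 needs no per-object padding.

/*
 * Userspace illustration of SLUB-style object packing: sizing each
 * object to the alignment keeps every back-to-back slot aligned.
 */
#include <assert.h>
#include <stdio.h>

#define MODEL_ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long object_size = 24;	/* payload the caller asked for */
	unsigned long align = 32;	/* result of the alignment calculation */
	unsigned long size = MODEL_ALIGN(object_size, align);
	unsigned long i;

	printf("object_size=%lu align=%lu -> slot size=%lu\n",
	       object_size, align, size);

	/*
	 * If the slab page itself is suitably aligned, every offset
	 * i * size is a multiple of 'align'.
	 */
	for (i = 0; i < 4; i++) {
		unsigned long offset = i * size;

		assert(offset % align == 0);
		printf("slot %lu at offset %lu\n", i, offset);
	}
	return 0;
}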