
Commit c9fc586

htejun authored and torvalds committed
slab: introduce __kmemcg_cache_deactivate()
__kmem_cache_shrink() is called with %true @deactivate only for memcg caches. Remove @deactivate from __kmem_cache_shrink() and introduce __kmemcg_cache_deactivate() instead. Each memcg-supporting allocator should implement it, and it should deactivate and drain the cache.

This is to allow memcg cache deactivation behavior to further deviate from simple shrinking without messing up __kmem_cache_shrink(). This is pure reorganization and doesn't introduce any observable behavior changes.

v2: Dropped unnecessary ifdef in mm/slab.h as suggested by Vladimir.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Tejun Heo <[email protected]>
Acked-by: Vladimir Davydov <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
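As a rough sketch of the contract the new hook imposes (names taken from the diffs below; the body is illustrative, not part of this commit), a memcg-supporting allocator with no extra deactivation work can satisfy __kmemcg_cache_deactivate() by simply draining the cache, while an allocator like SLUB performs its deactivation steps first and shrinks last:

#ifdef CONFIG_MEMCG
/* Minimal per-allocator hook: any allocator-specific deactivation
 * (e.g. SLUB disabling empty-slab caching) would go before the drain. */
void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
{
        __kmem_cache_shrink(cachep);    /* deactivate, then drain */
}
#endif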
1 parent 510ded3 commit c9fc586

File tree

5 files changed: +36 −23 lines changed


mm/slab.c

Lines changed: 9 additions & 2 deletions
@@ -2315,7 +2315,7 @@ static int drain_freelist(struct kmem_cache *cache,
         return nr_freed;
 }
 
-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *cachep)
 {
         int ret = 0;
         int node;
@@ -2333,9 +2333,16 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
         return (ret ? 1 : 0);
 }
 
+#ifdef CONFIG_MEMCG
+void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
+{
+        __kmem_cache_shrink(cachep);
+}
+#endif
+
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-        return __kmem_cache_shrink(cachep, false);
+        return __kmem_cache_shrink(cachep);
 }
 
 void __kmem_cache_release(struct kmem_cache *cachep)

mm/slab.h

Lines changed: 2 additions & 1 deletion
@@ -167,7 +167,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 int __kmem_cache_shutdown(struct kmem_cache *);
 void __kmem_cache_release(struct kmem_cache *);
-int __kmem_cache_shrink(struct kmem_cache *, bool);
+int __kmem_cache_shrink(struct kmem_cache *);
+void __kmemcg_cache_deactivate(struct kmem_cache *s);
 void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
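Per the v2 note in the commit message, the prototype above is deliberately left outside any CONFIG_MEMCG guard; only the definitions in mm/slab.c and mm/slub.c are conditional. A sketch of the resulting layout (illustrative, not part of the diff):

/* mm/slab.h: prototype visible unconditionally, so no ifdef is needed here */
void __kmemcg_cache_deactivate(struct kmem_cache *s);

/* mm/slab.c, mm/slub.c: definitions built only when CONFIG_MEMCG is enabled */
#ifdef CONFIG_MEMCG
void __kmemcg_cache_deactivate(struct kmem_cache *s)
{
        __kmem_cache_shrink(s);         /* allocator-specific work, then drain */
}
#endif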

mm/slab_common.c

Lines changed: 2 additions & 2 deletions
@@ -646,7 +646,7 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
                 if (!c)
                         continue;
 
-                __kmem_cache_shrink(c, true);
+                __kmemcg_cache_deactivate(c);
                 arr->entries[idx] = NULL;
         }
         mutex_unlock(&slab_mutex);
@@ -794,7 +794,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
         get_online_cpus();
         get_online_mems();
         kasan_cache_shrink(cachep);
-        ret = __kmem_cache_shrink(cachep, false);
+        ret = __kmem_cache_shrink(cachep);
         put_online_mems();
         put_online_cpus();
         return ret;

mm/slob.c

Lines changed: 1 addition & 1 deletion
@@ -634,7 +634,7 @@ void __kmem_cache_release(struct kmem_cache *c)
 {
 }
 
-int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *d)
 {
         return 0;
 }

mm/slub.c

Lines changed: 22 additions & 17 deletions
@@ -3894,7 +3894,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
         int node;
         int i;
@@ -3906,21 +3906,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
         unsigned long flags;
         int ret = 0;
 
-        if (deactivate) {
-                /*
-                 * Disable empty slabs caching. Used to avoid pinning offline
-                 * memory cgroups by kmem pages that can be freed.
-                 */
-                s->cpu_partial = 0;
-                s->min_partial = 0;
-
-                /*
-                 * s->cpu_partial is checked locklessly (see put_cpu_partial),
-                 * so we have to make sure the change is visible.
-                 */
-                synchronize_sched();
-        }
-
         flush_all(s);
         for_each_kmem_cache_node(s, node, n) {
                 INIT_LIST_HEAD(&discard);
@@ -3971,13 +3956,33 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
         return ret;
 }
 
+#ifdef CONFIG_MEMCG
+void __kmemcg_cache_deactivate(struct kmem_cache *s)
+{
+        /*
+         * Disable empty slabs caching. Used to avoid pinning offline
+         * memory cgroups by kmem pages that can be freed.
+         */
+        s->cpu_partial = 0;
+        s->min_partial = 0;
+
+        /*
+         * s->cpu_partial is checked locklessly (see put_cpu_partial), so
+         * we have to make sure the change is visible.
+         */
+        synchronize_sched();
+
+        __kmem_cache_shrink(s);
+}
+#endif
+
 static int slab_mem_going_offline_callback(void *arg)
 {
         struct kmem_cache *s;
 
         mutex_lock(&slab_mutex);
         list_for_each_entry(s, &slab_caches, list)
-                __kmem_cache_shrink(s, false);
+                __kmem_cache_shrink(s);
         mutex_unlock(&slab_mutex);
 
         return 0;
