
Commit 11192d9

shakeelb authored and torvalds committed
memcg: flush stats only if updated
At the moment, the kernel flushes the memcg stats on every refault and on every reclaim iteration. Although rstat maintains a per-CPU update tree, on every flush the kernel still has to walk all the per-CPU update trees to check whether there is anything to flush. This patch adds tracking on the stats update side so the flush side can skip the flush entirely when there are no pending updates.

The stats update codepath is performance sensitive for many workloads and benchmarks, so we cannot do what commit aa48e47 ("memcg: infrastructure to flush memcg stats") did, i.e. trigger an async flush through queue_work(); that caused a lot of performance regression reports and was reverted by commit 1f82822 ("memcg: flush lruvec stats in the refault"). Instead, this patch keeps the stats update codepath minimal and lets the stats reader side flush only when the number of updates crosses a specific threshold, currently (nr_cpus * CHARGE_BATCH).

To evaluate the impact of this patch, an 8 GiB tmpfs file was created on a system with swap-on-zram and pushed to swap through the memory.force_empty interface. Reading the whole file back triggers the memcg stat flush in the refault code path. With this patch, we observed a 63% reduction in the read time of the 8 GiB file.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Shakeel Butt <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Reviewed-by: "Michal Koutný" <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 48384b0 commit 11192d9
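
The scheme is easy to see in isolation. Below is a minimal userspace sketch of the same pattern, not the kernel code: _Thread_local stands in for per-cpu variables, an atomic_flag for stats_flush_lock, and CHARGE_BATCH/NR_CPUS are illustrative constants standing in for MEMCG_CHARGE_BATCH and num_online_cpus(). The helper names (stat_updated, do_flush, maybe_flush) are hypothetical. The point is that the update side pays only a thread-local increment, plus one shared atomic per batch, while the reader flushes only once enough batches have accumulated.

/*
 * Illustrative userspace analogue of the update/flush scheme in this
 * commit -- a sketch, not the kernel implementation. Build with -std=c11.
 */
#include <stdatomic.h>
#include <stdio.h>

#define CHARGE_BATCH 32 /* stands in for MEMCG_CHARGE_BATCH */
#define NR_CPUS 8       /* stands in for num_online_cpus() */

/* per-thread counter, analogous to the per-cpu stats_updates */
static _Thread_local unsigned int stats_updates;
/* global count of pending update batches */
static atomic_int stats_flush_threshold;
/* stands in for stats_flush_lock; trylock semantics via test-and-set */
static atomic_flag flush_lock = ATOMIC_FLAG_INIT;

static void stat_updated(void)
{
        /* update side: one local increment, one shared atomic per batch */
        if (!(++stats_updates % CHARGE_BATCH))
                atomic_fetch_add(&stats_flush_threshold, 1);
}

static void do_flush(void)
{
        /* a flusher that loses the race skips instead of waiting */
        if (atomic_flag_test_and_set(&flush_lock))
                return;
        printf("flushing stats\n"); /* the expensive rstat walk goes here */
        atomic_store(&stats_flush_threshold, 0);
        atomic_flag_clear(&flush_lock);
}

static void maybe_flush(void)
{
        /* reader side: flush only if enough updates have accumulated */
        if (atomic_load(&stats_flush_threshold) > NR_CPUS)
                do_flush();
}

int main(void)
{
        for (int i = 0; i < 10000; i++)
                stat_updated();
        maybe_flush(); /* flushes: 10000/32 = 312 pending batches > NR_CPUS */
        maybe_flush(); /* no-op: the batch counter was just reset */
        return 0;
}

The kernel version in the diff below does the same with per-cpu primitives (__this_cpu_inc_return()) and a spin_trylock().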

File tree: 1 file changed, +55 −23 lines

mm/memcontrol.c

Lines changed: 55 additions & 23 deletions
@@ -103,11 +103,6 @@ static bool do_memsw_account(void)
         return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
 }
 
-/* memcg and lruvec stats flushing */
-static void flush_memcg_stats_dwork(struct work_struct *w);
-static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
-static DEFINE_SPINLOCK(stats_flush_lock);
-
 #define THRESHOLDS_EVENTS_TARGET 128
 #define SOFTLIMIT_EVENTS_TARGET 1024
 
@@ -635,6 +630,56 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
         return mz;
 }
 
+/*
+ * memcg and lruvec stats flushing
+ *
+ * Many codepaths leading to stats update or read are performance sensitive and
+ * adding stats flushing in such codepaths is not desirable. So, to optimize the
+ * flushing the kernel does:
+ *
+ * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
+ *    rstat update tree grow unbounded.
+ *
+ * 2) Flush the stats synchronously on reader side only when there are more than
+ *    (MEMCG_CHARGE_BATCH * nr_cpus) update events. Though this optimization
+ *    will let stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) but
+ *    only for 2 seconds due to (1).
+ */
+static void flush_memcg_stats_dwork(struct work_struct *w);
+static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
+static DEFINE_SPINLOCK(stats_flush_lock);
+static DEFINE_PER_CPU(unsigned int, stats_updates);
+static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
+
+static inline void memcg_rstat_updated(struct mem_cgroup *memcg)
+{
+        cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
+        if (!(__this_cpu_inc_return(stats_updates) % MEMCG_CHARGE_BATCH))
+                atomic_inc(&stats_flush_threshold);
+}
+
+static void __mem_cgroup_flush_stats(void)
+{
+        if (!spin_trylock(&stats_flush_lock))
+                return;
+
+        cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
+        atomic_set(&stats_flush_threshold, 0);
+        spin_unlock(&stats_flush_lock);
+}
+
+void mem_cgroup_flush_stats(void)
+{
+        if (atomic_read(&stats_flush_threshold) > num_online_cpus())
+                __mem_cgroup_flush_stats();
+}
+
+static void flush_memcg_stats_dwork(struct work_struct *w)
+{
+        mem_cgroup_flush_stats();
+        queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
+}
+
 /**
  * __mod_memcg_state - update cgroup memory statistics
  * @memcg: the memory cgroup
@@ -647,7 +692,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
                 return;
 
         __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
-        cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
+        memcg_rstat_updated(memcg);
 }
 
 /* idx can be of type enum memcg_stat_item or node_stat_item. */
@@ -675,10 +720,12 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
         memcg = pn->memcg;
 
         /* Update memcg */
-        __mod_memcg_state(memcg, idx, val);
+        __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
 
         /* Update lruvec */
         __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
+
+        memcg_rstat_updated(memcg);
 }
 
 /**
@@ -780,7 +827,7 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                 return;
 
         __this_cpu_add(memcg->vmstats_percpu->events[idx], count);
-        cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
+        memcg_rstat_updated(memcg);
 }
 
 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
@@ -5341,21 +5388,6 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
         memcg_wb_domain_size_changed(memcg);
 }
 
-void mem_cgroup_flush_stats(void)
-{
-        if (!spin_trylock(&stats_flush_lock))
-                return;
-
-        cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
-        spin_unlock(&stats_flush_lock);
-}
-
-static void flush_memcg_stats_dwork(struct work_struct *w)
-{
-        mem_cgroup_flush_stats();
-        queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
-}
-
 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 {
         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
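
To put numbers on the new threshold: each increment of stats_flush_threshold represents MEMCG_CHARGE_BATCH stat updates on some CPU, and mem_cgroup_flush_stats() only flushes once the counter exceeds num_online_cpus(). Assuming MEMCG_CHARGE_BATCH is 32 (its value around the time of this commit), an 8-CPU machine starts flushing only after more than 8 batches are pending, i.e. roughly 32 * 8 = 256 updates; per the comment added above, anything staler than that window is bounded to 2 seconds by the deferrable worker. Note also that __mem_cgroup_flush_stats() keeps the spin_trylock() from the old code, so concurrent flushers skip rather than queue behind the one already walking the rstat trees.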
