 #include <linux/page_counter.h>
 #include <linux/vmpressure.h>
 #include <linux/eventfd.h>
-#include <linux/mmzone.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
 #include <linux/writeback.h>
 #include <linux/page-flags.h>
@@ -98,11 +99,16 @@ struct mem_cgroup_reclaim_iter {
 	unsigned int generation;
 };
 
+struct lruvec_stat {
+	long count[NR_VM_NODE_STAT_ITEMS];
+};
+
 /*
  * per-zone information in memory controller.
  */
 struct mem_cgroup_per_node {
 	struct lruvec		lruvec;
+	struct lruvec_stat __percpu *lruvec_stat;
 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 
 	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];
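The new __percpu pointer implies a matching per-CPU allocation on the mm/memcontrol.c side, presumably wired into the existing per-node alloc/free paths. A minimal sketch of that pairing; the helper names here are hypothetical, not part of this hunk:

/* Sketch only: alloc_percpu()/free_percpu() are the standard kernel
 * helpers for per-CPU allocations; alloc_lruvec_stat()/free_lruvec_stat()
 * are illustrative names, not functions added by this patch.
 */
static int alloc_lruvec_stat(struct mem_cgroup_per_node *pn)
{
	pn->lruvec_stat = alloc_percpu(struct lruvec_stat);
	return pn->lruvec_stat ? 0 : -ENOMEM;
}

static void free_lruvec_stat(struct mem_cgroup_per_node *pn)
{
	free_percpu(pn->lruvec_stat);
}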
@@ -496,23 +502,18 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
 	return val;
 }
 
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
-				   enum memcg_stat_item idx, int val)
+static inline void __mod_memcg_state(struct mem_cgroup *memcg,
+				     enum memcg_stat_item idx, int val)
 {
 	if (!mem_cgroup_disabled())
-		this_cpu_add(memcg->stat->count[idx], val);
-}
-
-static inline void inc_memcg_state(struct mem_cgroup *memcg,
-				   enum memcg_stat_item idx)
-{
-	mod_memcg_state(memcg, idx, 1);
+		__this_cpu_add(memcg->stat->count[idx], val);
 }
 
-static inline void dec_memcg_state(struct mem_cgroup *memcg,
-				   enum memcg_stat_item idx)
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+				   enum memcg_stat_item idx, int val)
 {
-	mod_memcg_state(memcg, idx, -1);
+	if (!mem_cgroup_disabled())
+		this_cpu_add(memcg->stat->count[idx], val);
 }
 
 /**
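As with the vmstat counters, the double-underscored variant uses __this_cpu_add() and so relies on the caller already having preemption (or interrupts) disabled, while the plain variant is safe from any context. An illustrative pair of call sites, assuming MEMCG_SOCK as the counter; the surrounding function is hypothetical:

/* Illustrative call sites, not part of this patch. */
static void example_update(struct mem_cgroup *memcg)
{
	unsigned long flags;

	/* Process context, nothing disabled: use the safe variant. */
	mod_memcg_state(memcg, MEMCG_SOCK, 1);

	/* Caller already holds off interrupts, so the __ variant can
	 * skip the extra preemption protection of this_cpu_add().
	 */
	local_irq_save(flags);
	__mod_memcg_state(memcg, MEMCG_SOCK, -1);
	local_irq_restore(flags);
}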
@@ -532,23 +533,82 @@ static inline void dec_memcg_state(struct mem_cgroup *memcg,
  *
  * Kernel pages are an exception to this, since they'll never move.
  */
+static inline void __mod_memcg_page_state(struct page *page,
+					  enum memcg_stat_item idx, int val)
+{
+	if (page->mem_cgroup)
+		__mod_memcg_state(page->mem_cgroup, idx, val);
+}
+
 static inline void mod_memcg_page_state(struct page *page,
 					enum memcg_stat_item idx, int val)
 {
 	if (page->mem_cgroup)
 		mod_memcg_state(page->mem_cgroup, idx, val);
 }
 
-static inline void inc_memcg_page_state(struct page *page,
-					enum memcg_stat_item idx)
+static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
+					      enum node_stat_item idx)
 {
-	mod_memcg_page_state(page, idx, 1);
+	struct mem_cgroup_per_node *pn;
+	long val = 0;
+	int cpu;
+
+	if (mem_cgroup_disabled())
+		return node_page_state(lruvec_pgdat(lruvec), idx);
+
+	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+	for_each_possible_cpu(cpu)
+		val += per_cpu(pn->lruvec_stat->count[idx], cpu);
+
+	if (val < 0)
+		val = 0;
+
+	return val;
 }
 
-static inline void dec_memcg_page_state(struct page *page,
-					enum memcg_stat_item idx)
+static inline void __mod_lruvec_state(struct lruvec *lruvec,
+				      enum node_stat_item idx, int val)
 {
-	mod_memcg_page_state(page, idx, -1);
+	struct mem_cgroup_per_node *pn;
+
+	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+	if (mem_cgroup_disabled())
+		return;
+	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+	__mod_memcg_state(pn->memcg, idx, val);
+	__this_cpu_add(pn->lruvec_stat->count[idx], val);
+}
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+				    enum node_stat_item idx, int val)
+{
+	struct mem_cgroup_per_node *pn;
+
+	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+	if (mem_cgroup_disabled())
+		return;
+	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+	mod_memcg_state(pn->memcg, idx, val);
+	this_cpu_add(pn->lruvec_stat->count[idx], val);
+}
+
+static inline void __mod_lruvec_page_state(struct page *page,
+					   enum node_stat_item idx, int val)
+{
+	struct lruvec *lruvec;
+
+	lruvec = mem_cgroup_lruvec(page_pgdat(page), page->mem_cgroup);
+	__mod_lruvec_state(lruvec, idx, val);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+					 enum node_stat_item idx, int val)
+{
+	struct lruvec *lruvec;
+
+	lruvec = mem_cgroup_lruvec(page_pgdat(page), page->mem_cgroup);
+	mod_lruvec_state(lruvec, idx, val);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
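A call site that previously had to bump the node counter and the memcg counter separately can now update both, plus the new per-lruvec counter, in a single call. An illustrative sketch using NR_FILE_DIRTY; the wrapper function itself is hypothetical:

/* Illustrative only: account one page as dirty against the node,
 * the owning memcg, and its lruvec in one call.
 */
static void example_account_dirty(struct page *page)
{
	/* Replaces the old pair of calls:
	 *   mod_node_page_state(page_pgdat(page), NR_FILE_DIRTY, 1);
	 *   mod_memcg_page_state(page, NR_FILE_DIRTY, 1);
	 */
	mod_lruvec_page_state(page, NR_FILE_DIRTY, 1);
}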
@@ -777,19 +837,21 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
 	return 0;
 }
 
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
-				   enum memcg_stat_item idx,
-				   int nr)
+static inline void __mod_memcg_state(struct mem_cgroup *memcg,
+				     enum memcg_stat_item idx,
+				     int nr)
 {
 }
 
-static inline void inc_memcg_state(struct mem_cgroup *memcg,
-				   enum memcg_stat_item idx)
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+				   enum memcg_stat_item idx,
+				   int nr)
 {
 }
 
-static inline void dec_memcg_state(struct mem_cgroup *memcg,
-				   enum memcg_stat_item idx)
+static inline void __mod_memcg_page_state(struct page *page,
+					  enum memcg_stat_item idx,
+					  int nr)
 {
 }
 
@@ -799,14 +861,34 @@ static inline void mod_memcg_page_state(struct page *page,
 {
 }
 
-static inline void inc_memcg_page_state(struct page *page,
-					enum memcg_stat_item idx)
+static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
+					      enum node_stat_item idx)
 {
+	return node_page_state(lruvec_pgdat(lruvec), idx);
 }
 
-static inline void dec_memcg_page_state(struct page *page,
-					enum memcg_stat_item idx)
+static inline void __mod_lruvec_state(struct lruvec *lruvec,
+				      enum node_stat_item idx, int val)
 {
+	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+				    enum node_stat_item idx, int val)
+{
+	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void __mod_lruvec_page_state(struct page *page,
+					   enum node_stat_item idx, int val)
+{
+	__mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+					 enum node_stat_item idx, int val)
+{
+	mod_node_page_state(page_pgdat(page), idx, val);
 }
 
 static inline
@@ -838,6 +920,102 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
 }
 #endif /* CONFIG_MEMCG */
 
+static inline void __inc_memcg_state(struct mem_cgroup *memcg,
+				     enum memcg_stat_item idx)
+{
+	__mod_memcg_state(memcg, idx, 1);
+}
+
+static inline void __dec_memcg_state(struct mem_cgroup *memcg,
+				     enum memcg_stat_item idx)
+{
+	__mod_memcg_state(memcg, idx, -1);
+}
+
+static inline void __inc_memcg_page_state(struct page *page,
+					  enum memcg_stat_item idx)
+{
+	__mod_memcg_page_state(page, idx, 1);
+}
+
+static inline void __dec_memcg_page_state(struct page *page,
+					  enum memcg_stat_item idx)
+{
+	__mod_memcg_page_state(page, idx, -1);
+}
+
+static inline void __inc_lruvec_state(struct lruvec *lruvec,
+				      enum node_stat_item idx)
+{
+	__mod_lruvec_state(lruvec, idx, 1);
+}
+
+static inline void __dec_lruvec_state(struct lruvec *lruvec,
+				      enum node_stat_item idx)
+{
+	__mod_lruvec_state(lruvec, idx, -1);
+}
+
+static inline void __inc_lruvec_page_state(struct page *page,
+					   enum node_stat_item idx)
+{
+	__mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void __dec_lruvec_page_state(struct page *page,
+					   enum node_stat_item idx)
+{
+	__mod_lruvec_page_state(page, idx, -1);
+}
+
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
+				   enum memcg_stat_item idx)
+{
+	mod_memcg_state(memcg, idx, 1);
+}
+
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
+				   enum memcg_stat_item idx)
+{
+	mod_memcg_state(memcg, idx, -1);
+}
+
+static inline void inc_memcg_page_state(struct page *page,
+					enum memcg_stat_item idx)
+{
+	mod_memcg_page_state(page, idx, 1);
+}
+
+static inline void dec_memcg_page_state(struct page *page,
+					enum memcg_stat_item idx)
+{
+	mod_memcg_page_state(page, idx, -1);
+}
+
+static inline void inc_lruvec_state(struct lruvec *lruvec,
+				    enum node_stat_item idx)
+{
+	mod_lruvec_state(lruvec, idx, 1);
+}
+
+static inline void dec_lruvec_state(struct lruvec *lruvec,
+				    enum node_stat_item idx)
+{
+	mod_lruvec_state(lruvec, idx, -1);
+}
+
+static inline void inc_lruvec_page_state(struct page *page,
+					 enum node_stat_item idx)
+{
+	mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void dec_lruvec_page_state(struct page *page,
+					 enum node_stat_item idx)
+{
+	mod_lruvec_page_state(page, idx, -1);
+}
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
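Because the inc/dec wrappers sit outside the #ifdef and are built purely on the mod functions above, call sites compile unchanged whether or not CONFIG_MEMCG is enabled; with it disabled they fall back to plain node counter updates. An illustrative caller, assuming NR_WRITEBACK; the function itself is hypothetical:

/* Illustrative only: bump the writeback counter when a page is
 * queued for writeback and drop it on completion.
 */
static void example_writeback(struct page *page, bool start)
{
	if (start)
		inc_lruvec_page_state(page, NR_WRITEBACK);
	else
		dec_lruvec_page_state(page, NR_WRITEBACK);
}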