From ce45f1c4b32cf69b166f56ef5bc6c761e06ed4e5 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:01 -0700
Subject: [PATCH 23/29] mm: multi-gen LRU: remove eviction fairness safeguard

Recall that the eviction consumes the oldest generation: first it
bucket-sorts pages whose gen counters were updated by the aging and
reclaims the rest; then it increments lrugen->min_seq.
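
As a rough illustration of that order (a minimal userspace sketch,
not kernel code; toy_lruvec and toy_evict are hypothetical names
standing in for the real lruvec machinery):

	#include <stdio.h>

	#define MAX_NR_GENS 4

	struct toy_lruvec {
		unsigned long min_seq;		/* oldest generation */
		unsigned long max_seq;		/* youngest generation */
		int nr_pages[MAX_NR_GENS];	/* pages per generation */
	};

	/* Consume the oldest generation, then advance min_seq. */
	static int toy_evict(struct toy_lruvec *lruvec)
	{
		int gen = lruvec->min_seq % MAX_NR_GENS;
		int reclaimed = lruvec->nr_pages[gen];

		/*
		 * The real eviction first re-sorts pages whose gen
		 * counters the aging has updated (they belong to a
		 * younger generation) and reclaims only the rest;
		 * this sketch simply takes them all.
		 */
		lruvec->nr_pages[gen] = 0;
		lruvec->min_seq++;
		return reclaimed;
	}

	int main(void)
	{
		struct toy_lruvec lruvec = {
			.min_seq = 0, .max_seq = 3,
			.nr_pages = { 8, 4, 2, 1 },
		};

		printf("reclaimed %d pages from gen %lu\n",
		       toy_evict(&lruvec), lruvec.min_seq - 1);
		return 0;
	}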

The current eviction fairness safeguard for global reclaim has a
dilemma: when there are multiple eligible memcgs, should it continue
or stop upon meeting the reclaim goal? If it continues, it overshoots
and increases direct reclaim latency; if it stops, it loses fairness
between memcgs it has taken memory away from and those it has yet to.

With memcg LRU, the eviction, while ensuring eventual fairness, will
stop upon meeting its goal. Therefore the current eviction fairness
safeguard for global reclaim will not be needed.

Note that memcg LRU only applies to global reclaim. For memcg reclaim,
the eviction will continue, even if it is overshooting. This becomes
unconditional due to code simplification.
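
In sketch form, the stop condition this change ends up with (cf.
get_nr_to_reclaim() in the hunk below; toy_scan_control and the
helper names here are stand-ins, and the direct-reclaim case is
simplified to omit compact_gap()):

	#include <stdbool.h>
	#include <limits.h>

	struct toy_scan_control {
		unsigned long nr_to_reclaim;	/* the reclaim goal */
		unsigned long nr_reclaimed;	/* progress so far */
		unsigned long last_reclaimed;	/* kswapd: prior progress */
		bool global;			/* global vs. memcg reclaim */
		bool kswapd;
	};

	static unsigned long toy_nr_to_reclaim(struct toy_scan_control *sc)
	{
		/*
		 * Memcg reclaim never aborts early: the kernel's
		 * "return -1" in an unsigned long function wraps to
		 * ULONG_MAX, an unreachable goal, so overshooting
		 * becomes unconditional.
		 */
		if (!sc->global)
			return ULONG_MAX;

		/* kswapd discounts the progress it has already made */
		if (sc->kswapd)
			return sc->nr_to_reclaim + sc->last_reclaimed;

		return sc->nr_to_reclaim;
	}

	/* The eviction loop then simply checks the goal and stops. */
	static bool toy_should_stop(struct toy_scan_control *sc)
	{
		return sc->nr_reclaimed >= toy_nr_to_reclaim(sc);
	}

	int main(void)
	{
		struct toy_scan_control sc = {
			.nr_to_reclaim = 32, .nr_reclaimed = 40,
			.global = false,
		};

		/* memcg reclaim: goal met, yet it keeps going */
		return toy_should_stop(&sc);	/* 0, i.e. continue */
	}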

Link: https://lkml.kernel.org/r/20221222041905.2431096-4-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/vmscan.c | 82 +++++++++++++++--------------------------------------
 1 file changed, 23 insertions(+), 59 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2322c913aa64..40e7a947c5c7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -443,6 +443,11 @@ static bool cgroup_reclaim(struct scan_control *sc)
 	return sc->target_mem_cgroup;
 }
 
+static bool global_reclaim(struct scan_control *sc)
+{
+	return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
+}
+
 /**
  * writeback_throttling_sane - is the usual dirty throttling mechanism available?
  * @sc: scan_control in question
@@ -493,6 +498,11 @@ static bool cgroup_reclaim(struct scan_control *sc)
 	return false;
 }
 
+static bool global_reclaim(struct scan_control *sc)
+{
+	return true;
+}
+
 static bool writeback_throttling_sane(struct scan_control *sc)
 {
 	return true;
@@ -4722,8 +4732,7 @@ static int isolate_pages(struct lruvec *lruvec, struct scan_control *sc, int swa
 	return scanned;
 }
 
-static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
-		       bool *need_swapping)
+static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
 {
 	int type;
 	int scanned;
@@ -4812,9 +4821,6 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp
 		goto retry;
 	}
 
-	if (need_swapping && type == LRU_GEN_ANON)
-		*need_swapping = true;
-
 	return scanned;
 }
 
@@ -4853,68 +4859,26 @@ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *
 	return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
 }
 
-static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq,
-			      struct scan_control *sc, bool need_swapping)
+static unsigned long get_nr_to_reclaim(struct scan_control *sc)
 {
-	int i;
-	DEFINE_MAX_SEQ(lruvec);
-
-	if (!current_is_kswapd()) {
-		/* age each memcg once to ensure fairness */
-		if (max_seq - seq > 1)
-			return true;
-
-		/* over-swapping can increase allocation latency */
-		if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping)
-			return true;
-
-		/* give this thread a chance to exit and free its memory */
-		if (fatal_signal_pending(current)) {
-			sc->nr_reclaimed += MIN_LRU_BATCH;
-			return true;
-		}
-
-		if (cgroup_reclaim(sc))
-			return false;
-	} else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim)
-		return false;
-
-	/* keep scanning at low priorities to ensure fairness */
-	if (sc->priority > DEF_PRIORITY - 2)
-		return false;
-
-	/*
-	 * A minimum amount of work was done under global memory pressure. For
-	 * kswapd, it may be overshooting. For direct reclaim, the target isn't
-	 * met, and yet the allocation may still succeed, since kswapd may have
-	 * caught up. In either case, it's better to stop now, and restart if
-	 * necessary.
-	 */
-	for (i = 0; i <= sc->reclaim_idx; i++) {
-		unsigned long wmark;
-		struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
-
-		if (!managed_zone(zone))
-			continue;
-
-		wmark = current_is_kswapd() ? high_wmark_pages(zone) : low_wmark_pages(zone);
-		if (wmark > zone_page_state(zone, NR_FREE_PAGES))
-			return false;
-	}
+	/* don't abort memcg reclaim to ensure fairness */
+	if (!global_reclaim(sc))
+		return -1;
 
-	sc->nr_reclaimed += MIN_LRU_BATCH;
+	/* discount the previous progress for kswapd */
+	if (current_is_kswapd())
+		return sc->nr_to_reclaim + sc->last_reclaimed;
 
-	return true;
+	return max(sc->nr_to_reclaim, compact_gap(sc->order));
 }
 
 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
 	struct blk_plug plug;
 	bool need_aging = false;
-	bool need_swapping = false;
 	unsigned long scanned = 0;
 	unsigned long reclaimed = sc->nr_reclaimed;
-	DEFINE_MAX_SEQ(lruvec);
+	unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
 
 	lru_add_drain();
 
@@ -4938,7 +4902,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
 		if (!nr_to_scan)
 			goto done;
 
-		delta = evict_pages(lruvec, sc, swappiness, &need_swapping);
+		delta = evict_pages(lruvec, sc, swappiness);
 		if (!delta)
 			goto done;
 
@@ -4946,7 +4910,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
 		if (scanned >= nr_to_scan)
 			break;
 
-		if (should_abort_scan(lruvec, max_seq, sc, need_swapping))
+		if (sc->nr_reclaimed >= nr_to_reclaim)
 			break;
 
 		cond_resched();
@@ -5393,7 +5357,7 @@ static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_co
 		if (sc->nr_reclaimed >= nr_to_reclaim)
 			return 0;
 
-		if (!evict_pages(lruvec, sc, swappiness, NULL))
+		if (!evict_pages(lruvec, sc, swappiness))
 			return 0;
 
 		cond_resched();
-- 
2.40.0