From 640db3a029dca909af47157ca18f52b29d34a1b9 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Sun, 18 Sep 2022 02:00:07 -0600
Subject: [PATCH 10/29] mm: multi-gen LRU: kill switch
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add /sys/kernel/mm/lru_gen/enabled as a kill switch. Components that
can be disabled include:
  0x0001: the multi-gen LRU core
  0x0002: walking page table, when arch_has_hw_pte_young() returns
          true
  0x0004: clearing the accessed bit in non-leaf PMD entries, when
          CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y
  [yYnN]: apply to all the components above
E.g.,
  echo y >/sys/kernel/mm/lru_gen/enabled
  cat /sys/kernel/mm/lru_gen/enabled
  0x0007
  echo 5 >/sys/kernel/mm/lru_gen/enabled
  cat /sys/kernel/mm/lru_gen/enabled
  0x0005
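Conversely, writing n (or N) should clear all of the above in a single
write; a quick sketch of the expected round trip, using only the
interface described above:
  echo n >/sys/kernel/mm/lru_gen/enabled
  cat /sys/kernel/mm/lru_gen/enabled
  0x0000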

NB: the page table walks happen on the scale of seconds under heavy memory
pressure, in which case the mmap_lock contention is a lesser concern,
compared with the LRU lock contention and the I/O congestion. So far the
only well-known case of the mmap_lock contention happens on Android, due
to Scudo [1] which allocates several thousand VMAs for merely a few
hundred MBs. The SPF and the Maple Tree also have provided their own
assessments [2][3]. However, if walking page tables does worsen the
mmap_lock contention, the kill switch can be used to disable it. In this
case the multi-gen LRU will suffer a minor performance degradation, as
shown previously.

Clearing the accessed bit in non-leaf PMD entries can also be disabled,
since this behavior was not tested on x86 varieties other than Intel and
AMD.
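For example, on hardware where arch_has_hw_pte_young() returns true, one
would presumably keep the core and the page table walks while dropping
only the 0x0004 component by writing the remaining mask:
  echo 3 >/sys/kernel/mm/lru_gen/enabled
  cat /sys/kernel/mm/lru_gen/enabled
  0x0003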

[1] https://source.android.com/devices/tech/debug/scudo
[2] https://lore.kernel.org/r/20220128131006.67712-1-michel@lespinasse.org/
[3] https://lore.kernel.org/r/20220426150616.3937571-1-Liam.Howlett@oracle.com/

Link: https://lkml.kernel.org/r/20220918080010.2920238-11-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Acked-by: Brian Geffon <bgeffon@google.com>
Acked-by: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
Acked-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Acked-by: Steven Barrett <steven@liquorix.net>
Acked-by: Suleiman Souhlal <suleiman@google.com>
Tested-by: Daniel Byrne <djbyrne@mtu.edu>
Tested-by: Donald Carr <d@chaos-reins.com>
Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Tested-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
Tested-by: Shuang Zhai <szhai2@cs.rochester.edu>
Tested-by: Sofia Trinh <sofia.trinh@edi.works>
Tested-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/cgroup.h          |  15 ++-
 include/linux/mm_inline.h       |  15 ++-
 include/linux/mmzone.h          |   9 ++
 kernel/cgroup/cgroup-internal.h |   1 -
 mm/Kconfig                      |   6 +
 mm/vmscan.c                     | 228 +++++++++++++++++++++++++++++++-
 6 files changed, 265 insertions(+), 9 deletions(-)

90 --- a/include/linux/cgroup.h
91 +++ b/include/linux/cgroup.h
92 @@ -433,6 +433,18 @@ static inline void cgroup_put(struct cgr
93 css_put(&cgrp->self);
94 }
95
96 +extern struct mutex cgroup_mutex;
97 +
98 +static inline void cgroup_lock(void)
99 +{
100 + mutex_lock(&cgroup_mutex);
101 +}
102 +
103 +static inline void cgroup_unlock(void)
104 +{
105 + mutex_unlock(&cgroup_mutex);
106 +}
107 +
108 /**
109 * task_css_set_check - obtain a task's css_set with extra access conditions
110 * @task: the task to obtain css_set for
111 @@ -447,7 +459,6 @@ static inline void cgroup_put(struct cgr
112 * as locks used during the cgroup_subsys::attach() methods.
113 */
114 #ifdef CONFIG_PROVE_RCU
115 -extern struct mutex cgroup_mutex;
116 extern spinlock_t css_set_lock;
117 #define task_css_set_check(task, __c) \
118 rcu_dereference_check((task)->cgroups, \
119 @@ -708,6 +719,8 @@ struct cgroup;
120 static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
121 static inline void css_get(struct cgroup_subsys_state *css) {}
122 static inline void css_put(struct cgroup_subsys_state *css) {}
123 +static inline void cgroup_lock(void) {}
124 +static inline void cgroup_unlock(void) {}
125 static inline int cgroup_attach_task_all(struct task_struct *from,
126 struct task_struct *t) { return 0; }
127 static inline int cgroupstats_build(struct cgroupstats *stats,
128 --- a/include/linux/mm_inline.h
129 +++ b/include/linux/mm_inline.h
130 @@ -91,10 +91,21 @@ static __always_inline enum lru_list pag
131
132 #ifdef CONFIG_LRU_GEN
133
134 +#ifdef CONFIG_LRU_GEN_ENABLED
135 static inline bool lru_gen_enabled(void)
136 {
137 - return true;
138 + DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);
139 +
140 + return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
141 +}
142 +#else
143 +static inline bool lru_gen_enabled(void)
144 +{
145 + DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);
146 +
147 + return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
148 }
149 +#endif
150
151 static inline bool lru_gen_in_fault(void)
152 {
153 @@ -207,7 +218,7 @@ static inline bool lru_gen_add_page(stru
154
155 VM_WARN_ON_ONCE_PAGE(gen != -1, page);
156
157 - if (PageUnevictable(page))
158 + if (PageUnevictable(page) || !lrugen->enabled)
159 return false;
160 /*
161 * There are three common cases for this page:
162 --- a/include/linux/mmzone.h
163 +++ b/include/linux/mmzone.h
164 @@ -364,6 +364,13 @@ enum {
165 LRU_GEN_FILE,
166 };
167
168 +enum {
169 + LRU_GEN_CORE,
170 + LRU_GEN_MM_WALK,
171 + LRU_GEN_NONLEAF_YOUNG,
172 + NR_LRU_GEN_CAPS
173 +};
174 +
175 #define MIN_LRU_BATCH BITS_PER_LONG
176 #define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)
177
178 @@ -405,6 +412,8 @@ struct lru_gen_struct {
179 /* can be modified without holding the LRU lock */
180 atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
181 atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
182 + /* whether the multi-gen LRU is enabled */
183 + bool enabled;
184 };
185
186 enum {
187 --- a/kernel/cgroup/cgroup-internal.h
188 +++ b/kernel/cgroup/cgroup-internal.h
189 @@ -165,7 +165,6 @@ struct cgroup_mgctx {
190 #define DEFINE_CGROUP_MGCTX(name) \
191 struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
192
193 -extern struct mutex cgroup_mutex;
194 extern spinlock_t css_set_lock;
195 extern struct cgroup_subsys *cgroup_subsys[];
196 extern struct list_head cgroup_roots;
197 --- a/mm/Kconfig
198 +++ b/mm/Kconfig
199 @@ -906,6 +906,12 @@ config LRU_GEN
200 help
201 A high performance LRU implementation to overcommit memory.
202
203 +config LRU_GEN_ENABLED
204 + bool "Enable by default"
205 + depends on LRU_GEN
206 + help
207 + This option enables the multi-gen LRU by default.
208 +
209 config LRU_GEN_STATS
210 bool "Full stats for debugging"
211 depends on LRU_GEN
212 --- a/mm/vmscan.c
213 +++ b/mm/vmscan.c
214 @@ -52,6 +52,7 @@
215 #include <linux/psi.h>
216 #include <linux/pagewalk.h>
217 #include <linux/shmem_fs.h>
218 +#include <linux/ctype.h>
219
220 #include <asm/tlbflush.h>
221 #include <asm/div64.h>
222 @@ -2841,6 +2842,14 @@ static bool can_age_anon_pages(struct pg
223
224 #ifdef CONFIG_LRU_GEN
225
226 +#ifdef CONFIG_LRU_GEN_ENABLED
227 +DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS);
228 +#define get_cap(cap) static_branch_likely(&lru_gen_caps[cap])
229 +#else
230 +DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
231 +#define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap])
232 +#endif
233 +
234 /******************************************************************************
235 * shorthand helpers
236 ******************************************************************************/
237 @@ -3717,7 +3726,8 @@ static void walk_pmd_range_locked(pud_t
238 goto next;
239
240 if (!pmd_trans_huge(pmd[i])) {
241 - if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG))
242 + if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) &&
243 + get_cap(LRU_GEN_NONLEAF_YOUNG))
244 pmdp_test_and_clear_young(vma, addr, pmd + i);
245 goto next;
246 }
247 @@ -3815,10 +3825,12 @@ restart:
248 walk->mm_stats[MM_NONLEAF_TOTAL]++;
249
250 #ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
251 - if (!pmd_young(val))
252 - continue;
253 + if (get_cap(LRU_GEN_NONLEAF_YOUNG)) {
254 + if (!pmd_young(val))
255 + continue;
256
257 - walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
258 + walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
259 + }
260 #endif
261 if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
262 continue;
263 @@ -4080,7 +4092,7 @@ static bool try_to_inc_max_seq(struct lr
264 * handful of PTEs. Spreading the work out over a period of time usually
265 * is less efficient, but it avoids bursty page faults.
266 */
267 - if (!arch_has_hw_pte_young()) {
268 + if (!(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
269 success = iterate_mm_list_nowalk(lruvec, max_seq);
270 goto done;
271 }
272 @@ -4846,6 +4858,208 @@ done:
273 }
274
275 /******************************************************************************
276 + * state change
277 + ******************************************************************************/
278 +
279 +static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
280 +{
281 + struct lru_gen_struct *lrugen = &lruvec->lrugen;
282 +
283 + if (lrugen->enabled) {
284 + enum lru_list lru;
285 +
286 + for_each_evictable_lru(lru) {
287 + if (!list_empty(&lruvec->lists[lru]))
288 + return false;
289 + }
290 + } else {
291 + int gen, type, zone;
292 +
293 + for_each_gen_type_zone(gen, type, zone) {
294 + if (!list_empty(&lrugen->lists[gen][type][zone]))
295 + return false;
296 + }
297 + }
298 +
299 + return true;
300 +}
301 +
302 +static bool fill_evictable(struct lruvec *lruvec)
303 +{
304 + enum lru_list lru;
305 + int remaining = MAX_LRU_BATCH;
306 +
307 + for_each_evictable_lru(lru) {
308 + int type = is_file_lru(lru);
309 + bool active = is_active_lru(lru);
310 + struct list_head *head = &lruvec->lists[lru];
311 +
312 + while (!list_empty(head)) {
313 + bool success;
314 + struct page *page = lru_to_page(head);
315 +
316 + VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
317 + VM_WARN_ON_ONCE_PAGE(PageActive(page) != active, page);
318 + VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
319 + VM_WARN_ON_ONCE_PAGE(page_lru_gen(page) != -1, page);
320 +
321 + del_page_from_lru_list(page, lruvec);
322 + success = lru_gen_add_page(lruvec, page, false);
323 + VM_WARN_ON_ONCE(!success);
324 +
325 + if (!--remaining)
326 + return false;
327 + }
328 + }
329 +
330 + return true;
331 +}
332 +
333 +static bool drain_evictable(struct lruvec *lruvec)
334 +{
335 + int gen, type, zone;
336 + int remaining = MAX_LRU_BATCH;
337 +
338 + for_each_gen_type_zone(gen, type, zone) {
339 + struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
340 +
341 + while (!list_empty(head)) {
342 + bool success;
343 + struct page *page = lru_to_page(head);
344 +
345 + VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
346 + VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
347 + VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
348 + VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
349 +
350 + success = lru_gen_del_page(lruvec, page, false);
351 + VM_WARN_ON_ONCE(!success);
352 + add_page_to_lru_list(page, lruvec);
353 +
354 + if (!--remaining)
355 + return false;
356 + }
357 + }
358 +
359 + return true;
360 +}
361 +
362 +static void lru_gen_change_state(bool enabled)
363 +{
364 + static DEFINE_MUTEX(state_mutex);
365 +
366 + struct mem_cgroup *memcg;
367 +
368 + cgroup_lock();
369 + cpus_read_lock();
370 + get_online_mems();
371 + mutex_lock(&state_mutex);
372 +
373 + if (enabled == lru_gen_enabled())
374 + goto unlock;
375 +
376 + if (enabled)
377 + static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
378 + else
379 + static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
380 +
381 + memcg = mem_cgroup_iter(NULL, NULL, NULL);
382 + do {
383 + int nid;
384 +
385 + for_each_node(nid) {
386 + struct lruvec *lruvec = get_lruvec(memcg, nid);
387 +
388 + if (!lruvec)
389 + continue;
390 +
391 + spin_lock_irq(&lruvec->lru_lock);
392 +
393 + VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
394 + VM_WARN_ON_ONCE(!state_is_valid(lruvec));
395 +
396 + lruvec->lrugen.enabled = enabled;
397 +
398 + while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
399 + spin_unlock_irq(&lruvec->lru_lock);
400 + cond_resched();
401 + spin_lock_irq(&lruvec->lru_lock);
402 + }
403 +
404 + spin_unlock_irq(&lruvec->lru_lock);
405 + }
406 +
407 + cond_resched();
408 + } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
409 +unlock:
410 + mutex_unlock(&state_mutex);
411 + put_online_mems();
412 + cpus_read_unlock();
413 + cgroup_unlock();
414 +}
415 +
416 +/******************************************************************************
417 + * sysfs interface
418 + ******************************************************************************/
419 +
420 +static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
421 +{
422 + unsigned int caps = 0;
423 +
424 + if (get_cap(LRU_GEN_CORE))
425 + caps |= BIT(LRU_GEN_CORE);
426 +
427 + if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
428 + caps |= BIT(LRU_GEN_MM_WALK);
429 +
430 + if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) && get_cap(LRU_GEN_NONLEAF_YOUNG))
431 + caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
432 +
433 + return snprintf(buf, PAGE_SIZE, "0x%04x\n", caps);
434 +}
435 +
436 +static ssize_t store_enabled(struct kobject *kobj, struct kobj_attribute *attr,
437 + const char *buf, size_t len)
438 +{
439 + int i;
440 + unsigned int caps;
441 +
442 + if (tolower(*buf) == 'n')
443 + caps = 0;
444 + else if (tolower(*buf) == 'y')
445 + caps = -1;
446 + else if (kstrtouint(buf, 0, &caps))
447 + return -EINVAL;
448 +
449 + for (i = 0; i < NR_LRU_GEN_CAPS; i++) {
450 + bool enabled = caps & BIT(i);
451 +
452 + if (i == LRU_GEN_CORE)
453 + lru_gen_change_state(enabled);
454 + else if (enabled)
455 + static_branch_enable(&lru_gen_caps[i]);
456 + else
457 + static_branch_disable(&lru_gen_caps[i]);
458 + }
459 +
460 + return len;
461 +}
462 +
463 +static struct kobj_attribute lru_gen_enabled_attr = __ATTR(
464 + enabled, 0644, show_enabled, store_enabled
465 +);
466 +
467 +static struct attribute *lru_gen_attrs[] = {
468 + &lru_gen_enabled_attr.attr,
469 + NULL
470 +};
471 +
472 +static struct attribute_group lru_gen_attr_group = {
473 + .name = "lru_gen",
474 + .attrs = lru_gen_attrs,
475 +};
476 +
477 +/******************************************************************************
478 * initialization
479 ******************************************************************************/
480
481 @@ -4855,6 +5069,7 @@ void lru_gen_init_lruvec(struct lruvec *
482 struct lru_gen_struct *lrugen = &lruvec->lrugen;
483
484 lrugen->max_seq = MIN_NR_GENS + 1;
485 + lrugen->enabled = lru_gen_enabled();
486
487 for_each_gen_type_zone(gen, type, zone)
488 INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
489 @@ -4894,6 +5109,9 @@ static int __init init_lru_gen(void)
490 BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
491 BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
492
493 + if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
494 + pr_err("lru_gen: failed to create sysfs group\n");
495 +
496 return 0;
497 };
498 late_initcall(init_lru_gen);