kernel: Update MGLRU patchset
[openwrt/staging/dedeckeh.git] target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch
From e4277535f6d6708bb19b88c4bad155832671d69b Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Sun, 18 Sep 2022 02:00:04 -0600
Subject: [PATCH 07/29] mm: multi-gen LRU: exploit locality in rmap
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Searching the rmap for PTEs mapping each page on an LRU list (to test and
clear the accessed bit) can be expensive because pages from different VMAs
(PA space) are not cache friendly to the rmap (VA space). For workloads
mostly using mapped pages, searching the rmap can incur the highest CPU
cost in the reclaim path.

This patch exploits spatial locality to reduce the trips into the rmap.
When shrink_page_list() walks the rmap and finds a young PTE, a new
function lru_gen_look_around() scans at most BITS_PER_LONG-1 adjacent
PTEs. On finding another young PTE, it clears the accessed bit and
updates the gen counter of the page mapped by this PTE to
(max_seq%MAX_NR_GENS)+1.
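
The windowing is easiest to see in isolation. The stand-alone C sketch
below is only an illustration (NR_PTES, BATCH and look_around() are
made-up names, and it ignores VMA boundaries and the pfn/memcg checks
done by the real code); it models how the scan is clamped to one batch
of PTEs around the young PTE found by the rmap walk, mirroring the
start/end arithmetic in the lru_gen_look_around() hunk added to
mm/vmscan.c below.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  #define NR_PTES 512     /* PTEs per PMD on x86-64, for illustration */
  #define BATCH   64      /* MIN_LRU_BATCH, i.e. BITS_PER_LONG, in the kernel */

  /* scan a window of at most BATCH PTEs centered on the PTE at index "hit" */
  static void look_around(bool young[NR_PTES], size_t hit)
  {
          size_t start = 0, end = NR_PTES;

          if (end - start > BATCH) {
                  if (hit - start < BATCH / 2)
                          end = start + BATCH;
                  else if (end - hit < BATCH / 2)
                          start = end - BATCH;
                  else {
                          start = hit - BATCH / 2;
                          end = hit + BATCH / 2;
                  }
          }

          for (size_t i = start; i < end; i++) {
                  if (!young[i])
                          continue;
                  young[i] = false;       /* "clear the accessed bit" */
                  printf("promote page mapped by PTE %zu\n", i);
          }
  }

  int main(void)
  {
          bool young[NR_PTES] = { false };

          young[100] = young[101] = young[130] = true;
          look_around(young, 100);        /* the rmap walk found PTE 100 young */
          return 0;
  }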

Server benchmark results:
  Single workload:
    fio (buffered I/O): no change

  Single workload:
    memcached (anon): +[3, 5]%
                Ops/sec      KB/sec
      patch1-6: 1106168.46   43025.04
      patch1-7: 1147696.57   44640.29

  Configurations:
    no change

Client benchmark results:
  kswapd profiles:
    patch1-6
      39.03%  lzo1x_1_do_compress (real work)
      18.47%  page_vma_mapped_walk (overhead)
       6.74%  _raw_spin_unlock_irq
       3.97%  do_raw_spin_lock
       2.49%  ptep_clear_flush
       2.48%  anon_vma_interval_tree_iter_first
       1.92%  page_referenced_one
       1.88%  __zram_bvec_write
       1.48%  memmove
       1.31%  vma_interval_tree_iter_next

    patch1-7
      48.16%  lzo1x_1_do_compress (real work)
       8.20%  page_vma_mapped_walk (overhead)
       7.06%  _raw_spin_unlock_irq
       2.92%  ptep_clear_flush
       2.53%  __zram_bvec_write
       2.11%  do_raw_spin_lock
       2.02%  memmove
       1.93%  lru_gen_look_around
       1.56%  free_unref_page_list
       1.40%  memset

  Configurations:
    no change

Link: https://lkml.kernel.org/r/20220918080010.2920238-8-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Acked-by: Barry Song <baohua@kernel.org>
Acked-by: Brian Geffon <bgeffon@google.com>
Acked-by: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
Acked-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Acked-by: Steven Barrett <steven@liquorix.net>
Acked-by: Suleiman Souhlal <suleiman@google.com>
Tested-by: Daniel Byrne <djbyrne@mtu.edu>
Tested-by: Donald Carr <d@chaos-reins.com>
Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Tested-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
Tested-by: Shuang Zhai <szhai2@cs.rochester.edu>
Tested-by: Sofia Trinh <sofia.trinh@edi.works>
Tested-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 include/linux/memcontrol.h |  31 +++++++
 include/linux/mmzone.h     |   6 ++
 mm/internal.h              |   1 +
 mm/memcontrol.c            |   1 +
 mm/rmap.c                  |   7 ++
 mm/swap.c                  |   4 +-
 mm/vmscan.c                | 184 +++++++++++++++++++++++++++++++++++++
 7 files changed, 232 insertions(+), 2 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4f189b17dafc..8d6a0329bc59 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -442,6 +442,7 @@ static inline struct obj_cgroup *__page_objcg(struct page *page)
  * - LRU isolation
  * - lock_page_memcg()
  * - exclusive reference
+ * - mem_cgroup_trylock_pages()
  *
  * For a kmem page a caller should hold an rcu read lock to protect memcg
  * associated with a kmem page from being released.
@@ -497,6 +498,7 @@ static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
  * - LRU isolation
  * - lock_page_memcg()
  * - exclusive reference
+ * - mem_cgroup_trylock_pages()
  *
  * For a kmem page a caller should hold an rcu read lock to protect memcg
  * associated with a kmem page from being released.
@@ -953,6 +955,23 @@ void unlock_page_memcg(struct page *page);

 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

+/* try to stablize page_memcg() for all the pages in a memcg */
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+        rcu_read_lock();
+
+        if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
+                return true;
+
+        rcu_read_unlock();
+        return false;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
+{
+        rcu_read_unlock();
+}
+
 /* idx can be of type enum memcg_stat_item or node_stat_item */
 static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                    int idx, int val)
@@ -1369,6 +1388,18 @@ static inline void unlock_page_memcg(struct page *page)
 {
 }

+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+        /* to match page_memcg_rcu() */
+        rcu_read_lock();
+        return true;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
+{
+        rcu_read_unlock();
+}
+
 static inline void mem_cgroup_handle_over_high(void)
 {
 }
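
The two helpers added above bracket a section in which page_memcg() is
guaranteed not to change: on success the RCU read lock is held until
mem_cgroup_unlock_pages(); on failure (the memcg is moving accounts)
the lock has already been dropped and the caller simply backs off.
Condensed from the lru_gen_look_around() hunk added to mm/vmscan.c
later in this patch, the calling pattern looks like this:

        /* page_update_gen() requires stable page_memcg() */
        if (!mem_cgroup_trylock_pages(memcg))
                return;

        spin_lock_irq(&lruvec->lru_lock);
        /* ... update the generations of the collected pages ... */
        spin_unlock_irq(&lruvec->lru_lock);

        mem_cgroup_unlock_pages();
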
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fce8945c507c..4db2b877fcf9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -352,6 +352,7 @@ enum lruvec_flags {
 #ifndef __GENERATING_BOUNDS_H

 struct lruvec;
+struct page_vma_mapped_walk;

 #define LRU_GEN_MASK    ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
 #define LRU_REFS_MASK   ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
@@ -407,6 +408,7 @@ struct lru_gen_struct {
 };

 void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);

 #ifdef CONFIG_MEMCG
 void lru_gen_init_memcg(struct mem_cgroup *memcg);
@@ -419,6 +421,10 @@ static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
 {
 }

+static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+{
+}
+
 #ifdef CONFIG_MEMCG
 static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
 {
diff --git a/mm/internal.h b/mm/internal.h
index cf3cb933eba3..5c73246a092e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -35,6 +35,7 @@
 void page_writeback_init(void);

 vm_fault_t do_swap_page(struct vm_fault *vmf);
+void activate_page(struct page *page);

 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
                 unsigned long floor, unsigned long ceiling);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8b634dc72e7f..cc3431c5d9ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2798,6 +2798,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg)
          * - LRU isolation
          * - lock_page_memcg()
          * - exclusive reference
+         * - mem_cgroup_trylock_pages()
          */
         page->memcg_data = (unsigned long)memcg;
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 330b361a460e..22a86122732e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -73,6 +73,7 @@
 #include <linux/page_idle.h>
 #include <linux/memremap.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/mm_inline.h>

 #include <asm/tlbflush.h>

@@ -793,6 +794,12 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
         }

         if (pvmw.pte) {
+                if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
+                    !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) {
+                        lru_gen_look_around(&pvmw);
+                        referenced++;
+                }
+
                 if (ptep_clear_flush_young_notify(vma, address,
                                         pvmw.pte)) {
                         /*
diff --git a/mm/swap.c b/mm/swap.c
index 5d227577b609..966ff2d83343 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -325,7 +325,7 @@ static bool need_activate_page_drain(int cpu)
         return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
 }

-static void activate_page(struct page *page)
+void activate_page(struct page *page)
 {
         page = compound_head(page);
         if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -345,7 +345,7 @@ static inline void activate_page_drain(int cpu)
 {
 }

-static void activate_page(struct page *page)
+void activate_page(struct page *page)
 {
         struct lruvec *lruvec;

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 932abd24c1b3..1d0b25ae378c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1409,6 +1409,11 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                 if (!sc->may_unmap && page_mapped(page))
                         goto keep_locked;

+                /* page_update_gen() tried to promote this page? */
+                if (lru_gen_enabled() && !ignore_references &&
+                    page_mapped(page) && PageReferenced(page))
+                        goto keep_locked;
+
                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

@@ -2990,6 +2995,29 @@ static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
  *                          the aging
  ******************************************************************************/

+/* promote pages accessed through page tables */
+static int page_update_gen(struct page *page, int gen)
+{
+        unsigned long new_flags, old_flags = READ_ONCE(page->flags);
+
+        VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
+        VM_WARN_ON_ONCE(!rcu_read_lock_held());
+
+        do {
+                /* lru_gen_del_page() has isolated this page? */
+                if (!(old_flags & LRU_GEN_MASK)) {
+                        /* for shrink_page_list() */
+                        new_flags = old_flags | BIT(PG_referenced);
+                        continue;
+                }
+
+                new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
+                new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
+        } while (!try_cmpxchg(&page->flags, &old_flags, new_flags));
+
+        return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+}
+
 /* protect pages accessed multiple times through file descriptors */
 static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming)
 {
@@ -3001,6 +3029,11 @@ static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaimin
         VM_WARN_ON_ONCE_PAGE(!(old_flags & LRU_GEN_MASK), page);

         do {
+                new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+                /* page_update_gen() has promoted this page? */
+                if (new_gen >= 0 && new_gen != old_gen)
+                        return new_gen;
+
                 new_gen = (old_gen + 1) % MAX_NR_GENS;

                 new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
@@ -3015,6 +3048,43 @@ static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaimin
         return new_gen;
 }

+static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr)
+{
+        unsigned long pfn = pte_pfn(pte);
+
+        VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
+
+        if (!pte_present(pte) || is_zero_pfn(pfn))
+                return -1;
+
+        if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
+                return -1;
+
+        if (WARN_ON_ONCE(!pfn_valid(pfn)))
+                return -1;
+
+        return pfn;
+}
+
+static struct page *get_pfn_page(unsigned long pfn, struct mem_cgroup *memcg,
+                                 struct pglist_data *pgdat)
+{
+        struct page *page;
+
+        /* try to avoid unnecessary memory loads */
+        if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
+                return NULL;
+
+        page = compound_head(pfn_to_page(pfn));
+        if (page_to_nid(page) != pgdat->node_id)
+                return NULL;
+
+        if (page_memcg_rcu(page) != memcg)
+                return NULL;
+
+        return page;
+}
+
 static void inc_min_seq(struct lruvec *lruvec, int type)
 {
         struct lru_gen_struct *lrugen = &lruvec->lrugen;
@@ -3214,6 +3284,114 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
         } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
 }

+/*
+ * This function exploits spatial locality when shrink_page_list() walks the
+ * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages.
+ */
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+{
+        int i;
+        pte_t *pte;
+        unsigned long start;
+        unsigned long end;
+        unsigned long addr;
+        unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
+        struct page *page = pvmw->page;
+        struct mem_cgroup *memcg = page_memcg(page);
+        struct pglist_data *pgdat = page_pgdat(page);
+        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
+        DEFINE_MAX_SEQ(lruvec);
+        int old_gen, new_gen = lru_gen_from_seq(max_seq);
+
+        lockdep_assert_held(pvmw->ptl);
+        VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
+
+        if (spin_is_contended(pvmw->ptl))
+                return;
+
+        start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
+        end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
+
+        if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
+                if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
+                        end = start + MIN_LRU_BATCH * PAGE_SIZE;
+                else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2)
+                        start = end - MIN_LRU_BATCH * PAGE_SIZE;
+                else {
+                        start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2;
+                        end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2;
+                }
+        }
+
+        pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE;
+
+        rcu_read_lock();
+        arch_enter_lazy_mmu_mode();
+
+        for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
+                unsigned long pfn;
+
+                pfn = get_pte_pfn(pte[i], pvmw->vma, addr);
+                if (pfn == -1)
+                        continue;
+
+                if (!pte_young(pte[i]))
+                        continue;
+
+                page = get_pfn_page(pfn, memcg, pgdat);
+                if (!page)
+                        continue;
+
+                if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
+                        VM_WARN_ON_ONCE(true);
+
+                if (pte_dirty(pte[i]) && !PageDirty(page) &&
+                    !(PageAnon(page) && PageSwapBacked(page) &&
+                      !PageSwapCache(page)))
+                        set_page_dirty(page);
+
+                old_gen = page_lru_gen(page);
+                if (old_gen < 0)
+                        SetPageReferenced(page);
+                else if (old_gen != new_gen)
+                        __set_bit(i, bitmap);
+        }
+
+        arch_leave_lazy_mmu_mode();
+        rcu_read_unlock();
+
+        if (bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) {
+                for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
+                        page = pte_page(pte[i]);
+                        activate_page(page);
+                }
+                return;
+        }
+
+        /* page_update_gen() requires stable page_memcg() */
+        if (!mem_cgroup_trylock_pages(memcg))
+                return;
+
+        spin_lock_irq(&lruvec->lru_lock);
+        new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq);
+
+        for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
+                page = compound_head(pte_page(pte[i]));
+                if (page_memcg_rcu(page) != memcg)
+                        continue;
+
+                old_gen = page_update_gen(page, new_gen);
+                if (old_gen < 0 || old_gen == new_gen)
+                        continue;
+
+                lru_gen_update_size(lruvec, page, old_gen, new_gen);
+        }
+
+        spin_unlock_irq(&lruvec->lru_lock);
+
+        mem_cgroup_unlock_pages();
+}
+
 /******************************************************************************
  *                          the eviction
  ******************************************************************************/
@@ -3250,6 +3428,12 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx)
                 return true;
         }

+        /* promoted */
+        if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
+                list_move(&page->lru, &lrugen->lists[gen][type][zone]);
+                return true;
+        }
+
         /* protected */
         if (tier > tier_idx) {
                 int hist = lru_hist_from_seq(lrugen->min_seq[type]);
-- 
2.40.0

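A note on the gen arithmetic used above: page_update_gen() stores
gen + 1 in the LRU_GEN_MASK bits of page->flags, so an all-zero field
decodes to -1, meaning the page is not on a multi-gen LRU list, and the
value written for a freshly promoted page corresponds to the
(max_seq%MAX_NR_GENS)+1 mentioned in the commit message. The
stand-alone C sketch below reproduces just that encode/decode step; the
SIM_* names are made up for this illustration and the real offsets are
derived from the kernel's page->flags layout.

  #include <assert.h>
  #include <stdio.h>

  #define MAX_NR_GENS   4
  #define SIM_GEN_PGOFF 8
  #define SIM_GEN_WIDTH 3         /* enough bits to hold gen + 1, i.e. 1..MAX_NR_GENS */
  #define SIM_GEN_MASK  (((1UL << SIM_GEN_WIDTH) - 1) << SIM_GEN_PGOFF)

  /* store gen + 1 so that a zeroed field means "no generation assigned" */
  static unsigned long set_gen(unsigned long flags, int gen)
  {
          flags &= ~SIM_GEN_MASK;
          return flags | ((gen + 1UL) << SIM_GEN_PGOFF);
  }

  /* decode; returns -1 when the field is zero, like page_lru_gen() */
  static int get_gen(unsigned long flags)
  {
          return (int)((flags & SIM_GEN_MASK) >> SIM_GEN_PGOFF) - 1;
  }

  int main(void)
  {
          unsigned long flags = 0;
          unsigned long max_seq = 7;
          int gen = max_seq % MAX_NR_GENS;        /* lru_gen_from_seq() in the kernel */

          assert(get_gen(flags) == -1);           /* not on a multi-gen LRU list */

          flags = set_gen(flags, gen);
          printf("stored %d, decoded gen %d\n", gen + 1, get_gen(flags));
          assert(get_gen(flags) == gen);
          return 0;
  }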