generic: 6.1: manually refresh mglru patch with new kernel version
[openwrt/staging/ldir.git] / target/linux/generic/backport-6.1/020-v6.3-25-mm-multi-gen-LRU-shuffle-should_run_aging.patch
From 107d54931df3c28d81648122e219bf0034ef4e99 Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:03 -0700
Subject: [PATCH 25/29] mm: multi-gen LRU: shuffle should_run_aging()

Move should_run_aging() next to its only caller left.

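The function itself is unchanged by this patch. For reference while
reviewing the move, its decision rule reduces to the sketch below; this
is a hypothetical userspace rendering, not kernel code, assuming
MIN_NR_GENS == 2 as in mm/vmscan.c, with min_seq standing in for the
kernel's min_seq[!can_swap] and young/old/total taken as inputs instead
of being summed from lrugen->nr_pages[gen][type][zone]:

  /*
   * Hypothetical sketch of the checks in should_run_aging(); assumes
   * MIN_NR_GENS == 2 and precomputed generation sizes.
   */
  #include <stdbool.h>
  #include <stdio.h>

  #define MIN_NR_GENS 2UL

  static bool aging_needed(unsigned long min_seq, unsigned long max_seq,
                           unsigned long young, unsigned long old,
                           unsigned long total)
  {
          /* out of cold folios: eviction would stall, so aging must run */
          if (min_seq + MIN_NR_GENS > max_seq)
                  return true;

          /* more than MIN_NR_GENS+1 generations: aging can stay lazy */
          if (min_seq + MIN_NR_GENS < max_seq)
                  return false;

          /* exactly MIN_NR_GENS+1 generations: check the hot/cold balance */
          if (young * MIN_NR_GENS > total)        /* hot pages past 1/MIN_NR_GENS */
                  return true;
          if (old * (MIN_NR_GENS + 2) < total)    /* cold pages under 1/(MIN_NR_GENS+2) */
                  return true;

          return false;
  }

  int main(void)
  {
          /* made-up counts: 60% of pages sit in the youngest generation */
          printf("%d\n", aging_needed(4, 6, 600, 200, 1000));     /* prints 1 */
          /* made-up counts: an even spread trips neither bound */
          printf("%d\n", aging_needed(4, 6, 300, 300, 1000));     /* prints 0 */
          return 0;
  }

In words: aging must run when the lruvec is out of cold folios, can stay
lazy while more than MIN_NR_GENS+1 generations exist, and otherwise runs
only when hot pages exceed 1/MIN_NR_GENS of the total or cold pages fall
below 1/(MIN_NR_GENS+2) of it.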

Link: https://lkml.kernel.org/r/20221222041905.2431096-6-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/vmscan.c | 124 ++++++++++++++++++++++++++--------------------------
 1 file changed, 62 insertions(+), 62 deletions(-)

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4183,68 +4183,6 @@ done:
 	return true;
 }
 
-static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
-			     struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
-{
-	int gen, type, zone;
-	unsigned long old = 0;
-	unsigned long young = 0;
-	unsigned long total = 0;
-	struct lru_gen_folio *lrugen = &lruvec->lrugen;
-	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
-	DEFINE_MIN_SEQ(lruvec);
-
-	/* whether this lruvec is completely out of cold folios */
-	if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
-		*nr_to_scan = 0;
-		return true;
-	}
-
-	for (type = !can_swap; type < ANON_AND_FILE; type++) {
-		unsigned long seq;
-
-		for (seq = min_seq[type]; seq <= max_seq; seq++) {
-			unsigned long size = 0;
-
-			gen = lru_gen_from_seq(seq);
-
-			for (zone = 0; zone < MAX_NR_ZONES; zone++)
-				size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
-
-			total += size;
-			if (seq == max_seq)
-				young += size;
-			else if (seq + MIN_NR_GENS == max_seq)
-				old += size;
-		}
-	}
-
-	/* try to scrape all its memory if this memcg was deleted */
-	*nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
-
-	/*
-	 * The aging tries to be lazy to reduce the overhead, while the eviction
-	 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
-	 * ideal number of generations is MIN_NR_GENS+1.
-	 */
-	if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
-		return false;
-
-	/*
-	 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
-	 * of the total number of pages for each generation. A reasonable range
-	 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
-	 * aging cares about the upper bound of hot pages, while the eviction
-	 * cares about the lower bound of cold pages.
-	 */
-	if (young * MIN_NR_GENS > total)
-		return true;
-	if (old * (MIN_NR_GENS + 2) < total)
-		return true;
-
-	return false;
-}
-
 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
 {
 	int gen, type, zone;
@@ -4828,6 +4766,68 @@ retry:
 	return scanned;
 }
 
+static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
+			     struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
+{
+	int gen, type, zone;
+	unsigned long old = 0;
+	unsigned long young = 0;
+	unsigned long total = 0;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
+	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+	DEFINE_MIN_SEQ(lruvec);
+
+	/* whether this lruvec is completely out of cold folios */
+	if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
+		*nr_to_scan = 0;
+		return true;
+	}
+
+	for (type = !can_swap; type < ANON_AND_FILE; type++) {
+		unsigned long seq;
+
+		for (seq = min_seq[type]; seq <= max_seq; seq++) {
+			unsigned long size = 0;
+
+			gen = lru_gen_from_seq(seq);
+
+			for (zone = 0; zone < MAX_NR_ZONES; zone++)
+				size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+
+			total += size;
+			if (seq == max_seq)
+				young += size;
+			else if (seq + MIN_NR_GENS == max_seq)
+				old += size;
+		}
+	}
+
+	/* try to scrape all its memory if this memcg was deleted */
+	*nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
+
+	/*
+	 * The aging tries to be lazy to reduce the overhead, while the eviction
+	 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
+	 * ideal number of generations is MIN_NR_GENS+1.
+	 */
+	if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
+		return false;
+
+	/*
+	 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
+	 * of the total number of pages for each generation. A reasonable range
+	 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
+	 * aging cares about the upper bound of hot pages, while the eviction
+	 * cares about the lower bound of cold pages.
+	 */
+	if (young * MIN_NR_GENS > total)
+		return true;
+	if (old * (MIN_NR_GENS + 2) < total)
+		return true;
+
+	return false;
+}
+
 /*
  * For future optimizations:
  * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg