mm/vmscan.c | 374 +++++++++++++++++++++++++++++++++----
6 files changed, 500 insertions(+), 35 deletions(-)
-diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
-index e039763029563..82d28b052a9e5 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
-@@ -790,6 +790,11 @@ static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+@@ -795,6 +795,11 @@ static inline void obj_cgroup_put(struct
percpu_ref_put(&objcg->refcnt);
}
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
if (memcg)
-@@ -1290,6 +1295,11 @@ static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+@@ -1295,6 +1300,11 @@ static inline void obj_cgroup_put(struct
{
}
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
-diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
-index da38e3d962e2f..c1fd3922dc5dd 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
-@@ -122,6 +122,18 @@ static inline bool lru_gen_in_fault(void)
+@@ -122,6 +122,18 @@ static inline bool lru_gen_in_fault(void
return current->in_lru_fault;
}
static inline int lru_gen_from_seq(unsigned long seq)
{
return seq % MAX_NR_GENS;
-@@ -297,6 +309,11 @@ static inline bool lru_gen_in_fault(void)
+@@ -297,6 +309,11 @@ static inline bool lru_gen_in_fault(void
return false;
}
static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
return false;
-diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
-index 02e4323744715..66e067a635682 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -7,6 +7,7 @@
};
enum {
-@@ -479,12 +497,87 @@ void lru_gen_init_lruvec(struct lruvec *lruvec);
+@@ -479,12 +497,87 @@ void lru_gen_init_lruvec(struct lruvec *
void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
#ifdef CONFIG_MEMCG
static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}
-@@ -494,6 +587,7 @@ static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+@@ -494,6 +587,7 @@ static inline void lru_gen_look_around(s
}
#ifdef CONFIG_MEMCG
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}
-@@ -501,7 +595,24 @@ static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
+@@ -501,7 +595,24 @@ static inline void lru_gen_init_memcg(st
static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
}
#endif
CACHELINE_PADDING(_pad2_);
-diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index 3e8f1ad0fe9db..7815d556e38cc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -477,6 +477,16 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
+@@ -477,6 +477,16 @@ static void mem_cgroup_update_tree(struc
struct mem_cgroup_per_node *mz;
struct mem_cgroup_tree_per_node *mctz;
mctz = soft_limit_tree.rb_tree_per_node[nid];
if (!mctz)
return;
-@@ -3522,6 +3532,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
+@@ -3523,6 +3533,9 @@ unsigned long mem_cgroup_soft_limit_recl
struct mem_cgroup_tree_per_node *mctz;
unsigned long excess;
if (order > 0)
return 0;
-@@ -5382,6 +5395,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
+@@ -5386,6 +5399,7 @@ static int mem_cgroup_css_online(struct
if (unlikely(mem_cgroup_is_root(memcg)))
queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
2UL*HZ);
return 0;
offline_kmem:
memcg_offline_kmem(memcg);
-@@ -5413,6 +5427,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
+@@ -5417,6 +5431,7 @@ static void mem_cgroup_css_offline(struc
memcg_offline_kmem(memcg);
reparent_shrinker_deferred(memcg);
wb_memcg_offline(memcg);
drain_all_stock(memcg);
-@@ -5424,6 +5439,7 @@ static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
+@@ -5428,6 +5443,7 @@ static void mem_cgroup_css_released(stru
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
invalidate_reclaim_iterators(memcg);
}
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
-diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 69668817fed37..473057b81a9df 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -7957,6 +7957,7 @@ static void __init free_area_init_node(int nid)
+@@ -7943,6 +7943,7 @@ static void __init free_area_init_node(i
pgdat_set_deferred_range(pgdat);
free_area_init_core(pgdat);
}
static void __init free_area_init_memoryless_node(int nid)
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 0c47952714b26..65eb28448f216 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -54,6 +54,8 @@
/* Allocation order */
s8 order;
-@@ -3160,6 +3157,9 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
+@@ -3160,6 +3157,9 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_ca
for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
{
struct pglist_data *pgdat = NODE_DATA(nid);
-@@ -4440,8 +4440,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
+@@ -4442,8 +4442,7 @@ done:
if (sc->priority <= DEF_PRIORITY - 2)
wait_event_killable(lruvec->mm_state.wait,
max_seq < READ_ONCE(lrugen->max_seq));
}
VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));
-@@ -4514,8 +4513,6 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
+@@ -4516,8 +4515,6 @@ static void lru_gen_age_node(struct pgli
VM_WARN_ON_ONCE(!current_is_kswapd());
/* check the order to exclude compaction-induced reclaim */
if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
return;
-@@ -5104,8 +5101,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
+@@ -5116,8 +5113,7 @@ static bool should_run_aging(struct lruv
* 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
* reclaim.
*/
{
unsigned long nr_to_scan;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
-@@ -5122,10 +5118,8 @@ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *
+@@ -5134,10 +5130,8 @@ static unsigned long get_nr_to_scan(stru
if (sc->priority == DEF_PRIORITY)
return nr_to_scan;
}
static unsigned long get_nr_to_reclaim(struct scan_control *sc)
-@@ -5134,29 +5128,18 @@ static unsigned long get_nr_to_reclaim(struct scan_control *sc)
+@@ -5146,29 +5140,18 @@ static unsigned long get_nr_to_reclaim(s
if (!global_reclaim(sc))
return -1;
if (sc->may_swap)
swappiness = get_swappiness(lruvec, sc);
-@@ -5166,7 +5149,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
+@@ -5178,7 +5161,7 @@ static void lru_gen_shrink_lruvec(struct
swappiness = 0;
nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
break;
delta = evict_folios(lruvec, sc, swappiness);
-@@ -5183,11 +5166,252 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
+@@ -5195,10 +5178,251 @@ static void lru_gen_shrink_lruvec(struct
cond_resched();
}
+ if (try_to_shrink_lruvec(lruvec, sc))
+ lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
+
- clear_mm_walk();
-
- blk_finish_plug(&plug);
- }
-
++ clear_mm_walk();
++
++ blk_finish_plug(&plug);
++}
++
+#else /* !CONFIG_MEMCG */
+
+static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
+ if (current_is_kswapd())
+ sc->nr_reclaimed += reclaimed;
+
-+ clear_mm_walk();
-+
-+ blk_finish_plug(&plug);
+ clear_mm_walk();
+
+ blk_finish_plug(&plug);
+
+ /* kswapd should never fail */
+ pgdat->kswapd_failures = 0;
+ WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
+
+ spin_unlock(&pgdat->memcg_lru.lock);
-+}
+ }
+#endif
-+
+
/******************************************************************************
* state change
- ******************************************************************************/
-@@ -5644,11 +5868,11 @@ static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
+@@ -5656,11 +5880,11 @@ static int run_cmd(char cmd, int memcg_i
if (!mem_cgroup_disabled()) {
rcu_read_lock();
rcu_read_unlock();
if (!memcg)
-@@ -5796,6 +6020,19 @@ void lru_gen_init_lruvec(struct lruvec *lruvec)
+@@ -5808,6 +6032,19 @@ void lru_gen_init_lruvec(struct lruvec *
}
#ifdef CONFIG_MEMCG
void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
INIT_LIST_HEAD(&memcg->mm_list.fifo);
-@@ -5819,7 +6056,69 @@ void lru_gen_exit_memcg(struct mem_cgroup *memcg)
+@@ -5831,7 +6068,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
}
}
}
static int __init init_lru_gen(void)
{
-@@ -5846,6 +6145,10 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc
+@@ -5858,6 +6157,10 @@ static void lru_gen_shrink_lruvec(struct
{
}
#endif /* CONFIG_LRU_GEN */
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
-@@ -5859,7 +6162,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+@@ -5871,7 +6174,7 @@ static void shrink_lruvec(struct lruvec
bool proportional_reclaim;
struct blk_plug plug;
lru_gen_shrink_lruvec(lruvec, sc);
return;
}
-@@ -6102,6 +6405,11 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+@@ -6114,6 +6417,11 @@ static void shrink_node(pg_data_t *pgdat
struct lruvec *target_lruvec;
bool reclaimable = false;
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
again: