--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
-@@ -818,6 +818,11 @@ static inline void obj_cgroup_put(struct
+@@ -790,6 +790,11 @@ static inline void obj_cgroup_put(struct
percpu_ref_put(&objcg->refcnt);
}
+
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget(&memcg->css);
+}
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
if (memcg)
-@@ -1283,6 +1288,11 @@ struct mem_cgroup *mem_cgroup_from_css(s
- return NULL;
+@@ -1290,6 +1295,11 @@ static inline void obj_cgroup_put(struct
+ {
}
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return true;
+}
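Taken together, the two memcontrol.h hunks introduce mem_cgroup_tryget(): it css_tryget()s the css when memcg is non-NULL and succeeds unconditionally otherwise. The NULL-tolerant semantics let RCU walkers pin whatever they looked up without a separate NULL check. A minimal usage sketch; pin_memcg_from_id() is hypothetical, not part of the patch:

/* Hypothetical helper: pin the memcg behind a cgroup ID; may return NULL. */
static struct mem_cgroup *pin_memcg_from_id(unsigned short id)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();
        memcg = mem_cgroup_from_id(id);
        if (!mem_cgroup_tryget(memcg))
                memcg = NULL;   /* lost a race with css teardown */
        rcu_read_unlock();

        return memcg;           /* release with mem_cgroup_put() */
}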
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
-@@ -112,6 +112,18 @@ static inline bool lru_gen_in_fault(void
+@@ -122,6 +122,18 @@ static inline bool lru_gen_in_fault(void
return current->in_lru_fault;
}
static inline int lru_gen_from_seq(unsigned long seq)
{
return seq % MAX_NR_GENS;
-@@ -287,6 +299,11 @@ static inline bool lru_gen_in_fault(void
+@@ -297,6 +309,11 @@ static inline bool lru_gen_in_fault(void
return false;
}
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
-@@ -357,6 +358,15 @@ struct folio_vma_mapped_walk;
+@@ -367,6 +368,15 @@ struct page_vma_mapped_walk;
#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
#ifdef CONFIG_LRU_GEN
enum {
-@@ -416,6 +426,14 @@ struct lru_gen_folio {
+@@ -426,6 +436,14 @@ struct lru_gen_folio {
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* whether the multi-gen LRU is enabled */
bool enabled;
+#ifdef CONFIG_MEMCG
+ /* the memcg generation this lru_gen_folio belongs to */
+ u8 gen;
+ /* the list segment this lru_gen_folio belongs to */
+ u8 seg;
+ /* per-node lru_gen_folio list for global reclaim */
+ struct hlist_nulls_node list;
+#endif
};
enum {
-@@ -469,12 +487,87 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -479,12 +497,87 @@ void lru_gen_init_lruvec(struct lruvec *
void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
#ifdef CONFIG_MEMCG
static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}
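The +75-line hunk here carries the core of the new interface. Going by the upstream commit, it adds the per-node memcg list structure, the MEMCG_LRU_* rotation ops, and the API that mm/memcontrol.c and mm/vmscan.c hook into; a sketch of the key pieces (upstream used MEMCG_NR_GENS 2 and MEMCG_NR_BINS 8):

#define MEMCG_NR_GENS   2
#define MEMCG_NR_BINS   8

struct lru_gen_memcg {
        /* the per-node memcg generation counter */
        unsigned long seq;
        /* each memcg has one lru_gen_folio per node */
        unsigned long nr_memcgs[MEMCG_NR_GENS];
        /* per-node lru_gen_folio list for global reclaim */
        struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS];
        /* protects the above */
        spinlock_t lock;
};

enum {
        MEMCG_LRU_NOP,
        MEMCG_LRU_HEAD,
        MEMCG_LRU_TAIL,
        MEMCG_LRU_OLD,
        MEMCG_LRU_YOUNG,
};

void lru_gen_init_pgdat(struct pglist_data *pgdat);
void lru_gen_rotate_memcg(struct lruvec *lruvec, int op);
void lru_gen_online_memcg(struct mem_cgroup *memcg);
void lru_gen_offline_memcg(struct mem_cgroup *memcg);
void lru_gen_release_memcg(struct mem_cgroup *memcg);

Each memcg contributes one lru_gen_folio per node; global reclaim walks these hlist_nulls bins round-robin instead of iterating the memcg tree, which is the point of this change.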
-@@ -484,6 +577,7 @@ static inline void lru_gen_look_around(s
+@@ -494,6 +587,7 @@ static inline void lru_gen_look_around(s
}
#ifdef CONFIG_MEMCG
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}
-@@ -491,7 +585,24 @@ static inline void lru_gen_init_memcg(st
+@@ -501,7 +595,24 @@ static inline void lru_gen_init_memcg(st
static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_LRU_GEN */
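The stub hunks (+1 and +17) presumably mirror that API for !CONFIG_LRU_GEN builds as empty inlines, along these lines:

static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
{
}

static inline void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
{
}

static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
{
}

static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
{
}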
-@@ -1105,6 +1216,8 @@ typedef struct pglist_data {
+@@ -1219,6 +1330,8 @@ typedef struct pglist_data {
#ifdef CONFIG_LRU_GEN
/* kswap mm walk data */
struct lru_gen_mm_walk mm_walk;
+ struct lru_gen_memcg memcg_lru;
#endif
- ZONE_PADDING(_pad2_)
+ CACHELINE_PADDING(_pad2_);
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -549,6 +549,16 @@ static void mem_cgroup_update_tree(struc
+@@ -477,6 +477,16 @@ static void mem_cgroup_update_tree(struc
struct mem_cgroup_per_node *mz;
struct mem_cgroup_tree_per_node *mctz;
mctz = soft_limit_tree.rb_tree_per_node[nid];
if (!mctz)
return;
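The +10 lines in mem_cgroup_update_tree() bypass the soft-limit RB tree when MGLRU is enabled. In the upstream commit, an over-limit memcg is instead rotated to the head of its per-node list so global reclaim finds it first; the block likely resembled:

        if (lru_gen_enabled()) {
                struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;

                /* see the comment on MEMCG_NR_GENS */
                if (soft_limit_excess(memcg) &&
                    lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD)
                        lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);

                return;
        }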
-@@ -3433,6 +3443,9 @@ unsigned long mem_cgroup_soft_limit_recl
+@@ -3522,6 +3532,9 @@ unsigned long mem_cgroup_soft_limit_recl
+ struct mem_cgroup_tree_per_node *mctz;
unsigned long excess;
- unsigned long nr_scanned;
+ if (lru_gen_enabled())
+ return 0;
if (order > 0)
return 0;
-@@ -5321,6 +5334,7 @@ static int mem_cgroup_css_online(struct
+@@ -5382,6 +5395,7 @@ static int mem_cgroup_css_online(struct
if (unlikely(mem_cgroup_is_root(memcg)))
queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
2UL*HZ);
+ lru_gen_online_memcg(memcg);
return 0;
- }
-
-@@ -5347,6 +5361,7 @@ static void mem_cgroup_css_offline(struc
+ offline_kmem:
+ memcg_offline_kmem(memcg);
+@@ -5413,6 +5427,7 @@ static void mem_cgroup_css_offline(struc
memcg_offline_kmem(memcg);
reparent_shrinker_deferred(memcg);
wb_memcg_offline(memcg);
+ lru_gen_offline_memcg(memcg);
drain_all_stock(memcg);
-@@ -5358,6 +5373,7 @@ static void mem_cgroup_css_released(stru
+@@ -5424,6 +5439,7 @@ static void mem_cgroup_css_released(stru
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
invalidate_reclaim_iterators(memcg);
+ lru_gen_release_memcg(memcg);
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -7661,6 +7661,7 @@ static void __init free_area_init_node(i
+@@ -7957,6 +7957,7 @@ static void __init free_area_init_node(i
pgdat_set_deferred_range(pgdat);
free_area_init_core(pgdat);
+ lru_gen_init_pgdat(pgdat);
}
- void __init free_area_init_memoryless_node(int nid)
+ static void __init free_area_init_memoryless_node(int nid)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -54,6 +54,8 @@
#include <asm/tlbflush.h>
#include <asm/div64.h>
-@@ -129,11 +131,6 @@ struct scan_control {
+@@ -134,11 +136,6 @@ struct scan_control {
/* Always discard instead of demoting to lower tier memory */
unsigned int no_demotion:1;
/* Allocation order */
s8 order;
-@@ -2880,6 +2877,9 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_ca
+@@ -3160,6 +3157,9 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_ca
for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
{
struct pglist_data *pgdat = NODE_DATA(nid);
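The +3 next to these shorthand macros are almost certainly the seq-to-generation and bin-wrapping helpers used throughout the new code:

#define get_memcg_gen(seq)      ((seq) % MEMCG_NR_GENS)
#define get_memcg_bin(bin)      ((bin) % MEMCG_NR_BINS)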
-@@ -4169,8 +4169,7 @@ done:
+@@ -4443,8 +4443,7 @@ done:
if (sc->priority <= DEF_PRIORITY - 2)
wait_event_killable(lruvec->mm_state.wait,
max_seq < READ_ONCE(lrugen->max_seq));
}
VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));
-@@ -4243,8 +4242,6 @@ static void lru_gen_age_node(struct pgli
+@@ -4517,8 +4516,6 @@ static void lru_gen_age_node(struct pgli
VM_WARN_ON_ONCE(!current_is_kswapd());
/* check the order to exclude compaction-induced reclaim */
if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
return;
-@@ -4833,8 +4830,7 @@ static bool should_run_aging(struct lruv
+@@ -5107,8 +5104,7 @@ static bool should_run_aging(struct lruv
* 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
* reclaim.
*/
{
unsigned long nr_to_scan;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
-@@ -4851,10 +4847,8 @@ static unsigned long get_nr_to_scan(stru
+@@ -5125,10 +5121,8 @@ static unsigned long get_nr_to_scan(stru
if (sc->priority == DEF_PRIORITY)
return nr_to_scan;
}
static unsigned long get_nr_to_reclaim(struct scan_control *sc)
-@@ -4863,29 +4857,18 @@ static unsigned long get_nr_to_reclaim(s
+@@ -5137,29 +5131,18 @@ static unsigned long get_nr_to_reclaim(s
if (!global_reclaim(sc))
return -1;
if (sc->may_swap)
swappiness = get_swappiness(lruvec, sc);
-@@ -4895,7 +4878,7 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5169,7 +5152,7 @@ static void lru_gen_shrink_lruvec(struct
swappiness = 0;
nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
break;
delta = evict_folios(lruvec, sc, swappiness);
-@@ -4912,10 +4895,250 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5186,10 +5169,250 @@ static void lru_gen_shrink_lruvec(struct
cond_resched();
}
/******************************************************************************
* state change
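Most of the +250 here is the new global-reclaim path: lru_gen_shrink_node() hands the node to shrink_many(), which walks pgdat->memcg_lru starting at a random bin, pins each memcg with mem_cgroup_tryget(), reclaims from it via shrink_one() (not shown; it returns a MEMCG_LRU_* op), and rotates the memcg accordingly. A sketch of the walker, close to what landed upstream (get_random_u32_below() was prandom_u32_max() in older trees):

static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
{
        int op;
        int gen;
        int bin;
        int first_bin;
        struct lruvec *lruvec;
        struct lru_gen_folio *lrugen;
        struct mem_cgroup *memcg;
        const struct hlist_nulls_node *pos;
        unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);

        bin = first_bin = get_random_u32_below(MEMCG_NR_BINS);
restart:
        op = 0;
        memcg = NULL;
        gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));

        rcu_read_lock();

        hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) {
                if (op)
                        lru_gen_rotate_memcg(lruvec, op);

                mem_cgroup_put(memcg);

                lruvec = container_of(lrugen, struct lruvec, lrugen);
                memcg = lruvec_memcg(lruvec);

                if (!mem_cgroup_tryget(memcg)) {
                        op = 0;
                        memcg = NULL;
                        continue;
                }

                rcu_read_unlock();

                op = shrink_one(lruvec, sc);

                rcu_read_lock();

                if (sc->nr_reclaimed >= nr_to_reclaim)
                        break;
        }

        rcu_read_unlock();

        if (op)
                lru_gen_rotate_memcg(lruvec, op);

        mem_cgroup_put(memcg);

        if (sc->nr_reclaimed >= nr_to_reclaim)
                return;

        /* restart if raced with lru_gen_rotate_memcg() */
        if (gen != get_nulls_value(pos))
                goto restart;

        /* try the rest of the bins of the current generation */
        bin = get_memcg_bin(bin + 1);
        if (bin != first_bin)
                goto restart;
}

The nulls value of each list head encodes its generation, so a walker that raced with a rotation notices the mismatch via get_nulls_value() and restarts.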
-@@ -5370,11 +5593,11 @@ static int run_cmd(char cmd, int memcg_i
+@@ -5647,11 +5870,11 @@ static int run_cmd(char cmd, int memcg_i
if (!mem_cgroup_disabled()) {
rcu_read_lock();
rcu_read_unlock();
if (!memcg)
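The debugfs lookup here presumably switches from open-coded css_tryget() to the new helper; between the RCU markers the upstream body reads:

                memcg = mem_cgroup_from_id(memcg_id);
                if (!mem_cgroup_tryget(memcg))
                        memcg = NULL;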
-@@ -5521,6 +5744,19 @@ void lru_gen_init_lruvec(struct lruvec *
+@@ -5799,6 +6022,19 @@ void lru_gen_init_lruvec(struct lruvec *
}
#ifdef CONFIG_MEMCG
void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
INIT_LIST_HEAD(&memcg->mm_list.fifo);
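The +13 in this hunk lines up exactly with the pgdat initializer that landed upstream (11 code lines plus two blanks). Note how each hlist_nulls head is seeded with its generation number as the nulls value, which shrink_many() later uses to detect rotation races:

void lru_gen_init_pgdat(struct pglist_data *pgdat)
{
        int i, j;

        spin_lock_init(&pgdat->memcg_lru.lock);

        for (i = 0; i < MEMCG_NR_GENS; i++) {
                for (j = 0; j < MEMCG_NR_BINS; j++)
                        INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
        }
}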
-@@ -5544,7 +5780,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
+@@ -5822,7 +6058,69 @@ void lru_gen_exit_memcg(struct mem_cgrou
}
}
}
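The +62 following lru_gen_exit_memcg() would be the memcg lifecycle hooks wired up in mm/memcontrol.c above: lru_gen_online_memcg() enqueues a memcg's per-node lruvecs, lru_gen_offline_memcg() rotates them to MEMCG_LRU_OLD, and lru_gen_release_memcg() unhashes them. A minimal sketch of the online path, assuming the upstream commit's plain-spinlock locking:

void lru_gen_online_memcg(struct mem_cgroup *memcg)
{
        int gen;
        int nid;
        int bin = get_random_u32_below(MEMCG_NR_BINS);

        for_each_node(nid) {
                struct pglist_data *pgdat = NODE_DATA(nid);
                struct lruvec *lruvec = get_lruvec(memcg, nid);

                spin_lock(&pgdat->memcg_lru.lock);

                VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));

                gen = get_memcg_gen(pgdat->memcg_lru.seq);

                hlist_nulls_add_tail_rcu(&lruvec->lrugen.list,
                                         &pgdat->memcg_lru.fifo[gen][bin]);
                pgdat->memcg_lru.nr_memcgs[gen]++;

                lruvec->lrugen.gen = gen;

                spin_unlock(&pgdat->memcg_lru.lock);
        }
}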
static int __init init_lru_gen(void)
{
-@@ -5571,6 +5869,10 @@ static void lru_gen_shrink_lruvec(struct
+@@ -5849,6 +6147,10 @@ static void lru_gen_shrink_lruvec(struct
{
}
#endif /* CONFIG_LRU_GEN */
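The +4 here is presumably the matching no-op for builds without CONFIG_LRU_GEN:

static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
{
}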
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
-@@ -5584,7 +5886,7 @@ static void shrink_lruvec(struct lruvec
+@@ -5862,7 +6164,7 @@ static void shrink_lruvec(struct lruvec
bool proportional_reclaim;
struct blk_plug plug;
lru_gen_shrink_lruvec(lruvec, sc);
return;
}
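The one-line change in shrink_lruvec() restricts the per-lruvec MGLRU path to memcg reclaim; upstream the gate became:

        if (lru_gen_enabled() && !global_reclaim(sc)) {
                lru_gen_shrink_lruvec(lruvec, sc);
                return;
        }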
-@@ -5826,6 +6128,11 @@ static void shrink_node(pg_data_t *pgdat
+@@ -6105,6 +6407,11 @@ static void shrink_node(pg_data_t *pgdat
struct lruvec *target_lruvec;
bool reclaimable = false;
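And the +5 in shrink_node() is the mirror-image gate that sends global reclaim down the new per-node path instead; upstream:

        if (lru_gen_enabled() && global_reclaim(sc)) {
                lru_gen_shrink_node(pgdat, sc);
                return;
        }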