kernel: bump 5.15 to 5.15.116
openwrt/openwrt.git: target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch
From ad6fa1e1ab1b8164f1ba296b1b4dc556a483bcad Mon Sep 17 00:00:00 2001
From: Joe Damato <jdamato@fastly.com>
Date: Tue, 1 Mar 2022 23:55:48 -0800
Subject: [PATCH 2/3] page_pool: Add recycle stats

Add per-cpu stats tracking page pool recycling events (a reader-side
sketch follows the list below):
	- cached: recycling placed page in the page pool cache
	- cache_full: page pool cache was full
	- ring: page placed into the ptr ring
	- ring_full: page released from page pool because the ptr ring was full
	- released_refcnt: page released (and not recycled) because refcnt > 1

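Not part of this patch: a minimal, hypothetical sketch of how a reader
of these counters could fold the per-cpu values into one snapshot. The
helper name page_pool_recycle_stats_sum() is invented here purely for
illustration; it relies only on the recycle_stats field and struct
added below plus the standard per-cpu accessors.

	#ifdef CONFIG_PAGE_POOL_STATS
	/* Hypothetical reader-side helper (illustration only): sum the
	 * per-cpu recycle counters into a single snapshot.
	 */
	static void page_pool_recycle_stats_sum(struct page_pool *pool,
						struct page_pool_recycle_stats *total)
	{
		int cpu;

		memset(total, 0, sizeof(*total));
		for_each_possible_cpu(cpu) {
			const struct page_pool_recycle_stats *s =
				per_cpu_ptr(pool->recycle_stats, cpu);

			total->cached           += s->cached;
			total->cache_full       += s->cache_full;
			total->ring             += s->ring;
			total->ring_full        += s->ring_full;
			total->released_refcnt  += s->released_refcnt;
		}
	}
	#endif

Keeping the counters per-cpu means the fast-path increments added in
the hunks below need no locking; the cost is that any reader has to
sum across CPUs as sketched above.
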
Signed-off-by: Joe Damato <jdamato@fastly.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 include/net/page_pool.h | 16 ++++++++++++++++
 net/core/page_pool.c    | 30 ++++++++++++++++++++++++++++--
 2 files changed, 44 insertions(+), 2 deletions(-)

--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -93,6 +93,18 @@ struct page_pool_alloc_stats {
 	u64 refill; /* allocations via successful refill */
 	u64 waive;  /* failed refills due to numa zone mismatch */
 };
+
+struct page_pool_recycle_stats {
+	u64 cached;	/* recycling placed page in the cache. */
+	u64 cache_full; /* cache was full */
+	u64 ring;	/* recycling placed page back into ptr ring */
+	u64 ring_full;	/* page was released from page-pool because
+			 * PTR ring was full.
+			 */
+	u64 released_refcnt; /* page released because of elevated
+			      * refcnt
+			      */
+};
 #endif
 
 struct page_pool {
@@ -136,6 +148,10 @@ struct page_pool {
 	 */
 	struct ptr_ring ring;
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* recycle stats are per-cpu to avoid locking */
+	struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
 	atomic_t pages_state_release_cnt;
 
 	/* A page_pool is strictly tied to a single RX-queue being
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -52,8 +52,15 @@ static void page_pool_producer_unlock(st
 #ifdef CONFIG_PAGE_POOL_STATS
 /* alloc_stat_inc is intended to be used in softirq context */
 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+/* recycle_stat_inc is safe to use when preemption is possible. */
+#define recycle_stat_inc(pool, __stat)						\
+	do {									\
+		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
+		this_cpu_inc(s->__stat);					\
+	} while (0)
 #else
 #define alloc_stat_inc(pool, __stat)
+#define recycle_stat_inc(pool, __stat)
 #endif
 
 static int page_pool_init(struct page_pool *pool,
@@ -103,6 +110,12 @@ static int page_pool_init(struct page_po
 	    pool->p.flags & PP_FLAG_PAGE_FRAG)
 		return -EINVAL;
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+	if (!pool->recycle_stats)
+		return -ENOMEM;
+#endif
+
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 		return -ENOMEM;
 
@@ -435,7 +448,12 @@ static bool page_pool_recycle_in_ring(st
 	else
 		ret = ptr_ring_produce_bh(&pool->ring, page);
 
-	return (ret == 0) ? true : false;
+	if (!ret) {
+		recycle_stat_inc(pool, ring);
+		return true;
+	}
+
+	return false;
 }
 
 /* Only allow direct recycling in special circumstances, into the
@@ -446,11 +464,14 @@ static bool page_pool_recycle_in_ring(st
 static bool page_pool_recycle_in_cache(struct page *page,
 				       struct page_pool *pool)
 {
-	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
+	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
+		recycle_stat_inc(pool, cache_full);
 		return false;
+	}
 
 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
 	pool->alloc.cache[pool->alloc.count++] = page;
+	recycle_stat_inc(pool, cached);
 	return true;
 }
 
@@ -505,6 +526,7 @@ __page_pool_put_page(struct page_pool *p
 	 * doing refcnt based recycle tricks, meaning another process
 	 * will be invoking put_page.
 	 */
+	recycle_stat_inc(pool, released_refcnt);
 	/* Do not replace this with page_pool_return_page() */
 	page_pool_release_page(pool, page);
 	put_page(page);
@@ -518,6 +540,7 @@ void page_pool_put_page(struct page_pool
 	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
 	if (page && !page_pool_recycle_in_ring(pool, page)) {
 		/* Cache full, fallback to free pages */
+		recycle_stat_inc(pool, ring_full);
 		page_pool_return_page(pool, page);
 	}
 }
@@ -665,6 +688,9 @@ static void page_pool_free(struct page_p
 	if (pool->p.flags & PP_FLAG_DMA_MAP)
 		put_device(pool->p.dev);
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	free_percpu(pool->recycle_stats);
+#endif
 	kfree(pool);
 }
 