commit ad6fa1e1ab1b8164f1ba296b1b4dc556a483bcad
Author: Joe Damato <jdamato@fastly.com>
Date:   Tue Mar 1 23:55:48 2022 -0800

    page_pool: Add recycle stats

    Add per-cpu stats tracking page pool recycling events:
            - cached: recycling placed page in the page pool cache
            - cache_full: page pool cache was full
            - ring: page placed into the ptr ring
            - ring_full: page released from page pool because the ptr ring was full
            - released_refcnt: page released (and not recycled) because refcnt > 1

    Signed-off-by: Joe Damato <jdamato@fastly.com>
    Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
    Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
    Signed-off-by: David S. Miller <davem@davemloft.net>

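For illustration only, a consumer of the per-cpu counters introduced by this
patch could fold them into a single snapshot roughly as sketched below. This
helper is not part of the patch (a follow-up change in the same series adds a
proper page_pool_get_stats() aggregation helper); the function name here is
hypothetical and the code assumes normal kernel context.

	/* Illustrative sketch, not applied by this patch: sum the per-cpu
	 * recycle counters into one snapshot. The counters are bumped with
	 * this_cpu_inc(), so the result is an approximate view while the
	 * pool is actively recycling pages.
	 */
	static void page_pool_sum_recycle_stats(const struct page_pool *pool,
						struct page_pool_recycle_stats *total)
	{
		int cpu;

		memset(total, 0, sizeof(*total));

		for_each_possible_cpu(cpu) {
			const struct page_pool_recycle_stats *s =
				per_cpu_ptr(pool->recycle_stats, cpu);

			total->cached		+= s->cached;
			total->cache_full	+= s->cache_full;
			total->ring		+= s->ring;
			total->ring_full	+= s->ring_full;
			total->released_refcnt	+= s->released_refcnt;
		}
	}
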
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -93,6 +93,18 @@ struct page_pool_alloc_stats {
 	u64 refill; /* allocations via successful refill */
 	u64 waive;  /* failed refills due to numa zone mismatch */
 };
+
+struct page_pool_recycle_stats {
+	u64 cached;	/* recycling placed page in the cache. */
+	u64 cache_full; /* cache was full */
+	u64 ring;	/* recycling placed page back into ptr ring */
+	u64 ring_full;	/* page was released from page-pool because
+			 * PTR ring was full.
+			 */
+	u64 released_refcnt; /* page released because of elevated
+			      * refcnt
+			      */
+};
 #endif
 
 struct page_pool {
@@ -136,6 +148,10 @@ struct page_pool {
 	 */
 	struct ptr_ring ring;
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* recycle stats are per-cpu to avoid locking */
+	struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
 	atomic_t pages_state_release_cnt;
 
 	/* A page_pool is strictly tied to a single RX-queue being
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -29,8 +29,15 @@
 #ifdef CONFIG_PAGE_POOL_STATS
 /* alloc_stat_inc is intended to be used in softirq context */
 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+/* recycle_stat_inc is safe to use when preemption is possible. */
+#define recycle_stat_inc(pool, __stat)						\
+	do {									\
+		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
+		this_cpu_inc(s->__stat);					\
+	} while (0)
 #else
 #define alloc_stat_inc(pool, __stat)
+#define recycle_stat_inc(pool, __stat)
 #endif
 
 static int page_pool_init(struct page_pool *pool,
@@ -80,6 +87,12 @@ static int page_pool_init(struct page_po
 	    pool->p.flags & PP_FLAG_PAGE_FRAG)
 		return -EINVAL;
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+	if (!pool->recycle_stats)
+		return -ENOMEM;
+#endif
+
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 		return -ENOMEM;
 
@@ -412,7 +425,12 @@ static bool page_pool_recycle_in_ring(st
 	else
 		ret = ptr_ring_produce_bh(&pool->ring, page);
 
-	return (ret == 0) ? true : false;
+	if (!ret) {
+		recycle_stat_inc(pool, ring);
+		return true;
+	}
+
+	return false;
 }
 
 /* Only allow direct recycling in special circumstances, into the
@@ -423,11 +441,14 @@ static bool page_pool_recycle_in_ring(st
 static bool page_pool_recycle_in_cache(struct page *page,
 				       struct page_pool *pool)
 {
-	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
+	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
+		recycle_stat_inc(pool, cache_full);
 		return false;
+	}
 
 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
 	pool->alloc.cache[pool->alloc.count++] = page;
+	recycle_stat_inc(pool, cached);
 	return true;
 }
 
@@ -482,6 +503,7 @@ __page_pool_put_page(struct page_pool *p
 	 * doing refcnt based recycle tricks, meaning another process
 	 * will be invoking put_page.
 	 */
+	recycle_stat_inc(pool, released_refcnt);
 	/* Do not replace this with page_pool_return_page() */
 	page_pool_release_page(pool, page);
 	put_page(page);
@@ -495,6 +517,7 @@ void page_pool_put_page(struct page_pool
 	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
 	if (page && !page_pool_recycle_in_ring(pool, page)) {
 		/* Cache full, fallback to free pages */
+		recycle_stat_inc(pool, ring_full);
 		page_pool_return_page(pool, page);
 	}
 }
@@ -641,6 +664,9 @@ static void page_pool_free(struct page_p
 	if (pool->p.flags & PP_FLAG_DMA_MAP)
 		put_device(pool->p.dev);
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	free_percpu(pool->recycle_stats);
+#endif
 	kfree(pool);
 }
 