target/linux/generic/pending-6.1/760-net-core-add-optional-threading-for-backlog-processi.patch
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 16 Feb 2023 18:39:04 +0100
Subject: [PATCH] net/core: add optional threading for backlog processing

When dealing with few flows or an imbalance on CPU utilization, static RPS
CPU assignment can be too inflexible. Add support for enabling threaded NAPI
for backlog processing in order to allow the scheduler to better balance
processing. This helps better spread the load across idle CPUs.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

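Usage note (commentary, not part of the submitted change): the sysctl_net_core.c
hunk below registers the knob in net_core_table under the procname
"backlog_threaded", so on a patched kernel it is expected to appear as
net.core.backlog_threaded. A minimal userspace sketch for switching backlog
processing over to the per-CPU "napi/backlog-<cpu>" kthreads, assuming the
usual /proc/sys layout, could look like this:

#include <stdio.h>

int main(void)
{
	/* Path follows from net_core_table + .procname = "backlog_threaded".
	 * Writing "1" lets backlog_set_threaded() spawn the napi/backlog-N
	 * kthreads and set NAPIF_STATE_THREADED on each per-CPU backlog NAPI;
	 * writing "0" clears the flag again (the kthreads are kept around).
	 */
	FILE *f = fopen("/proc/sys/net/core/backlog_threaded", "w");

	if (!f) {
		perror("backlog_threaded");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}

The same toggle can of course be done with `sysctl -w net.core.backlog_threaded=1`.
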
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -543,6 +543,7 @@ static inline bool napi_complete(struct
 }
 
 int dev_set_threaded(struct net_device *dev, bool threaded);
+int backlog_set_threaded(bool threaded);
 
 /**
  *	napi_disable - prevent NAPI from scheduling
@@ -3152,6 +3153,7 @@ struct softnet_data {
 	unsigned int		processed;
 	unsigned int		time_squeeze;
 	unsigned int		received_rps;
+	unsigned int		process_queue_empty;
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
 #endif
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4625,7 +4625,7 @@ static int napi_schedule_rps(struct soft
 	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
 
 #ifdef CONFIG_RPS
-	if (sd != mysd) {
+	if (sd != mysd && !test_bit(NAPI_STATE_THREADED, &sd->backlog.state)) {
 		sd->rps_ipi_next = mysd->rps_ipi_list;
 		mysd->rps_ipi_list = sd;
 
@@ -5806,6 +5806,8 @@ static DEFINE_PER_CPU(struct work_struct
 /* Network device is going away, flush any packets still pending */
 static void flush_backlog(struct work_struct *work)
 {
+	unsigned int process_queue_empty;
+	bool threaded, flush_processq;
 	struct sk_buff *skb, *tmp;
 	struct softnet_data *sd;
 
@@ -5820,8 +5822,17 @@ static void flush_backlog(struct work_st
 			input_queue_head_incr(sd);
 		}
 	}
+
+	threaded = test_bit(NAPI_STATE_THREADED, &sd->backlog.state);
+	flush_processq = threaded &&
+			 !skb_queue_empty_lockless(&sd->process_queue);
+	if (flush_processq)
+		process_queue_empty = sd->process_queue_empty;
 	rps_unlock_irq_enable(sd);
 
+	if (threaded)
+		goto out;
+
 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
 			__skb_unlink(skb, &sd->process_queue);
@@ -5829,7 +5840,16 @@ static void flush_backlog(struct work_st
 			input_queue_head_incr(sd);
 		}
 	}
+
+out:
 	local_bh_enable();
+
+	while (flush_processq) {
+		msleep(1);
+		rps_lock_irq_disable(sd);
+		flush_processq = process_queue_empty == sd->process_queue_empty;
+		rps_unlock_irq_enable(sd);
+	}
 }
 
 static bool flush_required(int cpu)
@@ -5961,6 +5981,7 @@ static int process_backlog(struct napi_s
 		}
 
 		rps_lock_irq_disable(sd);
+		sd->process_queue_empty++;
 		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
 			 * Inline a custom version of __napi_complete().
@@ -5970,7 +5991,8 @@ static int process_backlog(struct napi_s
 			 * We can use a plain write instead of clear_bit(),
 			 * and we dont need an smp_mb() memory barrier.
 			 */
-			napi->state = 0;
+			napi->state &= ~(NAPIF_STATE_SCHED |
+					 NAPIF_STATE_SCHED_THREADED);
 			again = false;
 		} else {
 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
@@ -6386,6 +6408,55 @@ int dev_set_threaded(struct net_device *
 }
 EXPORT_SYMBOL(dev_set_threaded);
 
+int backlog_set_threaded(bool threaded)
+{
+	static bool backlog_threaded;
+	int err = 0;
+	int i;
+
+	if (backlog_threaded == threaded)
+		return 0;
+
+	for_each_possible_cpu(i) {
+		struct softnet_data *sd = &per_cpu(softnet_data, i);
+		struct napi_struct *n = &sd->backlog;
+
+		if (n->thread)
+			continue;
+		n->thread = kthread_run(napi_threaded_poll, n, "napi/backlog-%d", i);
+		if (IS_ERR(n->thread)) {
+			err = PTR_ERR(n->thread);
+			pr_err("kthread_run failed with err %d\n", err);
+			n->thread = NULL;
+			threaded = false;
+			break;
+		}
+
+	}
+
+	backlog_threaded = threaded;
+
+	/* Make sure kthread is created before THREADED bit
+	 * is set.
+	 */
+	smp_mb__before_atomic();
+
+	for_each_possible_cpu(i) {
+		struct softnet_data *sd = &per_cpu(softnet_data, i);
+		struct napi_struct *n = &sd->backlog;
+		unsigned long flags;
+
+		rps_lock_irqsave(sd, &flags);
+		if (threaded)
+			n->state |= NAPIF_STATE_THREADED;
+		else
+			n->state &= ~NAPIF_STATE_THREADED;
+		rps_unlock_irq_restore(sd, &flags);
+	}
+
+	return err;
+}
+
 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 			   int (*poll)(struct napi_struct *, int), int weight)
 {
@@ -11168,6 +11239,9 @@ static int dev_cpu_dead(unsigned int old
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_enable();
 
+	if (test_bit(NAPI_STATE_THREADED, &oldsd->backlog.state))
+		return 0;
+
 #ifdef CONFIG_RPS
 	remsd = oldsd->rps_ipi_list;
 	oldsd->rps_ipi_list = NULL;
@@ -11480,6 +11554,7 @@ static int __init net_dev_init(void)
 		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
 		spin_lock_init(&sd->defer_lock);
 
+		INIT_LIST_HEAD(&sd->backlog.poll_list);
 		init_gro_hash(&sd->backlog);
 		sd->backlog.poll = process_backlog;
 		sd->backlog.weight = weight_p;
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -29,6 +29,7 @@ static int int_3600 = 3600;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
 static int max_skb_frags = MAX_SKB_FRAGS;
+static int backlog_threaded;
 
 static int net_msg_warn;	/* Unused, but still a sysctl */
 
@@ -112,6 +113,23 @@ static int rps_sock_flow_sysctl(struct c
 }
 #endif /* CONFIG_RPS */
 
+static int backlog_threaded_sysctl(struct ctl_table *table, int write,
+				   void *buffer, size_t *lenp, loff_t *ppos)
+{
+	static DEFINE_MUTEX(backlog_threaded_mutex);
+	int ret;
+
+	mutex_lock(&backlog_threaded_mutex);
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (write && !ret)
+		ret = backlog_set_threaded(backlog_threaded);
+
+	mutex_unlock(&backlog_threaded_mutex);
+
+	return ret;
+}
+
 #ifdef CONFIG_NET_FLOW_LIMIT
 static DEFINE_MUTEX(flow_limit_update_mutex);
 
@@ -473,6 +491,15 @@ static struct ctl_table net_core_table[]
 		.proc_handler	= rps_sock_flow_sysctl
 	},
 #endif
+	{
+		.procname	= "backlog_threaded",
+		.data		= &backlog_threaded,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= backlog_threaded_sysctl,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE
+	},
 #ifdef CONFIG_NET_FLOW_LIMIT
 	{
 		.procname	= "flow_limit_cpu_bitmap",