kernel: bump 5.10 to 5.10.27
[openwrt/staging/hauke.git] / target / linux / generic / backport-5.10 / 600-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch
1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Mon, 8 Feb 2021 11:34:08 -0800
3 Subject: [PATCH] net: extract napi poll functionality to __napi_poll()
4
5 This commit introduces a new function __napi_poll() which does the main
6 logic of the existing napi_poll() function, and will be called by other
7 functions in later commits.
8 This idea and implementation were done by Felix Fietkau <nbd@nbd.name> and
9 are proposed as part of the patch to move napi work to work_queue
10 context.
11 This commit by itself is a code restructure.
12
13 Signed-off-by: Felix Fietkau <nbd@nbd.name>
14 Signed-off-by: Wei Wang <weiwan@google.com>
15 Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
16 Signed-off-by: David S. Miller <davem@davemloft.net>
17 ---
18
19 --- a/net/core/dev.c
20 +++ b/net/core/dev.c
21 @@ -6752,15 +6752,10 @@ void __netif_napi_del(struct napi_struct
22 }
23 EXPORT_SYMBOL(__netif_napi_del);
24
25 -static int napi_poll(struct napi_struct *n, struct list_head *repoll)
26 +static int __napi_poll(struct napi_struct *n, bool *repoll)
27 {
28 - void *have;
29 int work, weight;
30
31 - list_del_init(&n->poll_list);
32 -
33 - have = netpoll_poll_lock(n);
34 -
35 weight = n->weight;
36
37 /* This NAPI_STATE_SCHED test is for avoiding a race
38 @@ -6780,7 +6775,7 @@ static int napi_poll(struct napi_struct
39 n->poll, work, weight);
40
41 if (likely(work < weight))
42 - goto out_unlock;
43 + return work;
44
45 /* Drivers must not modify the NAPI state if they
46 * consume the entire weight. In such cases this code
47 @@ -6789,7 +6784,7 @@ static int napi_poll(struct napi_struct
48 */
49 if (unlikely(napi_disable_pending(n))) {
50 napi_complete(n);
51 - goto out_unlock;
52 + return work;
53 }
54
55 if (n->gro_bitmask) {
56 @@ -6807,12 +6802,29 @@ static int napi_poll(struct napi_struct
57 if (unlikely(!list_empty(&n->poll_list))) {
58 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
59 n->dev ? n->dev->name : "backlog");
60 - goto out_unlock;
61 + return work;
62 }
63
64 - list_add_tail(&n->poll_list, repoll);
65 + *repoll = true;
66 +
67 + return work;
68 +}
69 +
70 +static int napi_poll(struct napi_struct *n, struct list_head *repoll)
71 +{
72 + bool do_repoll = false;
73 + void *have;
74 + int work;
75 +
76 + list_del_init(&n->poll_list);
77 +
78 + have = netpoll_poll_lock(n);
79 +
80 + work = __napi_poll(n, &do_repoll);
81 +
82 + if (do_repoll)
83 + list_add_tail(&n->poll_list, repoll);
84
85 -out_unlock:
86 netpoll_poll_unlock(have);
87
88 return work;