kernel: update 5.10 flow offload patches
openwrt/staging/rmilecki.git: target/linux/generic/pending-5.10/640-13-netfilter-flowtable-add-pppoe-support.patch
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Mon, 1 Mar 2021 23:52:49 +0100
Subject: [PATCH] netfilter: flowtable: add pppoe support

---

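Note: this patch teaches the flowtable software fast path to recognise PPPoE
(ETH_P_PPP_SES) encapsulation alongside 802.1Q: the PPPoE session ID is added
to the flow tuple, the PPPoE header is stripped on decap, and
nft_dev_path_info() records the PPPoE peer address for the xmit path.

As a rough usage sketch (not part of this patch), a ruleset that offloads
forwarded TCP/UDP flows across a PPPoE WAN could look like the following;
the device names lan0 and pppoe-wan are placeholders:

  table inet fastpath {
  	flowtable ft {
  		hook ingress priority 0; devices = { lan0, pppoe-wan };
  	}
  	chain forward {
  		type filter hook forward priority 0; policy accept;
  		meta l4proto { tcp, udp } flow offload @ft
  	}
  }
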
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1453,7 +1453,7 @@ static void ppp_dev_priv_destructor(stru
 static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
 				 struct net_device_path *path)
 {
-	struct ppp *ppp = netdev_priv(path->dev);
+	struct ppp *ppp = netdev_priv(ctx->dev);
 	struct ppp_channel *chan;
 	struct channel *pch;
 
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -987,6 +987,7 @@ static int pppoe_fill_forward_path(struc
 	path->type = DEV_PATH_PPPOE;
 	path->encap.proto = htons(ETH_P_PPP_SES);
 	path->encap.id = be16_to_cpu(po->num);
+	memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
 	path->dev = ctx->dev;
 	ctx->dev = dev;
 
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -848,6 +848,7 @@ struct net_device_path {
 		struct {
 			u16 id;
 			__be16 proto;
+			u8 h_dest[ETH_ALEN];
 		} encap;
 		struct {
 			enum {
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -7,6 +7,9 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
@@ -162,6 +165,8 @@ static bool ip_has_options(unsigned int
 static void nf_flow_tuple_encap(struct sk_buff *skb,
 				struct flow_offload_tuple *tuple)
 {
+	struct vlan_ethhdr *veth;
+	struct pppoe_hdr *phdr;
 	int i = 0;
 
 	if (skb_vlan_tag_present(skb)) {
@@ -169,23 +174,35 @@ static void nf_flow_tuple_encap(struct s
 		tuple->encap[i].proto = skb->vlan_proto;
 		i++;
 	}
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
-
+	switch (skb->protocol) {
+	case htons(ETH_P_8021Q):
+		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
 		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
 		tuple->encap[i].proto = skb->protocol;
+		break;
+	case htons(ETH_P_PPP_SES):
+		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
+		tuple->encap[i].id = ntohs(phdr->sid);
+		tuple->encap[i].proto = skb->protocol;
+		break;
 	}
 }
 
 static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
-			    struct flow_offload_tuple *tuple)
+			    struct flow_offload_tuple *tuple, u32 *nhoff)
 {
 	unsigned int thoff, hdrsize, offset = 0;
 	struct flow_ports *ports;
 	struct iphdr *iph;
 
-	if (skb->protocol == htons(ETH_P_8021Q))
+	switch (skb->protocol) {
+	case htons(ETH_P_8021Q):
 		offset += VLAN_HLEN;
+		break;
+	case htons(ETH_P_PPP_SES):
+		offset += PPPOE_SES_HLEN;
+		break;
+	}
 
 	if (!pskb_may_pull(skb, sizeof(*iph) + offset))
 		return -1;
@@ -226,6 +243,7 @@ static int nf_flow_tuple_ip(struct sk_bu
 	tuple->l4proto = iph->protocol;
 	tuple->iifidx = dev->ifindex;
 	nf_flow_tuple_encap(skb, tuple);
+	*nhoff = offset;
 
 	return 0;
 }
@@ -270,14 +288,36 @@ static unsigned int nf_flow_xmit_xfrm(st
 	return NF_STOLEN;
 }
 
+static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+{
+	__be16 proto;
+
+	proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+			     sizeof(struct pppoe_hdr)));
+	switch (proto) {
+	case htons(PPP_IP):
+		return htons(ETH_P_IP);
+	case htons(PPP_IPV6):
+		return htons(ETH_P_IPV6);
+	}
+
+	return 0;
+}
+
 static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto)
 {
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		struct vlan_ethhdr *veth;
+	struct vlan_ethhdr *veth;
 
+	switch (skb->protocol) {
+	case htons(ETH_P_8021Q):
 		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
 		if (veth->h_vlan_encapsulated_proto == proto)
 			return true;
+		break;
+	case htons(ETH_P_PPP_SES):
+		if (nf_flow_pppoe_proto(skb) == proto)
+			return true;
+		break;
 	}
 
 	return false;
@@ -294,12 +334,18 @@ static void nf_flow_encap_pop(struct sk_
 			__vlan_hwaccel_clear_tag(skb);
 			continue;
 		}
-		if (skb->protocol == htons(ETH_P_8021Q)) {
+		switch (skb->protocol) {
+		case htons(ETH_P_8021Q):
 			vlan_hdr = (struct vlan_hdr *)skb->data;
 			__skb_pull(skb, VLAN_HLEN);
 			vlan_set_encap_proto(skb, vlan_hdr);
 			skb_reset_network_header(skb);
 			break;
+		case htons(ETH_P_PPP_SES):
+			skb->protocol = nf_flow_pppoe_proto(skb);
+			skb_pull(skb, PPPOE_SES_HLEN);
+			skb_reset_network_header(skb);
+			break;
 		}
 	}
 }
@@ -343,7 +389,7 @@ nf_flow_offload_ip_hook(void *priv, stru
 	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP)))
 		return NF_ACCEPT;
 
-	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
+	if (nf_flow_tuple_ip(skb, state->in, &tuple, &offset) < 0)
 		return NF_ACCEPT;
 
 	tuplehash = flow_offload_lookup(flow_table, &tuple);
@@ -357,9 +403,6 @@ nf_flow_offload_ip_hook(void *priv, stru
 	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
 		return NF_ACCEPT;
 
-	if (skb->protocol == htons(ETH_P_8021Q))
-		offset += VLAN_HLEN;
-
 	if (skb_try_make_writable(skb, sizeof(*iph) + offset))
 		return NF_DROP;
 
@@ -543,14 +586,20 @@ static int nf_flow_nat_ipv6(const struct
 }
 
 static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
-			      struct flow_offload_tuple *tuple)
+			      struct flow_offload_tuple *tuple, u32 *nhoff)
 {
 	unsigned int thoff, hdrsize, offset = 0;
 	struct flow_ports *ports;
 	struct ipv6hdr *ip6h;
 
-	if (skb->protocol == htons(ETH_P_8021Q))
+	switch (skb->protocol) {
+	case htons(ETH_P_8021Q):
 		offset += VLAN_HLEN;
+		break;
+	case htons(ETH_P_PPP_SES):
+		offset += PPPOE_SES_HLEN;
+		break;
+	}
 
 	if (!pskb_may_pull(skb, sizeof(*ip6h) + offset))
 		return -1;
@@ -586,6 +635,7 @@ static int nf_flow_tuple_ipv6(struct sk_
 	tuple->l4proto = ip6h->nexthdr;
 	tuple->iifidx = dev->ifindex;
 	nf_flow_tuple_encap(skb, tuple);
+	*nhoff = offset;
 
 	return 0;
 }
@@ -611,7 +661,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
 	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6)))
 		return NF_ACCEPT;
 
-	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
+	if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &offset) < 0)
 		return NF_ACCEPT;
 
 	tuplehash = flow_offload_lookup(flow_table, &tuple);
@@ -625,9 +675,6 @@ nf_flow_offload_ipv6_hook(void *priv, st
 	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
 		return NF_ACCEPT;
 
-	if (skb->protocol == htons(ETH_P_8021Q))
-		offset += VLAN_HLEN;
-
 	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
 	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, sizeof(*ip6h)))
 		return NF_ACCEPT;
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -90,6 +90,7 @@ static void nft_dev_path_info(const stru
 	switch (path->type) {
 	case DEV_PATH_ETHERNET:
 	case DEV_PATH_VLAN:
+	case DEV_PATH_PPPOE:
 		info->indev = path->dev;
 		if (is_zero_ether_addr(info->h_source))
 			memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
@@ -97,7 +98,7 @@ static void nft_dev_path_info(const stru
 		if (path->type == DEV_PATH_ETHERNET)
 			break;
 
-		/* DEV_PATH_VLAN */
+		/* DEV_PATH_VLAN and DEV_PATH_PPPOE */
 		if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
 			info->indev = NULL;
 			break;
@@ -106,6 +107,8 @@ static void nft_dev_path_info(const stru
 		info->encap[info->num_encaps].id = path->encap.id;
 		info->encap[info->num_encaps].proto = path->encap.proto;
 		info->num_encaps++;
+		if (path->type == DEV_PATH_PPPOE)
+			memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
 		break;
 	case DEV_PATH_BRIDGE:
 		if (is_zero_ether_addr(info->h_source))