kernel: backport flow offload pppoe fix
[openwrt/openwrt.git] target/linux/generic/hack-5.15/650-netfilter-add-xt_FLOWOFFLOAD-target.patch
1 From: Felix Fietkau <nbd@nbd.name>
2 Date: Tue, 20 Feb 2018 15:56:02 +0100
3 Subject: [PATCH] netfilter: add xt_FLOWOFFLOAD target
4
5 Signed-off-by: Felix Fietkau <nbd@nbd.name>
6 ---
7 create mode 100644 net/netfilter/xt_FLOWOFFLOAD.c
8
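Usage sketch (illustrative, not part of the patch): the Kconfig hunks below rearrange the "endif # NF_TABLES" guards and drop the "depends on NF_TABLES" line so that the flow table core (NF_FLOW_TABLE) can be enabled without nf_tables, and the new xt_FLOWOFFLOAD target then lets an iptables ruleset push established connections into a flow table from the forward path. Assuming the matching userspace libxt_FLOWOFFLOAD extension is installed (its --hw flag, selecting the hardware-offload flow table, is an assumption of this sketch), rules could look like:

  # software fast path for established, forwarded flows
  iptables -I FORWARD -m conntrack --ctstate ESTABLISHED,RELATED -j FLOWOFFLOAD
  # same, but request hardware offload via the NF_FLOWTABLE_HW_OFFLOAD table
  iptables -I FORWARD -m conntrack --ctstate ESTABLISHED,RELATED -j FLOWOFFLOAD --hw

A summary kernel config fragment listing the Kconfig symbols involved follows at the end of the patch.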
9 --- a/net/ipv4/netfilter/Kconfig
10 +++ b/net/ipv4/netfilter/Kconfig
11 @@ -56,8 +56,6 @@ config NF_TABLES_ARP
12 help
13 This option enables the ARP support for nf_tables.
14
15 -endif # NF_TABLES
16 -
17 config NF_FLOW_TABLE_IPV4
18 tristate "Netfilter flow table IPv4 module"
19 depends on NF_FLOW_TABLE
20 @@ -66,6 +64,8 @@ config NF_FLOW_TABLE_IPV4
21
22 To compile it as a module, choose M here.
23
24 +endif # NF_TABLES
25 +
26 config NF_DUP_IPV4
27 tristate "Netfilter IPv4 packet duplication to alternate destination"
28 depends on !NF_CONNTRACK || NF_CONNTRACK
29 --- a/net/ipv6/netfilter/Kconfig
30 +++ b/net/ipv6/netfilter/Kconfig
31 @@ -45,7 +45,6 @@ config NFT_FIB_IPV6
32 multicast or blackhole.
33
34 endif # NF_TABLES_IPV6
35 -endif # NF_TABLES
36
37 config NF_FLOW_TABLE_IPV6
38 tristate "Netfilter flow table IPv6 module"
39 @@ -55,6 +54,8 @@ config NF_FLOW_TABLE_IPV6
40
41 To compile it as a module, choose M here.
42
43 +endif # NF_TABLES
44 +
45 config NF_DUP_IPV6
46 tristate "Netfilter IPv6 packet duplication to alternate destination"
47 depends on !NF_CONNTRACK || NF_CONNTRACK
48 --- a/net/netfilter/Kconfig
49 +++ b/net/netfilter/Kconfig
50 @@ -707,8 +707,6 @@ config NFT_REJECT_NETDEV
51
52 endif # NF_TABLES_NETDEV
53
54 -endif # NF_TABLES
55 -
56 config NF_FLOW_TABLE_INET
57 tristate "Netfilter flow table mixed IPv4/IPv6 module"
58 depends on NF_FLOW_TABLE
59 @@ -717,11 +715,12 @@ config NF_FLOW_TABLE_INET
60
61 To compile it as a module, choose M here.
62
63 +endif # NF_TABLES
64 +
65 config NF_FLOW_TABLE
66 tristate "Netfilter flow table module"
67 depends on NETFILTER_INGRESS
68 depends on NF_CONNTRACK
69 - depends on NF_TABLES
70 help
71 This option adds the flow table core infrastructure.
72
73 @@ -1010,6 +1009,15 @@ config NETFILTER_XT_TARGET_NOTRACK
74 depends on NETFILTER_ADVANCED
75 select NETFILTER_XT_TARGET_CT
76
77 +config NETFILTER_XT_TARGET_FLOWOFFLOAD
78 + tristate '"FLOWOFFLOAD" target support'
79 + depends on NF_FLOW_TABLE
80 + depends on NETFILTER_INGRESS
81 + help
82 + This option adds a `FLOWOFFLOAD' target, which uses the nf_flow_offload
83 + module to speed up processing of packets by bypassing the usual
84 + netfilter chains.
85 +
86 config NETFILTER_XT_TARGET_RATEEST
87 tristate '"RATEEST" target support'
88 depends on NETFILTER_ADVANCED
89 --- a/net/netfilter/Makefile
90 +++ b/net/netfilter/Makefile
91 @@ -143,6 +143,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIF
92 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
93 obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
94 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
95 +obj-$(CONFIG_NETFILTER_XT_TARGET_FLOWOFFLOAD) += xt_FLOWOFFLOAD.o
96 obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
97 obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
98 obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
99 --- /dev/null
100 +++ b/net/netfilter/xt_FLOWOFFLOAD.c
101 @@ -0,0 +1,702 @@
102 +/*
103 + * Copyright (C) 2018-2021 Felix Fietkau <nbd@nbd.name>
104 + *
105 + * This program is free software; you can redistribute it and/or modify
106 + * it under the terms of the GNU General Public License version 2 as
107 + * published by the Free Software Foundation.
108 + */
109 +#include <linux/module.h>
110 +#include <linux/init.h>
111 +#include <linux/netfilter.h>
112 +#include <linux/netfilter/xt_FLOWOFFLOAD.h>
113 +#include <linux/if_vlan.h>
114 +#include <net/ip.h>
115 +#include <net/netfilter/nf_conntrack.h>
116 +#include <net/netfilter/nf_conntrack_extend.h>
117 +#include <net/netfilter/nf_conntrack_helper.h>
118 +#include <net/netfilter/nf_flow_table.h>
119 +
120 +struct xt_flowoffload_hook {
121 + struct hlist_node list;
122 + struct nf_hook_ops ops;
123 + struct net *net;
124 + bool registered;
125 + bool used;
126 +};
127 +
128 +struct xt_flowoffload_table {
129 + struct nf_flowtable ft;
130 + struct hlist_head hooks;
131 + struct delayed_work work;
132 +};
133 +
134 +struct nf_forward_info {
135 + const struct net_device *indev;
136 + const struct net_device *outdev;
137 + const struct net_device *hw_outdev;
138 + struct id {
139 + __u16 id;
140 + __be16 proto;
141 + } encap[NF_FLOW_TABLE_ENCAP_MAX];
142 + u8 num_encaps;
143 + u8 ingress_vlans;
144 + u8 h_source[ETH_ALEN];
145 + u8 h_dest[ETH_ALEN];
146 + enum flow_offload_xmit_type xmit_type;
147 +};
148 +
149 +static DEFINE_SPINLOCK(hooks_lock);
150 +
151 +struct xt_flowoffload_table flowtable[2];
152 +
153 +static unsigned int
154 +xt_flowoffload_net_hook(void *priv, struct sk_buff *skb,
155 + const struct nf_hook_state *state)
156 +{
157 + struct vlan_ethhdr *veth;
158 + __be16 proto;
159 +
160 + switch (skb->protocol) {
161 + case htons(ETH_P_8021Q):
162 + veth = (struct vlan_ethhdr *)skb_mac_header(skb);
163 + proto = veth->h_vlan_encapsulated_proto;
164 + break;
165 + case htons(ETH_P_PPP_SES):
166 + if (!nf_flow_pppoe_proto(skb, &proto))
167 + return NF_ACCEPT;
168 + break;
169 + default:
170 + proto = skb->protocol;
171 + break;
172 + }
173 +
174 + switch (proto) {
175 + case htons(ETH_P_IP):
176 + return nf_flow_offload_ip_hook(priv, skb, state);
177 + case htons(ETH_P_IPV6):
178 + return nf_flow_offload_ipv6_hook(priv, skb, state);
179 + }
180 +
181 + return NF_ACCEPT;
182 +}
183 +
184 +static int
185 +xt_flowoffload_create_hook(struct xt_flowoffload_table *table,
186 + struct net_device *dev)
187 +{
188 + struct xt_flowoffload_hook *hook;
189 + struct nf_hook_ops *ops;
190 +
191 + hook = kzalloc(sizeof(*hook), GFP_ATOMIC);
192 + if (!hook)
193 + return -ENOMEM;
194 +
195 + ops = &hook->ops;
196 + ops->pf = NFPROTO_NETDEV;
197 + ops->hooknum = NF_NETDEV_INGRESS;
198 + ops->priority = 10;
199 + ops->priv = &table->ft;
200 + ops->hook = xt_flowoffload_net_hook;
201 + ops->dev = dev;
202 +
203 + hlist_add_head(&hook->list, &table->hooks);
204 + mod_delayed_work(system_power_efficient_wq, &table->work, 0);
205 +
206 + return 0;
207 +}
208 +
209 +static struct xt_flowoffload_hook *
210 +flow_offload_lookup_hook(struct xt_flowoffload_table *table,
211 + struct net_device *dev)
212 +{
213 + struct xt_flowoffload_hook *hook;
214 +
215 + hlist_for_each_entry(hook, &table->hooks, list) {
216 + if (hook->ops.dev == dev)
217 + return hook;
218 + }
219 +
220 + return NULL;
221 +}
222 +
223 +static void
224 +xt_flowoffload_check_device(struct xt_flowoffload_table *table,
225 + struct net_device *dev)
226 +{
227 + struct xt_flowoffload_hook *hook;
228 +
229 + if (!dev)
230 + return;
231 +
232 + spin_lock_bh(&hooks_lock);
233 + hook = flow_offload_lookup_hook(table, dev);
234 + if (hook)
235 + hook->used = true;
236 + else
237 + xt_flowoffload_create_hook(table, dev);
238 + spin_unlock_bh(&hooks_lock);
239 +}
240 +
241 +static void
242 +xt_flowoffload_register_hooks(struct xt_flowoffload_table *table)
243 +{
244 + struct xt_flowoffload_hook *hook;
245 +
246 +restart:
247 + hlist_for_each_entry(hook, &table->hooks, list) {
248 + if (hook->registered)
249 + continue;
250 +
251 + hook->registered = true;
252 + hook->net = dev_net(hook->ops.dev);
253 + spin_unlock_bh(&hooks_lock);
254 + nf_register_net_hook(hook->net, &hook->ops);
255 + if (table->ft.flags & NF_FLOWTABLE_HW_OFFLOAD)
256 + table->ft.type->setup(&table->ft, hook->ops.dev,
257 + FLOW_BLOCK_BIND);
258 + spin_lock_bh(&hooks_lock);
259 + goto restart;
260 + }
261 +
262 +}
263 +
264 +static bool
265 +xt_flowoffload_cleanup_hooks(struct xt_flowoffload_table *table)
266 +{
267 + struct xt_flowoffload_hook *hook;
268 + bool active = false;
269 +
270 +restart:
271 + spin_lock_bh(&hooks_lock);
272 + hlist_for_each_entry(hook, &table->hooks, list) {
273 + if (hook->used || !hook->registered) {
274 + active = true;
275 + continue;
276 + }
277 +
278 + hlist_del(&hook->list);
279 + spin_unlock_bh(&hooks_lock);
280 + if (table->ft.flags & NF_FLOWTABLE_HW_OFFLOAD)
281 + table->ft.type->setup(&table->ft, hook->ops.dev,
282 + FLOW_BLOCK_UNBIND);
283 + nf_unregister_net_hook(hook->net, &hook->ops);
284 + kfree(hook);
285 + goto restart;
286 + }
287 + spin_unlock_bh(&hooks_lock);
288 +
289 + return active;
290 +}
291 +
292 +static void
293 +xt_flowoffload_check_hook(struct nf_flowtable *flowtable,
294 + struct flow_offload *flow, void *data)
295 +{
296 + struct xt_flowoffload_table *table;
297 + struct flow_offload_tuple *tuple0 = &flow->tuplehash[0].tuple;
298 + struct flow_offload_tuple *tuple1 = &flow->tuplehash[1].tuple;
299 + struct xt_flowoffload_hook *hook;
300 +
301 + table = container_of(flowtable, struct xt_flowoffload_table, ft);
302 +
303 + spin_lock_bh(&hooks_lock);
304 + hlist_for_each_entry(hook, &table->hooks, list) {
305 + if (hook->ops.dev->ifindex != tuple0->iifidx &&
306 + hook->ops.dev->ifindex != tuple1->iifidx)
307 + continue;
308 +
309 + hook->used = true;
310 + }
311 + spin_unlock_bh(&hooks_lock);
312 +}
313 +
314 +static void
315 +xt_flowoffload_hook_work(struct work_struct *work)
316 +{
317 + struct xt_flowoffload_table *table;
318 + struct xt_flowoffload_hook *hook;
319 + int err;
320 +
321 + table = container_of(work, struct xt_flowoffload_table, work.work);
322 +
323 + spin_lock_bh(&hooks_lock);
324 + xt_flowoffload_register_hooks(table);
325 + hlist_for_each_entry(hook, &table->hooks, list)
326 + hook->used = false;
327 + spin_unlock_bh(&hooks_lock);
328 +
329 + err = nf_flow_table_iterate(&table->ft, xt_flowoffload_check_hook,
330 + NULL);
331 + if (err && err != -EAGAIN)
332 + goto out;
333 +
334 + if (!xt_flowoffload_cleanup_hooks(table))
335 + return;
336 +
337 +out:
338 + queue_delayed_work(system_power_efficient_wq, &table->work, HZ);
339 +}
340 +
341 +static bool
342 +xt_flowoffload_skip(struct sk_buff *skb, int family)
343 +{
344 + if (skb_sec_path(skb))
345 + return true;
346 +
347 + if (family == NFPROTO_IPV4) {
348 + const struct ip_options *opt = &(IPCB(skb)->opt);
349 +
350 + if (unlikely(opt->optlen))
351 + return true;
352 + }
353 +
354 + return false;
355 +}
356 +
357 +static enum flow_offload_xmit_type nf_xmit_type(struct dst_entry *dst)
358 +{
359 + if (dst_xfrm(dst))
360 + return FLOW_OFFLOAD_XMIT_XFRM;
361 +
362 + return FLOW_OFFLOAD_XMIT_NEIGH;
363 +}
364 +
365 +static void nf_default_forward_path(struct nf_flow_route *route,
366 + struct dst_entry *dst_cache,
367 + enum ip_conntrack_dir dir,
368 + struct net_device **dev)
369 +{
370 + dev[!dir] = dst_cache->dev;
371 + route->tuple[!dir].in.ifindex = dst_cache->dev->ifindex;
372 + route->tuple[dir].dst = dst_cache;
373 + route->tuple[dir].xmit_type = nf_xmit_type(dst_cache);
374 +}
375 +
376 +static bool nf_is_valid_ether_device(const struct net_device *dev)
377 +{
378 + if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
379 + dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
380 + return false;
381 +
382 + return true;
383 +}
384 +
385 +static void nf_dev_path_info(const struct net_device_path_stack *stack,
386 + struct nf_forward_info *info,
387 + unsigned char *ha)
388 +{
389 + const struct net_device_path *path;
390 + int i;
391 +
392 + memcpy(info->h_dest, ha, ETH_ALEN);
393 +
394 + for (i = 0; i < stack->num_paths; i++) {
395 + path = &stack->path[i];
396 + switch (path->type) {
397 + case DEV_PATH_ETHERNET:
398 + case DEV_PATH_DSA:
399 + case DEV_PATH_VLAN:
400 + case DEV_PATH_PPPOE:
401 + info->indev = path->dev;
402 + if (is_zero_ether_addr(info->h_source))
403 + memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
404 +
405 + if (path->type == DEV_PATH_ETHERNET)
406 + break;
407 + if (path->type == DEV_PATH_DSA) {
408 + i = stack->num_paths;
409 + break;
410 + }
411 +
412 + /* DEV_PATH_VLAN and DEV_PATH_PPPOE */
413 + if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
414 + info->indev = NULL;
415 + break;
416 + }
417 + if (!info->outdev)
418 + info->outdev = path->dev;
419 + info->encap[info->num_encaps].id = path->encap.id;
420 + info->encap[info->num_encaps].proto = path->encap.proto;
421 + info->num_encaps++;
422 + if (path->type == DEV_PATH_PPPOE)
423 + memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
424 + break;
425 + case DEV_PATH_BRIDGE:
426 + if (is_zero_ether_addr(info->h_source))
427 + memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
428 +
429 + switch (path->bridge.vlan_mode) {
430 + case DEV_PATH_BR_VLAN_UNTAG_HW:
431 + info->ingress_vlans |= BIT(info->num_encaps - 1);
432 + break;
433 + case DEV_PATH_BR_VLAN_TAG:
434 + info->encap[info->num_encaps].id = path->bridge.vlan_id;
435 + info->encap[info->num_encaps].proto = path->bridge.vlan_proto;
436 + info->num_encaps++;
437 + break;
438 + case DEV_PATH_BR_VLAN_UNTAG:
439 + info->num_encaps--;
440 + break;
441 + case DEV_PATH_BR_VLAN_KEEP:
442 + break;
443 + }
444 + break;
445 + default:
446 + info->indev = NULL;
447 + break;
448 + }
449 + }
450 + if (!info->outdev)
451 + info->outdev = info->indev;
452 +
453 + info->hw_outdev = info->indev;
454 +
455 + if (nf_is_valid_ether_device(info->indev))
456 + info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
457 +}
458 +
459 +static int nf_dev_fill_forward_path(const struct nf_flow_route *route,
460 + const struct dst_entry *dst_cache,
461 + const struct nf_conn *ct,
462 + enum ip_conntrack_dir dir, u8 *ha,
463 + struct net_device_path_stack *stack)
464 +{
465 + const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
466 + struct net_device *dev = dst_cache->dev;
467 + struct neighbour *n;
468 + u8 nud_state;
469 +
470 + if (!nf_is_valid_ether_device(dev))
471 + goto out;
472 +
473 + n = dst_neigh_lookup(dst_cache, daddr);
474 + if (!n)
475 + return -1;
476 +
477 + read_lock_bh(&n->lock);
478 + nud_state = n->nud_state;
479 + ether_addr_copy(ha, n->ha);
480 + read_unlock_bh(&n->lock);
481 + neigh_release(n);
482 +
483 + if (!(nud_state & NUD_VALID))
484 + return -1;
485 +
486 +out:
487 + return dev_fill_forward_path(dev, ha, stack);
488 +}
489 +
490 +static void nf_dev_forward_path(struct nf_flow_route *route,
491 + const struct nf_conn *ct,
492 + enum ip_conntrack_dir dir,
493 + struct net_device **devs)
494 +{
495 + const struct dst_entry *dst = route->tuple[dir].dst;
496 + struct net_device_path_stack stack;
497 + struct nf_forward_info info = {};
498 + unsigned char ha[ETH_ALEN];
499 + int i;
500 +
501 + if (nf_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
502 + nf_dev_path_info(&stack, &info, ha);
503 +
504 + devs[!dir] = (struct net_device *)info.indev;
505 + if (!info.indev)
506 + return;
507 +
508 + route->tuple[!dir].in.ifindex = info.indev->ifindex;
509 + for (i = 0; i < info.num_encaps; i++) {
510 + route->tuple[!dir].in.encap[i].id = info.encap[i].id;
511 + route->tuple[!dir].in.encap[i].proto = info.encap[i].proto;
512 + }
513 + route->tuple[!dir].in.num_encaps = info.num_encaps;
514 + route->tuple[!dir].in.ingress_vlans = info.ingress_vlans;
515 +
516 + if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
517 + memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
518 + memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
519 + route->tuple[dir].out.ifindex = info.outdev->ifindex;
520 + route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex;
521 + route->tuple[dir].xmit_type = info.xmit_type;
522 + }
523 +}
524 +
525 +static int
526 +xt_flowoffload_route(struct sk_buff *skb, const struct nf_conn *ct,
527 + const struct xt_action_param *par,
528 + struct nf_flow_route *route, enum ip_conntrack_dir dir,
529 + struct net_device **devs)
530 +{
531 + struct dst_entry *this_dst = skb_dst(skb);
532 + struct dst_entry *other_dst = NULL;
533 + struct flowi fl;
534 +
535 + memset(&fl, 0, sizeof(fl));
536 + switch (xt_family(par)) {
537 + case NFPROTO_IPV4:
538 + fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
539 + fl.u.ip4.flowi4_oif = xt_in(par)->ifindex;
540 + break;
541 + case NFPROTO_IPV6:
542 + fl.u.ip6.saddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
543 + fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
544 + fl.u.ip6.flowi6_oif = xt_in(par)->ifindex;
545 + break;
546 + }
547 +
548 + if (!dst_hold_safe(this_dst))
549 + return -ENOENT;
550 +
551 + nf_route(xt_net(par), &other_dst, &fl, false, xt_family(par));
552 + if (!other_dst) {
553 + dst_release(this_dst);
554 + return -ENOENT;
555 + }
556 +
557 + nf_default_forward_path(route, this_dst, dir, devs);
558 + nf_default_forward_path(route, other_dst, !dir, devs);
559 +
560 + if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH &&
561 + route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) {
562 + nf_dev_forward_path(route, ct, dir, devs);
563 + nf_dev_forward_path(route, ct, !dir, devs);
564 + }
565 +
566 + return 0;
567 +}
568 +
569 +static unsigned int
570 +flowoffload_tg(struct sk_buff *skb, const struct xt_action_param *par)
571 +{
572 + struct xt_flowoffload_table *table;
573 + const struct xt_flowoffload_target_info *info = par->targinfo;
574 + struct tcphdr _tcph, *tcph = NULL;
575 + enum ip_conntrack_info ctinfo;
576 + enum ip_conntrack_dir dir;
577 + struct nf_flow_route route = {};
578 + struct flow_offload *flow = NULL;
579 + struct net_device *devs[2] = {};
580 + struct nf_conn *ct;
581 + struct net *net;
582 +
583 + if (xt_flowoffload_skip(skb, xt_family(par)))
584 + return XT_CONTINUE;
585 +
586 + ct = nf_ct_get(skb, &ctinfo);
587 + if (ct == NULL)
588 + return XT_CONTINUE;
589 +
590 + switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
591 + case IPPROTO_TCP:
592 + if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
593 + return XT_CONTINUE;
594 +
595 + tcph = skb_header_pointer(skb, par->thoff,
596 + sizeof(_tcph), &_tcph);
597 + if (unlikely(!tcph || tcph->fin || tcph->rst))
598 + return XT_CONTINUE;
599 + break;
600 + case IPPROTO_UDP:
601 + break;
602 + default:
603 + return XT_CONTINUE;
604 + }
605 +
606 + if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
607 + ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
608 + return XT_CONTINUE;
609 +
610 + if (!nf_ct_is_confirmed(ct))
611 + return XT_CONTINUE;
612 +
613 + dir = CTINFO2DIR(ctinfo);
614 +
615 + devs[dir] = xt_out(par);
616 + devs[!dir] = xt_in(par);
617 +
618 + if (!devs[dir] || !devs[!dir])
619 + return XT_CONTINUE;
620 +
621 + if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
622 + return XT_CONTINUE;
623 +
624 + if (xt_flowoffload_route(skb, ct, par, &route, dir, devs) < 0)
625 + goto err_flow_route;
626 +
627 + flow = flow_offload_alloc(ct);
628 + if (!flow)
629 + goto err_flow_alloc;
630 +
631 + flow_offload_route_init(flow, &route);
632 +
633 + if (tcph) {
634 + ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
635 + ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
636 + }
637 +
638 + table = &flowtable[!!(info->flags & XT_FLOWOFFLOAD_HW)];
639 +
640 + net = read_pnet(&table->ft.net);
641 + if (!net)
642 + write_pnet(&table->ft.net, xt_net(par));
643 +
644 + if (flow_offload_add(&table->ft, flow) < 0)
645 + goto err_flow_add;
646 +
647 + xt_flowoffload_check_device(table, devs[0]);
648 + xt_flowoffload_check_device(table, devs[1]);
649 +
650 + return XT_CONTINUE;
651 +
652 +err_flow_add:
653 + flow_offload_free(flow);
654 +err_flow_alloc:
655 + dst_release(route.tuple[dir].dst);
656 + dst_release(route.tuple[!dir].dst);
657 +err_flow_route:
658 + clear_bit(IPS_OFFLOAD_BIT, &ct->status);
659 +
660 + return XT_CONTINUE;
661 +}
662 +
663 +static int flowoffload_chk(const struct xt_tgchk_param *par)
664 +{
665 + struct xt_flowoffload_target_info *info = par->targinfo;
666 +
667 + if (info->flags & ~XT_FLOWOFFLOAD_MASK)
668 + return -EINVAL;
669 +
670 + return 0;
671 +}
672 +
673 +static struct xt_target offload_tg_reg __read_mostly = {
674 + .family = NFPROTO_UNSPEC,
675 + .name = "FLOWOFFLOAD",
676 + .revision = 0,
677 + .targetsize = sizeof(struct xt_flowoffload_target_info),
678 + .usersize = sizeof(struct xt_flowoffload_target_info),
679 + .checkentry = flowoffload_chk,
680 + .target = flowoffload_tg,
681 + .me = THIS_MODULE,
682 +};
683 +
684 +static int flow_offload_netdev_event(struct notifier_block *this,
685 + unsigned long event, void *ptr)
686 +{
687 + struct xt_flowoffload_hook *hook0, *hook1;
688 + struct net_device *dev = netdev_notifier_info_to_dev(ptr);
689 +
690 + if (event != NETDEV_UNREGISTER)
691 + return NOTIFY_DONE;
692 +
693 + spin_lock_bh(&hooks_lock);
694 + hook0 = flow_offload_lookup_hook(&flowtable[0], dev);
695 + if (hook0)
696 + hlist_del(&hook0->list);
697 +
698 + hook1 = flow_offload_lookup_hook(&flowtable[1], dev);
699 + if (hook1)
700 + hlist_del(&hook1->list);
701 + spin_unlock_bh(&hooks_lock);
702 +
703 + if (hook0) {
704 + nf_unregister_net_hook(hook0->net, &hook0->ops);
705 + kfree(hook0);
706 + }
707 +
708 + if (hook1) {
709 + nf_unregister_net_hook(hook1->net, &hook1->ops);
710 + kfree(hook1);
711 + }
712 +
713 + nf_flow_table_cleanup(dev);
714 +
715 + return NOTIFY_DONE;
716 +}
717 +
718 +static struct notifier_block flow_offload_netdev_notifier = {
719 + .notifier_call = flow_offload_netdev_event,
720 +};
721 +
722 +static int nf_flow_rule_route_inet(struct net *net,
723 + const struct flow_offload *flow,
724 + enum flow_offload_tuple_dir dir,
725 + struct nf_flow_rule *flow_rule)
726 +{
727 + const struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
728 + int err;
729 +
730 + switch (flow_tuple->l3proto) {
731 + case NFPROTO_IPV4:
732 + err = nf_flow_rule_route_ipv4(net, flow, dir, flow_rule);
733 + break;
734 + case NFPROTO_IPV6:
735 + err = nf_flow_rule_route_ipv6(net, flow, dir, flow_rule);
736 + break;
737 + default:
738 + err = -1;
739 + break;
740 + }
741 +
742 + return err;
743 +}
744 +
745 +static struct nf_flowtable_type flowtable_inet = {
746 + .family = NFPROTO_INET,
747 + .init = nf_flow_table_init,
748 + .setup = nf_flow_table_offload_setup,
749 + .action = nf_flow_rule_route_inet,
750 + .free = nf_flow_table_free,
751 + .hook = xt_flowoffload_net_hook,
752 + .owner = THIS_MODULE,
753 +};
754 +
755 +static int init_flowtable(struct xt_flowoffload_table *tbl)
756 +{
757 + INIT_DELAYED_WORK(&tbl->work, xt_flowoffload_hook_work);
758 + tbl->ft.type = &flowtable_inet;
759 + tbl->ft.flags = NF_FLOWTABLE_COUNTER;
760 +
761 + return nf_flow_table_init(&tbl->ft);
762 +}
763 +
764 +static int __init xt_flowoffload_tg_init(void)
765 +{
766 + int ret;
767 +
768 + register_netdevice_notifier(&flow_offload_netdev_notifier);
769 +
770 + ret = init_flowtable(&flowtable[0]);
771 + if (ret)
772 + return ret;
773 +
774 + ret = init_flowtable(&flowtable[1]);
775 + if (ret)
776 + goto cleanup;
777 +
778 + flowtable[1].ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
779 +
780 + ret = xt_register_target(&offload_tg_reg);
781 + if (ret)
782 + goto cleanup2;
783 +
784 + return 0;
785 +
786 +cleanup2:
787 + nf_flow_table_free(&flowtable[1].ft);
788 +cleanup:
789 + nf_flow_table_free(&flowtable[0].ft);
790 + return ret;
791 +}
792 +
793 +static void __exit xt_flowoffload_tg_exit(void)
794 +{
795 + xt_unregister_target(&offload_tg_reg);
796 + unregister_netdevice_notifier(&flow_offload_netdev_notifier);
797 + nf_flow_table_free(&flowtable[0].ft);
798 + nf_flow_table_free(&flowtable[1].ft);
799 +}
800 +
801 +MODULE_LICENSE("GPL");
802 +module_init(xt_flowoffload_tg_init);
803 +module_exit(xt_flowoffload_tg_exit);
804 --- a/net/netfilter/nf_flow_table_core.c
805 +++ b/net/netfilter/nf_flow_table_core.c
806 @@ -7,7 +7,6 @@
807 #include <linux/netdevice.h>
808 #include <net/ip.h>
809 #include <net/ip6_route.h>
810 -#include <net/netfilter/nf_tables.h>
811 #include <net/netfilter/nf_flow_table.h>
812 #include <net/netfilter/nf_conntrack.h>
813 #include <net/netfilter/nf_conntrack_core.h>
814 @@ -373,8 +372,7 @@ flow_offload_lookup(struct nf_flowtable
815 }
816 EXPORT_SYMBOL_GPL(flow_offload_lookup);
817
818 -static int
819 -nf_flow_table_iterate(struct nf_flowtable *flow_table,
820 +int nf_flow_table_iterate(struct nf_flowtable *flow_table,
821 void (*iter)(struct nf_flowtable *flowtable,
822 struct flow_offload *flow, void *data),
823 void *data)
824 @@ -428,6 +426,7 @@ static void nf_flow_offload_gc_step(stru
825 nf_flow_offload_stats(flow_table, flow);
826 }
827 }
828 +EXPORT_SYMBOL_GPL(nf_flow_table_iterate);
829
830 void nf_flow_table_gc_run(struct nf_flowtable *flow_table)
831 {
832 --- /dev/null
833 +++ b/include/uapi/linux/netfilter/xt_FLOWOFFLOAD.h
834 @@ -0,0 +1,17 @@
835 +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
836 +#ifndef _XT_FLOWOFFLOAD_H
837 +#define _XT_FLOWOFFLOAD_H
838 +
839 +#include <linux/types.h>
840 +
841 +enum {
842 + XT_FLOWOFFLOAD_HW = 1 << 0,
843 +
844 + XT_FLOWOFFLOAD_MASK = XT_FLOWOFFLOAD_HW
845 +};
846 +
847 +struct xt_flowoffload_target_info {
848 + __u32 flags;
849 +};
850 +
851 +#endif /* _XT_FLOWOFFLOAD_H */
852 --- a/include/net/netfilter/nf_flow_table.h
853 +++ b/include/net/netfilter/nf_flow_table.h
854 @@ -276,6 +276,11 @@ void nf_flow_table_free(struct nf_flowta
855
856 void flow_offload_teardown(struct flow_offload *flow);
857
858 +int nf_flow_table_iterate(struct nf_flowtable *flow_table,
859 + void (*iter)(struct nf_flowtable *flowtable,
860 + struct flow_offload *flow, void *data),
861 + void *data);
862 +
863 void nf_flow_snat_port(const struct flow_offload *flow,
864 struct sk_buff *skb, unsigned int thoff,
865 u8 protocol, enum flow_offload_tuple_dir dir);
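Illustrative kernel config fragment (not part of the patch) enabling the pieces referenced by the Kconfig hunks above; whether each option is built as a module or builtin is the target config's choice:

  CONFIG_NETFILTER_INGRESS=y
  CONFIG_NF_CONNTRACK=m
  CONFIG_NF_FLOW_TABLE=m
  CONFIG_NETFILTER_XT_TARGET_FLOWOFFLOAD=m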