kernel: fix flow offload issues with pppoe
author Felix Fietkau <nbd@nbd.name>
Fri, 6 May 2022 13:49:58 +0000 (15:49 +0200)
committer Felix Fietkau <nbd@nbd.name>
Fri, 6 May 2022 13:50:32 +0000 (15:50 +0200)
sync xt_FLOWOFFLOAD code with latest version of nft_flow_offload

Signed-off-by: Felix Fietkau <nbd@nbd.name>
target/linux/generic/hack-5.15/650-netfilter-add-xt_FLOWOFFLOAD-target.patch
target/linux/generic/pending-5.15/704-00-netfilter-flowtable-fix-excessive-hw-offload-attempt.patch [new file with mode: 0644]
target/linux/generic/pending-5.15/704-01-netfilter-nft_flow_offload-skip-dst-neigh-lookup-for.patch [new file with mode: 0644]
target/linux/generic/pending-5.15/704-02-net-fix-dev_fill_forward_path-with-pppoe-bridge.patch [new file with mode: 0644]
target/linux/generic/pending-5.15/704-03-netfilter-nft_flow_offload-fix-offload-with-pppoe-vl.patch [new file with mode: 0644]

index fa21ca1229d4abc8380817638efb6be8ea2c24ba..0eca9f8d8f2d11876d156824625264b0ff2b5d11 100644
@@ -98,7 +98,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
  obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
 --- /dev/null
 +++ b/net/netfilter/xt_FLOWOFFLOAD.c
-@@ -0,0 +1,657 @@
+@@ -0,0 +1,694 @@
 +/*
 + * Copyright (C) 2018-2021 Felix Fietkau <nbd@nbd.name>
 + *
@@ -110,6 +110,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
 +#include <linux/init.h>
 +#include <linux/netfilter.h>
 +#include <linux/netfilter/xt_FLOWOFFLOAD.h>
++#include <linux/if_vlan.h>
 +#include <net/ip.h>
 +#include <net/netfilter/nf_conntrack.h>
 +#include <net/netfilter/nf_conntrack_extend.h>
@@ -130,6 +131,21 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
 +      struct delayed_work work;
 +};
 +
++struct nf_forward_info {
++      const struct net_device *indev;
++      const struct net_device *outdev;
++      const struct net_device *hw_outdev;
++      struct id {
++              __u16   id;
++              __be16  proto;
++      } encap[NF_FLOW_TABLE_ENCAP_MAX];
++      u8 num_encaps;
++      u8 ingress_vlans;
++      u8 h_source[ETH_ALEN];
++      u8 h_dest[ETH_ALEN];
++      enum flow_offload_xmit_type xmit_type;
++};
++
 +static DEFINE_SPINLOCK(hooks_lock);
 +
 +struct xt_flowoffload_table flowtable[2];
@@ -138,12 +154,23 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
 +xt_flowoffload_net_hook(void *priv, struct sk_buff *skb,
 +                      const struct nf_hook_state *state)
 +{
-+      struct nf_flowtable *ft = priv;
-+
-+      if (!atomic_read(&ft->rhashtable.nelems))
-+              return NF_ACCEPT;
++      struct vlan_ethhdr *veth;
++      __be16 proto;
 +
 +      switch (skb->protocol) {
++      case htons(ETH_P_8021Q):
++              veth = (struct vlan_ethhdr *)skb_mac_header(skb);
++              proto = veth->h_vlan_encapsulated_proto;
++              break;
++      case htons(ETH_P_PPP_SES):
++              proto = nf_flow_pppoe_proto(skb);
++              break;
++      default:
++              proto = skb->protocol;
++              break;
++      }
++
++      switch (proto) {
 +      case htons(ETH_P_IP):
 +              return nf_flow_offload_ip_hook(priv, skb, state);
 +      case htons(ETH_P_IPV6):
@@ -323,7 +350,26 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
 +      return false;
 +}
 +
-+static bool flow_is_valid_ether_device(const struct net_device *dev)
++static enum flow_offload_xmit_type nf_xmit_type(struct dst_entry *dst)
++{
++      if (dst_xfrm(dst))
++              return FLOW_OFFLOAD_XMIT_XFRM;
++
++      return FLOW_OFFLOAD_XMIT_NEIGH;
++}
++
++static void nf_default_forward_path(struct nf_flow_route *route,
++                                  struct dst_entry *dst_cache,
++                                  enum ip_conntrack_dir dir,
++                                  struct net_device **dev)
++{
++      dev[!dir] = dst_cache->dev;
++      route->tuple[!dir].in.ifindex   = dst_cache->dev->ifindex;
++      route->tuple[dir].dst           = dst_cache;
++      route->tuple[dir].xmit_type     = nf_xmit_type(dst_cache);
++}
++
++static bool nf_is_valid_ether_device(const struct net_device *dev)
 +{
 +      if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
 +          dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
@@ -332,174 +378,181 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
 +      return true;
 +}
 +
-+static void
-+xt_flowoffload_route_check_path(struct nf_flow_route *route,
-+                              const struct nf_conn *ct,
-+                              enum ip_conntrack_dir dir,
-+                              struct net_device **out_dev)
++static void nf_dev_path_info(const struct net_device_path_stack *stack,
++                           struct nf_forward_info *info,
++                           unsigned char *ha)
 +{
-+      const struct dst_entry *dst = route->tuple[dir].dst;
-+      const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
-+      struct net_device_path_stack stack;
-+      enum net_device_path_type prev_type;
-+      struct net_device *dev = dst->dev;
-+      struct neighbour *n;
-+      bool last = false;
-+      u8 nud_state;
++      const struct net_device_path *path;
 +      int i;
 +
-+      route->tuple[!dir].in.ifindex = dev->ifindex;
-+      route->tuple[dir].out.ifindex = dev->ifindex;
-+
-+      if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
-+              return;
-+
-+      if ((dev->flags & IFF_LOOPBACK) ||
-+          dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
-+          !is_valid_ether_addr(dev->dev_addr))
-+              return;
-+
-+      n = dst_neigh_lookup(dst, daddr);
-+      if (!n)
-+              return;
-+
-+      read_lock_bh(&n->lock);
-+      nud_state = n->nud_state;
-+      memcpy(route->tuple[dir].out.h_dest, n->ha, ETH_ALEN);
-+      read_unlock_bh(&n->lock);
-+      neigh_release(n);
-+
-+      if (!(nud_state & NUD_VALID))
-+              return;
-+
-+      if (dev_fill_forward_path(dev, route->tuple[dir].out.h_dest, &stack) ||
-+          !stack.num_paths)
-+              return;
-+
-+      prev_type = DEV_PATH_ETHERNET;
-+      for (i = 0; i <= stack.num_paths; i++) {
-+              const struct net_device_path *path = &stack.path[i];
-+              int n_encaps = route->tuple[!dir].in.num_encaps;
-+
-+              dev = (struct net_device *)path->dev;
-+              if (flow_is_valid_ether_device(dev)) {
-+                      if (route->tuple[dir].xmit_type != FLOW_OFFLOAD_XMIT_DIRECT) {
-+                              memcpy(route->tuple[dir].out.h_source,
-+                                     dev->dev_addr, ETH_ALEN);
-+                              route->tuple[dir].out.ifindex = dev->ifindex;
-+                      }
-+                      route->tuple[dir].xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
-+              }
++      memcpy(info->h_dest, ha, ETH_ALEN);
 +
++      for (i = 0; i < stack->num_paths; i++) {
++              path = &stack->path[i];
 +              switch (path->type) {
-+              case DEV_PATH_PPPOE:
++              case DEV_PATH_ETHERNET:
++              case DEV_PATH_DSA:
 +              case DEV_PATH_VLAN:
-+                      if (n_encaps >= NF_FLOW_TABLE_ENCAP_MAX ||
-+                          i == stack.num_paths) {
-+                              last = true;
++              case DEV_PATH_PPPOE:
++                      info->indev = path->dev;
++                      if (is_zero_ether_addr(info->h_source))
++                              memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
++
++                      if (path->type == DEV_PATH_ETHERNET)
++                              break;
++                      if (path->type == DEV_PATH_DSA) {
++                              i = stack->num_paths;
 +                              break;
 +                      }
 +
-+                      route->tuple[!dir].in.num_encaps++;
-+                      route->tuple[!dir].in.encap[n_encaps].id = path->encap.id;
-+                      route->tuple[!dir].in.encap[n_encaps].proto = path->encap.proto;
++                      /* DEV_PATH_VLAN and DEV_PATH_PPPOE */
++                      if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
++                              info->indev = NULL;
++                              break;
++                      }
++                      if (!info->outdev)
++                              info->outdev = path->dev;
++                      info->encap[info->num_encaps].id = path->encap.id;
++                      info->encap[info->num_encaps].proto = path->encap.proto;
++                      info->num_encaps++;
 +                      if (path->type == DEV_PATH_PPPOE)
-+                              memcpy(route->tuple[dir].out.h_dest,
-+                                     path->encap.h_dest, ETH_ALEN);
++                              memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
 +                      break;
 +              case DEV_PATH_BRIDGE:
++                      if (is_zero_ether_addr(info->h_source))
++                              memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
++
 +                      switch (path->bridge.vlan_mode) {
++                      case DEV_PATH_BR_VLAN_UNTAG_HW:
++                              info->ingress_vlans |= BIT(info->num_encaps - 1);
++                              break;
 +                      case DEV_PATH_BR_VLAN_TAG:
-+                              if (n_encaps >= NF_FLOW_TABLE_ENCAP_MAX ||
-+                                  i == stack.num_paths) {
-+                                      last = true;
-+                                      break;
-+                              }
-+
-+                              route->tuple[!dir].in.num_encaps++;
-+                              route->tuple[!dir].in.encap[n_encaps].id =
-+                                      path->bridge.vlan_id;
-+                              route->tuple[!dir].in.encap[n_encaps].proto =
-+                                      path->bridge.vlan_proto;
++                              info->encap[info->num_encaps].id = path->bridge.vlan_id;
++                              info->encap[info->num_encaps].proto = path->bridge.vlan_proto;
++                              info->num_encaps++;
 +                              break;
 +                      case DEV_PATH_BR_VLAN_UNTAG:
-+                              route->tuple[!dir].in.num_encaps--;
-+                              break;
-+                      case DEV_PATH_BR_VLAN_UNTAG_HW:
-+                              route->tuple[!dir].in.ingress_vlans |= BIT(n_encaps - 1);
++                              info->num_encaps--;
 +                              break;
 +                      case DEV_PATH_BR_VLAN_KEEP:
 +                              break;
 +                      }
 +                      break;
 +              default:
-+                      last = true;
++                      info->indev = NULL;
 +                      break;
 +              }
-+
-+              if (last)
-+                      break;
 +      }
++      if (!info->outdev)
++              info->outdev = info->indev;
++
++      info->hw_outdev = info->indev;
++
++      if (nf_is_valid_ether_device(info->indev))
++              info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
++}
++
++static int nf_dev_fill_forward_path(const struct nf_flow_route *route,
++                                   const struct dst_entry *dst_cache,
++                                   const struct nf_conn *ct,
++                                   enum ip_conntrack_dir dir, u8 *ha,
++                                   struct net_device_path_stack *stack)
++{
++      const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
++      struct net_device *dev = dst_cache->dev;
++      struct neighbour *n;
++      u8 nud_state;
++
++      if (!nf_is_valid_ether_device(dev))
++              goto out;
++
++      n = dst_neigh_lookup(dst_cache, daddr);
++      if (!n)
++              return -1;
++
++      read_lock_bh(&n->lock);
++      nud_state = n->nud_state;
++      ether_addr_copy(ha, n->ha);
++      read_unlock_bh(&n->lock);
++      neigh_release(n);
++
++      if (!(nud_state & NUD_VALID))
++              return -1;
++
++out:
++      return dev_fill_forward_path(dev, ha, stack);
++}
++
++static void nf_dev_forward_path(struct nf_flow_route *route,
++                              const struct nf_conn *ct,
++                              enum ip_conntrack_dir dir,
++                              struct net_device **devs)
++{
++      const struct dst_entry *dst = route->tuple[dir].dst;
++      struct net_device_path_stack stack;
++      struct nf_forward_info info = {};
++      unsigned char ha[ETH_ALEN];
++      int i;
 +
-+      *out_dev = dev;
-+      route->tuple[dir].out.hw_ifindex = dev->ifindex;
-+      route->tuple[!dir].in.ifindex = dev->ifindex;
++      if (nf_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
++              nf_dev_path_info(&stack, &info, ha);
++
++      devs[!dir] = (struct net_device *)info.indev;
++      if (!info.indev)
++              return;
++
++      route->tuple[!dir].in.ifindex = info.indev->ifindex;
++      for (i = 0; i < info.num_encaps; i++) {
++              route->tuple[!dir].in.encap[i].id = info.encap[i].id;
++              route->tuple[!dir].in.encap[i].proto = info.encap[i].proto;
++      }
++      route->tuple[!dir].in.num_encaps = info.num_encaps;
++      route->tuple[!dir].in.ingress_vlans = info.ingress_vlans;
++
++      if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
++              memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
++              memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
++              route->tuple[dir].out.ifindex = info.outdev->ifindex;
++              route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex;
++              route->tuple[dir].xmit_type = info.xmit_type;
++      }
 +}
 +
 +static int
-+xt_flowoffload_route_dir(struct nf_flow_route *route, const struct nf_conn *ct,
-+                       enum ip_conntrack_dir dir,
-+                       const struct xt_action_param *par, int ifindex)
++xt_flowoffload_route(struct sk_buff *skb, const struct nf_conn *ct,
++                   const struct xt_action_param *par,
++                   struct nf_flow_route *route, enum ip_conntrack_dir dir,
++                   struct net_device **devs)
 +{
-+      struct dst_entry *dst = NULL;
++      struct dst_entry *this_dst = skb_dst(skb);
++      struct dst_entry *other_dst = NULL;
 +      struct flowi fl;
 +
 +      memset(&fl, 0, sizeof(fl));
 +      switch (xt_family(par)) {
 +      case NFPROTO_IPV4:
-+              fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.src.u3.ip;
-+              fl.u.ip4.flowi4_oif = ifindex;
++              fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
++              fl.u.ip4.flowi4_oif = xt_in(par)->ifindex;
 +              break;
 +      case NFPROTO_IPV6:
 +              fl.u.ip6.saddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
-+              fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.src.u3.in6;
-+              fl.u.ip6.flowi6_oif = ifindex;
++              fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
++              fl.u.ip6.flowi6_oif = xt_in(par)->ifindex;
 +              break;
 +      }
 +
-+      nf_route(xt_net(par), &dst, &fl, false, xt_family(par));
-+      if (!dst)
++      nf_route(xt_net(par), &other_dst, &fl, false, xt_family(par));
++      if (!other_dst)
 +              return -ENOENT;
 +
-+      route->tuple[dir].dst = dst;
-+      if (dst_xfrm(dst))
-+              route->tuple[dir].xmit_type = FLOW_OFFLOAD_XMIT_XFRM;
-+      else
-+              route->tuple[dir].xmit_type = FLOW_OFFLOAD_XMIT_NEIGH;
-+
-+      return 0;
-+}
-+
-+static int
-+xt_flowoffload_route(struct sk_buff *skb, const struct nf_conn *ct,
-+                   const struct xt_action_param *par,
-+                   struct nf_flow_route *route, enum ip_conntrack_dir dir,
-+                   struct net_device **dev)
-+{
-+      int ret;
++      nf_default_forward_path(route, this_dst, dir, devs);
++      nf_default_forward_path(route, other_dst, !dir, devs);
 +
-+      ret = xt_flowoffload_route_dir(route, ct, dir, par,
-+                                     dev[dir]->ifindex);
-+      if (ret)
-+              return ret;
-+
-+      ret = xt_flowoffload_route_dir(route, ct, !dir, par,
-+                                     dev[!dir]->ifindex);
-+      if (ret)
-+              return ret;
-+
-+      xt_flowoffload_route_check_path(route, ct, dir, &dev[!dir]);
-+      xt_flowoffload_route_check_path(route, ct, !dir, &dev[dir]);
++      if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH &&
++          route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) {
++              nf_dev_forward_path(route, ct, dir, devs);
++              nf_dev_forward_path(route, ct, !dir, devs);
++      }
 +
 +      return 0;
 +}
@@ -542,7 +595,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
 +      }
 +
 +      if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
-+          ct->status & IPS_SEQ_ADJUST)
++          ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
 +              return XT_CONTINUE;
 +
 +      if (!nf_ct_is_confirmed(ct))
@@ -586,7 +639,6 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
 +      xt_flowoffload_check_device(table, devs[0]);
 +      xt_flowoffload_check_device(table, devs[1]);
 +
-+      dst_release(route.tuple[dir].dst);
 +      dst_release(route.tuple[!dir].dst);
 +
 +      return XT_CONTINUE;
@@ -594,7 +646,6 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
 +err_flow_add:
 +      flow_offload_free(flow);
 +err_flow_alloc:
-+      dst_release(route.tuple[dir].dst);
 +      dst_release(route.tuple[!dir].dst);
 +err_flow_route:
 +      clear_bit(IPS_OFFLOAD_BIT, &ct->status);
@@ -661,20 +712,6 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
 +      .notifier_call  = flow_offload_netdev_event,
 +};
 +
-+static unsigned int
-+nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
-+                        const struct nf_hook_state *state)
-+{
-+      switch (skb->protocol) {
-+      case htons(ETH_P_IP):
-+              return nf_flow_offload_ip_hook(priv, skb, state);
-+      case htons(ETH_P_IPV6):
-+              return nf_flow_offload_ipv6_hook(priv, skb, state);
-+      }
-+
-+      return NF_ACCEPT;
-+}
-+
 +static int nf_flow_rule_route_inet(struct net *net,
 +                                 const struct flow_offload *flow,
 +                                 enum flow_offload_tuple_dir dir,
@@ -704,7 +741,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
 +      .setup          = nf_flow_table_offload_setup,
 +      .action         = nf_flow_rule_route_inet,
 +      .free           = nf_flow_table_free,
-+      .hook           = nf_flow_offload_inet_hook,
++      .hook           = xt_flowoffload_net_hook,
 +      .owner          = THIS_MODULE,
 +};
 +
@@ -766,7 +803,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
  #include <net/netfilter/nf_flow_table.h>
  #include <net/netfilter/nf_conntrack.h>
  #include <net/netfilter/nf_conntrack_core.h>
-@@ -397,8 +396,7 @@ flow_offload_lookup(struct nf_flowtable
+@@ -399,8 +398,7 @@ flow_offload_lookup(struct nf_flowtable
  }
  EXPORT_SYMBOL_GPL(flow_offload_lookup);
  
@@ -776,7 +813,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
                      void (*iter)(struct flow_offload *flow, void *data),
                      void *data)
  {
-@@ -430,6 +428,7 @@ nf_flow_table_iterate(struct nf_flowtabl
+@@ -432,6 +430,7 @@ nf_flow_table_iterate(struct nf_flowtabl
  
        return err;
  }
diff --git a/target/linux/generic/pending-5.15/704-00-netfilter-flowtable-fix-excessive-hw-offload-attempt.patch b/target/linux/generic/pending-5.15/704-00-netfilter-flowtable-fix-excessive-hw-offload-attempt.patch
new file mode 100644
index 0000000..c03cb5a
--- /dev/null
@@ -0,0 +1,27 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Fri, 6 May 2022 12:37:23 +0200
+Subject: [PATCH] netfilter: flowtable: fix excessive hw offload attempts
+ after failure
+
+If a flow cannot be offloaded, the code currently repeatedly tries again as
+quickly as possible, which can significantly increase system load.
+Fix this by limiting flow timeout update and hardware offload retry to once
+per second.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -329,8 +329,10 @@ void flow_offload_refresh(struct nf_flow
+       u32 timeout;
+ 
+       timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
+-      if (READ_ONCE(flow->timeout) != timeout)
++      if (timeout - READ_ONCE(flow->timeout) > HZ)
+               WRITE_ONCE(flow->timeout, timeout);
++      else
++              return;
+ 
+       if (likely(!nf_flowtable_hw_offload(flow_table)))
+               return;
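
The new check relies on unsigned 32-bit subtraction, so the once-per-second throttle stays correct even when the jiffies-based timeout wraps around. A minimal standalone sketch of that comparison (illustration only, not part of the patch; HZ is assumed to be 100 here):

#include <stdint.h>
#include <stdio.h>

#define HZ 100	/* assumed tick rate for this sketch */

/* Refresh only when the recomputed timeout is more than one second
 * (HZ ticks) ahead of the stored one; unsigned subtraction keeps the
 * comparison correct across a wrap-around. */
static int should_refresh(uint32_t stored, uint32_t recomputed)
{
	return (uint32_t)(recomputed - stored) > HZ;
}

int main(void)
{
	printf("%d\n", should_refresh(1000, 1050));		/* 0: under 1s, skip */
	printf("%d\n", should_refresh(1000, 1200));		/* 1: refresh */
	printf("%d\n", should_refresh(0xffffffb0u, 200));	/* 1: wrap handled */
	return 0;
}
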
diff --git a/target/linux/generic/pending-5.15/704-01-netfilter-nft_flow_offload-skip-dst-neigh-lookup-for.patch b/target/linux/generic/pending-5.15/704-01-netfilter-nft_flow_offload-skip-dst-neigh-lookup-for.patch
new file mode 100644
index 0000000..6683a53
--- /dev/null
@@ -0,0 +1,64 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Fri, 6 May 2022 12:43:58 +0200
+Subject: [PATCH] netfilter: nft_flow_offload: skip dst neigh lookup for
+ ppp devices
+
+The dst entry does not contain a valid hardware address, so skip the lookup
+in order to avoid running into errors here.
+The proper hardware address is filled in from nft_dev_path_info
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -36,6 +36,15 @@ static void nft_default_forward_path(str
+       route->tuple[dir].xmit_type     = nft_xmit_type(dst_cache);
+ }
+ 
++static bool nft_is_valid_ether_device(const struct net_device *dev)
++{
++      if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
++          dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
++              return false;
++
++      return true;
++}
++
+ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
+                                    const struct dst_entry *dst_cache,
+                                    const struct nf_conn *ct,
+@@ -47,6 +56,9 @@ static int nft_dev_fill_forward_path(con
+       struct neighbour *n;
+       u8 nud_state;
+ 
++      if (!nft_is_valid_ether_device(dev))
++              goto out;
++
+       n = dst_neigh_lookup(dst_cache, daddr);
+       if (!n)
+               return -1;
+@@ -60,6 +72,7 @@ static int nft_dev_fill_forward_path(con
+       if (!(nud_state & NUD_VALID))
+               return -1;
+ 
++out:
+       return dev_fill_forward_path(dev, ha, stack);
+ }
+ 
+@@ -78,15 +91,6 @@ struct nft_forward_info {
+       enum flow_offload_xmit_type xmit_type;
+ };
+ 
+-static bool nft_is_valid_ether_device(const struct net_device *dev)
+-{
+-      if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
+-          dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
+-              return false;
+-
+-      return true;
+-}
+-
+ static void nft_dev_path_info(const struct net_device_path_stack *stack,
+                             struct nft_forward_info *info,
+                             unsigned char *ha, struct nf_flowtable *flowtable)
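
For reference, the device test that now gates the neighbour lookup only passes for real Ethernet interfaces; ppp devices fail it, so the lookup is skipped and the peer MAC comes from the PPPoE session data collected by nft_dev_path_info(). A rough standalone sketch of that predicate (illustration only; the struct below is a hypothetical stand-in for net_device, the real helper uses ARPHRD_ETHER and is_valid_ether_addr()):

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

struct fake_netdev {			/* hypothetical stand-in for net_device */
	bool loopback;
	int type;			/* 1 == ARPHRD_ETHER, 512 == ARPHRD_PPP */
	int addr_len;
	unsigned char dev_addr[ETH_ALEN];
};

/* Only a non-loopback Ethernet device with a valid unicast MAC qualifies
 * for the dst neighbour lookup; a ppp device has no such address. */
static bool is_valid_ether_device(const struct fake_netdev *dev)
{
	static const unsigned char zero[ETH_ALEN];

	return dev && !dev->loopback && dev->type == 1 &&
	       dev->addr_len == ETH_ALEN &&
	       memcmp(dev->dev_addr, zero, ETH_ALEN) != 0 &&
	       !(dev->dev_addr[0] & 1);	/* not a multicast address */
}
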
diff --git a/target/linux/generic/pending-5.15/704-02-net-fix-dev_fill_forward_path-with-pppoe-bridge.patch b/target/linux/generic/pending-5.15/704-02-net-fix-dev_fill_forward_path-with-pppoe-bridge.patch
new file mode 100644
index 0000000..984733a
--- /dev/null
@@ -0,0 +1,66 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Fri, 6 May 2022 13:54:44 +0200
+Subject: [PATCH] net: fix dev_fill_forward_path with pppoe + bridge
+
+When calling dev_fill_forward_path on a pppoe device, the provided destination
+address is invalid. In order for the bridge fdb lookup to succeed, the pppoe
+code needs to update ctx->daddr to the correct value.
+Fix this by storing the address inside struct net_device_path_ctx
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -90,7 +90,6 @@ mtk_flow_get_wdma_info(struct net_device
+ {
+       struct net_device_path_ctx ctx = {
+               .dev = dev,
+-              .daddr = addr,
+       };
+       struct net_device_path path = {};
+ 
+@@ -100,6 +99,7 @@ mtk_flow_get_wdma_info(struct net_device
+       if (!dev->netdev_ops->ndo_fill_forward_path)
+               return -1;
+ 
++      memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
+       if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
+               return -1;
+ 
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struc
+       path->encap.proto = htons(ETH_P_PPP_SES);
+       path->encap.id = be16_to_cpu(po->num);
+       memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
++      memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
+       path->dev = ctx->dev;
+       ctx->dev = dev;
+ 
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -894,7 +894,7 @@ struct net_device_path_stack {
+ 
+ struct net_device_path_ctx {
+       const struct net_device *dev;
+-      const u8                *daddr;
++      u8                      daddr[ETH_ALEN];
+ 
+       int                     num_vlans;
+       struct {
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -741,11 +741,11 @@ int dev_fill_forward_path(const struct n
+       const struct net_device *last_dev;
+       struct net_device_path_ctx ctx = {
+               .dev    = dev,
+-              .daddr  = daddr,
+       };
+       struct net_device_path *path;
+       int ret = 0;
+ 
++      memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
+       stack->num_paths = 0;
+       while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
+               last_dev = ctx.dev;
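
Storing daddr by value in the context is what allows the pppoe layer to rewrite it before the bridge fdb lookup; a borrowed const pointer could not be updated. A minimal sketch of the pattern (illustration only, with hypothetical stand-in types):

#include <string.h>

#define ETH_ALEN 6

struct path_ctx {			/* stand-in for net_device_path_ctx */
	unsigned char daddr[ETH_ALEN];	/* held by value so lower layers may rewrite it */
};

/* The caller seeds the context with the address it was given ... */
static void ctx_init(struct path_ctx *ctx, const unsigned char *daddr)
{
	memcpy(ctx->daddr, daddr, ETH_ALEN);
}

/* ... and the pppoe step replaces it with the real peer MAC, so the
 * bridge fdb lookup further down the path sees a valid destination. */
static void pppoe_step(struct path_ctx *ctx, const unsigned char *pppoe_peer)
{
	memcpy(ctx->daddr, pppoe_peer, ETH_ALEN);
}
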
diff --git a/target/linux/generic/pending-5.15/704-03-netfilter-nft_flow_offload-fix-offload-with-pppoe-vl.patch b/target/linux/generic/pending-5.15/704-03-netfilter-nft_flow_offload-fix-offload-with-pppoe-vl.patch
new file mode 100644
index 0000000..1e0dc99
--- /dev/null
@@ -0,0 +1,24 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Fri, 6 May 2022 15:15:06 +0200
+Subject: [PATCH] netfilter: nft_flow_offload: fix offload with pppoe +
+ vlan
+
+When running a combination of PPPoE on top of a VLAN, we need to set
+info->outdev to the PPPoE device, otherwise PPPoE encap is skipped
+during software offload.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -123,7 +123,8 @@ static void nft_dev_path_info(const stru
+                               info->indev = NULL;
+                               break;
+                       }
+-                      info->outdev = path->dev;
++                      if (!info->outdev)
++                              info->outdev = path->dev;
+                       info->encap[info->num_encaps].id = path->encap.id;
+                       info->encap[info->num_encaps].proto = path->encap.proto;
+                       info->num_encaps++;
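
With pppoe on top of a vlan the forward-path walk reaches the PPPoE device before the VLAN device underneath it, so only the first encap device seen may be recorded as outdev; otherwise the flow would be transmitted on the VLAN device and the PPPoE header would never be added during software offload. A minimal sketch of that rule (illustration only, with hypothetical stand-in types):

#include <stddef.h>

struct fake_netdev;			/* opaque stand-in for net_device */

struct fwd_info {			/* stand-in for nft_forward_info */
	const struct fake_netdev *outdev;	/* device used for software transmit */
};

/* Called for each encap entry in the path stack (PPPoE first, then the
 * VLAN below it): keep the first device seen so the flow leaves through
 * the PPPoE device and its encapsulation is not skipped. */
static void record_encap_outdev(struct fwd_info *info,
				const struct fake_netdev *path_dev)
{
	if (!info->outdev)
		info->outdev = path_dev;
}
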