From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Fri, 20 Nov 2020 13:49:19 +0100
Subject: [PATCH] netfilter: flowtable: use dev_fill_forward_path() to
 obtain egress device
The egress device in the tuple is obtained from the route. Use
dev_fill_forward_path() instead to provide the real egress device for
this flow whenever it is available.
The new FLOW_OFFLOAD_XMIT_DIRECT type uses dev_queue_xmit() to transmit
ethernet frames. Cache the source and destination hardware addresses so
that dev_queue_xmit() can be used to transfer packets.
FLOW_OFFLOAD_XMIT_DIRECT replaces FLOW_OFFLOAD_XMIT_NEIGH whenever
dev_fill_forward_path() finds a direct transmit path.
In case of topology updates, if the peer is moved to a different bridge
port, the connection will time out and reconnecting will result in a new
entry with the correct path. Snooping fdb updates would allow for
cleaning up stale entries.
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
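Editor's note: the two standalone sketches below are not part of the
patch. The first models the changed flow_offload_tuple layout described
in the second paragraph of the changelog: the cached route and the cached
hardware addresses overlay each other in a union, and the hook dispatches
on the xmit type. All types, names and values here are simplified,
hypothetical stand-ins for the kernel structures; build with any C11
compiler.

/* Sketch only: models the tuple union and transmit dispatch. */
#include <stdio.h>

#define ETH_ALEN 6

enum xmit_type { XMIT_NEIGH, XMIT_XFRM, XMIT_DIRECT };

struct tuple {
	enum xmit_type xmit_type;
	/* as in the patched flow_offload_tuple: the dst_cache pointer and
	 * the direct-xmit data share storage, only one is valid depending
	 * on xmit_type */
	union {
		void *dst_cache;		/* XMIT_NEIGH and XMIT_XFRM */
		struct {			/* XMIT_DIRECT */
			int ifidx;
			unsigned char h_source[ETH_ALEN];
			unsigned char h_dest[ETH_ALEN];
		} out;
	};
};

static const char *xmit(const struct tuple *t)
{
	switch (t->xmit_type) {
	case XMIT_NEIGH:
		/* resolve the next hop from the cached route and use
		 * neigh_xmit(), as before this patch */
		return "neigh_xmit() via dst_cache";
	case XMIT_DIRECT:
		/* build the ethernet header from the cached addresses and
		 * hand the frame to dev_queue_xmit() on out.ifidx */
		return "dev_queue_xmit() via cached h_source/h_dest";
	case XMIT_XFRM:
	default:
		return "xfrm transformation path";
	}
}

int main(void)
{
	struct tuple t = { .xmit_type = XMIT_DIRECT };

	t.out.ifidx = 2;
	printf("%s\n", xmit(&t));
	return 0;
}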
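The second sketch models the forward-path walk that nft_dev_path_info()
implements at the bottom of this patch: the flow is upgraded from
FLOW_OFFLOAD_XMIT_NEIGH to FLOW_OFFLOAD_XMIT_DIRECT only when the hops
are ethernet or bridge devices, while a vlan hop (unsupported here) voids
the fast path. Again a hypothetical, simplified model, not kernel code.

/* Sketch only: models the nft_dev_path_info() walk. */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

enum xmit_type { XMIT_NEIGH, XMIT_DIRECT };
enum path_type { PATH_ETHERNET, PATH_BRIDGE, PATH_VLAN };

struct path {
	enum path_type type;
	unsigned char dev_addr[ETH_ALEN];
};

struct forward_info {
	enum xmit_type xmit_type;	/* zero-initialized: XMIT_NEIGH */
	unsigned char h_source[ETH_ALEN];
	unsigned char h_dest[ETH_ALEN];
	int have_dev;
};

static int zero_addr(const unsigned char *a)
{
	static const unsigned char zero[ETH_ALEN];

	return !memcmp(a, zero, ETH_ALEN);
}

/* Walk the stack like nft_dev_path_info(): cache the source address of
 * the first ethernet/bridge device, switch to direct transmission when a
 * bridge is crossed, and invalidate the device on a vlan hop. */
static void path_info(const struct path *stack, int num,
		      struct forward_info *info, const unsigned char *ha)
{
	int i;

	memcpy(info->h_dest, ha, ETH_ALEN);

	for (i = 0; i < num; i++) {
		switch (stack[i].type) {
		case PATH_ETHERNET:
			info->have_dev = 1;
			if (zero_addr(info->h_source))
				memcpy(info->h_source, stack[i].dev_addr, ETH_ALEN);
			break;
		case PATH_BRIDGE:
			if (zero_addr(info->h_source))
				memcpy(info->h_source, stack[i].dev_addr, ETH_ALEN);
			info->xmit_type = XMIT_DIRECT;
			break;
		case PATH_VLAN:
		default:
			info->have_dev = 0;
			break;
		}
	}
}

int main(void)
{
	const unsigned char ha[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	const struct path stack[] = {
		{ PATH_BRIDGE,   { 0x02, 0, 0, 0, 0, 0x02 } },
		{ PATH_ETHERNET, { 0x02, 0, 0, 0, 0, 0x03 } },
	};
	struct forward_info info = { 0 };

	path_info(stack, 2, &info, ha);
	if (!info.have_dev)
		printf("no direct path, keep the classic forward path\n");
	else
		printf("%s\n", info.xmit_type == XMIT_DIRECT ?
		       "direct: dev_queue_xmit()" : "fallback: neigh_xmit()");
	return 0;
}

Zero-initializing the info structure is what makes the neigh path the
default: in the patch, the zeroed nft_forward_info in
nft_dev_forward_path() likewise defaults to FLOW_OFFLOAD_XMIT_NEIGH,
which is defined as 0.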
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -92,6 +92,7 @@ enum flow_offload_tuple_dir {
 enum flow_offload_xmit_type {
 	FLOW_OFFLOAD_XMIT_NEIGH		= 0,
 	FLOW_OFFLOAD_XMIT_XFRM,
+	FLOW_OFFLOAD_XMIT_DIRECT,
 };
 
 struct flow_offload_tuple {
@@ -120,8 +121,14 @@ struct flow_offload_tuple {
 
 	u8				dir:6,
 					xmit_type:2;
 	u16				mtu;
-	struct dst_entry		*dst_cache;
+	union {
+		struct dst_entry	*dst_cache;
+		struct {
+			u32		ifidx;
+			u8		h_source[ETH_ALEN];
+			u8		h_dest[ETH_ALEN];
+		} out;
+	};
 };
 
 struct flow_offload_tuple_rhash {
@@ -168,6 +175,11 @@ struct nf_flow_route {
 		struct {
 			u32			ifindex;
 		} in;
+		struct {
+			u32			ifindex;
+			u8			h_source[ETH_ALEN];
+			u8			h_dest[ETH_ALEN];
+		} out;
 		enum flow_offload_xmit_type	xmit_type;
 	} tuple[FLOW_OFFLOAD_DIR_MAX];
 };
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -81,9 +81,6 @@ static int flow_offload_fill_route(struct flow_offload *flow,
 	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
 	struct dst_entry *dst = route->tuple[dir].dst;
 
-	if (!dst_hold_safe(route->tuple[dir].dst))
-		return -1;
-
 	switch (flow_tuple->l3proto) {
 	case NFPROTO_IPV4:
 		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
@@ -94,12 +91,36 @@ static int flow_offload_fill_route(struct flow_offload *flow,
 		break;
 	}
 
 	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
+
+	switch (route->tuple[dir].xmit_type) {
+	case FLOW_OFFLOAD_XMIT_DIRECT:
+		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
+		       ETH_ALEN);
+		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
+		       ETH_ALEN);
+		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
+		break;
+	case FLOW_OFFLOAD_XMIT_XFRM:
+	case FLOW_OFFLOAD_XMIT_NEIGH:
+		if (!dst_hold_safe(route->tuple[dir].dst))
+			return -1;
+
+		flow_tuple->dst_cache = dst;
+		break;
+	}
 	flow_tuple->xmit_type = route->tuple[dir].xmit_type;
-	flow_tuple->dst_cache = dst;
 
 	return 0;
 }
 
+static void nft_flow_dst_release(struct flow_offload *flow,
+				 enum flow_offload_tuple_dir dir)
+{
+	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
+	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
+		dst_release(flow->tuplehash[dir].tuple.dst_cache);
+}
+
 int flow_offload_route_init(struct flow_offload *flow,
 			    const struct nf_flow_route *route)
@@ -118,7 +139,7 @@ int flow_offload_route_init(struct flow_offload *flow,
 	return 0;
 
 err_route_reply:
-	dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
+	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
 
 	return err;
 }
@@ -169,8 +190,8 @@ static void flow_offload_fixup_ct(struct nf_conn *ct)
 
 static void flow_offload_route_release(struct flow_offload *flow)
 {
-	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
-	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
+	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
+	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
 }
 
 void flow_offload_free(struct flow_offload *flow)
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -248,6 +248,24 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
 	return NF_STOLEN;
 }
 
+static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
+				       const struct flow_offload_tuple_rhash *tuplehash,
+				       unsigned short type)
+{
+	struct net_device *outdev;
+
+	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
+	if (!outdev)
+		return NF_DROP;
+
+	skb->dev = outdev;
+	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
+			tuplehash->tuple.out.h_source, skb->len);
+	dev_queue_xmit(skb);
+
+	return NF_STOLEN;
+}
+
 static unsigned int
 nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 			const struct nf_hook_state *state)
@@ -262,6 +280,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 	struct rtable *rt;
 	unsigned int thoff;
 	struct iphdr *iph;
+	unsigned int ret;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		return NF_ACCEPT;
@@ -303,22 +322,32 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
 		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
 
-	rt = (struct rtable *)tuplehash->tuple.dst_cache;
-
 	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+		rt = (struct rtable *)tuplehash->tuple.dst_cache;
 		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
 		IPCB(skb)->iif = skb->dev->ifindex;
 		IPCB(skb)->flags = IPSKB_FORWARDED;
 		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
 	}
 
-	outdev = rt->dst.dev;
-	skb->dev = outdev;
-	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
-	skb_dst_set_noref(skb, &rt->dst);
-	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
+	switch (tuplehash->tuple.xmit_type) {
+	case FLOW_OFFLOAD_XMIT_NEIGH:
+		rt = (struct rtable *)tuplehash->tuple.dst_cache;
+		outdev = rt->dst.dev;
+		skb->dev = outdev;
+		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
+		skb_dst_set_noref(skb, &rt->dst);
+		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
+		ret = NF_STOLEN;
+		break;
+	case FLOW_OFFLOAD_XMIT_DIRECT:
+		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
+		if (ret == NF_DROP)
+			flow_offload_teardown(flow);
+		break;
+	}
 
-	return NF_STOLEN;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
@@ -504,6 +533,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
 	struct net_device *outdev;
 	struct ipv6hdr *ip6h;
 	struct rt6_info *rt;
+	unsigned int ret;
 
 	if (skb->protocol != htons(ETH_P_IPV6))
 		return NF_ACCEPT;
@@ -545,21 +575,31 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
 	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
 		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
 
-	rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
-
 	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
 		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 		IP6CB(skb)->iif = skb->dev->ifindex;
 		IP6CB(skb)->flags = IP6SKB_FORWARDED;
 		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
 	}
 
-	outdev = rt->dst.dev;
-	skb->dev = outdev;
-	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
-	skb_dst_set_noref(skb, &rt->dst);
-	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
+	switch (tuplehash->tuple.xmit_type) {
+	case FLOW_OFFLOAD_XMIT_NEIGH:
+		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
+		outdev = rt->dst.dev;
+		skb->dev = outdev;
+		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
+		skb_dst_set_noref(skb, &rt->dst);
+		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
+		ret = NF_STOLEN;
+		break;
+	case FLOW_OFFLOAD_XMIT_DIRECT:
+		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
+		if (ret == NF_DROP)
+			flow_offload_teardown(flow);
+		break;
+	}
 
-	return NF_STOLEN;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -39,12 +39,11 @@ static void nft_default_forward_path(struct nf_flow_route *route,
 static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
 				     const struct dst_entry *dst_cache,
 				     const struct nf_conn *ct,
-				     enum ip_conntrack_dir dir,
+				     enum ip_conntrack_dir dir, u8 *ha,
 				     struct net_device_path_stack *stack)
 {
 	const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
 	struct net_device *dev = dst_cache->dev;
-	unsigned char ha[ETH_ALEN];
 	struct neighbour *n;
 	u8 nud_state;
@@ -66,22 +65,35 @@ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
 
 struct nft_forward_info {
 	const struct net_device *dev;
+	u8 h_source[ETH_ALEN];
+	u8 h_dest[ETH_ALEN];
+	enum flow_offload_xmit_type xmit_type;
 };
 
 static void nft_dev_path_info(const struct net_device_path_stack *stack,
-			      struct nft_forward_info *info)
+			      struct nft_forward_info *info,
+			      unsigned char *ha)
 {
 	const struct net_device_path *path;
 	int i;
 
+	memcpy(info->h_dest, ha, ETH_ALEN);
+
 	for (i = 0; i < stack->num_paths; i++) {
 		path = &stack->path[i];
 		switch (path->type) {
 		case DEV_PATH_ETHERNET:
 			info->dev = path->dev;
+			if (is_zero_ether_addr(info->h_source))
+				memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
 			break;
-		case DEV_PATH_VLAN:
 		case DEV_PATH_BRIDGE:
+			if (is_zero_ether_addr(info->h_source))
+				memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
+
+			info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
+			break;
+		case DEV_PATH_VLAN:
 		default:
 			info->dev = NULL;
 			break;
@@ -114,14 +126,22 @@ static void nft_dev_forward_path(struct nf_flow_route *route,
 	const struct dst_entry *dst = route->tuple[dir].dst;
 	struct net_device_path_stack stack;
 	struct nft_forward_info info = {};
+	unsigned char ha[ETH_ALEN];
 
-	if (nft_dev_fill_forward_path(route, dst, ct, dir, &stack) >= 0)
-		nft_dev_path_info(&stack, &info);
+	if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
+		nft_dev_path_info(&stack, &info, ha);
 
 	if (!info.dev || !nft_flowtable_find_dev(info.dev, ft))
 		return;
 
 	route->tuple[!dir].in.ifindex = info.dev->ifindex;
+
+	if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
+		memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
+		memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
+		route->tuple[dir].out.ifindex = info.dev->ifindex;
+		route->tuple[dir].xmit_type = info.xmit_type;
+	}
 }
 
 static int nft_flow_route(const struct nft_pktinfo *pkt,