target/linux/generic/pending-5.10/640-11-netfilter-flowtable-add-offload-support-for-xmit-pat.patch
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Mon, 7 Dec 2020 20:31:44 +0100
Subject: [PATCH] netfilter: flowtable: add offload support for xmit path
 types

When the flow tuple xmit_type is set to FLOW_OFFLOAD_XMIT_DIRECT, the
dst_cache pointer is not valid, and the h_source/h_dest/ifidx out fields
need to be used.

This patch also adds the FLOW_ACTION_VLAN_PUSH action to pass the VLAN
tag to the driver.
---

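For illustration, the xmit-type handling described above boils down to the
sketch below. It is only a condensed summary of the hunks that follow, not
extra code to apply; the local names saddr, daddr, ifindex, i and entry are
made up for the example, while the tuple fields and flow_action helpers are
the ones this patch uses.

	this_tuple = &flow->tuplehash[dir].tuple;
	switch (this_tuple->xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		/* dst_cache is not valid on this path: take the cached
		 * Ethernet addresses and output interface from the tuple.
		 */
		saddr   = this_tuple->out.h_source;
		daddr   = this_tuple->out.h_dest;
		ifindex = this_tuple->out.ifidx;
		break;
	case FLOW_OFFLOAD_XMIT_NEIGH:
		/* Neighbour path: resolve the addresses through the
		 * tuple's dst_cache / neighbour entry, as before.
		 */
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Ingress VLAN tags recorded on the reverse tuple are replayed to
	 * the driver as FLOW_ACTION_VLAN_PUSH entries.
	 */
	other_tuple = &flow->tuplehash[!dir].tuple;
	for (i = 0; i < other_tuple->in_vlan_num; i++) {
		entry = flow_action_entry_next(flow_rule);
		entry->id = FLOW_ACTION_VLAN_PUSH;
		entry->vlan.vid = other_tuple->in_vlan[i].id;
		entry->vlan.proto = other_tuple->in_vlan[i].proto;
	}
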
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -175,28 +175,45 @@ static int flow_offload_eth_src(struct n
 				enum flow_offload_tuple_dir dir,
 				struct nf_flow_rule *flow_rule)
 {
-	const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
 	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
 	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
-	struct net_device *dev;
+	const struct flow_offload_tuple *other_tuple, *this_tuple;
+	struct net_device *dev = NULL;
+	const unsigned char *addr;
 	u32 mask, val;
 	u16 val16;
 
-	dev = dev_get_by_index(net, tuple->iifidx);
-	if (!dev)
-		return -ENOENT;
+	this_tuple = &flow->tuplehash[dir].tuple;
+
+	switch (this_tuple->xmit_type) {
+	case FLOW_OFFLOAD_XMIT_DIRECT:
+		addr = this_tuple->out.h_source;
+		break;
+	case FLOW_OFFLOAD_XMIT_NEIGH:
+		other_tuple = &flow->tuplehash[!dir].tuple;
+		dev = dev_get_by_index(net, other_tuple->iifidx);
+		if (!dev)
+			return -ENOENT;
+
+		addr = dev->dev_addr;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
 
 	mask = ~0xffff0000;
-	memcpy(&val16, dev->dev_addr, 2);
+	memcpy(&val16, addr, 2);
 	val = val16 << 16;
 	flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
 			    &val, &mask);
 
 	mask = ~0xffffffff;
-	memcpy(&val, dev->dev_addr + 2, 4);
+	memcpy(&val, addr + 2, 4);
 	flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
 			    &val, &mask);
-	dev_put(dev);
+
+	if (dev)
+		dev_put(dev);
 
 	return 0;
 }
@@ -208,27 +225,40 @@ static int flow_offload_eth_dst(struct n
 {
 	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
 	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
-	const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
+	const struct flow_offload_tuple *other_tuple, *this_tuple;
 	const struct dst_entry *dst_cache;
 	unsigned char ha[ETH_ALEN];
 	struct neighbour *n;
+	const void *daddr;
 	u32 mask, val;
 	u8 nud_state;
 	u16 val16;
 
-	dst_cache = flow->tuplehash[dir].tuple.dst_cache;
-	n = dst_neigh_lookup(dst_cache, daddr);
-	if (!n)
-		return -ENOENT;
-
-	read_lock_bh(&n->lock);
-	nud_state = n->nud_state;
-	ether_addr_copy(ha, n->ha);
-	read_unlock_bh(&n->lock);
+	this_tuple = &flow->tuplehash[dir].tuple;
 
-	if (!(nud_state & NUD_VALID)) {
+	switch (this_tuple->xmit_type) {
+	case FLOW_OFFLOAD_XMIT_DIRECT:
+		ether_addr_copy(ha, this_tuple->out.h_dest);
+		break;
+	case FLOW_OFFLOAD_XMIT_NEIGH:
+		other_tuple = &flow->tuplehash[!dir].tuple;
+		daddr = &other_tuple->src_v4;
+		dst_cache = this_tuple->dst_cache;
+		n = dst_neigh_lookup(dst_cache, daddr);
+		if (!n)
+			return -ENOENT;
+
+		read_lock_bh(&n->lock);
+		nud_state = n->nud_state;
+		ether_addr_copy(ha, n->ha);
+		read_unlock_bh(&n->lock);
 		neigh_release(n);
-		return -ENOENT;
+
+		if (!(nud_state & NUD_VALID))
+			return -ENOENT;
+		break;
+	default:
+		return -EOPNOTSUPP;
 	}
 
 	mask = ~0xffffffff;
@@ -241,7 +271,6 @@ static int flow_offload_eth_dst(struct n
 	val = val16;
 	flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
 			    &val, &mask);
-	neigh_release(n);
 
 	return 0;
 }
@@ -463,27 +492,52 @@ static void flow_offload_ipv4_checksum(s
 	}
 }
 
-static void flow_offload_redirect(const struct flow_offload *flow,
+static void flow_offload_redirect(struct net *net,
+				  const struct flow_offload *flow,
 				  enum flow_offload_tuple_dir dir,
 				  struct nf_flow_rule *flow_rule)
 {
-	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
-	struct rtable *rt;
+	const struct flow_offload_tuple *this_tuple, *other_tuple;
+	struct flow_action_entry *entry;
+	struct net_device *dev;
+	int ifindex;
+
+	this_tuple = &flow->tuplehash[dir].tuple;
+	switch (this_tuple->xmit_type) {
+	case FLOW_OFFLOAD_XMIT_DIRECT:
+		this_tuple = &flow->tuplehash[dir].tuple;
+		ifindex = this_tuple->out.ifidx;
+		break;
+	case FLOW_OFFLOAD_XMIT_NEIGH:
+		other_tuple = &flow->tuplehash[!dir].tuple;
+		ifindex = other_tuple->iifidx;
+		break;
+	default:
+		return;
+	}
 
-	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+	dev = dev_get_by_index(net, ifindex);
+	if (!dev)
+		return;
+
+	entry = flow_action_entry_next(flow_rule);
 	entry->id = FLOW_ACTION_REDIRECT;
-	entry->dev = rt->dst.dev;
-	dev_hold(rt->dst.dev);
+	entry->dev = dev;
 }
 
 static void flow_offload_encap_tunnel(const struct flow_offload *flow,
 				      enum flow_offload_tuple_dir dir,
 				      struct nf_flow_rule *flow_rule)
 {
+	const struct flow_offload_tuple *this_tuple;
 	struct flow_action_entry *entry;
 	struct dst_entry *dst;
 
-	dst = flow->tuplehash[dir].tuple.dst_cache;
+	this_tuple = &flow->tuplehash[dir].tuple;
+	if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
+		return;
+
+	dst = this_tuple->dst_cache;
 	if (dst && dst->lwtstate) {
 		struct ip_tunnel_info *tun_info;
 
@@ -500,10 +554,15 @@ static void flow_offload_decap_tunnel(co
 				      enum flow_offload_tuple_dir dir,
 				      struct nf_flow_rule *flow_rule)
 {
+	const struct flow_offload_tuple *other_tuple;
 	struct flow_action_entry *entry;
 	struct dst_entry *dst;
 
-	dst = flow->tuplehash[!dir].tuple.dst_cache;
+	other_tuple = &flow->tuplehash[!dir].tuple;
+	if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
+		return;
+
+	dst = other_tuple->dst_cache;
 	if (dst && dst->lwtstate) {
 		struct ip_tunnel_info *tun_info;
 
@@ -515,10 +574,14 @@ static void flow_offload_decap_tunnel(co
 	}
 }
 
-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
-			    enum flow_offload_tuple_dir dir,
-			    struct nf_flow_rule *flow_rule)
+static int
+nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
+			  enum flow_offload_tuple_dir dir,
+			  struct nf_flow_rule *flow_rule)
 {
+	const struct flow_offload_tuple *other_tuple;
+	int i;
+
 	flow_offload_decap_tunnel(flow, dir, flow_rule);
 	flow_offload_encap_tunnel(flow, dir, flow_rule);
 
@@ -526,6 +589,26 @@ int nf_flow_rule_route_ipv4(struct net *
 	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
 		return -1;
 
+	other_tuple = &flow->tuplehash[!dir].tuple;
+
+	for (i = 0; i < other_tuple->in_vlan_num; i++) {
+		struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+
+		entry->id = FLOW_ACTION_VLAN_PUSH;
+		entry->vlan.vid = other_tuple->in_vlan[i].id;
+		entry->vlan.proto = other_tuple->in_vlan[i].proto;
+	}
+
+	return 0;
+}
+
+int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+			    enum flow_offload_tuple_dir dir,
+			    struct nf_flow_rule *flow_rule)
+{
+	if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
+		return -1;
+
 	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
 		flow_offload_ipv4_snat(net, flow, dir, flow_rule);
 		flow_offload_port_snat(net, flow, dir, flow_rule);
@@ -538,7 +621,7 @@ int nf_flow_rule_route_ipv4(struct net *
 	    test_bit(NF_FLOW_DNAT, &flow->flags))
 		flow_offload_ipv4_checksum(net, flow, flow_rule);
 
-	flow_offload_redirect(flow, dir, flow_rule);
+	flow_offload_redirect(net, flow, dir, flow_rule);
 
 	return 0;
 }
@@ -548,11 +631,7 @@ int nf_flow_rule_route_ipv6(struct net *
 			    enum flow_offload_tuple_dir dir,
 			    struct nf_flow_rule *flow_rule)
 {
-	flow_offload_decap_tunnel(flow, dir, flow_rule);
-	flow_offload_encap_tunnel(flow, dir, flow_rule);
-
-	if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
-	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
+	if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
 		return -1;
 
 	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
@@ -564,7 +643,7 @@ int nf_flow_rule_route_ipv6(struct net *
 		flow_offload_port_dnat(net, flow, dir, flow_rule);
 	}
 
-	flow_offload_redirect(flow, dir, flow_rule);
+	flow_offload_redirect(net, flow, dir, flow_rule);
 
 	return 0;
 }
@@ -578,10 +657,10 @@ nf_flow_offload_rule_alloc(struct net *n
 			   enum flow_offload_tuple_dir dir)
 {
 	const struct nf_flowtable *flowtable = offload->flowtable;
+	const struct flow_offload_tuple *tuple, *other_tuple;
 	const struct flow_offload *flow = offload->flow;
-	const struct flow_offload_tuple *tuple;
+	struct dst_entry *other_dst = NULL;
 	struct nf_flow_rule *flow_rule;
-	struct dst_entry *other_dst;
 	int err = -ENOMEM;
 
 	flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
@@ -597,7 +676,10 @@ nf_flow_offload_rule_alloc(struct net *n
 	flow_rule->rule->match.key = &flow_rule->match.key;
 
 	tuple = &flow->tuplehash[dir].tuple;
-	other_dst = flow->tuplehash[!dir].tuple.dst_cache;
+	other_tuple = &flow->tuplehash[!dir].tuple;
+	if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH)
+		other_dst = other_tuple->dst_cache;
+
 	err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst);
 	if (err < 0)
 		goto err_flow_match;