2c2175922c283e8d39585715ff077605d219114c
[openwrt/staging/jow.git] / target / linux / realtek / files-5.15 / drivers / net / dsa / rtl83xx / tc.c
1 // SPDX-License-Identifier: GPL-2.0-only
2
3 #include <net/dsa.h>
4 #include <linux/delay.h>
5 #include <linux/netdevice.h>
6 #include <net/flow_offload.h>
7 #include <linux/rhashtable.h>
8 #include <asm/mach-rtl838x/mach-rtl83xx.h>
9
10 #include "rtl83xx.h"
11 #include "rtl838x.h"
12
13 /* Parse the flow rule for the matching conditions */
14 static int rtl83xx_parse_flow_rule(struct rtl838x_switch_priv *priv,
15 struct flow_rule *rule, struct rtl83xx_flow *flow)
16 {
17 struct flow_dissector *dissector = rule->match.dissector;
18
19 pr_debug("In %s\n", __func__);
20 /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
21 if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
22 (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
23 pr_err("Cannot form TC key: used_keys = 0x%x\n", dissector->used_keys);
24 return -EOPNOTSUPP;
25 }
26
27 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
28 struct flow_match_basic match;
29
30 pr_debug("%s: BASIC\n", __func__);
31 flow_rule_match_basic(rule, &match);
32 if (match.key->n_proto == htons(ETH_P_ARP))
33 flow->rule.frame_type = 0;
34 if (match.key->n_proto == htons(ETH_P_IP))
35 flow->rule.frame_type = 2;
36 if (match.key->n_proto == htons(ETH_P_IPV6))
37 flow->rule.frame_type = 3;
38 if ((match.key->n_proto == htons(ETH_P_ARP)) || flow->rule.frame_type)
39 flow->rule.frame_type_m = 3;
40 if (flow->rule.frame_type >= 2) {
41 if (match.key->ip_proto == IPPROTO_UDP)
42 flow->rule.frame_type_l4 = 0;
43 if (match.key->ip_proto == IPPROTO_TCP)
44 flow->rule.frame_type_l4 = 1;
45 if (match.key->ip_proto == IPPROTO_ICMP || match.key->ip_proto == IPPROTO_ICMPV6)
46 flow->rule.frame_type_l4 = 2;
47 if (match.key->ip_proto == IPPROTO_TCP)
48 flow->rule.frame_type_l4 = 3;
49 if ((match.key->ip_proto == IPPROTO_UDP) || flow->rule.frame_type_l4)
50 flow->rule.frame_type_l4_m = 7;
51 }
52 }
53
54 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
55 struct flow_match_eth_addrs match;
56
57 pr_debug("%s: ETH_ADDR\n", __func__);
58 flow_rule_match_eth_addrs(rule, &match);
59 ether_addr_copy(flow->rule.dmac, match.key->dst);
60 ether_addr_copy(flow->rule.dmac_m, match.mask->dst);
61 ether_addr_copy(flow->rule.smac, match.key->src);
62 ether_addr_copy(flow->rule.smac_m, match.mask->src);
63 }
64
65 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
66 struct flow_match_vlan match;
67
68 pr_debug("%s: VLAN\n", __func__);
69 flow_rule_match_vlan(rule, &match);
70 flow->rule.itag = match.key->vlan_id;
71 flow->rule.itag_m = match.mask->vlan_id;
72 /* TODO: What about match.key->vlan_priority? */
73 }
74
75 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
76 struct flow_match_ipv4_addrs match;
77
78 pr_debug("%s: IPV4\n", __func__);
79 flow_rule_match_ipv4_addrs(rule, &match);
80 flow->rule.is_ipv6 = false;
81 flow->rule.dip = match.key->dst;
82 flow->rule.dip_m = match.mask->dst;
83 flow->rule.sip = match.key->src;
84 flow->rule.sip_m = match.mask->src;
85 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
86 struct flow_match_ipv6_addrs match;
87
88 pr_debug("%s: IPV6\n", __func__);
89 flow->rule.is_ipv6 = true;
90 flow_rule_match_ipv6_addrs(rule, &match);
91 flow->rule.dip6 = match.key->dst;
92 flow->rule.dip6_m = match.mask->dst;
93 flow->rule.sip6 = match.key->src;
94 flow->rule.sip6_m = match.mask->src;
95 }
96
97 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
98 struct flow_match_ports match;
99
100 pr_debug("%s: PORTS\n", __func__);
101 flow_rule_match_ports(rule, &match);
102 flow->rule.dport = match.key->dst;
103 flow->rule.dport_m = match.mask->dst;
104 flow->rule.sport = match.key->src;
105 flow->rule.sport_m = match.mask->src;
106 }
107
108 /* TODO: ICMP */
109 return 0;
110 }
111
112 static void rtl83xx_flow_bypass_all(struct rtl83xx_flow *flow)
113 {
114 flow->rule.bypass_sel = true;
115 flow->rule.bypass_all = true;
116 flow->rule.bypass_igr_stp = true;
117 flow->rule.bypass_ibc_sc = true;
118 }
119
120 static int rtl83xx_parse_fwd(struct rtl838x_switch_priv *priv,
121 const struct flow_action_entry *act, struct rtl83xx_flow *flow)
122 {
123 struct net_device *dev = act->dev;
124 int port;
125
126 port = rtl83xx_port_is_under(dev, priv);
127 if (port < 0) {
128 netdev_info(dev, "%s: not a DSA device.\n", __func__);
129 return -EINVAL;
130 }
131
132 flow->rule.fwd_sel = true;
133 flow->rule.fwd_data = port;
134 pr_debug("Using port index: %d\n", port);
135 rtl83xx_flow_bypass_all(flow);
136
137 return 0;
138 }
139
140 static int rtl83xx_add_flow(struct rtl838x_switch_priv *priv, struct flow_cls_offload *f,
141 struct rtl83xx_flow *flow)
142 {
143 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
144 const struct flow_action_entry *act;
145 int i, err;
146
147 pr_debug("%s\n", __func__);
148
149 rtl83xx_parse_flow_rule(priv, rule, flow);
150
151 flow_action_for_each(i, act, &rule->action) {
152 switch (act->id) {
153 case FLOW_ACTION_DROP:
154 pr_debug("%s: DROP\n", __func__);
155 flow->rule.drop = true;
156 rtl83xx_flow_bypass_all(flow);
157 return 0;
158
159 case FLOW_ACTION_TRAP:
160 pr_debug("%s: TRAP\n", __func__);
161 flow->rule.fwd_data = priv->cpu_port;
162 flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT;
163 rtl83xx_flow_bypass_all(flow);
164 break;
165
166 case FLOW_ACTION_MANGLE:
167 pr_err("%s: FLOW_ACTION_MANGLE not supported\n", __func__);
168 return -EOPNOTSUPP;
169
170 case FLOW_ACTION_ADD:
171 pr_err("%s: FLOW_ACTION_ADD not supported\n", __func__);
172 return -EOPNOTSUPP;
173
174 case FLOW_ACTION_VLAN_PUSH:
175 pr_debug("%s: VLAN_PUSH\n", __func__);
176 /* TODO: act->vlan.proto */
177 flow->rule.ivid_act = PIE_ACT_VID_ASSIGN;
178 flow->rule.ivid_sel = true;
179 flow->rule.ivid_data = htons(act->vlan.vid);
180 flow->rule.ovid_act = PIE_ACT_VID_ASSIGN;
181 flow->rule.ovid_sel = true;
182 flow->rule.ovid_data = htons(act->vlan.vid);
183 flow->rule.fwd_mod_to_cpu = true;
184 break;
185
186 case FLOW_ACTION_VLAN_POP:
187 pr_debug("%s: VLAN_POP\n", __func__);
188 flow->rule.ivid_act = PIE_ACT_VID_ASSIGN;
189 flow->rule.ivid_data = 0;
190 flow->rule.ivid_sel = true;
191 flow->rule.ovid_act = PIE_ACT_VID_ASSIGN;
192 flow->rule.ovid_data = 0;
193 flow->rule.ovid_sel = true;
194 flow->rule.fwd_mod_to_cpu = true;
195 break;
196
197 case FLOW_ACTION_CSUM:
198 pr_err("%s: FLOW_ACTION_CSUM not supported\n", __func__);
199 return -EOPNOTSUPP;
200
201 case FLOW_ACTION_REDIRECT:
202 pr_debug("%s: REDIRECT\n", __func__);
203 err = rtl83xx_parse_fwd(priv, act, flow);
204 if (err)
205 return err;
206 flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT;
207 break;
208
209 case FLOW_ACTION_MIRRED:
210 pr_debug("%s: MIRRED\n", __func__);
211 err = rtl83xx_parse_fwd(priv, act, flow);
212 if (err)
213 return err;
214 flow->rule.fwd_act = PIE_ACT_COPY_TO_PORT;
215 break;
216
217 default:
218 pr_err("%s: Flow action not supported: %d\n", __func__, act->id);
219 return -EOPNOTSUPP;
220 }
221 }
222
223 return 0;
224 }
225
/* Hashtable parameters for priv->tc_ht: flows are keyed by the TC cookie
 * (unsigned long classifier identity) and linked via flow->node.
 */
static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct rtl83xx_flow, node),
	.key_offset = offsetof(struct rtl83xx_flow, cookie),
	.key_len = sizeof(((struct rtl83xx_flow *)0)->cookie),
	.automatic_shrinking = true,
};
232
233 static int rtl83xx_configure_flower(struct rtl838x_switch_priv *priv,
234 struct flow_cls_offload *f)
235 {
236 struct rtl83xx_flow *flow;
237 int err = 0;
238
239 pr_debug("In %s\n", __func__);
240
241 rcu_read_lock();
242 pr_debug("Cookie %08lx\n", f->cookie);
243 flow = rhashtable_lookup(&priv->tc_ht, &f->cookie, tc_ht_params);
244 if (flow) {
245 pr_info("%s: Got flow\n", __func__);
246 err = -EEXIST;
247 goto rcu_unlock;
248 }
249
250 rcu_unlock:
251 rcu_read_unlock();
252 if (flow)
253 goto out;
254 pr_debug("%s: New flow\n", __func__);
255
256 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
257 if (!flow) {
258 err = -ENOMEM;
259 goto out;
260 }
261
262 flow->cookie = f->cookie;
263 flow->priv = priv;
264
265 err = rhashtable_insert_fast(&priv->tc_ht, &flow->node, tc_ht_params);
266 if (err) {
267 pr_err("Could not insert add new rule\n");
268 goto out_free;
269 }
270
271 rtl83xx_add_flow(priv, f, flow); /* TODO: check error */
272
273 /* Add log action to flow */
274 flow->rule.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
275 if (flow->rule.packet_cntr >= 0) {
276 pr_debug("Using packet counter %d\n", flow->rule.packet_cntr);
277 flow->rule.log_sel = true;
278 flow->rule.log_data = flow->rule.packet_cntr;
279 }
280
281 err = priv->r->pie_rule_add(priv, &flow->rule);
282 return err;
283
284 out_free:
285 kfree(flow);
286 out:
287 pr_err("%s: error %d\n", __func__, err);
288
289 return err;
290 }
291
292 static int rtl83xx_delete_flower(struct rtl838x_switch_priv *priv,
293 struct flow_cls_offload * cls_flower)
294 {
295 struct rtl83xx_flow *flow;
296
297 pr_debug("In %s\n", __func__);
298 rcu_read_lock();
299 flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params);
300 if (!flow) {
301 rcu_read_unlock();
302 return -EINVAL;
303 }
304
305 priv->r->pie_rule_rm(priv, &flow->rule);
306
307 rhashtable_remove_fast(&priv->tc_ht, &flow->node, tc_ht_params);
308
309 kfree_rcu(flow, rcu_head);
310
311 rcu_read_unlock();
312
313 return 0;
314 }
315
316 static int rtl83xx_stats_flower(struct rtl838x_switch_priv *priv,
317 struct flow_cls_offload * cls_flower)
318 {
319 struct rtl83xx_flow *flow;
320 unsigned long lastused = 0;
321 int total_packets, new_packets;
322
323 pr_debug("%s: \n", __func__);
324 flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params);
325 if (!flow)
326 return -1;
327
328 if (flow->rule.packet_cntr >= 0) {
329 total_packets = priv->r->packet_cntr_read(flow->rule.packet_cntr);
330 pr_debug("Total packets: %d\n", total_packets);
331 new_packets = total_packets - flow->rule.last_packet_cnt;
332 flow->rule.last_packet_cnt = total_packets;
333 }
334
335 /* TODO: We need a second PIE rule to count the bytes */
336 flow_stats_update(&cls_flower->stats, 100 * new_packets, new_packets, 0, lastused,
337 FLOW_ACTION_HW_STATS_IMMEDIATE);
338
339 return 0;
340 }
341
342 static int rtl83xx_setup_tc_cls_flower(struct rtl838x_switch_priv *priv,
343 struct flow_cls_offload *cls_flower)
344 {
345 pr_debug("%s: %d\n", __func__, cls_flower->command);
346 switch (cls_flower->command) {
347 case FLOW_CLS_REPLACE:
348 return rtl83xx_configure_flower(priv, cls_flower);
349 case FLOW_CLS_DESTROY:
350 return rtl83xx_delete_flower(priv, cls_flower);
351 case FLOW_CLS_STATS:
352 return rtl83xx_stats_flower(priv, cls_flower);
353 default:
354 return -EOPNOTSUPP;
355 }
356 }
357
358
359 static int rtl83xx_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
360 void *cb_priv)
361 {
362 struct rtl838x_switch_priv *priv = cb_priv;
363
364 switch (type) {
365 case TC_SETUP_CLSFLOWER:
366 pr_debug("%s: TC_SETUP_CLSFLOWER\n", __func__);
367 return rtl83xx_setup_tc_cls_flower(priv, type_data);
368 default:
369 return -EOPNOTSUPP;
370 }
371 }
372
373 static LIST_HEAD(rtl83xx_block_cb_list);
374
375 int rtl83xx_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
376 {
377 struct rtl838x_switch_priv *priv;
378 struct flow_block_offload *f = type_data;
379 static bool first_time = true;
380 int err;
381
382 pr_debug("%s: %d\n", __func__, type);
383
384 if(!netdev_uses_dsa(dev)) {
385 pr_err("%s: no DSA\n", __func__);
386 return 0;
387 }
388 priv = dev->dsa_ptr->ds->priv;
389
390 switch (type) {
391 case TC_SETUP_BLOCK:
392 if (first_time) {
393 first_time = false;
394 err = rhashtable_init(&priv->tc_ht, &tc_ht_params);
395 if (err)
396 pr_err("%s: Could not initialize hash table\n", __func__);
397 }
398
399 f->unlocked_driver_cb = true;
400 return flow_block_cb_setup_simple(type_data,
401 &rtl83xx_block_cb_list,
402 rtl83xx_setup_tc_block_cb,
403 priv, priv, true);
404 default:
405 return -EOPNOTSUPP;
406 }
407
408 return 0;
409 }