mediatek: update to latest kernel patchset from v4.13-rc
[openwrt/openwrt.git] / target / linux / mediatek / patches-4.9 / 0026-net-mediatek-backport-v4.10-driver.patch
1 From 99d9d02a05df503184be094de336e7515fe3e235 Mon Sep 17 00:00:00 2001
2 From: John Crispin <john@phrozen.org>
3 Date: Thu, 10 Aug 2017 14:26:29 +0200
4 Subject: [PATCH 26/57] net: mediatek: backport v4.10 driver
5
6 Signed-off-by: John Crispin <john@phrozen.org>
7 ---
8 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 49 ++-
9 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 16 +-
10 drivers/net/ethernet/mediatek/mtk_hnat/Makefile | 4 +
11 drivers/net/ethernet/mediatek/mtk_hnat/hnat.c | 315 +++++++++++++++
12 drivers/net/ethernet/mediatek/mtk_hnat/hnat.h | 425 +++++++++++++++++++++
13 .../net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c | 259 +++++++++++++
14 .../net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c | 289 ++++++++++++++
15 .../net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h | 44 +++
16 8 files changed, 1378 insertions(+), 23 deletions(-)
17 create mode 100644 drivers/net/ethernet/mediatek/mtk_hnat/Makefile
18 create mode 100644 drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
19 create mode 100644 drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
20 create mode 100644 drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
21 create mode 100644 drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
22 create mode 100644 drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
23
24 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
25 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
26 @@ -462,8 +462,8 @@ static void mtk_stats_update(struct mtk_
27 }
28 }
29
30 -static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
31 - struct rtnl_link_stats64 *storage)
32 +static struct rtnl_link_stats64 * mtk_get_stats64(struct net_device *dev,
33 + struct rtnl_link_stats64 *storage)
34 {
35 struct mtk_mac *mac = netdev_priv(dev);
36 struct mtk_hw_stats *hw_stats = mac->hw_stats;
37 @@ -615,7 +615,7 @@ static int mtk_tx_map(struct sk_buff *sk
38 struct mtk_mac *mac = netdev_priv(dev);
39 struct mtk_eth *eth = mac->hw;
40 struct mtk_tx_dma *itxd, *txd;
41 - struct mtk_tx_buf *tx_buf;
42 + struct mtk_tx_buf *itx_buf, *tx_buf;
43 dma_addr_t mapped_addr;
44 unsigned int nr_frags;
45 int i, n_desc = 1;
46 @@ -629,8 +629,8 @@ static int mtk_tx_map(struct sk_buff *sk
47 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
48 txd4 |= fport;
49
50 - tx_buf = mtk_desc_to_tx_buf(ring, itxd);
51 - memset(tx_buf, 0, sizeof(*tx_buf));
52 + itx_buf = mtk_desc_to_tx_buf(ring, itxd);
53 + memset(itx_buf, 0, sizeof(*itx_buf));
54
55 if (gso)
56 txd4 |= TX_DMA_TSO;
57 @@ -649,9 +649,11 @@ static int mtk_tx_map(struct sk_buff *sk
58 return -ENOMEM;
59
60 WRITE_ONCE(itxd->txd1, mapped_addr);
61 - tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
62 - dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
63 - dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
64 + itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
65 + itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
66 + MTK_TX_FLAGS_FPORT1;
67 + dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
68 + dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
69
70 /* TX SG offload */
71 txd = itxd;
72 @@ -687,11 +689,13 @@ static int mtk_tx_map(struct sk_buff *sk
73 last_frag * TX_DMA_LS0));
74 WRITE_ONCE(txd->txd4, fport);
75
76 - tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
77 tx_buf = mtk_desc_to_tx_buf(ring, txd);
78 memset(tx_buf, 0, sizeof(*tx_buf));
79 -
80 + tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
81 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
82 + tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
83 + MTK_TX_FLAGS_FPORT1;
84 +
85 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
86 dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
87 frag_size -= frag_map_size;
88 @@ -700,7 +704,7 @@ static int mtk_tx_map(struct sk_buff *sk
89 }
90
91 /* store skb to cleanup */
92 - tx_buf->skb = skb;
93 + itx_buf->skb = skb;
94
95 WRITE_ONCE(itxd->txd4, txd4);
96 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
97 @@ -845,7 +849,7 @@ static int mtk_start_xmit(struct sk_buff
98 drop:
99 spin_unlock(&eth->page_lock);
100 stats->tx_dropped++;
101 - dev_kfree_skb(skb);
102 + dev_kfree_skb_any(skb);
103 return NETDEV_TX_OK;
104 }
105
106 @@ -1014,17 +1018,16 @@ static int mtk_poll_tx(struct mtk_eth *e
107
108 while ((cpu != dma) && budget) {
109 u32 next_cpu = desc->txd2;
110 - int mac;
111 + int mac = 0;
112
113 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
114 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
115 break;
116
117 - mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
118 - TX_DMA_FPORT_MASK;
119 - mac--;
120 -
121 tx_buf = mtk_desc_to_tx_buf(ring, desc);
122 + if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
123 + mac = 1;
124 +
125 skb = tx_buf->skb;
126 if (!skb) {
127 condition = 1;
128 @@ -1848,6 +1851,12 @@ static int mtk_hw_init(struct mtk_eth *e
129 /* GE2, Force 1000M/FD, FC ON */
130 mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
131
132 + /* Indicates CDM to parse the MTK special tag from CPU
133 + * which also works for untagged packets.
134 + */
135 + val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
136 + mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
137 +
138 /* Enable RX VLan Offloading */
139 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
140
141 @@ -1910,10 +1919,9 @@ static int __init mtk_init(struct net_de
142
143 /* If the mac address is invalid, use random mac address */
144 if (!is_valid_ether_addr(dev->dev_addr)) {
145 - random_ether_addr(dev->dev_addr);
146 + eth_hw_addr_random(dev);
147 dev_err(eth->dev, "generated random MAC address %pM\n",
148 dev->dev_addr);
149 - dev->addr_assign_type = NET_ADDR_RANDOM;
150 }
151
152 return mtk_phy_connect(dev);
153 @@ -2247,7 +2255,6 @@ static const struct net_device_ops mtk_n
154 .ndo_set_mac_address = mtk_set_mac_address,
155 .ndo_validate_addr = eth_validate_addr,
156 .ndo_do_ioctl = mtk_do_ioctl,
157 - .ndo_change_mtu = eth_change_mtu,
158 .ndo_tx_timeout = mtk_tx_timeout,
159 .ndo_get_stats64 = mtk_get_stats64,
160 .ndo_fix_features = mtk_fix_features,
161 @@ -2320,6 +2327,8 @@ static int mtk_add_mac(struct mtk_eth *e
162 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
163
164 eth->netdev[id]->irq = eth->irq[0];
165 + eth->netdev[id]->dev.of_node = np;
166 +
167 return 0;
168
169 free_netdev:
170 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
171 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
172 @@ -70,6 +70,10 @@
173 /* Frame Engine Interrupt Grouping Register */
174 #define MTK_FE_INT_GRP 0x20
175
176 +/* CDMQ Ingress Control Register */
177 +#define MTK_CDMQ_IG_CTRL 0x1400
178 +#define MTK_CDMQ_STAG_EN BIT(0)
179 +
180 +/* CDMP Egress Control Register */
181 #define MTK_CDMP_EG_CTRL 0x404
182
183 @@ -406,12 +410,18 @@ struct mtk_hw_stats {
184 struct u64_stats_sync syncp;
185 };
186
187 -/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
188 - * memory was allocated so that it can be freed properly
189 - */
190 enum mtk_tx_flags {
191 + /* PDMA descriptor can point at 1-2 segments. This enum allows us to
192 + * track how memory was allocated so that it can be freed properly.
193 + */
194 MTK_TX_FLAGS_SINGLE0 = 0x01,
195 MTK_TX_FLAGS_PAGE0 = 0x02,
196 +
197 + /* MTK_TX_FLAGS_FPORTx tracks which port the SKB was transmitted
198 + * on, instead of looking it up in the hardware TX descriptor.
199 + */
200 + MTK_TX_FLAGS_FPORT0 = 0x04,
201 + MTK_TX_FLAGS_FPORT1 = 0x08,
202 };
203
204 /* This enum allows us to identify how the clock is defined on the array of the
205 --- /dev/null
206 +++ b/drivers/net/ethernet/mediatek/mtk_hnat/Makefile
207 @@ -0,0 +1,4 @@
208 +ccflags-y=-Werror
209 +
210 +obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtkhnat.o
211 +mtkhnat-objs := hnat.o hnat_nf_hook.o hnat_debugfs.o
212 --- /dev/null
213 +++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
214 @@ -0,0 +1,315 @@
215 +/* This program is free software; you can redistribute it and/or modify
216 + * it under the terms of the GNU General Public License as published by
217 + * the Free Software Foundation; version 2 of the License
218 + *
219 + * This program is distributed in the hope that it will be useful,
220 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
221 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
222 + * GNU General Public License for more details.
223 + *
224 + * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
225 + * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
226 + */
227 +
228 +#include <linux/dma-mapping.h>
229 +#include <linux/delay.h>
230 +#include <linux/if.h>
231 +#include <linux/io.h>
232 +#include <linux/module.h>
233 +#include <linux/of_device.h>
234 +#include <linux/platform_device.h>
235 +#include <linux/reset.h>
236 +
237 +#include "hnat.h"
238 +
239 +struct hnat_priv *host;
240 +
241 +static void cr_set_bits(void __iomem * reg, u32 bs)
242 +{
243 + u32 val = readl(reg);
244 +
245 + val |= bs;
246 + writel(val, reg);
247 +}
248 +
249 +static void cr_clr_bits(void __iomem * reg, u32 bs)
250 +{
251 + u32 val = readl(reg);
252 +
253 + val &= ~bs;
254 + writel(val, reg);
255 +}
256 +
257 +static void cr_set_field(void __iomem * reg, u32 field, u32 val)
258 +{
259 + unsigned int tv = readl(reg);
260 +
261 + tv &= ~field;
262 + tv |= ((val) << (ffs((unsigned int)field) - 1));
263 + writel(tv, reg);
264 +}
265 +
266 +static int hnat_start(void)
267 +{
268 + u32 foe_table_sz;
269 +
270 + /* map the FOE table */
271 + foe_table_sz = FOE_4TB_SIZ * sizeof(struct foe_entry);
272 + host->foe_table_cpu =
273 + dma_alloc_coherent(host->dev, foe_table_sz, &host->foe_table_dev,
274 + GFP_KERNEL);
275 + if (!host->foe_table_cpu)
276 + return -1;
277 +
278 + writel(host->foe_table_dev, host->ppe_base + PPE_TB_BASE);
279 + memset(host->foe_table_cpu, 0, foe_table_sz);
280 +
281 + /* setup hashing */
282 + cr_set_field(host->ppe_base + PPE_TB_CFG, TB_ETRY_NUM, TABLE_4K);
283 + cr_set_field(host->ppe_base + PPE_TB_CFG, HASH_MODE, HASH_MODE_1);
284 + writel(HASH_SEED_KEY, host->ppe_base + PPE_HASH_SEED);
285 + cr_set_field(host->ppe_base + PPE_TB_CFG, XMODE, 0);
286 + cr_set_field(host->ppe_base + PPE_TB_CFG, TB_ENTRY_SIZE, ENTRY_64B);
287 + cr_set_field(host->ppe_base + PPE_TB_CFG, SMA, SMA_FWD_CPU_BUILD_ENTRY);
288 +
289 + /* set ip proto */
290 + writel(0xFFFFFFFF, host->ppe_base + PPE_IP_PROT_CHK);
291 +
292 + /* setup caching */
293 + cr_set_field(host->ppe_base + PPE_CAH_CTRL, CAH_X_MODE, 1);
294 + cr_set_field(host->ppe_base + PPE_CAH_CTRL, CAH_X_MODE, 0);
295 + cr_set_field(host->ppe_base + PPE_CAH_CTRL, CAH_EN, 1);
296 +
297 + /* enable FOE */
298 + cr_set_bits(host->ppe_base + PPE_FLOW_CFG,
299 + BIT_IPV4_NAT_EN | BIT_IPV4_NAPT_EN |
300 + BIT_IPV4_NAT_FRAG_EN | BIT_IPV4_HASH_GREK);
301 +
302 + /* setup FOE aging */
303 + cr_set_field(host->ppe_base + PPE_TB_CFG, NTU_AGE, 1);
304 + cr_set_field(host->ppe_base + PPE_TB_CFG, UNBD_AGE, 1);
305 + cr_set_field(host->ppe_base + PPE_UNB_AGE, UNB_MNP, 1000);
306 + cr_set_field(host->ppe_base + PPE_UNB_AGE, UNB_DLTA, 3);
307 + cr_set_field(host->ppe_base + PPE_TB_CFG, TCP_AGE, 1);
308 + cr_set_field(host->ppe_base + PPE_TB_CFG, UDP_AGE, 1);
309 + cr_set_field(host->ppe_base + PPE_TB_CFG, FIN_AGE, 1);
310 + cr_set_field(host->ppe_base + PPE_BND_AGE_0, UDP_DLTA, 5);
311 + cr_set_field(host->ppe_base + PPE_BND_AGE_0, NTU_DLTA, 5);
312 + cr_set_field(host->ppe_base + PPE_BND_AGE_1, FIN_DLTA, 5);
313 + cr_set_field(host->ppe_base + PPE_BND_AGE_1, TCP_DLTA, 5);
314 +
315 + /* setup FOE ka */
316 + cr_set_field(host->ppe_base + PPE_TB_CFG, KA_CFG, 3);
317 + cr_set_field(host->ppe_base + PPE_KA, KA_T, 1);
318 + cr_set_field(host->ppe_base + PPE_KA, TCP_KA, 1);
319 + cr_set_field(host->ppe_base + PPE_KA, UDP_KA, 1);
320 + cr_set_field(host->ppe_base + PPE_BIND_LMT_1, NTU_KA, 1);
321 +
322 + /* setup FOE rate limit */
323 + cr_set_field(host->ppe_base + PPE_BIND_LMT_0, QURT_LMT, 16383);
324 + cr_set_field(host->ppe_base + PPE_BIND_LMT_0, HALF_LMT, 16383);
325 + cr_set_field(host->ppe_base + PPE_BIND_LMT_1, FULL_LMT, 16383);
326 + cr_set_field(host->ppe_base + PPE_BNDR, BIND_RATE, 1);
327 +
328 + /* setup FOE cf gen */
329 + cr_set_field(host->ppe_base + PPE_GLO_CFG, PPE_EN, 1);
330 + writel(0, host->ppe_base + PPE_DFT_CPORT); // pdma
331 + //writel(0x55555555, host->ppe_base + PPE_DFT_CPORT); //qdma
332 + cr_set_field(host->ppe_base + PPE_GLO_CFG, TTL0_DRP, 1);
333 +
334 + /* fwd packets from gmac to PPE */
335 + cr_clr_bits(host->fe_base + GDMA1_FWD_CFG, GDM1_ALL_FRC_MASK);
336 + cr_set_bits(host->fe_base + GDMA1_FWD_CFG,
337 + BITS_GDM1_ALL_FRC_P_PPE);
338 + cr_clr_bits(host->fe_base + GDMA2_FWD_CFG, GDM2_ALL_FRC_MASK);
339 + cr_set_bits(host->fe_base + GDMA2_FWD_CFG,
340 + BITS_GDM2_ALL_FRC_P_PPE);
341 +
342 + dev_info(host->dev, "hwnat start\n");
343 +
344 + return 0;
345 +}
346 +
347 +static int ppe_busy_wait(void)
348 +{
349 + unsigned long t_start = jiffies;
350 + u32 r = 0;
351 +
352 + while (1) {
353 + r = readl((host->ppe_base + 0x0));
354 + if (!(r & BIT(31)))
355 + return 0;
356 + if (time_after(jiffies, t_start + HZ))
357 + break;
358 + usleep_range(10, 20);
359 + }
360 +
361 + dev_err(host->dev, "ppe:%s timeout\n", __func__);
362 +
363 + return -1;
364 +}
365 +
366 +static void hnat_stop(void)
367 +{
368 + u32 foe_table_sz;
369 + struct foe_entry *entry, *end;
370 + u32 r1 = 0, r2 = 0;
371 +
372 + /* discard all traffic while we disable the PPE */
373 + cr_clr_bits(host->fe_base + GDMA1_FWD_CFG, GDM1_ALL_FRC_MASK);
374 + cr_set_bits(host->fe_base + GDMA1_FWD_CFG,
375 + BITS_GDM1_ALL_FRC_P_DISCARD);
376 + cr_clr_bits(host->fe_base + GDMA2_FWD_CFG, GDM2_ALL_FRC_MASK);
377 + cr_set_bits(host->fe_base + GDMA2_FWD_CFG,
378 + BITS_GDM2_ALL_FRC_P_DISCARD);
379 +
380 + if (ppe_busy_wait()) {
381 + reset_control_reset(host->rstc);
382 + msleep(2000);
383 + return;
384 + }
385 +
386 + entry = host->foe_table_cpu;
387 + end = host->foe_table_cpu + FOE_4TB_SIZ;
388 + while (entry < end) {
389 + entry->bfib1.state = INVALID;
390 + entry++;
391 + }
392 +
393 + /* disable caching */
394 + cr_set_field(host->ppe_base + PPE_CAH_CTRL, CAH_X_MODE, 1);
395 + cr_set_field(host->ppe_base + PPE_CAH_CTRL, CAH_X_MODE, 0);
396 + cr_set_field(host->ppe_base + PPE_CAH_CTRL, CAH_EN, 0);
397 +
398 + /* the cache flush has to happen before disabling hnat */
399 + cr_set_field(host->ppe_base + PPE_GLO_CFG, PPE_EN, 0);
400 +
401 + /* disable FOE */
402 + cr_clr_bits(host->ppe_base + PPE_FLOW_CFG,
403 + BIT_IPV4_NAPT_EN | BIT_IPV4_NAT_EN |
404 + BIT_IPV4_NAT_FRAG_EN |
405 + BIT_FUC_FOE | BIT_FMC_FOE | BIT_FUC_FOE);
406 +
407 + /* disable FOE aging */
408 + cr_set_field(host->ppe_base + PPE_TB_CFG, NTU_AGE, 0);
409 + cr_set_field(host->ppe_base + PPE_TB_CFG, UNBD_AGE, 0);
410 + cr_set_field(host->ppe_base + PPE_TB_CFG, TCP_AGE, 0);
411 + cr_set_field(host->ppe_base + PPE_TB_CFG, UDP_AGE, 0);
412 + cr_set_field(host->ppe_base + PPE_TB_CFG, FIN_AGE, 0);
413 +
414 + r1 = readl(host->fe_base + 0x100);
415 + r2 = readl(host->fe_base + 0x10c);
416 +
417 + dev_info(host->dev, "0x100 = 0x%x, 0x10c = 0x%x\n", r1, r2);
418 +
419 + if (((r1 & 0xff00) >> 0x8) >= (r1 & 0xff) ||
420 + ((r1 & 0xff00) >> 0x8) >= (r2 & 0xff)) {
421 + dev_info(host->dev, "reset pse\n");
422 + writel(0x1, host->fe_base + 0x4);
423 + }
424 +
425 + /* free the FOE table */
426 + foe_table_sz = FOE_4TB_SIZ * sizeof(struct foe_entry);
427 + dma_free_coherent(NULL, foe_table_sz, host->foe_table_cpu,
428 + host->foe_table_dev);
429 + writel(0, host->ppe_base + PPE_TB_BASE);
430 +
431 + if (ppe_busy_wait()) {
432 + reset_control_reset(host->rstc);
433 + msleep(2000);
434 + return;
435 + }
436 +
437 + /* send all traffic back to the DMA engine */
438 + cr_clr_bits(host->fe_base + GDMA1_FWD_CFG, GDM1_ALL_FRC_MASK);
439 + cr_set_bits(host->fe_base + GDMA1_FWD_CFG,
440 + BITS_GDM1_ALL_FRC_P_CPU_PDMA);
441 + cr_clr_bits(host->fe_base + GDMA2_FWD_CFG, GDM2_ALL_FRC_MASK);
442 + cr_set_bits(host->fe_base + GDMA2_FWD_CFG,
443 + BITS_GDM2_ALL_FRC_P_CPU_PDMA);
444 +}
445 +
446 +static int hnat_probe(struct platform_device *pdev)
447 +{
448 + int err = 0;
449 + struct resource *res ;
450 + const char *name;
451 + struct device_node *np;
452 +
453 + host = devm_kzalloc(&pdev->dev, sizeof(struct hnat_priv), GFP_KERNEL);
454 + if (!host)
455 + return -ENOMEM;
456 +
457 + host->dev = &pdev->dev;
458 + np = host->dev->of_node;
459 +
460 + err = of_property_read_string(np, "mtketh-wan", &name);
461 + if (err < 0)
462 + return -EINVAL;
463 +
464 + strncpy(host->wan, (char *)name, IFNAMSIZ);
465 + dev_info(&pdev->dev, "wan = %s\n", host->wan);
466 +
467 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
468 + if (!res)
469 + return -ENOENT;
470 +
471 + host->fe_base = devm_ioremap_nocache(&pdev->dev, res->start,
472 + res->end - res->start + 1);
473 + if (!host->fe_base)
474 + return -EADDRNOTAVAIL;
475 +
476 + host->ppe_base = host->fe_base + 0xe00;
477 + err = hnat_init_debugfs(host);
478 + if (err)
479 + return err;
480 +
481 + host->rstc = devm_reset_control_get(&pdev->dev, NULL);
482 + if (IS_ERR(host->rstc))
483 + return PTR_ERR(host->rstc);
484 +
485 + err = hnat_start();
486 + if (err)
487 + goto err_out;
488 +
489 + err = hnat_register_nf_hooks();
490 + if (err)
491 + goto err_out;
492 +
493 + return 0;
494 +
495 +err_out:
496 + hnat_stop();
497 + hnat_deinit_debugfs(host);
498 + return err;
499 +}
500 +
501 +static int hnat_remove(struct platform_device *pdev)
502 +{
503 + hnat_unregister_nf_hooks();
504 + hnat_stop();
505 + hnat_deinit_debugfs(host);
506 +
507 + return 0;
508 +}
509 +
510 +const struct of_device_id of_hnat_match[] = {
511 + { .compatible = "mediatek,mt7623-hnat" },
512 + {},
513 +};
514 +
515 +static struct platform_driver hnat_driver = {
516 + .probe = hnat_probe,
517 + .remove = hnat_remove,
518 + .driver = {
519 + .name = "mediatek_soc_hnat",
520 + .of_match_table = of_hnat_match,
521 + },
522 +};
523 +
524 +module_platform_driver(hnat_driver);
525 +
526 +MODULE_LICENSE("GPL v2");
527 +MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
528 +MODULE_AUTHOR("John Crispin <john@phrozen.org>");
529 +MODULE_DESCRIPTION("Mediatek Hardware NAT");
530 --- /dev/null
531 +++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
532 @@ -0,0 +1,425 @@
533 +/* This program is free software; you can redistribute it and/or modify
534 + * it under the terms of the GNU General Public License as published by
535 + * the Free Software Foundation; version 2 of the License
536 + *
537 + * This program is distributed in the hope that it will be useful,
538 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
539 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
540 + * GNU General Public License for more details.
541 + *
542 + * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
543 + * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
544 + */
545 +
546 +#include <linux/debugfs.h>
547 +#include <linux/string.h>
548 +#include <linux/if.h>
549 +#include <linux/if_ether.h>
550 +
551 +/*--------------------------------------------------------------------------*/
552 +/* Register Offset*/
553 +/*--------------------------------------------------------------------------*/
554 +#define PPE_GLO_CFG 0x00
555 +#define PPE_FLOW_CFG 0x04
556 +#define PPE_IP_PROT_CHK 0x08
557 +#define PPE_IP_PROT_0 0x0C
558 +#define PPE_IP_PROT_1 0x10
559 +#define PPE_IP_PROT_2 0x14
560 +#define PPE_IP_PROT_3 0x18
561 +#define PPE_TB_CFG 0x1C
562 +#define PPE_TB_BASE 0x20
563 +#define PPE_TB_USED 0x24
564 +#define PPE_BNDR 0x28
565 +#define PPE_BIND_LMT_0 0x2C
566 +#define PPE_BIND_LMT_1 0x30
567 +#define PPE_KA 0x34
568 +#define PPE_UNB_AGE 0x38
569 +#define PPE_BND_AGE_0 0x3C
570 +#define PPE_BND_AGE_1 0x40
571 +#define PPE_HASH_SEED 0x44
572 +#define PPE_DFT_CPORT 0x48
573 +#define PPE_MCAST_PPSE 0x84
574 +#define PPE_MCAST_L_0 0x88
575 +#define PPE_MCAST_H_0 0x8C
576 +#define PPE_MCAST_L_1 0x90
577 +#define PPE_MCAST_H_1 0x94
578 +#define PPE_MCAST_L_2 0x98
579 +#define PPE_MCAST_H_2 0x9C
580 +#define PPE_MCAST_L_3 0xA0
581 +#define PPE_MCAST_H_3 0xA4
582 +#define PPE_MCAST_L_4 0xA8
583 +#define PPE_MCAST_H_4 0xAC
584 +#define PPE_MCAST_L_5 0xB0
585 +#define PPE_MCAST_H_5 0xB4
586 +#define PPE_MCAST_L_6 0xBC
587 +#define PPE_MCAST_H_6 0xC0
588 +#define PPE_MCAST_L_7 0xC4
589 +#define PPE_MCAST_H_7 0xC8
590 +#define PPE_MCAST_L_8 0xCC
591 +#define PPE_MCAST_H_8 0xD0
592 +#define PPE_MCAST_L_9 0xD4
593 +#define PPE_MCAST_H_9 0xD8
594 +#define PPE_MCAST_L_A 0xDC
595 +#define PPE_MCAST_H_A 0xE0
596 +#define PPE_MCAST_L_B 0xE4
597 +#define PPE_MCAST_H_B 0xE8
598 +#define PPE_MCAST_L_C 0xEC
599 +#define PPE_MCAST_H_C 0xF0
600 +#define PPE_MCAST_L_D 0xF4
601 +#define PPE_MCAST_H_D 0xF8
602 +#define PPE_MCAST_L_E 0xFC
603 +#define PPE_MCAST_H_E 0xE0
604 +#define PPE_MCAST_L_F 0x100
605 +#define PPE_MCAST_H_F 0x104
606 +#define PPE_MTU_DRP 0x108
607 +#define PPE_MTU_VLYR_0 0x10C
608 +#define PPE_MTU_VLYR_1 0x110
609 +#define PPE_MTU_VLYR_2 0x114
610 +#define PPE_VPM_TPID 0x118
611 +#define PPE_CAH_CTRL 0x120
612 +#define PPE_CAH_TAG_SRH 0x124
613 +#define PPE_CAH_LINE_RW 0x128
614 +#define PPE_CAH_WDATA 0x12C
615 +#define PPE_CAH_RDATA 0x130
616 +
617 +#define GDMA1_FWD_CFG 0x500
618 +#define GDMA2_FWD_CFG 0x1500
619 +/*--------------------------------------------------------------------------*/
620 +/* Register Mask*/
621 +/*--------------------------------------------------------------------------*/
622 +/* PPE_TB_CFG mask */
623 +#define TB_ETRY_NUM (0x7 << 0) /* RW */
624 +#define TB_ENTRY_SIZE (0x1 << 3) /* RW */
625 +#define SMA (0x3 << 4) /* RW */
626 +#define NTU_AGE (0x1 << 7) /* RW */
627 +#define UNBD_AGE (0x1 << 8) /* RW */
628 +#define TCP_AGE (0x1 << 9) /* RW */
629 +#define UDP_AGE (0x1 << 10) /* RW */
630 +#define FIN_AGE (0x1 << 11) /* RW */
631 +#define KA_CFG (0x3<< 12)
632 +#define HASH_MODE (0x3 << 14) /* RW */
633 +#define XMODE (0x3 << 18) /* RW */
634 +
635 +/*PPE_CAH_CTRL mask*/
636 +#define CAH_EN (0x1 << 0) /* RW */
637 +#define CAH_X_MODE (0x1 << 9) /* RW */
638 +
639 +/*PPE_UNB_AGE mask*/
640 +#define UNB_DLTA (0xff << 0) /* RW */
641 +#define UNB_MNP (0xffff << 16) /* RW */
642 +
643 +/*PPE_BND_AGE_0 mask*/
644 +#define UDP_DLTA (0xffff << 0) /* RW */
645 +#define NTU_DLTA (0xffff << 16) /* RW */
646 +
647 +/*PPE_BND_AGE_1 mask*/
648 +#define TCP_DLTA (0xffff << 0) /* RW */
649 +#define FIN_DLTA (0xffff << 16) /* RW */
650 +
651 +/*PPE_KA mask*/
652 +#define KA_T (0xffff << 0) /* RW */
653 +#define TCP_KA (0xff << 16) /* RW */
654 +#define UDP_KA (0xff << 24) /* RW */
655 +
656 +/*PPE_BIND_LMT_0 mask*/
657 +#define QURT_LMT (0x3ff << 0) /* RW */
658 +#define HALF_LMT (0x3ff << 16) /* RW */
659 +
660 +/*PPE_BIND_LMT_1 mask*/
661 +#define FULL_LMT (0x3fff << 0) /* RW */
662 +#define NTU_KA (0xff << 16) /* RW */
663 +
664 +/*PPE_BNDR mask*/
665 +#define BIND_RATE (0xffff << 0) /* RW */
666 +#define PBND_RD_PRD (0xffff << 16) /* RW */
667 +
668 +/*PPE_GLO_CFG mask*/
669 +#define PPE_EN (0x1 << 0) /* RW */
670 +#define TTL0_DRP (0x1 << 4) /* RW */
671 +
672 +/*GDMA1_FWD_CFG mask */
673 +#define GDM1_UFRC_MASK (0x7 << 12) /* RW */
674 +#define GDM1_BFRC_MASK (0x7 << 8) /*RW*/
675 +#define GDM1_MFRC_MASK (0x7 << 4) /*RW*/
676 +#define GDM1_OFRC_MASK (0x7 << 0) /*RW*/
677 +#define GDM1_ALL_FRC_MASK (GDM1_UFRC_MASK | GDM1_BFRC_MASK | GDM1_MFRC_MASK | GDM1_OFRC_MASK)
678 +
679 +#define GDM2_UFRC_MASK (0x7 << 12) /* RW */
680 +#define GDM2_BFRC_MASK (0x7 << 8) /*RW*/
681 +#define GDM2_MFRC_MASK (0x7 << 4) /*RW*/
682 +#define GDM2_OFRC_MASK (0x7 << 0) /*RW*/
683 +#define GDM2_ALL_FRC_MASK (GDM2_UFRC_MASK | GDM2_BFRC_MASK | GDM2_MFRC_MASK | GDM2_OFRC_MASK)
684 +
685 +/*--------------------------------------------------------------------------*/
686 +/* Descriptor Structure */
687 +/*--------------------------------------------------------------------------*/
688 +#define HNAT_SKB_CB(__skb) ((struct hnat_skb_cb *)&((__skb)->cb[40]))
689 +struct hnat_skb_cb {
690 + __u16 iif;
691 +};
692 +
693 +struct hnat_unbind_info_blk {
694 + u32 time_stamp:8;
695 + u32 pcnt:16; /* packet count */
696 + u32 preb:1;
697 + u32 pkt_type:3;
698 + u32 state:2;
699 + u32 udp:1;
700 + u32 sta:1; /* static entry */
701 +} __attribute__ ((packed));
702 +
703 +struct hnat_bind_info_blk {
704 + u32 time_stamp:15;
705 + u32 ka:1; /* keep alive */
706 + u32 vlan_layer:3;
707 + u32 psn:1; /* egress packet has PPPoE session */
708 + u32 vpm:1; /* 0:ethertype remark, 1:0x8100(CR default) */
709 + u32 ps:1; /* packet sampling */
710 + u32 cah:1; /* cacheable flag */
711 + u32 rmt:1; /* remove tunnel ip header (6rd/dslite only) */
712 + u32 ttl:1;
713 + u32 pkt_type:3;
714 + u32 state:2;
715 + u32 udp:1;
716 + u32 sta:1; /* static entry */
717 +} __attribute__ ((packed));
718 +
719 +struct hnat_info_blk2 {
720 + u32 qid:4; /* QID in Qos Port */
721 + u32 fqos:1; /* force to PSE QoS port */
722 + u32 dp:3; /* force to PSE port x
723 + 0:PSE,1:GSW, 2:GMAC,4:PPE,5:QDMA,7=DROP */
724 + u32 mcast:1; /* multicast this packet to CPU */
725 + u32 pcpl:1; /* OSBN */
726 + u32 mlen:1; /* 0:post 1:pre packet length in meter */
727 + u32 alen:1; /* 0:post 1:pre packet length in accounting */
728 + u32 port_mg:6; /* port meter group */
729 + u32 port_ag:6; /* port account group */
730 + u32 dscp:8; /* DSCP value */
731 +} __attribute__ ((packed));
732 +
733 +struct hnat_ipv4_hnapt {
734 + union {
735 + struct hnat_bind_info_blk bfib1;
736 + struct hnat_unbind_info_blk udib1;
737 + u32 info_blk1;
738 + };
739 + u32 sip;
740 + u32 dip;
741 + u16 dport;
742 + u16 sport;
743 + union {
744 + struct hnat_info_blk2 iblk2;
745 + u32 info_blk2;
746 + };
747 + u32 new_sip;
748 + u32 new_dip;
749 + u16 new_dport;
750 + u16 new_sport;
751 + u32 resv1;
752 + u32 resv2;
753 + u32 resv3:26;
754 + u32 act_dp:6; /* UDF */
755 + u16 vlan1;
756 + u16 etype;
757 + u32 dmac_hi;
758 + u16 vlan2;
759 + u16 dmac_lo;
760 + u32 smac_hi;
761 + u16 pppoe_id;
762 + u16 smac_lo;
763 +} __attribute__ ((packed));
764 +
765 +struct foe_entry {
766 + union {
767 + struct hnat_unbind_info_blk udib1;
768 + struct hnat_bind_info_blk bfib1;
769 + struct hnat_ipv4_hnapt ipv4_hnapt;
770 + };
771 +};
772 +
773 +#define HNAT_AC_BYTE_LO(x) (0x2000 + (x * 16))
774 +#define HNAT_AC_BYTE_HI(x) (0x2004 + (x * 16))
775 +#define HNAT_AC_PACKET(x) (0x2008 + (x * 16))
776 +#define HNAT_COUNTER_MAX 64
777 +#define HNAT_AC_TIMER_INTERVAL (HZ)
778 +
779 +struct hnat_accounting {
780 + u64 bytes;
781 + u64 packets;
782 +};
783 +
784 +struct hnat_priv {
785 + struct device *dev;
786 + void __iomem *fe_base;
787 + void __iomem *ppe_base;
788 + struct foe_entry *foe_table_cpu;
789 + dma_addr_t foe_table_dev;
790 + u8 enable;
791 + u8 enable1;
792 + struct dentry *root;
793 + struct debugfs_regset32 *regset;
794 +
795 + struct timer_list ac_timer;
796 + struct hnat_accounting acct[HNAT_COUNTER_MAX];
797 +
798 + /* devices we play for */
799 + char wan[IFNAMSIZ];
800 +
801 + struct reset_control *rstc;
802 +};
803 +
804 +enum FoeEntryState {
805 + INVALID = 0,
806 + UNBIND = 1,
807 + BIND = 2,
808 + FIN = 3
809 +};
810 +/*--------------------------------------------------------------------------*/
811 +/* Common Definition*/
812 +/*--------------------------------------------------------------------------*/
813 +
814 +#define FOE_4TB_SIZ 4096
815 +#define HASH_SEED_KEY 0x12345678
816 +
817 +/*PPE_TB_CFG value*/
818 +#define ENTRY_80B 1
819 +#define ENTRY_64B 0
820 +#define TABLE_1K 0
821 +#define TABLE_2K 1
822 +#define TABLE_4K 2
823 +#define TABLE_8K 3
824 +#define TABLE_16K 4
825 +#define SMA_DROP 0 /* Drop the packet */
826 +#define SMA_DROP2 1 /* Drop the packet */
827 +#define SMA_ONLY_FWD_CPU 2 /* Only Forward to CPU */
828 +#define SMA_FWD_CPU_BUILD_ENTRY 3 /* Forward to CPU and build new FOE entry */
829 +#define HASH_MODE_0 0
830 +#define HASH_MODE_1 1
831 +#define HASH_MODE_2 2
832 +#define HASH_MODE_3 3
833 +
834 +/*PPE_FLOW_CFG*/
835 +#define BIT_FUC_FOE BIT(2)
836 +#define BIT_FMC_FOE BIT(1)
837 +#define BIT_FBC_FOE BIT(0)
838 +#define BIT_IPV4_NAT_EN BIT(12)
839 +#define BIT_IPV4_NAPT_EN BIT(13)
840 +#define BIT_IPV4_NAT_FRAG_EN BIT(17)
841 +#define BIT_IPV4_HASH_GREK BIT(19)
842 +
843 +/*GDMA1_FWD_CFG value */
844 +#define BITS_GDM1_UFRC_P_PPE (NR_PPE_PORT << 12)
845 +#define BITS_GDM1_BFRC_P_PPE (NR_PPE_PORT << 8)
846 +#define BITS_GDM1_MFRC_P_PPE (NR_PPE_PORT << 4)
847 +#define BITS_GDM1_OFRC_P_PPE (NR_PPE_PORT << 0)
848 +#define BITS_GDM1_ALL_FRC_P_PPE (BITS_GDM1_UFRC_P_PPE | BITS_GDM1_BFRC_P_PPE | BITS_GDM1_MFRC_P_PPE | BITS_GDM1_OFRC_P_PPE)
849 +
850 +#define BITS_GDM1_UFRC_P_CPU_PDMA (NR_PDMA_PORT << 12)
851 +#define BITS_GDM1_BFRC_P_CPU_PDMA (NR_PDMA_PORT << 8)
852 +#define BITS_GDM1_MFRC_P_CPU_PDMA (NR_PDMA_PORT << 4)
853 +#define BITS_GDM1_OFRC_P_CPU_PDMA (NR_PDMA_PORT << 0)
854 +#define BITS_GDM1_ALL_FRC_P_CPU_PDMA (BITS_GDM1_UFRC_P_CPU_PDMA | BITS_GDM1_BFRC_P_CPU_PDMA | BITS_GDM1_MFRC_P_CPU_PDMA | BITS_GDM1_OFRC_P_CPU_PDMA)
855 +
856 +#define BITS_GDM1_UFRC_P_CPU_QDMA (NR_QDMA_PORT << 12)
857 +#define BITS_GDM1_BFRC_P_CPU_QDMA (NR_QDMA_PORT << 8)
858 +#define BITS_GDM1_MFRC_P_CPU_QDMA (NR_QDMA_PORT << 4)
859 +#define BITS_GDM1_OFRC_P_CPU_QDMA (NR_QDMA_PORT << 0)
860 +#define BITS_GDM1_ALL_FRC_P_CPU_QDMA (BITS_GDM1_UFRC_P_CPU_QDMA | BITS_GDM1_BFRC_P_CPU_QDMA | BITS_GDM1_MFRC_P_CPU_QDMA | BITS_GDM1_OFRC_P_CPU_QDMA)
861 +
862 +#define BITS_GDM1_UFRC_P_DISCARD (NR_DISCARD << 12)
863 +#define BITS_GDM1_BFRC_P_DISCARD (NR_DISCARD << 8)
864 +#define BITS_GDM1_MFRC_P_DISCARD (NR_DISCARD << 4)
865 +#define BITS_GDM1_OFRC_P_DISCARD (NR_DISCARD << 0)
866 +#define BITS_GDM1_ALL_FRC_P_DISCARD (BITS_GDM1_UFRC_P_DISCARD | BITS_GDM1_BFRC_P_DISCARD | BITS_GDM1_MFRC_P_DISCARD | BITS_GDM1_OFRC_P_DISCARD)
867 +
868 +#define BITS_GDM2_UFRC_P_PPE (NR_PPE_PORT << 12)
869 +#define BITS_GDM2_BFRC_P_PPE (NR_PPE_PORT << 8)
870 +#define BITS_GDM2_MFRC_P_PPE (NR_PPE_PORT << 4)
871 +#define BITS_GDM2_OFRC_P_PPE (NR_PPE_PORT << 0)
872 +#define BITS_GDM2_ALL_FRC_P_PPE (BITS_GDM2_UFRC_P_PPE | BITS_GDM2_BFRC_P_PPE | BITS_GDM2_MFRC_P_PPE | BITS_GDM2_OFRC_P_PPE)
873 +
874 +#define BITS_GDM2_UFRC_P_CPU_PDMA (NR_PDMA_PORT << 12)
875 +#define BITS_GDM2_BFRC_P_CPU_PDMA (NR_PDMA_PORT << 8)
876 +#define BITS_GDM2_MFRC_P_CPU_PDMA (NR_PDMA_PORT << 4)
877 +#define BITS_GDM2_OFRC_P_CPU_PDMA (NR_PDMA_PORT << 0)
878 +#define BITS_GDM2_ALL_FRC_P_CPU_PDMA (BITS_GDM2_UFRC_P_CPU_PDMA | BITS_GDM2_BFRC_P_CPU_PDMA | BITS_GDM2_MFRC_P_CPU_PDMA | BITS_GDM2_OFRC_P_CPU_PDMA)
879 +
880 +#define BITS_GDM2_UFRC_P_CPU_QDMA (NR_QDMA_PORT << 12)
881 +#define BITS_GDM2_BFRC_P_CPU_QDMA (NR_QDMA_PORT << 8)
882 +#define BITS_GDM2_MFRC_P_CPU_QDMA (NR_QDMA_PORT << 4)
883 +#define BITS_GDM2_OFRC_P_CPU_QDMA (NR_QDMA_PORT << 0)
884 +#define BITS_GDM2_ALL_FRC_P_CPU_QDMA (BITS_GDM2_UFRC_P_CPU_QDMA | BITS_GDM2_BFRC_P_CPU_QDMA | BITS_GDM2_MFRC_P_CPU_QDMA | BITS_GDM2_OFRC_P_CPU_QDMA)
885 +
886 +#define BITS_GDM2_UFRC_P_DISCARD (NR_DISCARD << 12)
887 +#define BITS_GDM2_BFRC_P_DISCARD (NR_DISCARD << 8)
888 +#define BITS_GDM2_MFRC_P_DISCARD (NR_DISCARD << 4)
889 +#define BITS_GDM2_OFRC_P_DISCARD (NR_DISCARD << 0)
890 +#define BITS_GDM2_ALL_FRC_P_DISCARD (BITS_GDM2_UFRC_P_DISCARD | BITS_GDM2_BFRC_P_DISCARD | BITS_GDM2_MFRC_P_DISCARD | BITS_GDM2_OFRC_P_DISCARD)
891 +
892 +#define hnat_is_enabled(host) (host->enable)
893 +#define hnat_enabled(host) (host->enable = 1)
894 +#define hnat_disabled(host) (host->enable = 0)
895 +#define hnat_is_enabled1(host) (host->enable1)
896 +#define hnat_enabled1(host) (host->enable1 = 1)
897 +#define hnat_disabled1(host) (host->enable1 = 0)
898 +
899 +#define entry_hnat_is_bound(e) (e->bfib1.state == BIND)
900 +#define entry_hnat_state(e) (e->bfib1.state)
901 +
902 +#define skb_hnat_is_hashed(skb) (skb_hnat_entry(skb)!=0x3fff && skb_hnat_entry(skb)< FOE_4TB_SIZ)
903 +#define FROM_GE_LAN(skb) (HNAT_SKB_CB(skb)->iif == FOE_MAGIC_GE_LAN)
904 +#define FROM_GE_WAN(skb) (HNAT_SKB_CB(skb)->iif == FOE_MAGIC_GE_WAN)
905 +#define FROM_GE_PPD(skb) (HNAT_SKB_CB(skb)->iif == FOE_MAGIC_GE_PPD)
906 +#define FOE_MAGIC_GE_WAN 0x7273
907 +#define FOE_MAGIC_GE_LAN 0x7272
908 +#define FOE_INVALID 0xffff
909 +
910 +#define TCP_FIN_SYN_RST 0x0C /* Ingress packet is TCP fin/syn/rst (for IPv4 NAPT/DS-Lite or IPv6 5T-route/6RD) */
911 +#define UN_HIT 0x0D/* FOE Un-hit */
912 +#define HIT_UNBIND 0x0E/* FOE Hit unbind */
913 +#define HIT_UNBIND_RATE_REACH 0xf
914 +#define HNAT_HIT_BIND_OLD_DUP_HDR 0x15
915 +#define HNAT_HIT_BIND_FORCE_TO_CPU 0x16
916 +
917 +#define HIT_BIND_KEEPALIVE_MC_NEW_HDR 0x14
918 +#define HIT_BIND_KEEPALIVE_DUP_OLD_HDR 0x15
919 +#define IPV4_HNAPT 0
920 +#define IPV4_HNAT 1
921 +#define IP_FORMAT(addr) \
922 + ((unsigned char *)&addr)[3], \
923 + ((unsigned char *)&addr)[2], \
924 + ((unsigned char *)&addr)[1], \
925 + ((unsigned char *)&addr)[0]
926 +
927 +/*PSE Ports*/
928 +#define NR_PDMA_PORT 0
929 +#define NR_GMAC1_PORT 1
930 +#define NR_GMAC2_PORT 2
931 +#define NR_PPE_PORT 4
932 +#define NR_QDMA_PORT 5
933 +#define NR_DISCARD 7
934 +#define IS_LAN(dev) (!strncmp(dev->name, "lan", 3))
935 +#define IS_WAN(dev) (!strcmp(dev->name, host->wan))
936 +#define IS_BR(dev) (!strncmp(dev->name, "br", 2))
937 +#define IS_IPV4_HNAPT(x) (((x)->bfib1.pkt_type == IPV4_HNAPT) ? 1: 0)
938 +#define IS_IPV4_HNAT(x) (((x)->bfib1.pkt_type == IPV4_HNAT) ? 1 : 0)
939 +#define IS_IPV4_GRP(x) (IS_IPV4_HNAPT(x) | IS_IPV4_HNAT(x))
940 +
941 +#define es(entry) (entry_state[entry->bfib1.state])
942 +#define ei(entry, end) (FOE_4TB_SIZ - (int)(end - entry))
943 +#define pt(entry) (packet_type[entry->ipv4_hnapt.bfib1.pkt_type])
944 +#define ipv4_smac(mac,e) ({mac[0]=e->ipv4_hnapt.smac_hi[3]; mac[1]=e->ipv4_hnapt.smac_hi[2];\
945 + mac[2]=e->ipv4_hnapt.smac_hi[1]; mac[3]=e->ipv4_hnapt.smac_hi[0];\
946 + mac[4]=e->ipv4_hnapt.smac_lo[1]; mac[5]=e->ipv4_hnapt.smac_lo[0];})
947 +#define ipv4_dmac(mac,e) ({mac[0]=e->ipv4_hnapt.dmac_hi[3]; mac[1]=e->ipv4_hnapt.dmac_hi[2];\
948 + mac[2]=e->ipv4_hnapt.dmac_hi[1]; mac[3]=e->ipv4_hnapt.dmac_hi[0];\
949 + mac[4]=e->ipv4_hnapt.dmac_lo[1]; mac[5]=e->ipv4_hnapt.dmac_lo[0];})
950 +
951 +extern struct hnat_priv *host;
952 +
953 +extern void hnat_deinit_debugfs(struct hnat_priv *h);
954 +extern int __init hnat_init_debugfs(struct hnat_priv *h);
955 +extern int hnat_register_nf_hooks(void);
956 +extern void hnat_unregister_nf_hooks(void);
957 +
958 --- /dev/null
959 +++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
960 @@ -0,0 +1,489 @@
961 +/* This program is free software; you can redistribute it and/or modify
962 + * it under the terms of the GNU General Public License as published by
963 + * the Free Software Foundation; version 2 of the License
964 + *
965 + * This program is distributed in the hope that it will be useful,
966 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
967 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
968 + * GNU General Public License for more details.
969 + *
970 + * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
971 + * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
972 + */
973 +
974 +#include <linux/kernel.h>
975 +#include <linux/slab.h>
976 +#include <linux/dma-mapping.h>
977 +
978 +#include "hnat.h"
979 +
980 +static const char *entry_state[] = {
981 + "INVALID",
982 + "UNBIND",
983 + "BIND",
984 + "FIN"
985 +};
986 +
987 +static const char *packet_type[] = {
988 + "IPV4_HNAPT",
989 + "IPV4_HNAT",
990 + "IPV6_1T_ROUTE",
991 + "IPV4_DSLITE",
992 + "IPV6_3T_ROUTE",
993 + "IPV6_5T_ROUTE",
994 + "IPV6_6RD",
995 +};
996 +
997 +static int hnat_debug_show(struct seq_file *m, void *private)
998 +{
999 + struct hnat_priv *h = host;
1000 + struct foe_entry *entry, *end;
1001 +
1002 + entry = h->foe_table_cpu;
1003 + end = h->foe_table_cpu + FOE_4TB_SIZ;
1004 + while (entry < end) {
1005 + if (!entry->bfib1.state) {
1006 + entry++;
1007 + continue;
1008 + }
1009 +
1010 + if (IS_IPV4_HNAPT(entry)) {
1011 + __be32 saddr = htonl(entry->ipv4_hnapt.sip);
1012 + __be32 daddr = htonl(entry->ipv4_hnapt.dip);
1013 + __be32 nsaddr = htonl(entry->ipv4_hnapt.new_sip);
1014 + __be32 ndaddr = htonl(entry->ipv4_hnapt.new_dip);
1015 + unsigned char h_dest[ETH_ALEN];
1016 + unsigned char h_source[ETH_ALEN];
1017 +
1018 + *((u32*) h_source) = swab32(entry->ipv4_hnapt.smac_hi);
1019 + *((u16*) &h_source[4]) = swab16(entry->ipv4_hnapt.smac_lo);
1020 + *((u32*) h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
1021 + *((u16*) &h_dest[4]) = swab16(entry->ipv4_hnapt.dmac_lo);
1022 + seq_printf(m,
1023 + "(%p)0x%05x|state=%s|type=%s|%pI4:%d->%pI4:%d=>%pI4:%d->%pI4:%d|%pM=>%pM|etype=0x%04x|info1=0x%x|info2=0x%x|vlan1=%d|vlan2=%d\n",
1024 + (void *)h->foe_table_dev + ((void *)(entry) - (void *)h->foe_table_cpu),
1025 + ei(entry, end), es(entry), pt(entry),
1026 + &saddr, entry->ipv4_hnapt.sport,
1027 + &daddr, entry->ipv4_hnapt.dport,
1028 + &nsaddr, entry->ipv4_hnapt.new_sport,
1029 + &ndaddr, entry->ipv4_hnapt.new_dport, h_source,
1030 + h_dest, ntohs(entry->ipv4_hnapt.etype),
1031 + entry->ipv4_hnapt.info_blk1,
1032 + entry->ipv4_hnapt.info_blk2,
1033 + entry->ipv4_hnapt.vlan1,
1034 + entry->ipv4_hnapt.vlan2);
1035 + } else
1036 + seq_printf(m, "0x%05x state=%s\n",
1037 + ei(entry, end), es(entry));
1038 + entry++;
1039 + }
1040 +
1041 + return 0;
1042 +}
1043 +
1044 +static int hnat_debug_open(struct inode *inode, struct file *file)
1045 +{
1046 + return single_open(file, hnat_debug_show, file->private_data);
1047 +}
1048 +
1049 +static const struct file_operations hnat_debug_fops = {
1050 + .open = hnat_debug_open,
1051 + .read = seq_read,
1052 + .llseek = seq_lseek,
1053 + .release = single_release,
1054 +};
1055 +
1056 +#define QDMA_TX_SCH_TX 0x1a14
1057 +
1058 +static ssize_t hnat_sched_show(struct file *file, char __user *user_buf,
1059 + size_t count, loff_t *ppos)
1060 +{
1061 + int id = (int) file->private_data;
1062 + struct hnat_priv *h = host;
1063 + u32 reg = readl(h->fe_base + QDMA_TX_SCH_TX);
1064 + int enable;
1065 + int max_rate;
1066 + char *buf;
1067 + unsigned int len = 0, buf_len = 1500;
1068 + ssize_t ret_cnt;
1069 +
1070 + buf = kzalloc(buf_len, GFP_KERNEL);
1071 + if (!buf)
1072 + return -ENOMEM;
1073 +
1074 +
1075 + if (id)
1076 + reg >>= 16;
1077 + reg &= 0xffff;
1078 + enable = !! (reg & BIT(11));
1079 + max_rate = ((reg >> 4) & 0x7f);
1080 + reg &= 0xf;
1081 + while (reg--)
1082 + max_rate *= 10;
1083 +
1084 + len += scnprintf(buf + len, buf_len - len,
1085 + "EN\tMAX\n%d\t%d\n", enable, max_rate);
1086 +
1087 + if (len > buf_len)
1088 + len = buf_len;
1089 +
1090 + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1091 +
1092 + kfree(buf);
1093 + return ret_cnt;
1094 +}
1095 +
1096 +static ssize_t hnat_sched_write(struct file *file,
1097 + const char __user *buf, size_t length, loff_t *offset)
1098 +{
1099 + int id = (int) file->private_data;
1100 + struct hnat_priv *h = host;
1101 + char line[64];
1102 + int enable, rate, exp = 0, shift = 0;
1103 + size_t size;
1104 + u32 reg = readl(h->fe_base + QDMA_TX_SCH_TX);
1105 + u32 val = 0;
1106 +
1107 +	if (length >= sizeof(line))
1108 +		return -EINVAL;
1109 +
1110 +	if (copy_from_user(line, buf, length))
1111 +		return -EFAULT;
1112 +	line[length] = '\0';
1113 +	sscanf(line, "%d %d", &enable, &rate);
1114 +
1115 + while (rate > 127) {
1116 + rate /= 10;
1117 + exp++;
1118 + }
1119 +
1120 + if (enable)
1121 + val |= BIT(11);
1122 + val |= (rate & 0x7f) << 4;
1123 + val |= exp & 0xf;
1124 + if (id)
1125 + shift = 16;
1126 + reg &= ~(0xffff << shift);
1127 + reg |= val << shift;
1128 + writel(reg, h->fe_base + QDMA_TX_SCH_TX);
1129 +
1130 + size = strlen(line);
1131 + *offset += size;
1132 +
1133 + return length;
1134 +}
1135 +
1136 +static const struct file_operations hnat_sched_fops = {
1137 + .open = simple_open,
1138 + .read = hnat_sched_show,
1139 + .write = hnat_sched_write,
1140 + .llseek = default_llseek,
1141 +};
1142 +
1143 +#define QTX_CFG(x) (0x1800 + (x * 0x10))
1144 +#define QTX_SCH(x) (0x1804 + (x * 0x10))
1145 +
1146 +static ssize_t hnat_queue_show(struct file *file, char __user *user_buf,
1147 + size_t count, loff_t *ppos)
1148 +{
1149 + struct hnat_priv *h = host;
1150 + int id = (int) file->private_data;
1151 + u32 reg = readl(h->fe_base + QTX_SCH(id));
1152 + u32 cfg = readl(h->fe_base + QTX_CFG(id));
1153 + int scheduler = !!(reg & BIT(31));
1154 + int min_rate_en = !!(reg & BIT(27));
1155 + int min_rate = (reg >> 20) & 0x7f;
1156 + int min_rate_exp = (reg >> 16) & 0xf;
1157 + int max_rate_en = !!(reg & BIT(11));
1158 + int max_weight = (reg >> 12) & 0xf;
1159 + int max_rate = (reg >> 4) & 0x7f;
1160 + int max_rate_exp = reg & 0xf;
1161 + char *buf;
1162 + unsigned int len = 0, buf_len = 1500;
1163 + ssize_t ret_cnt;
1164 +
1165 + buf = kzalloc(buf_len, GFP_KERNEL);
1166 + if (!buf)
1167 + return -ENOMEM;
1168 +
1169 + while (min_rate_exp--)
1170 + min_rate *= 10;
1171 +
1172 + while (max_rate_exp--)
1173 + max_rate *= 10;
1174 +
1175 + len += scnprintf(buf + len, buf_len - len,
1176 + "scheduler: %d\nhw resv: %d\nsw resv: %d\n",
1177 + scheduler, (cfg >> 8) & 0xff, cfg & 0xff);
1178 + len += scnprintf(buf + len, buf_len - len,
1179 + "\tEN\tRATE\t\tWEIGHT\n");
1180 + len += scnprintf(buf + len, buf_len - len,
1181 + "max\t%d\t%8d\t%d\n", max_rate_en, max_rate, max_weight);
1182 + len += scnprintf(buf + len, buf_len - len,
1183 + "min\t%d\t%8d\t-\n", min_rate_en, min_rate);
1184 +
1185 + if (len > buf_len)
1186 + len = buf_len;
1187 +
1188 + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1189 +
1190 + kfree(buf);
1191 + return ret_cnt;
1192 +}
1193 +
1194 +static ssize_t hnat_queue_write(struct file *file,
1195 + const char __user *buf, size_t length, loff_t *offset)
1196 +{
1197 + int id = (int) file->private_data;
1198 + struct hnat_priv *h = host;
1199 + char line[64];
1200 + int max_enable, max_rate, max_exp = 0;
1201 + int min_enable, min_rate, min_exp = 0;
1202 + int weight;
1203 + int resv;
1204 + int scheduler;
1205 + size_t size;
1206 + u32 reg = readl(h->fe_base + QTX_SCH(id));
1207 +
1208 +	if (length >= sizeof(line))
1209 +		return -EINVAL;
1210 +
1211 +	if (copy_from_user(line, buf, length))
1212 +		return -EFAULT;
1213 +	line[length] = '\0';
1214 +	sscanf(line, "%d %d %d %d %d %d %d", &scheduler, &min_enable, &min_rate, &max_enable, &max_rate, &weight, &resv);
1215 +
1216 + while (max_rate > 127) {
1217 + max_rate /= 10;
1218 + max_exp++;
1219 + }
1220 +
1221 + while (min_rate > 127) {
1222 + min_rate /= 10;
1223 + min_exp++;
1224 + }
1225 +
1226 + reg &= 0x70000000;
1227 + if (scheduler)
1228 + reg |= BIT(31);
1229 + if (min_enable)
1230 + reg |= BIT(27);
1231 + reg |= (min_rate & 0x7f) << 20;
1232 + reg |= (min_exp & 0xf) << 16;
1233 + if (max_enable)
1234 + reg |= BIT(11);
1235 + reg |= (weight & 0xf) << 12;
1236 + reg |= (max_rate & 0x7f) << 4;
1237 + reg |= max_exp & 0xf;
1238 + writel(reg, h->fe_base + QTX_SCH(id));
1239 +
1240 + resv &= 0xff;
1241 + reg = readl(h->fe_base + QTX_CFG(id));
1242 + reg &= 0xffff0000;
1243 + reg |= (resv << 8) | resv;
1244 + writel(reg, h->fe_base + QTX_CFG(id));
1245 +
1246 + size = strlen(line);
1247 + *offset += size;
1248 +
1249 + return length;
1250 +}
1251 +
1252 +static const struct file_operations hnat_queue_fops = {
1253 + .open = simple_open,
1254 + .read = hnat_queue_show,
1255 + .write = hnat_queue_write,
1256 + .llseek = default_llseek,
1257 +};
1258 +
1259 +static void hnat_ac_timer_handle(unsigned long priv)
1260 +{
1261 + struct hnat_priv *h = (struct hnat_priv*) priv;
1262 + int i;
1263 +
1264 + for (i = 0; i < HNAT_COUNTER_MAX; i++) {
1265 + u32 b_hi, b_lo;
1266 + u64 b;
1267 +
1268 + b_lo = readl(h->fe_base + HNAT_AC_BYTE_LO(i));
1269 + b_hi = readl(h->fe_base + HNAT_AC_BYTE_HI(i));
1270 + b = b_hi;
1271 + b <<= 32;
1272 + b += b_lo;
1273 + h->acct[i].bytes += b;
1274 + h->acct[i].packets += readl(h->fe_base + HNAT_AC_PACKET(i));
1275 + }
1276 +
1277 + mod_timer(&h->ac_timer, jiffies + HNAT_AC_TIMER_INTERVAL);
1278 +}
1279 +
1280 +static ssize_t hnat_counter_show(struct file *file, char __user *user_buf,
1281 + size_t count, loff_t *ppos)
1282 +{
1283 + struct hnat_priv *h = host;
1284 + int id = (int) file->private_data;
1285 + char *buf;
1286 + unsigned int len = 0, buf_len = 1500;
1287 + ssize_t ret_cnt;
1288 + int id2 = id + (HNAT_COUNTER_MAX / 2);
1289 +
1290 + buf = kzalloc(buf_len, GFP_KERNEL);
1291 + if (!buf)
1292 + return -ENOMEM;
1293 +
1294 + len += scnprintf(buf + len, buf_len - len,
1295 + "tx pkts : %llu\ntx bytes: %llu\nrx pktks : %llu\nrx bytes : %llu\n",
1296 + h->acct[id].packets, h->acct[id].bytes,
1297 + h->acct[id2].packets, h->acct[id2].bytes);
1298 +
1299 + if (len > buf_len)
1300 + len = buf_len;
1301 +
1302 + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1303 +
1304 + kfree(buf);
1305 + return ret_cnt;
1306 +}
1307 +
1308 +static const struct file_operations hnat_counter_fops = {
1309 + .open = simple_open,
1310 + .read = hnat_counter_show,
1311 + .llseek = default_llseek,
1312 +};
1313 +
1314 +#define dump_register(nm) \
1315 +{ \
1316 + .name = __stringify(nm), \
1317 + .offset = PPE_ ##nm , \
1318 +}
1319 +
1320 +static const struct debugfs_reg32 hnat_regs[] = {
1321 + dump_register(GLO_CFG),
1322 + dump_register(FLOW_CFG),
1323 + dump_register(IP_PROT_CHK),
1324 + dump_register(IP_PROT_0),
1325 + dump_register(IP_PROT_1),
1326 + dump_register(IP_PROT_2),
1327 + dump_register(IP_PROT_3),
1328 + dump_register(TB_CFG),
1329 + dump_register(TB_BASE),
1330 + dump_register(TB_USED),
1331 + dump_register(BNDR),
1332 + dump_register(BIND_LMT_0),
1333 + dump_register(BIND_LMT_1),
1334 + dump_register(KA),
1335 + dump_register(UNB_AGE),
1336 + dump_register(BND_AGE_0),
1337 + dump_register(BND_AGE_1),
1338 + dump_register(HASH_SEED),
1339 + dump_register(DFT_CPORT),
1340 + dump_register(MCAST_PPSE),
1341 + dump_register(MCAST_L_0),
1342 + dump_register(MCAST_H_0),
1343 + dump_register(MCAST_L_1),
1344 + dump_register(MCAST_H_1),
1345 + dump_register(MCAST_L_2),
1346 + dump_register(MCAST_H_2),
1347 + dump_register(MCAST_L_3),
1348 + dump_register(MCAST_H_3),
1349 + dump_register(MCAST_L_4),
1350 + dump_register(MCAST_H_4),
1351 + dump_register(MCAST_L_5),
1352 + dump_register(MCAST_H_5),
1353 + dump_register(MCAST_L_6),
1354 + dump_register(MCAST_H_6),
1355 + dump_register(MCAST_L_7),
1356 + dump_register(MCAST_H_7),
1357 + dump_register(MCAST_L_8),
1358 + dump_register(MCAST_H_8),
1359 + dump_register(MCAST_L_9),
1360 + dump_register(MCAST_H_9),
1361 + dump_register(MCAST_L_A),
1362 + dump_register(MCAST_H_A),
1363 + dump_register(MCAST_L_B),
1364 + dump_register(MCAST_H_B),
1365 + dump_register(MCAST_L_C),
1366 + dump_register(MCAST_H_C),
1367 + dump_register(MCAST_L_D),
1368 + dump_register(MCAST_H_D),
1369 + dump_register(MCAST_L_E),
1370 + dump_register(MCAST_H_E),
1371 + dump_register(MCAST_L_F),
1372 + dump_register(MCAST_H_F),
1373 + dump_register(MTU_DRP),
1374 + dump_register(MTU_VLYR_0),
1375 + dump_register(MTU_VLYR_1),
1376 + dump_register(MTU_VLYR_2),
1377 + dump_register(VPM_TPID),
1378 + dump_register(VPM_TPID),
1379 + dump_register(CAH_CTRL),
1380 + dump_register(CAH_TAG_SRH),
1381 + dump_register(CAH_LINE_RW),
1382 + dump_register(CAH_WDATA),
1383 + dump_register(CAH_RDATA),
1384 +};
1385 +
1386 +int __init hnat_init_debugfs(struct hnat_priv *h)
1387 +{
1388 + int ret = 0;
1389 + struct dentry *root;
1390 + struct dentry *file;
1391 + int i;
1392 + char name[16];
1393 +
1394 + root = debugfs_create_dir("hnat", NULL);
1395 + if (!root) {
1396 + dev_err(h->dev, "%s:err at %d\n", __func__, __LINE__);
1397 + ret = -ENOMEM;
1398 + goto err0;
1399 + }
1400 + h->root = root;
1401 + h->regset = kzalloc(sizeof(*h->regset), GFP_KERNEL);
1402 + if (!h->regset) {
1403 + dev_err(h->dev, "%s:err at %d\n", __func__, __LINE__);
1404 + ret = -ENOMEM;
1405 + goto err1;
1406 + }
1407 + h->regset->regs = hnat_regs;
1408 + h->regset->nregs = ARRAY_SIZE(hnat_regs);
1409 + h->regset->base = h->ppe_base;
1410 +
1411 + file = debugfs_create_regset32("regdump", S_IRUGO, root, h->regset);
1412 + if (!file) {
1413 + dev_err(h->dev, "%s:err at %d\n", __func__, __LINE__);
1414 + ret = -ENOMEM;
1415 +		kfree(h->regset); goto err1;
1416 + }
1417 + debugfs_create_file("all_entry", S_IRUGO, root, h, &hnat_debug_fops);
1418 + for (i = 0; i < HNAT_COUNTER_MAX / 2; i++) {
1419 + snprintf(name, sizeof(name), "counter%d", i);
1420 + debugfs_create_file(name, S_IRUGO, root, (void *)i, &hnat_counter_fops);
1421 + }
1422 +
1423 + for (i = 0; i < 2; i++) {
1424 + snprintf(name, sizeof(name), "scheduler%d", i);
1425 + debugfs_create_file(name, S_IRUGO, root, (void *)i, &hnat_sched_fops);
1426 + }
1427 +
1428 + for (i = 0; i < 16; i++) {
1429 + snprintf(name, sizeof(name), "queue%d", i);
1430 + debugfs_create_file(name, S_IRUGO, root, (void *)i, &hnat_queue_fops);
1431 + }
1432 +
1433 + setup_timer(&h->ac_timer, hnat_ac_timer_handle, (unsigned long) h);
1434 + mod_timer(&h->ac_timer, jiffies + HNAT_AC_TIMER_INTERVAL);
1435 +
1436 + return 0;
1437 +
1438 + err1:
1439 + debugfs_remove_recursive(root);
1440 + err0:
1441 + return ret;
1442 +}
1443 +
1444 +void hnat_deinit_debugfs(struct hnat_priv *h)
1445 +{
1446 + del_timer(&h->ac_timer);
1447 + debugfs_remove_recursive(h->root);
1448 + h->root = NULL;
1449 +}
1450 --- /dev/null
1451 +++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
1452 @@ -0,0 +1,289 @@
1453 +/* This program is free software; you can redistribute it and/or modify
1454 + * it under the terms of the GNU General Public License as published by
1455 + * the Free Software Foundation; version 2 of the License
1456 + *
1457 + * This program is distributed in the hope that it will be useful,
1458 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1459 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1460 + * GNU General Public License for more details.
1461 + *
1462 + * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
1463 + * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
1464 + */
1465 +
1466 +#include <linux/netfilter_bridge.h>
1467 +
1468 +#include <net/arp.h>
1469 +#include <net/neighbour.h>
1470 +#include <net/netfilter/nf_conntrack_helper.h>
1471 +
1472 +#include "nf_hnat_mtk.h"
1473 +#include "hnat.h"
1474 +
1475 +#include "../mtk_eth_soc.h"
1476 +
1477 +static unsigned int skb_to_hnat_info(struct sk_buff *skb,
1478 + const struct net_device *dev,
1479 + struct foe_entry *foe)
1480 +{
1481 + struct foe_entry entry = { 0 };
1482 + int lan = IS_LAN(dev);
1483 + struct ethhdr *eth;
1484 + struct iphdr *iph;
1485 + struct tcphdr *tcph;
1486 + struct udphdr *udph;
1487 + int tcp = 0;
1488 + int ipv4 = 0;
1489 +	u32 gmac = NR_DISCARD; /* safe default if dev is neither LAN nor WAN */
1490 +
1491 + eth = eth_hdr(skb);
1492 + switch (ntohs(eth->h_proto)) {
1493 + case ETH_P_IP:
1494 + ipv4 = 1;
1495 + break;
1496 +
1497 + default:
1498 + return -1;
1499 + }
1500 +
1501 + iph = ip_hdr(skb);
1502 + switch (iph->protocol) {
1503 + case IPPROTO_TCP:
1504 + tcph = tcp_hdr(skb);
1505 + tcp = 1;
1506 + break;
1507 +
1508 + case IPPROTO_UDP:
1509 + udph = udp_hdr(skb);
1510 + break;
1511 +
1512 + default:
1513 + return -1;
1514 + }
1515 +
1516 + entry.ipv4_hnapt.etype = htons(ETH_P_IP);
1517 +
1518 + if (lan) {
1519 + entry.ipv4_hnapt.etype = htons(ETH_P_8021Q);
1520 + entry.bfib1.vlan_layer = 1;
1521 + entry.ipv4_hnapt.vlan1 = BIT(dev->name[3] - '0');
1522 + }
1523 +
1524 + if (dev->priv_flags & IFF_802_1Q_VLAN) {
1525 + struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
1526 +
1527 + entry.ipv4_hnapt.etype = htons(ETH_P_8021Q);
1528 + entry.bfib1.vlan_layer = 1;
1529 + if (lan)
1530 + entry.ipv4_hnapt.vlan2 = vlan->vlan_id;
1531 + else
1532 + entry.ipv4_hnapt.vlan1 = vlan->vlan_id;
1533 + }
1534 +
1535 + entry.ipv4_hnapt.dmac_hi = swab32(*((u32*) eth->h_dest));
1536 + entry.ipv4_hnapt.dmac_lo = swab16(*((u16*) &eth->h_dest[4]));
1537 + entry.ipv4_hnapt.smac_hi = swab32(*((u32*) eth->h_source));
1538 + entry.ipv4_hnapt.smac_lo = swab16(*((u16*) &eth->h_source[4]));
1539 + entry.ipv4_hnapt.pppoe_id = 0;
1540 + entry.bfib1.psn = 0;
1541 + entry.ipv4_hnapt.bfib1.vpm = 1;
1542 +
1543 + if (ipv4)
1544 + entry.ipv4_hnapt.bfib1.pkt_type = IPV4_HNAPT;
1545 +
1546 + entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
1547 + entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
1548 + entry.ipv4_hnapt.iblk2.dscp = iph->tos;
1549 +#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
1550 + entry.ipv4_hnapt.iblk2.qid = skb->mark & 0x7;
1551 + if (lan)
1552 + entry.ipv4_hnapt.iblk2.qid += 8;
1553 + entry.ipv4_hnapt.iblk2.fqos = 1;
1554 +#endif
1555 + if (tcp) {
1556 + entry.ipv4_hnapt.new_sport = ntohs(tcph->source);
1557 + entry.ipv4_hnapt.new_dport = ntohs(tcph->dest);
1558 + entry.ipv4_hnapt.bfib1.udp = 0;
1559 + } else {
1560 + entry.ipv4_hnapt.new_sport = ntohs(udph->source);
1561 + entry.ipv4_hnapt.new_dport = ntohs(udph->dest);
1562 + entry.ipv4_hnapt.bfib1.udp = 1;
1563 + }
1564 +
1565 + if (IS_LAN(dev))
1566 + gmac = NR_GMAC1_PORT;
1567 + else if (IS_WAN(dev))
1568 + gmac = NR_GMAC2_PORT;
1569 +
1570 + if (is_multicast_ether_addr(&eth->h_dest[0]))
1571 + entry.ipv4_hnapt.iblk2.mcast = 1;
1572 + else
1573 + entry.ipv4_hnapt.iblk2.mcast = 0;
1574 +
1575 + entry.ipv4_hnapt.iblk2.dp = gmac;
1576 + entry.ipv4_hnapt.iblk2.port_mg = 0x3f;
1577 + entry.ipv4_hnapt.iblk2.port_ag = (skb->mark >> 3) & 0x1f;
1578 + if (IS_LAN(dev))
1579 + entry.ipv4_hnapt.iblk2.port_ag += 32;
1580 + entry.bfib1.time_stamp = readl((host->fe_base + 0x0010)) & (0xFFFF);
1581 + entry.ipv4_hnapt.bfib1.ttl = 1;
1582 + entry.ipv4_hnapt.bfib1.cah = 1;
1583 + entry.ipv4_hnapt.bfib1.ka = 1;
1584 + entry.bfib1.state = BIND;
1585 +
1586 + entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
1587 + entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
1588 + entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
1589 + entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;
1590 +
1591 + memcpy(foe, &entry, sizeof(entry));
1592 +
1593 + return 0;
1594 +}
1595 +
1596 +static unsigned int mtk_hnat_nf_post_routing(struct sk_buff *skb,
1597 + const struct net_device *out,
1598 + unsigned int (*fn)(struct sk_buff *, const struct net_device *),
1599 + const char *func)
1600 +{
1601 + struct foe_entry *entry;
1602 + struct nf_conn *ct;
1603 + enum ip_conntrack_info ctinfo;
1604 + const struct nf_conn_help *help;
1605 +
1606 + if ((skb->mark & 0x7) < 4)
1607 + return 0;
1608 +
1609 + ct = nf_ct_get(skb, &ctinfo);
1610 + if (!ct)
1611 + return 0;
1612 +
1613 + /* rcu_read_lock()ed by nf_hook_slow */
1614 + help = nfct_help(ct);
1615 + if (help && rcu_dereference(help->helper))
1616 + return 0;
1617 +
1618 + if ((FROM_GE_WAN(skb) || FROM_GE_LAN(skb)) &&
1619 + skb_hnat_is_hashed(skb) &&
1620 + (skb_hnat_reason(skb) == HIT_BIND_KEEPALIVE_DUP_OLD_HDR))
1621 + return -1;
1622 +
1623 + if ((IS_LAN(out) && FROM_GE_WAN(skb)) ||
1624 + (IS_WAN(out) && FROM_GE_LAN(skb))) {
1625 + if (!skb_hnat_is_hashed(skb))
1626 + return 0;
1627 +
1628 + entry = &host->foe_table_cpu[skb_hnat_entry(skb)];
1629 + if (entry_hnat_is_bound(entry))
1630 + return 0;
1631 +
1632 + if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH &&
1633 + skb_hnat_alg(skb) == 0) {
1634 + if (fn && fn(skb, out))
1635 + return 0;
1636 + skb_to_hnat_info(skb, out, entry);
1637 + }
1638 + }
1639 +
1640 + return 0;
1641 +}
1642 +
1643 +static unsigned int mtk_hnat_nf_pre_routing(void *priv,
1644 + struct sk_buff *skb,
1645 + const struct nf_hook_state *state)
1646 +{
1647 + if (IS_WAN(state->in))
1648 + HNAT_SKB_CB(skb)->iif = FOE_MAGIC_GE_WAN;
1649 + else if (IS_LAN(state->in))
1650 + HNAT_SKB_CB(skb)->iif = FOE_MAGIC_GE_LAN;
1651 + else if (!IS_BR(state->in))
1652 + HNAT_SKB_CB(skb)->iif = FOE_INVALID;
1653 +
1654 + return NF_ACCEPT;
1655 +}
1656 +
1657 +static unsigned int hnat_get_nexthop(struct sk_buff *skb, const struct net_device *out) {
1658 +
1659 + u32 nexthop;
1660 + struct neighbour *neigh;
1661 + struct dst_entry *dst = skb_dst(skb);
1662 + struct rtable *rt = (struct rtable *)dst;
1663 + struct net_device *dev = (__force struct net_device *)out;
1664 +
1665 + rcu_read_lock_bh();
1666 + nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
1667 + neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
1668 +	if (unlikely(!neigh)) {
1669 +		dev_err(host->dev, "%s:++ no neigh\n", __func__);
1670 +		rcu_read_unlock_bh(); return -1;
1671 +	}
1672 +
1673 + /* why do we get all zero ethernet address ? */
1674 + if (!is_valid_ether_addr(neigh->ha)){
1675 + rcu_read_unlock_bh();
1676 + return -1;
1677 + }
1678 +
1679 + memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
1680 + memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
1681 +
1682 + rcu_read_unlock_bh();
1683 +
1684 + return 0;
1685 +}
1686 +
1687 +static unsigned int mtk_hnat_ipv4_nf_post_routing(void *priv,
1688 + struct sk_buff *skb,
1689 + const struct nf_hook_state *state)
1690 +{
1691 + if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_get_nexthop, __func__))
1692 + return NF_ACCEPT;
1693 +
1694 + return NF_DROP;
1695 +}
1696 +
1697 +static unsigned int mtk_hnat_br_nf_post_routing(void *priv,
1698 + struct sk_buff *skb,
1699 + const struct nf_hook_state *state)
1700 +{
1701 + if (!mtk_hnat_nf_post_routing(skb, state->out , 0, __func__))
1702 + return NF_ACCEPT;
1703 +
1704 + return NF_DROP;
1705 +}
1706 +
1707 +static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
1708 + {
1709 + .hook = mtk_hnat_nf_pre_routing,
1710 + .pf = NFPROTO_IPV4,
1711 + .hooknum = NF_INET_PRE_ROUTING,
1712 + .priority = NF_IP_PRI_FIRST,
1713 + }, {
1714 + .hook = mtk_hnat_ipv4_nf_post_routing,
1715 + .pf = NFPROTO_IPV4,
1716 + .hooknum = NF_INET_POST_ROUTING,
1717 + .priority = NF_IP_PRI_LAST,
1718 + }, {
1719 + .hook = mtk_hnat_nf_pre_routing,
1720 + .pf = NFPROTO_BRIDGE,
1721 + .hooknum = NF_BR_PRE_ROUTING,
1722 + .priority = NF_BR_PRI_FIRST,
1723 + }, {
1724 + .hook = mtk_hnat_br_nf_post_routing,
1725 + .pf = NFPROTO_BRIDGE,
1726 + .hooknum = NF_BR_POST_ROUTING,
1727 + .priority = NF_BR_PRI_LAST - 1,
1728 + },
1729 +};
1730 +
1731 +int hnat_register_nf_hooks(void)
1732 +{
1733 + return nf_register_hooks(mtk_hnat_nf_ops,
1734 + ARRAY_SIZE(mtk_hnat_nf_ops));
1735 +}
1736 +
1737 +void hnat_unregister_nf_hooks(void)
1738 +{
1739 + nf_unregister_hooks(mtk_hnat_nf_ops,
1740 + ARRAY_SIZE(mtk_hnat_nf_ops));
1741 +}
1742 --- /dev/null
1743 +++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
1744 @@ -0,0 +1,44 @@
1745 +/* This program is free software; you can redistribute it and/or modify
1746 + * it under the terms of the GNU General Public License as published by
1747 + * the Free Software Foundation; version 2 of the License
1748 + *
1749 + * This program is distributed in the hope that it will be useful,
1750 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1751 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1752 + * GNU General Public License for more details.
1753 + *
1754 + * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
1755 + * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
1756 + */
1757 +
1758 +#ifndef NF_HNAT_MTK_H
1759 +#define NF_HNAT_MTK_H
1760 +
1761 +#include <asm/dma-mapping.h>
1762 +#include <linux/netdevice.h>
1763 +
1764 +#define HNAT_SKB_CB2(__skb) ((struct hnat_skb_cb2 *)&((__skb)->cb[44]))
1765 +struct hnat_skb_cb2 {
1766 + __u32 magic;
1767 +};
1768 +
1769 +struct hnat_desc {
1770 + u32 entry:14;
1771 + u32 crsn:5;
1772 + u32 sport:4;
1773 + u32 alg:9;
1774 +} __attribute__ ((packed));
1775 +
1776 +#define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
1777 +#define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
1778 +#define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
1779 +#define skb_hnat_sport(skb) (((struct hnat_desc *)(skb->head))->sport)
1780 +#define skb_hnat_alg(skb) (((struct hnat_desc *)(skb->head))->alg)
1781 +
1782 +u32 hnat_tx(struct sk_buff *skb);
1783 +u32 hnat_set_skb_info(struct sk_buff *skb, u32 *rxd);
1784 +u32 hnat_reg(struct net_device *, void __iomem *);
1785 +u32 hnat_unreg(void);
1786 +
1787 +#endif
1788 +