mediatek: add an upstreamed spi-nand driver
[openwrt/staging/mkresin.git] target/linux/mediatek/patches-5.15/120-11-mtd-nand-make-mtk_ecc.c-a-separated-module.patch
1 From ebb9653d4a87c64fb679e4c339e867556dada719 Mon Sep 17 00:00:00 2001
2 From: Chuanhong Guo <gch981213@gmail.com>
3 Date: Tue, 22 Mar 2022 18:44:21 +0800
4 Subject: [PATCH 11/15] mtd: nand: make mtk_ecc.c a separated module
5
6 This code will be used by the MediaTek SNFI spi-mem controller driver with a
7 pipelined ECC engine.
8
9 Signed-off-by: Chuanhong Guo <gch981213@gmail.com>
10 (cherry picked from commit 316f47cec4ce5b81aa8006de202d8769c117a52d)
11 ---
12 drivers/mtd/nand/Kconfig | 7 +++++++
13 drivers/mtd/nand/Makefile | 1 +
14 drivers/mtd/nand/{raw/mtk_ecc.c => ecc-mtk.c} | 3 +--
15 drivers/mtd/nand/raw/Kconfig | 1 +
16 drivers/mtd/nand/raw/Makefile | 2 +-
17 drivers/mtd/nand/raw/mtk_nand.c | 2 +-
18 .../nand/raw/mtk_ecc.h => include/linux/mtd/nand-ecc-mtk.h | 0
19 7 files changed, 12 insertions(+), 4 deletions(-)
20 rename drivers/mtd/nand/{raw/mtk_ecc.c => ecc-mtk.c} (99%)
21 rename drivers/mtd/nand/raw/mtk_ecc.h => include/linux/mtd/nand-ecc-mtk.h (100%)
22
23 --- a/drivers/mtd/nand/Kconfig
24 +++ b/drivers/mtd/nand/Kconfig
25 @@ -50,6 +50,13 @@ config MTD_NAND_MTK_BMT
26 bool "Support MediaTek NAND Bad-block Management Table"
27 default n
28
29 +config MTD_NAND_ECC_MEDIATEK
30 + tristate "Mediatek hardware ECC engine"
31 + depends on HAS_IOMEM
32 + select MTD_NAND_ECC
33 + help
34 + This enables support for the hardware ECC engine from Mediatek.
35 +
36 endmenu
37
38 endmenu
39 --- a/drivers/mtd/nand/Makefile
40 +++ b/drivers/mtd/nand/Makefile
41 @@ -3,6 +3,7 @@
42 nandcore-objs := core.o bbt.o
43 obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
44 obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
45 +obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
46
47 obj-y += onenand/
48 obj-y += raw/
49 --- a/drivers/mtd/nand/raw/mtk_ecc.c
50 +++ /dev/null
51 @@ -1,599 +0,0 @@
52 -// SPDX-License-Identifier: GPL-2.0 OR MIT
53 -/*
54 - * MTK ECC controller driver.
55 - * Copyright (C) 2016 MediaTek Inc.
56 - * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
57 - * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
58 - */
59 -
60 -#include <linux/platform_device.h>
61 -#include <linux/dma-mapping.h>
62 -#include <linux/interrupt.h>
63 -#include <linux/clk.h>
64 -#include <linux/module.h>
65 -#include <linux/iopoll.h>
66 -#include <linux/of.h>
67 -#include <linux/of_platform.h>
68 -#include <linux/mutex.h>
69 -
70 -#include "mtk_ecc.h"
71 -
72 -#define ECC_IDLE_MASK BIT(0)
73 -#define ECC_IRQ_EN BIT(0)
74 -#define ECC_PG_IRQ_SEL BIT(1)
75 -#define ECC_OP_ENABLE (1)
76 -#define ECC_OP_DISABLE (0)
77 -
78 -#define ECC_ENCCON (0x00)
79 -#define ECC_ENCCNFG (0x04)
80 -#define ECC_MS_SHIFT (16)
81 -#define ECC_ENCDIADDR (0x08)
82 -#define ECC_ENCIDLE (0x0C)
83 -#define ECC_DECCON (0x100)
84 -#define ECC_DECCNFG (0x104)
85 -#define DEC_EMPTY_EN BIT(31)
86 -#define DEC_CNFG_CORRECT (0x3 << 12)
87 -#define ECC_DECIDLE (0x10C)
88 -#define ECC_DECENUM0 (0x114)
89 -
90 -#define ECC_TIMEOUT (500000)
91 -
92 -#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
93 -#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
94 -
95 -struct mtk_ecc_caps {
96 - u32 err_mask;
97 - u32 err_shift;
98 - const u8 *ecc_strength;
99 - const u32 *ecc_regs;
100 - u8 num_ecc_strength;
101 - u8 ecc_mode_shift;
102 - u32 parity_bits;
103 - int pg_irq_sel;
104 -};
105 -
106 -struct mtk_ecc {
107 - struct device *dev;
108 - const struct mtk_ecc_caps *caps;
109 - void __iomem *regs;
110 - struct clk *clk;
111 -
112 - struct completion done;
113 - struct mutex lock;
114 - u32 sectors;
115 -
116 - u8 *eccdata;
117 -};
118 -
119 -/* ecc strength that each IP supports */
120 -static const u8 ecc_strength_mt2701[] = {
121 - 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
122 - 40, 44, 48, 52, 56, 60
123 -};
124 -
125 -static const u8 ecc_strength_mt2712[] = {
126 - 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
127 - 40, 44, 48, 52, 56, 60, 68, 72, 80
128 -};
129 -
130 -static const u8 ecc_strength_mt7622[] = {
131 - 4, 6, 8, 10, 12
132 -};
133 -
134 -enum mtk_ecc_regs {
135 - ECC_ENCPAR00,
136 - ECC_ENCIRQ_EN,
137 - ECC_ENCIRQ_STA,
138 - ECC_DECDONE,
139 - ECC_DECIRQ_EN,
140 - ECC_DECIRQ_STA,
141 -};
142 -
143 -static int mt2701_ecc_regs[] = {
144 - [ECC_ENCPAR00] = 0x10,
145 - [ECC_ENCIRQ_EN] = 0x80,
146 - [ECC_ENCIRQ_STA] = 0x84,
147 - [ECC_DECDONE] = 0x124,
148 - [ECC_DECIRQ_EN] = 0x200,
149 - [ECC_DECIRQ_STA] = 0x204,
150 -};
151 -
152 -static int mt2712_ecc_regs[] = {
153 - [ECC_ENCPAR00] = 0x300,
154 - [ECC_ENCIRQ_EN] = 0x80,
155 - [ECC_ENCIRQ_STA] = 0x84,
156 - [ECC_DECDONE] = 0x124,
157 - [ECC_DECIRQ_EN] = 0x200,
158 - [ECC_DECIRQ_STA] = 0x204,
159 -};
160 -
161 -static int mt7622_ecc_regs[] = {
162 - [ECC_ENCPAR00] = 0x10,
163 - [ECC_ENCIRQ_EN] = 0x30,
164 - [ECC_ENCIRQ_STA] = 0x34,
165 - [ECC_DECDONE] = 0x11c,
166 - [ECC_DECIRQ_EN] = 0x140,
167 - [ECC_DECIRQ_STA] = 0x144,
168 -};
169 -
170 -static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
171 - enum mtk_ecc_operation op)
172 -{
173 - struct device *dev = ecc->dev;
174 - u32 val;
175 - int ret;
176 -
177 - ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
178 - val & ECC_IDLE_MASK,
179 - 10, ECC_TIMEOUT);
180 - if (ret)
181 - dev_warn(dev, "%s NOT idle\n",
182 - op == ECC_ENCODE ? "encoder" : "decoder");
183 -}
184 -
185 -static irqreturn_t mtk_ecc_irq(int irq, void *id)
186 -{
187 - struct mtk_ecc *ecc = id;
188 - u32 dec, enc;
189 -
190 - dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
191 - & ECC_IRQ_EN;
192 - if (dec) {
193 - dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
194 - if (dec & ecc->sectors) {
195 - /*
196 - * Clear decode IRQ status once again to ensure that
197 - * there will be no extra IRQ.
198 - */
199 - readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
200 - ecc->sectors = 0;
201 - complete(&ecc->done);
202 - } else {
203 - return IRQ_HANDLED;
204 - }
205 - } else {
206 - enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
207 - & ECC_IRQ_EN;
208 - if (enc)
209 - complete(&ecc->done);
210 - else
211 - return IRQ_NONE;
212 - }
213 -
214 - return IRQ_HANDLED;
215 -}
216 -
217 -static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
218 -{
219 - u32 ecc_bit, dec_sz, enc_sz;
220 - u32 reg, i;
221 -
222 - for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
223 - if (ecc->caps->ecc_strength[i] == config->strength)
224 - break;
225 - }
226 -
227 - if (i == ecc->caps->num_ecc_strength) {
228 - dev_err(ecc->dev, "invalid ecc strength %d\n",
229 - config->strength);
230 - return -EINVAL;
231 - }
232 -
233 - ecc_bit = i;
234 -
235 - if (config->op == ECC_ENCODE) {
236 - /* configure ECC encoder (in bits) */
237 - enc_sz = config->len << 3;
238 -
239 - reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
240 - reg |= (enc_sz << ECC_MS_SHIFT);
241 - writel(reg, ecc->regs + ECC_ENCCNFG);
242 -
243 - if (config->mode != ECC_NFI_MODE)
244 - writel(lower_32_bits(config->addr),
245 - ecc->regs + ECC_ENCDIADDR);
246 -
247 - } else {
248 - /* configure ECC decoder (in bits) */
249 - dec_sz = (config->len << 3) +
250 - config->strength * ecc->caps->parity_bits;
251 -
252 - reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
253 - reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
254 - reg |= DEC_EMPTY_EN;
255 - writel(reg, ecc->regs + ECC_DECCNFG);
256 -
257 - if (config->sectors)
258 - ecc->sectors = 1 << (config->sectors - 1);
259 - }
260 -
261 - return 0;
262 -}
263 -
264 -void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
265 - int sectors)
266 -{
267 - u32 offset, i, err;
268 - u32 bitflips = 0;
269 -
270 - stats->corrected = 0;
271 - stats->failed = 0;
272 -
273 - for (i = 0; i < sectors; i++) {
274 - offset = (i >> 2) << 2;
275 - err = readl(ecc->regs + ECC_DECENUM0 + offset);
276 - err = err >> ((i % 4) * ecc->caps->err_shift);
277 - err &= ecc->caps->err_mask;
278 - if (err == ecc->caps->err_mask) {
279 - /* uncorrectable errors */
280 - stats->failed++;
281 - continue;
282 - }
283 -
284 - stats->corrected += err;
285 - bitflips = max_t(u32, bitflips, err);
286 - }
287 -
288 - stats->bitflips = bitflips;
289 -}
290 -EXPORT_SYMBOL(mtk_ecc_get_stats);
291 -
292 -void mtk_ecc_release(struct mtk_ecc *ecc)
293 -{
294 - clk_disable_unprepare(ecc->clk);
295 - put_device(ecc->dev);
296 -}
297 -EXPORT_SYMBOL(mtk_ecc_release);
298 -
299 -static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
300 -{
301 - mtk_ecc_wait_idle(ecc, ECC_ENCODE);
302 - writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);
303 -
304 - mtk_ecc_wait_idle(ecc, ECC_DECODE);
305 - writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
306 -}
307 -
308 -static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
309 -{
310 - struct platform_device *pdev;
311 - struct mtk_ecc *ecc;
312 -
313 - pdev = of_find_device_by_node(np);
314 - if (!pdev)
315 - return ERR_PTR(-EPROBE_DEFER);
316 -
317 - ecc = platform_get_drvdata(pdev);
318 - if (!ecc) {
319 - put_device(&pdev->dev);
320 - return ERR_PTR(-EPROBE_DEFER);
321 - }
322 -
323 - clk_prepare_enable(ecc->clk);
324 - mtk_ecc_hw_init(ecc);
325 -
326 - return ecc;
327 -}
328 -
329 -struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
330 -{
331 - struct mtk_ecc *ecc = NULL;
332 - struct device_node *np;
333 -
334 - np = of_parse_phandle(of_node, "ecc-engine", 0);
335 - if (np) {
336 - ecc = mtk_ecc_get(np);
337 - of_node_put(np);
338 - }
339 -
340 - return ecc;
341 -}
342 -EXPORT_SYMBOL(of_mtk_ecc_get);
343 -
344 -int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
345 -{
346 - enum mtk_ecc_operation op = config->op;
347 - u16 reg_val;
348 - int ret;
349 -
350 - ret = mutex_lock_interruptible(&ecc->lock);
351 - if (ret) {
352 - dev_err(ecc->dev, "interrupted when attempting to lock\n");
353 - return ret;
354 - }
355 -
356 - mtk_ecc_wait_idle(ecc, op);
357 -
358 - ret = mtk_ecc_config(ecc, config);
359 - if (ret) {
360 - mutex_unlock(&ecc->lock);
361 - return ret;
362 - }
363 -
364 - if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) {
365 - init_completion(&ecc->done);
366 - reg_val = ECC_IRQ_EN;
367 - /*
368 - * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, then it
369 - * means this chip can only generate one ecc irq during page
370 - * read / write. If is 0, generate one ecc irq each ecc step.
371 - */
372 - if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
373 - reg_val |= ECC_PG_IRQ_SEL;
374 - if (op == ECC_ENCODE)
375 - writew(reg_val, ecc->regs +
376 - ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
377 - else
378 - writew(reg_val, ecc->regs +
379 - ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
380 - }
381 -
382 - writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
383 -
384 - return 0;
385 -}
386 -EXPORT_SYMBOL(mtk_ecc_enable);
387 -
388 -void mtk_ecc_disable(struct mtk_ecc *ecc)
389 -{
390 - enum mtk_ecc_operation op = ECC_ENCODE;
391 -
392 - /* find out the running operation */
393 - if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
394 - op = ECC_DECODE;
395 -
396 - /* disable it */
397 - mtk_ecc_wait_idle(ecc, op);
398 - if (op == ECC_DECODE) {
399 - /*
400 - * Clear decode IRQ status in case there is a timeout to wait
401 - * decode IRQ.
402 - */
403 - readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
404 - writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
405 - } else {
406 - writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
407 - }
408 -
409 - writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
410 -
411 - mutex_unlock(&ecc->lock);
412 -}
413 -EXPORT_SYMBOL(mtk_ecc_disable);
414 -
415 -int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
416 -{
417 - int ret;
418 -
419 - ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
420 - if (!ret) {
421 - dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n",
422 - (op == ECC_ENCODE) ? "encoder" : "decoder");
423 - return -ETIMEDOUT;
424 - }
425 -
426 - return 0;
427 -}
428 -EXPORT_SYMBOL(mtk_ecc_wait_done);
429 -
430 -int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
431 - u8 *data, u32 bytes)
432 -{
433 - dma_addr_t addr;
434 - u32 len;
435 - int ret;
436 -
437 - addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
438 - ret = dma_mapping_error(ecc->dev, addr);
439 - if (ret) {
440 - dev_err(ecc->dev, "dma mapping error\n");
441 - return -EINVAL;
442 - }
443 -
444 - config->op = ECC_ENCODE;
445 - config->addr = addr;
446 - ret = mtk_ecc_enable(ecc, config);
447 - if (ret) {
448 - dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
449 - return ret;
450 - }
451 -
452 - ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
453 - if (ret)
454 - goto timeout;
455 -
456 - mtk_ecc_wait_idle(ecc, ECC_ENCODE);
457 -
458 - /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
459 - len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
460 -
461 - /* write the parity bytes generated by the ECC back to temp buffer */
462 - __ioread32_copy(ecc->eccdata,
463 - ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
464 - round_up(len, 4));
465 -
466 - /* copy into possibly unaligned OOB region with actual length */
467 - memcpy(data + bytes, ecc->eccdata, len);
468 -timeout:
469 -
470 - dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
471 - mtk_ecc_disable(ecc);
472 -
473 - return ret;
474 -}
475 -EXPORT_SYMBOL(mtk_ecc_encode);
476 -
477 -void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
478 -{
479 - const u8 *ecc_strength = ecc->caps->ecc_strength;
480 - int i;
481 -
482 - for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
483 - if (*p <= ecc_strength[i]) {
484 - if (!i)
485 - *p = ecc_strength[i];
486 - else if (*p != ecc_strength[i])
487 - *p = ecc_strength[i - 1];
488 - return;
489 - }
490 - }
491 -
492 - *p = ecc_strength[ecc->caps->num_ecc_strength - 1];
493 -}
494 -EXPORT_SYMBOL(mtk_ecc_adjust_strength);
495 -
496 -unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
497 -{
498 - return ecc->caps->parity_bits;
499 -}
500 -EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
501 -
502 -static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
503 - .err_mask = 0x3f,
504 - .err_shift = 8,
505 - .ecc_strength = ecc_strength_mt2701,
506 - .ecc_regs = mt2701_ecc_regs,
507 - .num_ecc_strength = 20,
508 - .ecc_mode_shift = 5,
509 - .parity_bits = 14,
510 - .pg_irq_sel = 0,
511 -};
512 -
513 -static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
514 - .err_mask = 0x7f,
515 - .err_shift = 8,
516 - .ecc_strength = ecc_strength_mt2712,
517 - .ecc_regs = mt2712_ecc_regs,
518 - .num_ecc_strength = 23,
519 - .ecc_mode_shift = 5,
520 - .parity_bits = 14,
521 - .pg_irq_sel = 1,
522 -};
523 -
524 -static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
525 - .err_mask = 0x1f,
526 - .err_shift = 5,
527 - .ecc_strength = ecc_strength_mt7622,
528 - .ecc_regs = mt7622_ecc_regs,
529 - .num_ecc_strength = 5,
530 - .ecc_mode_shift = 4,
531 - .parity_bits = 13,
532 - .pg_irq_sel = 0,
533 -};
534 -
535 -static const struct of_device_id mtk_ecc_dt_match[] = {
536 - {
537 - .compatible = "mediatek,mt2701-ecc",
538 - .data = &mtk_ecc_caps_mt2701,
539 - }, {
540 - .compatible = "mediatek,mt2712-ecc",
541 - .data = &mtk_ecc_caps_mt2712,
542 - }, {
543 - .compatible = "mediatek,mt7622-ecc",
544 - .data = &mtk_ecc_caps_mt7622,
545 - },
546 - {},
547 -};
548 -
549 -static int mtk_ecc_probe(struct platform_device *pdev)
550 -{
551 - struct device *dev = &pdev->dev;
552 - struct mtk_ecc *ecc;
553 - struct resource *res;
554 - u32 max_eccdata_size;
555 - int irq, ret;
556 -
557 - ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
558 - if (!ecc)
559 - return -ENOMEM;
560 -
561 - ecc->caps = of_device_get_match_data(dev);
562 -
563 - max_eccdata_size = ecc->caps->num_ecc_strength - 1;
564 - max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
565 - max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
566 - max_eccdata_size = round_up(max_eccdata_size, 4);
567 - ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
568 - if (!ecc->eccdata)
569 - return -ENOMEM;
570 -
571 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
572 - ecc->regs = devm_ioremap_resource(dev, res);
573 - if (IS_ERR(ecc->regs))
574 - return PTR_ERR(ecc->regs);
575 -
576 - ecc->clk = devm_clk_get(dev, NULL);
577 - if (IS_ERR(ecc->clk)) {
578 - dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
579 - return PTR_ERR(ecc->clk);
580 - }
581 -
582 - irq = platform_get_irq(pdev, 0);
583 - if (irq < 0)
584 - return irq;
585 -
586 - ret = dma_set_mask(dev, DMA_BIT_MASK(32));
587 - if (ret) {
588 - dev_err(dev, "failed to set DMA mask\n");
589 - return ret;
590 - }
591 -
592 - ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
593 - if (ret) {
594 - dev_err(dev, "failed to request irq\n");
595 - return -EINVAL;
596 - }
597 -
598 - ecc->dev = dev;
599 - mutex_init(&ecc->lock);
600 - platform_set_drvdata(pdev, ecc);
601 - dev_info(dev, "probed\n");
602 -
603 - return 0;
604 -}
605 -
606 -#ifdef CONFIG_PM_SLEEP
607 -static int mtk_ecc_suspend(struct device *dev)
608 -{
609 - struct mtk_ecc *ecc = dev_get_drvdata(dev);
610 -
611 - clk_disable_unprepare(ecc->clk);
612 -
613 - return 0;
614 -}
615 -
616 -static int mtk_ecc_resume(struct device *dev)
617 -{
618 - struct mtk_ecc *ecc = dev_get_drvdata(dev);
619 - int ret;
620 -
621 - ret = clk_prepare_enable(ecc->clk);
622 - if (ret) {
623 - dev_err(dev, "failed to enable clk\n");
624 - return ret;
625 - }
626 -
627 - return 0;
628 -}
629 -
630 -static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
631 -#endif
632 -
633 -MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
634 -
635 -static struct platform_driver mtk_ecc_driver = {
636 - .probe = mtk_ecc_probe,
637 - .driver = {
638 - .name = "mtk-ecc",
639 - .of_match_table = of_match_ptr(mtk_ecc_dt_match),
640 -#ifdef CONFIG_PM_SLEEP
641 - .pm = &mtk_ecc_pm_ops,
642 -#endif
643 - },
644 -};
645 -
646 -module_platform_driver(mtk_ecc_driver);
647 -
648 -MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
649 -MODULE_DESCRIPTION("MTK Nand ECC Driver");
650 -MODULE_LICENSE("Dual MIT/GPL");
651 --- /dev/null
652 +++ b/drivers/mtd/nand/ecc-mtk.c
653 @@ -0,0 +1,598 @@
654 +// SPDX-License-Identifier: GPL-2.0 OR MIT
655 +/*
656 + * MTK ECC controller driver.
657 + * Copyright (C) 2016 MediaTek Inc.
658 + * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
659 + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
660 + */
661 +
662 +#include <linux/platform_device.h>
663 +#include <linux/dma-mapping.h>
664 +#include <linux/interrupt.h>
665 +#include <linux/clk.h>
666 +#include <linux/module.h>
667 +#include <linux/iopoll.h>
668 +#include <linux/of.h>
669 +#include <linux/of_platform.h>
670 +#include <linux/mutex.h>
671 +#include <linux/mtd/nand-ecc-mtk.h>
672 +
673 +#define ECC_IDLE_MASK BIT(0)
674 +#define ECC_IRQ_EN BIT(0)
675 +#define ECC_PG_IRQ_SEL BIT(1)
676 +#define ECC_OP_ENABLE (1)
677 +#define ECC_OP_DISABLE (0)
678 +
679 +#define ECC_ENCCON (0x00)
680 +#define ECC_ENCCNFG (0x04)
681 +#define ECC_MS_SHIFT (16)
682 +#define ECC_ENCDIADDR (0x08)
683 +#define ECC_ENCIDLE (0x0C)
684 +#define ECC_DECCON (0x100)
685 +#define ECC_DECCNFG (0x104)
686 +#define DEC_EMPTY_EN BIT(31)
687 +#define DEC_CNFG_CORRECT (0x3 << 12)
688 +#define ECC_DECIDLE (0x10C)
689 +#define ECC_DECENUM0 (0x114)
690 +
691 +#define ECC_TIMEOUT (500000)
692 +
693 +#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
694 +#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
695 +
696 +struct mtk_ecc_caps {
697 + u32 err_mask;
698 + u32 err_shift;
699 + const u8 *ecc_strength;
700 + const u32 *ecc_regs;
701 + u8 num_ecc_strength;
702 + u8 ecc_mode_shift;
703 + u32 parity_bits;
704 + int pg_irq_sel;
705 +};
706 +
707 +struct mtk_ecc {
708 + struct device *dev;
709 + const struct mtk_ecc_caps *caps;
710 + void __iomem *regs;
711 + struct clk *clk;
712 +
713 + struct completion done;
714 + struct mutex lock;
715 + u32 sectors;
716 +
717 + u8 *eccdata;
718 +};
719 +
720 +/* ecc strength that each IP supports */
721 +static const u8 ecc_strength_mt2701[] = {
722 + 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
723 + 40, 44, 48, 52, 56, 60
724 +};
725 +
726 +static const u8 ecc_strength_mt2712[] = {
727 + 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
728 + 40, 44, 48, 52, 56, 60, 68, 72, 80
729 +};
730 +
731 +static const u8 ecc_strength_mt7622[] = {
732 + 4, 6, 8, 10, 12
733 +};
734 +
735 +enum mtk_ecc_regs {
736 + ECC_ENCPAR00,
737 + ECC_ENCIRQ_EN,
738 + ECC_ENCIRQ_STA,
739 + ECC_DECDONE,
740 + ECC_DECIRQ_EN,
741 + ECC_DECIRQ_STA,
742 +};
743 +
744 +static int mt2701_ecc_regs[] = {
745 + [ECC_ENCPAR00] = 0x10,
746 + [ECC_ENCIRQ_EN] = 0x80,
747 + [ECC_ENCIRQ_STA] = 0x84,
748 + [ECC_DECDONE] = 0x124,
749 + [ECC_DECIRQ_EN] = 0x200,
750 + [ECC_DECIRQ_STA] = 0x204,
751 +};
752 +
753 +static int mt2712_ecc_regs[] = {
754 + [ECC_ENCPAR00] = 0x300,
755 + [ECC_ENCIRQ_EN] = 0x80,
756 + [ECC_ENCIRQ_STA] = 0x84,
757 + [ECC_DECDONE] = 0x124,
758 + [ECC_DECIRQ_EN] = 0x200,
759 + [ECC_DECIRQ_STA] = 0x204,
760 +};
761 +
762 +static int mt7622_ecc_regs[] = {
763 + [ECC_ENCPAR00] = 0x10,
764 + [ECC_ENCIRQ_EN] = 0x30,
765 + [ECC_ENCIRQ_STA] = 0x34,
766 + [ECC_DECDONE] = 0x11c,
767 + [ECC_DECIRQ_EN] = 0x140,
768 + [ECC_DECIRQ_STA] = 0x144,
769 +};
770 +
771 +static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
772 + enum mtk_ecc_operation op)
773 +{
774 + struct device *dev = ecc->dev;
775 + u32 val;
776 + int ret;
777 +
778 + ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
779 + val & ECC_IDLE_MASK,
780 + 10, ECC_TIMEOUT);
781 + if (ret)
782 + dev_warn(dev, "%s NOT idle\n",
783 + op == ECC_ENCODE ? "encoder" : "decoder");
784 +}
785 +
786 +static irqreturn_t mtk_ecc_irq(int irq, void *id)
787 +{
788 + struct mtk_ecc *ecc = id;
789 + u32 dec, enc;
790 +
791 + dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
792 + & ECC_IRQ_EN;
793 + if (dec) {
794 + dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
795 + if (dec & ecc->sectors) {
796 + /*
797 + * Clear decode IRQ status once again to ensure that
798 + * there will be no extra IRQ.
799 + */
800 + readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
801 + ecc->sectors = 0;
802 + complete(&ecc->done);
803 + } else {
804 + return IRQ_HANDLED;
805 + }
806 + } else {
807 + enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
808 + & ECC_IRQ_EN;
809 + if (enc)
810 + complete(&ecc->done);
811 + else
812 + return IRQ_NONE;
813 + }
814 +
815 + return IRQ_HANDLED;
816 +}
817 +
818 +static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
819 +{
820 + u32 ecc_bit, dec_sz, enc_sz;
821 + u32 reg, i;
822 +
823 + for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
824 + if (ecc->caps->ecc_strength[i] == config->strength)
825 + break;
826 + }
827 +
828 + if (i == ecc->caps->num_ecc_strength) {
829 + dev_err(ecc->dev, "invalid ecc strength %d\n",
830 + config->strength);
831 + return -EINVAL;
832 + }
833 +
834 + ecc_bit = i;
835 +
836 + if (config->op == ECC_ENCODE) {
837 + /* configure ECC encoder (in bits) */
838 + enc_sz = config->len << 3;
839 +
840 + reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
841 + reg |= (enc_sz << ECC_MS_SHIFT);
842 + writel(reg, ecc->regs + ECC_ENCCNFG);
843 +
844 + if (config->mode != ECC_NFI_MODE)
845 + writel(lower_32_bits(config->addr),
846 + ecc->regs + ECC_ENCDIADDR);
847 +
848 + } else {
849 + /* configure ECC decoder (in bits) */
850 + dec_sz = (config->len << 3) +
851 + config->strength * ecc->caps->parity_bits;
852 +
853 + reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
854 + reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
855 + reg |= DEC_EMPTY_EN;
856 + writel(reg, ecc->regs + ECC_DECCNFG);
857 +
858 + if (config->sectors)
859 + ecc->sectors = 1 << (config->sectors - 1);
860 + }
861 +
862 + return 0;
863 +}
864 +
865 +void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
866 + int sectors)
867 +{
868 + u32 offset, i, err;
869 + u32 bitflips = 0;
870 +
871 + stats->corrected = 0;
872 + stats->failed = 0;
873 +
874 + for (i = 0; i < sectors; i++) {
875 + offset = (i >> 2) << 2;
876 + err = readl(ecc->regs + ECC_DECENUM0 + offset);
877 + err = err >> ((i % 4) * ecc->caps->err_shift);
878 + err &= ecc->caps->err_mask;
879 + if (err == ecc->caps->err_mask) {
880 + /* uncorrectable errors */
881 + stats->failed++;
882 + continue;
883 + }
884 +
885 + stats->corrected += err;
886 + bitflips = max_t(u32, bitflips, err);
887 + }
888 +
889 + stats->bitflips = bitflips;
890 +}
891 +EXPORT_SYMBOL(mtk_ecc_get_stats);
892 +
893 +void mtk_ecc_release(struct mtk_ecc *ecc)
894 +{
895 + clk_disable_unprepare(ecc->clk);
896 + put_device(ecc->dev);
897 +}
898 +EXPORT_SYMBOL(mtk_ecc_release);
899 +
900 +static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
901 +{
902 + mtk_ecc_wait_idle(ecc, ECC_ENCODE);
903 + writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);
904 +
905 + mtk_ecc_wait_idle(ecc, ECC_DECODE);
906 + writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
907 +}
908 +
909 +static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
910 +{
911 + struct platform_device *pdev;
912 + struct mtk_ecc *ecc;
913 +
914 + pdev = of_find_device_by_node(np);
915 + if (!pdev)
916 + return ERR_PTR(-EPROBE_DEFER);
917 +
918 + ecc = platform_get_drvdata(pdev);
919 + if (!ecc) {
920 + put_device(&pdev->dev);
921 + return ERR_PTR(-EPROBE_DEFER);
922 + }
923 +
924 + clk_prepare_enable(ecc->clk);
925 + mtk_ecc_hw_init(ecc);
926 +
927 + return ecc;
928 +}
929 +
930 +struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
931 +{
932 + struct mtk_ecc *ecc = NULL;
933 + struct device_node *np;
934 +
935 + np = of_parse_phandle(of_node, "ecc-engine", 0);
936 + if (np) {
937 + ecc = mtk_ecc_get(np);
938 + of_node_put(np);
939 + }
940 +
941 + return ecc;
942 +}
943 +EXPORT_SYMBOL(of_mtk_ecc_get);
944 +
945 +int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
946 +{
947 + enum mtk_ecc_operation op = config->op;
948 + u16 reg_val;
949 + int ret;
950 +
951 + ret = mutex_lock_interruptible(&ecc->lock);
952 + if (ret) {
953 + dev_err(ecc->dev, "interrupted when attempting to lock\n");
954 + return ret;
955 + }
956 +
957 + mtk_ecc_wait_idle(ecc, op);
958 +
959 + ret = mtk_ecc_config(ecc, config);
960 + if (ret) {
961 + mutex_unlock(&ecc->lock);
962 + return ret;
963 + }
964 +
965 + if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) {
966 + init_completion(&ecc->done);
967 + reg_val = ECC_IRQ_EN;
968 + /*
969 + * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, then it
970 + * means this chip can only generate one ecc irq during page
971 + * read / write. If is 0, generate one ecc irq each ecc step.
972 + */
973 + if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
974 + reg_val |= ECC_PG_IRQ_SEL;
975 + if (op == ECC_ENCODE)
976 + writew(reg_val, ecc->regs +
977 + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
978 + else
979 + writew(reg_val, ecc->regs +
980 + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
981 + }
982 +
983 + writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
984 +
985 + return 0;
986 +}
987 +EXPORT_SYMBOL(mtk_ecc_enable);
988 +
989 +void mtk_ecc_disable(struct mtk_ecc *ecc)
990 +{
991 + enum mtk_ecc_operation op = ECC_ENCODE;
992 +
993 + /* find out the running operation */
994 + if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
995 + op = ECC_DECODE;
996 +
997 + /* disable it */
998 + mtk_ecc_wait_idle(ecc, op);
999 + if (op == ECC_DECODE) {
1000 + /*
1001 + * Clear decode IRQ status in case there is a timeout to wait
1002 + * decode IRQ.
1003 + */
1004 + readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
1005 + writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
1006 + } else {
1007 + writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
1008 + }
1009 +
1010 + writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
1011 +
1012 + mutex_unlock(&ecc->lock);
1013 +}
1014 +EXPORT_SYMBOL(mtk_ecc_disable);
1015 +
1016 +int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
1017 +{
1018 + int ret;
1019 +
1020 + ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
1021 + if (!ret) {
1022 + dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n",
1023 + (op == ECC_ENCODE) ? "encoder" : "decoder");
1024 + return -ETIMEDOUT;
1025 + }
1026 +
1027 + return 0;
1028 +}
1029 +EXPORT_SYMBOL(mtk_ecc_wait_done);
1030 +
1031 +int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
1032 + u8 *data, u32 bytes)
1033 +{
1034 + dma_addr_t addr;
1035 + u32 len;
1036 + int ret;
1037 +
1038 + addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
1039 + ret = dma_mapping_error(ecc->dev, addr);
1040 + if (ret) {
1041 + dev_err(ecc->dev, "dma mapping error\n");
1042 + return -EINVAL;
1043 + }
1044 +
1045 + config->op = ECC_ENCODE;
1046 + config->addr = addr;
1047 + ret = mtk_ecc_enable(ecc, config);
1048 + if (ret) {
1049 + dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
1050 + return ret;
1051 + }
1052 +
1053 + ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
1054 + if (ret)
1055 + goto timeout;
1056 +
1057 + mtk_ecc_wait_idle(ecc, ECC_ENCODE);
1058 +
1059 + /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
1060 + len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
1061 +
1062 + /* write the parity bytes generated by the ECC back to temp buffer */
1063 + __ioread32_copy(ecc->eccdata,
1064 + ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
1065 + round_up(len, 4));
1066 +
1067 + /* copy into possibly unaligned OOB region with actual length */
1068 + memcpy(data + bytes, ecc->eccdata, len);
1069 +timeout:
1070 +
1071 + dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
1072 + mtk_ecc_disable(ecc);
1073 +
1074 + return ret;
1075 +}
1076 +EXPORT_SYMBOL(mtk_ecc_encode);
1077 +
1078 +void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
1079 +{
1080 + const u8 *ecc_strength = ecc->caps->ecc_strength;
1081 + int i;
1082 +
1083 + for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
1084 + if (*p <= ecc_strength[i]) {
1085 + if (!i)
1086 + *p = ecc_strength[i];
1087 + else if (*p != ecc_strength[i])
1088 + *p = ecc_strength[i - 1];
1089 + return;
1090 + }
1091 + }
1092 +
1093 + *p = ecc_strength[ecc->caps->num_ecc_strength - 1];
1094 +}
1095 +EXPORT_SYMBOL(mtk_ecc_adjust_strength);
1096 +
1097 +unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
1098 +{
1099 + return ecc->caps->parity_bits;
1100 +}
1101 +EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
1102 +
1103 +static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
1104 + .err_mask = 0x3f,
1105 + .err_shift = 8,
1106 + .ecc_strength = ecc_strength_mt2701,
1107 + .ecc_regs = mt2701_ecc_regs,
1108 + .num_ecc_strength = 20,
1109 + .ecc_mode_shift = 5,
1110 + .parity_bits = 14,
1111 + .pg_irq_sel = 0,
1112 +};
1113 +
1114 +static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
1115 + .err_mask = 0x7f,
1116 + .err_shift = 8,
1117 + .ecc_strength = ecc_strength_mt2712,
1118 + .ecc_regs = mt2712_ecc_regs,
1119 + .num_ecc_strength = 23,
1120 + .ecc_mode_shift = 5,
1121 + .parity_bits = 14,
1122 + .pg_irq_sel = 1,
1123 +};
1124 +
1125 +static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
1126 + .err_mask = 0x1f,
1127 + .err_shift = 5,
1128 + .ecc_strength = ecc_strength_mt7622,
1129 + .ecc_regs = mt7622_ecc_regs,
1130 + .num_ecc_strength = 5,
1131 + .ecc_mode_shift = 4,
1132 + .parity_bits = 13,
1133 + .pg_irq_sel = 0,
1134 +};
1135 +
1136 +static const struct of_device_id mtk_ecc_dt_match[] = {
1137 + {
1138 + .compatible = "mediatek,mt2701-ecc",
1139 + .data = &mtk_ecc_caps_mt2701,
1140 + }, {
1141 + .compatible = "mediatek,mt2712-ecc",
1142 + .data = &mtk_ecc_caps_mt2712,
1143 + }, {
1144 + .compatible = "mediatek,mt7622-ecc",
1145 + .data = &mtk_ecc_caps_mt7622,
1146 + },
1147 + {},
1148 +};
1149 +
1150 +static int mtk_ecc_probe(struct platform_device *pdev)
1151 +{
1152 + struct device *dev = &pdev->dev;
1153 + struct mtk_ecc *ecc;
1154 + struct resource *res;
1155 + u32 max_eccdata_size;
1156 + int irq, ret;
1157 +
1158 + ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
1159 + if (!ecc)
1160 + return -ENOMEM;
1161 +
1162 + ecc->caps = of_device_get_match_data(dev);
1163 +
1164 + max_eccdata_size = ecc->caps->num_ecc_strength - 1;
1165 + max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
1166 + max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
1167 + max_eccdata_size = round_up(max_eccdata_size, 4);
1168 + ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
1169 + if (!ecc->eccdata)
1170 + return -ENOMEM;
1171 +
1172 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1173 + ecc->regs = devm_ioremap_resource(dev, res);
1174 + if (IS_ERR(ecc->regs))
1175 + return PTR_ERR(ecc->regs);
1176 +
1177 + ecc->clk = devm_clk_get(dev, NULL);
1178 + if (IS_ERR(ecc->clk)) {
1179 + dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
1180 + return PTR_ERR(ecc->clk);
1181 + }
1182 +
1183 + irq = platform_get_irq(pdev, 0);
1184 + if (irq < 0)
1185 + return irq;
1186 +
1187 + ret = dma_set_mask(dev, DMA_BIT_MASK(32));
1188 + if (ret) {
1189 + dev_err(dev, "failed to set DMA mask\n");
1190 + return ret;
1191 + }
1192 +
1193 + ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
1194 + if (ret) {
1195 + dev_err(dev, "failed to request irq\n");
1196 + return -EINVAL;
1197 + }
1198 +
1199 + ecc->dev = dev;
1200 + mutex_init(&ecc->lock);
1201 + platform_set_drvdata(pdev, ecc);
1202 + dev_info(dev, "probed\n");
1203 +
1204 + return 0;
1205 +}
1206 +
1207 +#ifdef CONFIG_PM_SLEEP
1208 +static int mtk_ecc_suspend(struct device *dev)
1209 +{
1210 + struct mtk_ecc *ecc = dev_get_drvdata(dev);
1211 +
1212 + clk_disable_unprepare(ecc->clk);
1213 +
1214 + return 0;
1215 +}
1216 +
1217 +static int mtk_ecc_resume(struct device *dev)
1218 +{
1219 + struct mtk_ecc *ecc = dev_get_drvdata(dev);
1220 + int ret;
1221 +
1222 + ret = clk_prepare_enable(ecc->clk);
1223 + if (ret) {
1224 + dev_err(dev, "failed to enable clk\n");
1225 + return ret;
1226 + }
1227 +
1228 + return 0;
1229 +}
1230 +
1231 +static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
1232 +#endif
1233 +
1234 +MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
1235 +
1236 +static struct platform_driver mtk_ecc_driver = {
1237 + .probe = mtk_ecc_probe,
1238 + .driver = {
1239 + .name = "mtk-ecc",
1240 + .of_match_table = of_match_ptr(mtk_ecc_dt_match),
1241 +#ifdef CONFIG_PM_SLEEP
1242 + .pm = &mtk_ecc_pm_ops,
1243 +#endif
1244 + },
1245 +};
1246 +
1247 +module_platform_driver(mtk_ecc_driver);
1248 +
1249 +MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
1250 +MODULE_DESCRIPTION("MTK Nand ECC Driver");
1251 +MODULE_LICENSE("Dual MIT/GPL");
1252 --- a/drivers/mtd/nand/raw/Kconfig
1253 +++ b/drivers/mtd/nand/raw/Kconfig
1254 @@ -360,6 +360,7 @@ config MTD_NAND_QCOM
1255
1256 config MTD_NAND_MTK
1257 tristate "MTK NAND controller"
1258 + depends on MTD_NAND_ECC_MEDIATEK
1259 depends on ARCH_MEDIATEK || COMPILE_TEST
1260 depends on HAS_IOMEM
1261 help
1262 --- a/drivers/mtd/nand/raw/Makefile
1263 +++ b/drivers/mtd/nand/raw/Makefile
1264 @@ -48,7 +48,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_n
1265 obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
1266 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
1267 obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
1268 -obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
1269 +obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o
1270 obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o
1271 obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
1272 obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
1273 --- a/drivers/mtd/nand/raw/mtk_nand.c
1274 +++ b/drivers/mtd/nand/raw/mtk_nand.c
1275 @@ -17,7 +17,7 @@
1276 #include <linux/iopoll.h>
1277 #include <linux/of.h>
1278 #include <linux/of_device.h>
1279 -#include "mtk_ecc.h"
1280 +#include <linux/mtd/nand-ecc-mtk.h>
1281
1282 /* NAND controller register definition */
1283 #define NFI_CNFG (0x00)
1284 --- a/drivers/mtd/nand/raw/mtk_ecc.h
1285 +++ /dev/null
1286 @@ -1,47 +0,0 @@
1287 -/* SPDX-License-Identifier: GPL-2.0 OR MIT */
1288 -/*
1289 - * MTK SDG1 ECC controller
1290 - *
1291 - * Copyright (c) 2016 Mediatek
1292 - * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
1293 - * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
1294 - */
1295 -
1296 -#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
1297 -#define __DRIVERS_MTD_NAND_MTK_ECC_H__
1298 -
1299 -#include <linux/types.h>
1300 -
1301 -enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
1302 -enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
1303 -
1304 -struct device_node;
1305 -struct mtk_ecc;
1306 -
1307 -struct mtk_ecc_stats {
1308 - u32 corrected;
1309 - u32 bitflips;
1310 - u32 failed;
1311 -};
1312 -
1313 -struct mtk_ecc_config {
1314 - enum mtk_ecc_operation op;
1315 - enum mtk_ecc_mode mode;
1316 - dma_addr_t addr;
1317 - u32 strength;
1318 - u32 sectors;
1319 - u32 len;
1320 -};
1321 -
1322 -int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
1323 -void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
1324 -int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
1325 -int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
1326 -void mtk_ecc_disable(struct mtk_ecc *);
1327 -void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
1328 -unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
1329 -
1330 -struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
1331 -void mtk_ecc_release(struct mtk_ecc *);
1332 -
1333 -#endif
1334 --- /dev/null
1335 +++ b/include/linux/mtd/nand-ecc-mtk.h
1336 @@ -0,0 +1,47 @@
1337 +/* SPDX-License-Identifier: GPL-2.0 OR MIT */
1338 +/*
1339 + * MTK SDG1 ECC controller
1340 + *
1341 + * Copyright (c) 2016 Mediatek
1342 + * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
1343 + * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
1344 + */
1345 +
1346 +#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
1347 +#define __DRIVERS_MTD_NAND_MTK_ECC_H__
1348 +
1349 +#include <linux/types.h>
1350 +
1351 +enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
1352 +enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
1353 +
1354 +struct device_node;
1355 +struct mtk_ecc;
1356 +
1357 +struct mtk_ecc_stats {
1358 + u32 corrected;
1359 + u32 bitflips;
1360 + u32 failed;
1361 +};
1362 +
1363 +struct mtk_ecc_config {
1364 + enum mtk_ecc_operation op;
1365 + enum mtk_ecc_mode mode;
1366 + dma_addr_t addr;
1367 + u32 strength;
1368 + u32 sectors;
1369 + u32 len;
1370 +};
1371 +
1372 +int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
1373 +void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
1374 +int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
1375 +int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
1376 +void mtk_ecc_disable(struct mtk_ecc *);
1377 +void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
1378 +unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
1379 +
1380 +struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
1381 +void mtk_ecc_release(struct mtk_ecc *);
1382 +
1383 +#endif
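
Usage sketch (illustrative, not part of the patch itself): the mtk_ecc API exported via <linux/mtd/nand-ecc-mtk.h> is consumed by the raw NAND driver (mtk_nand.c) and, per the commit message, by the upcoming MediaTek SNFI spi-mem driver. The consumer below is hypothetical; the example_snfi names, the strength/len/sectors values and the read flow are assumptions for illustration, and only the of_mtk_ecc_get()/mtk_ecc_* calls and structure fields come from the header added above.

/*
 * Hypothetical consumer of the exported ECC engine (sketch only).
 * Only the mtk_ecc_* calls and <linux/mtd/nand-ecc-mtk.h> declarations
 * are taken from this patch; everything else is illustrative.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mtd/nand-ecc-mtk.h>

struct example_snfi {				/* hypothetical controller state */
	struct device *dev;
	struct mtk_ecc *ecc;
	struct mtk_ecc_config ecc_cfg;
};

static int example_snfi_attach_ecc(struct example_snfi *snfi)
{
	u32 strength = 8;			/* requested bits per sector (example) */

	/* Resolve the "ecc-engine" phandle below the controller's DT node. */
	snfi->ecc = of_mtk_ecc_get(snfi->dev->of_node);
	if (IS_ERR_OR_NULL(snfi->ecc))
		return snfi->ecc ? PTR_ERR(snfi->ecc) : -ENODEV;

	/* Round the requested strength to one the ECC IP actually supports. */
	mtk_ecc_adjust_strength(snfi->ecc, &strength);

	snfi->ecc_cfg.mode = ECC_NFI_MODE;	/* pipelined with the NFI/SNFI */
	snfi->ecc_cfg.strength = strength;
	snfi->ecc_cfg.len = 512 + 8;		/* sector data + FDM bytes (example) */
	snfi->ecc_cfg.sectors = 4;		/* sectors per page (example) */

	return 0;
}

static int example_snfi_read_page(struct example_snfi *snfi)
{
	struct mtk_ecc_stats stats;
	int ret;

	snfi->ecc_cfg.op = ECC_DECODE;
	ret = mtk_ecc_enable(snfi->ecc, &snfi->ecc_cfg);
	if (ret)
		return ret;

	/* ... kick off the controller's page-read DMA here ... */

	ret = mtk_ecc_wait_done(snfi->ecc, ECC_DECODE);
	if (!ret)
		mtk_ecc_get_stats(snfi->ecc, &stats, snfi->ecc_cfg.sectors);
	mtk_ecc_disable(snfi->ecc);

	if (ret)
		return ret;
	return stats.failed ? -EBADMSG : (int)stats.bitflips;
}

static void example_snfi_detach_ecc(struct example_snfi *snfi)
{
	mtk_ecc_release(snfi->ecc);		/* drops the clock and device reference */
}

The module split is what allows this sharing: the raw NAND driver now depends on the new MTD_NAND_ECC_MEDIATEK symbol and no longer builds mtk_ecc.o through its own Makefile entry, so a second controller driver can reuse the same ECC engine code.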