mediatek: add v4.19 support
target/linux/mediatek/patches-4.19/0306-spi-spi-mem-MediaTek-Add-SPI-NAND-Flash-interface-dr.patch
From 1ecb38eabd90efe93957d0a822a167560c39308a Mon Sep 17 00:00:00 2001
From: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
Date: Wed, 20 Mar 2019 16:19:51 +0800
Subject: [PATCH 6/6] spi: spi-mem: MediaTek: Add SPI NAND Flash interface
 driver for MediaTek MT7622

Change-Id: I3e78406bb9b46b0049d3988a5c71c7069e4f809c
Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
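
For reference, a board integration sketch (a hypothetical node: the unit
address, interrupt and clock phandles are placeholders, while the
compatible string and the "nfi_clk"/"spi_clk" clock names match what the
driver requests). The driver also obtains its ECC engine via
of_mtk_ecc_get(), so a real node additionally needs the ECC phandle
property defined by the companion mtk_ecc patch (not shown here):

    snfi: spi@1100d000 {
        compatible = "mediatek,mt7622-snfi";
        reg = <0 0x1100d000 0 0x1000>;
        interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
        clocks = <&pericfg CLK_PERI_NFI_PD>, <&pericfg CLK_PERI_SNFI_PD>;
        clock-names = "nfi_clk", "spi_clk";
    };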
---
 drivers/spi/Kconfig        |    9 +
 drivers/spi/Makefile       |    1 +
 drivers/spi/spi-mtk-snfi.c | 1183 ++++++++++++++++++++++++++++++++++++
 3 files changed, 1193 insertions(+)
 create mode 100644 drivers/spi/spi-mtk-snfi.c

Index: linux-4.19.48/drivers/spi/spi-mtk-snfi.c
===================================================================
--- /dev/null
+++ linux-4.19.48/drivers/spi/spi-mtk-snfi.c
@@ -0,0 +1,1183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the MediaTek SPI NAND interface
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ * Authors: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/mtk_ecc.h>
+#include <linux/mtd/spinand.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/* NAND controller register definition */
+/* NFI control */
+#define NFI_CNFG		0x00
+#define CNFG_DMA		BIT(0)
+#define CNFG_READ_EN		BIT(1)
+#define CNFG_DMA_BURST_EN	BIT(2)
+#define CNFG_BYTE_RW		BIT(6)
+#define CNFG_HW_ECC_EN		BIT(8)
+#define CNFG_AUTO_FMT_EN	BIT(9)
+#define CNFG_OP_PROGRAM		(3UL << 12)
+#define CNFG_OP_CUST		(6UL << 12)
+#define NFI_PAGEFMT		0x04
+#define PAGEFMT_512		0
+#define PAGEFMT_2K		1
+#define PAGEFMT_4K		2
+#define PAGEFMT_FDM_SHIFT	8
+#define PAGEFMT_FDM_ECC_SHIFT	12
+#define NFI_CON			0x08
+#define CON_FIFO_FLUSH		BIT(0)
+#define CON_NFI_RST		BIT(1)
+#define CON_BRD			BIT(8)
+#define CON_BWR			BIT(9)
+#define CON_SEC_SHIFT		12
+#define NFI_INTR_EN		0x10
+#define INTR_AHB_DONE_EN	BIT(6)
+#define NFI_INTR_STA		0x14
+#define NFI_CMD			0x20
+#define NFI_STA			0x60
+#define STA_EMP_PAGE		BIT(12)
+#define NAND_FSM_MASK		(0x1f << 24)
+#define NFI_FSM_MASK		(0xf << 16)
+#define NFI_ADDRCNTR		0x70
+#define CNTR_MASK		GENMASK(16, 12)
+#define ADDRCNTR_SEC_SHIFT	12
+#define ADDRCNTR_SEC(val) \
+		(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
+#define NFI_STRADDR		0x80
+#define NFI_BYTELEN		0x84
+#define NFI_CSEL		0x90
+#define NFI_FDML(x)		(0xa0 + (x) * sizeof(u32) * 2)
+#define NFI_FDMM(x)		(0xa4 + (x) * sizeof(u32) * 2)
+#define NFI_MASTER_STA		0x224
+#define MASTER_STA_MASK		0x0fff
+/* NFI_SPI control */
+#define SNFI_MAC_OUTL		0x504
+#define SNFI_MAC_INL		0x508
+#define SNFI_RD_CTL2		0x510
+#define RD_CMD_MASK		0x00ff
+#define RD_DUMMY_SHIFT		8
+#define SNFI_RD_CTL3		0x514
+#define RD_ADDR_MASK		0xffff
+#define SNFI_MISC_CTL		0x538
+#define RD_MODE_X2		BIT(16)
+#define RD_MODE_X4		(2UL << 16)
+#define RD_QDUAL_IO		(4UL << 16)
+#define RD_MODE_MASK		(7UL << 16)
+#define RD_CUSTOM_EN		BIT(6)
+#define WR_CUSTOM_EN		BIT(7)
+#define WR_X4_EN		BIT(20)
+#define SW_RST			BIT(28)
+#define SNFI_MISC_CTL2		0x53c
+#define WR_LEN_SHIFT		16
+#define SNFI_PG_CTL1		0x524
+#define WR_LOAD_CMD_SHIFT	8
+#define SNFI_PG_CTL2		0x528
+#define WR_LOAD_ADDR_MASK	0xffff
+#define SNFI_MAC_CTL		0x500
+#define MAC_WIP			BIT(0)
+#define MAC_WIP_READY		BIT(1)
+#define MAC_TRIG		BIT(2)
+#define MAC_EN			BIT(3)
+#define MAC_SIO_SEL		BIT(4)
+#define SNFI_STA_CTL1		0x550
+#define SPI_STATE_IDLE		0xf
+#define SNFI_CNFG		0x55c
+#define SNFI_MODE_EN		BIT(0)
+#define SNFI_GPRAM_DATA		0x800
+#define SNFI_GPRAM_MAX_LEN	16
+
+/* Dummy commands that trigger the NFI into SPI mode */
+#define NAND_CMD_DUMMYREAD	0x00
+#define NAND_CMD_DUMMYPROG	0x80
+
+#define MTK_TIMEOUT		500000
+#define MTK_RESET_TIMEOUT	1000000
+#define MTK_SNFC_MIN_SPARE	16
+#define KB(x)			((x) * 1024UL)
+
+/*
+ * Supported spare sizes of each IP. The order must match the spare-size
+ * bit-field definition of the NFI_PAGEFMT register.
+ */
+static const u8 spare_size_mt7622[] = {
+	16, 26, 27, 28
+};
+
+struct mtk_snfi_caps {
+	const u8 *spare_size;
+	u8 num_spare_size;
+	u32 nand_sec_size;
+	u8 nand_fdm_size;
+	u8 nand_fdm_ecc_size;
+	u8 ecc_parity_bits;
+	u8 pageformat_spare_shift;
+	u8 bad_mark_swap;
+};
+
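+/*
+ * Bad-mark handling: the factory bad-block marker sits at a fixed offset in
+ * the page, so it has to be swapped in and out of the controller's
+ * sector-interleaved layout (see mtk_snfi_bad_mark_swap()).
+ */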
+struct mtk_snfi_bad_mark_ctl {
+	void (*bm_swap)(struct spi_mem *mem, u8 *buf, int raw);
+	u32 sec;
+	u32 pos;
+};
+
+struct mtk_snfi_nand_chip {
+	struct mtk_snfi_bad_mark_ctl bad_mark;
+	u32 spare_per_sector;
+};
+
+struct mtk_snfi_clk {
+	struct clk *nfi_clk;
+	struct clk *spi_clk;
+};
+
+struct mtk_snfi {
+	const struct mtk_snfi_caps *caps;
+	struct mtk_snfi_nand_chip snfi_nand;
+	struct mtk_snfi_clk clk;
+	struct mtk_ecc_config ecc_cfg;
+	struct mtk_ecc *ecc;
+	struct completion done;
+	struct device *dev;
+
+	void __iomem *regs;
+
+	u8 *buffer;
+};
+
+static inline u8 *oob_ptr(struct spi_mem *mem, int i)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u8 *poi;
+
+	/*
+	 * Map the sector's FDM data to the free OOB: the beginning of the
+	 * OOB buffer stores the FDM data of the bad-mark sector.
+	 */
+	if (i < snfi_nand->bad_mark.sec)
+		poi = spinand->oobbuf + (i + 1) * snfi->caps->nand_fdm_size;
+	else if (i == snfi_nand->bad_mark.sec)
+		poi = spinand->oobbuf;
+	else
+		poi = spinand->oobbuf + i * snfi->caps->nand_fdm_size;
+
+	return poi;
+}
+
+static inline int mtk_data_len(struct spi_mem *mem)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+
+	return snfi->caps->nand_sec_size + snfi_nand->spare_per_sector;
+}
+
+static inline u8 *mtk_oob_ptr(struct spi_mem *mem,
+			      const u8 *p, int i)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+
+	return (u8 *)p + i * mtk_data_len(mem) + snfi->caps->nand_sec_size;
+}
+
+static void mtk_snfi_bad_mark_swap(struct spi_mem *mem,
+				   u8 *buf, int raw)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 bad_pos = snfi_nand->bad_mark.pos;
+
+	if (raw)
+		bad_pos += snfi_nand->bad_mark.sec * mtk_data_len(mem);
+	else
+		bad_pos += snfi_nand->bad_mark.sec * snfi->caps->nand_sec_size;
+
+	swap(spinand->oobbuf[0], buf[bad_pos]);
+}
+
+static void mtk_snfi_set_bad_mark_ctl(struct mtk_snfi_bad_mark_ctl *bm_ctl,
+				      struct spi_mem *mem)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+
+	bm_ctl->bm_swap = mtk_snfi_bad_mark_swap;
+	bm_ctl->sec = mtd->writesize / mtk_data_len(mem);
+	bm_ctl->pos = mtd->writesize % mtk_data_len(mem);
+}
+
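+/*
+ * The MAC (macro command) engine issues raw SPI command/address/data cycles
+ * through the controller's GPRAM (see mtk_snfi_command_exec()), bypassing
+ * the NFI page sequencer.
+ */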
+static void mtk_snfi_mac_enable(struct mtk_snfi *snfi)
+{
+	u32 mac;
+
+	mac = readl(snfi->regs + SNFI_MAC_CTL);
+	mac &= ~MAC_SIO_SEL;
+	mac |= MAC_EN;
+
+	writel(mac, snfi->regs + SNFI_MAC_CTL);
+}
+
+static int mtk_snfi_mac_trigger(struct mtk_snfi *snfi)
+{
+	u32 mac, reg;
+	int ret = 0;
+
+	mac = readl(snfi->regs + SNFI_MAC_CTL);
+	mac |= MAC_TRIG;
+	writel(mac, snfi->regs + SNFI_MAC_CTL);
+
+	ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL, reg,
+					reg & MAC_WIP_READY, 10,
+					MTK_TIMEOUT);
+	if (ret < 0) {
+		dev_err(snfi->dev, "polling wip ready for read timeout\n");
+		return -EIO;
+	}
+
+	ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL, reg,
+					!(reg & MAC_WIP), 10,
+					MTK_TIMEOUT);
+	if (ret < 0) {
+		dev_err(snfi->dev, "polling flash update timeout\n");
+		return -EIO;
+	}
+
+	return ret;
+}
+
+static void mtk_snfi_mac_leave(struct mtk_snfi *snfi)
+{
+	u32 mac;
+
+	mac = readl(snfi->regs + SNFI_MAC_CTL);
+	mac &= ~(MAC_TRIG | MAC_EN | MAC_SIO_SEL);
+	writel(mac, snfi->regs + SNFI_MAC_CTL);
+}
+
+static int mtk_snfi_mac_op(struct mtk_snfi *snfi)
+{
+	int ret = 0;
+
+	mtk_snfi_mac_enable(snfi);
+
+	ret = mtk_snfi_mac_trigger(snfi);
+	if (ret)
+		return ret;
+
+	mtk_snfi_mac_leave(snfi);
+
+	return ret;
+}
+
+static irqreturn_t mtk_snfi_irq(int irq, void *id)
+{
+	struct mtk_snfi *snfi = id;
+	u16 sta, ien;
+
+	sta = readw(snfi->regs + NFI_INTR_STA);
+	ien = readw(snfi->regs + NFI_INTR_EN);
+
+	if (!(sta & ien))
+		return IRQ_NONE;
+
+	writew(~sta & ien, snfi->regs + NFI_INTR_EN);
+	complete(&snfi->done);
+
+	return IRQ_HANDLED;
+}
+
+static int mtk_snfi_enable_clk(struct device *dev, struct mtk_snfi_clk *clk)
+{
+	int ret;
+
+	ret = clk_prepare_enable(clk->nfi_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable nfi clk\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(clk->spi_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable spi clk\n");
+		clk_disable_unprepare(clk->nfi_clk);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void mtk_snfi_disable_clk(struct mtk_snfi_clk *clk)
+{
+	clk_disable_unprepare(clk->nfi_clk);
+	clk_disable_unprepare(clk->spi_clk);
+}
+
+static int mtk_snfi_reset(struct mtk_snfi *snfi)
+{
+	u32 val;
+	int ret;
+
+	/* SW reset controller */
+	val = readl(snfi->regs + SNFI_MISC_CTL) | SW_RST;
+	writel(val, snfi->regs + SNFI_MISC_CTL);
+
+	ret = readw_poll_timeout(snfi->regs + SNFI_STA_CTL1, val,
+				 !(val & SPI_STATE_IDLE), 50,
+				 MTK_RESET_TIMEOUT);
+	if (ret) {
+		dev_warn(snfi->dev, "spi state active in reset [0x%x] = 0x%x\n",
+			 SNFI_STA_CTL1, val);
+		return ret;
+	}
+
+	val = readl(snfi->regs + SNFI_MISC_CTL);
+	val &= ~SW_RST;
+	writel(val, snfi->regs + SNFI_MISC_CTL);
+
+	/* reset all registers and force the NFI master to terminate */
+	writew(CON_FIFO_FLUSH | CON_NFI_RST, snfi->regs + NFI_CON);
+	ret = readw_poll_timeout(snfi->regs + NFI_STA, val,
+				 !(val & (NFI_FSM_MASK | NAND_FSM_MASK)), 50,
+				 MTK_RESET_TIMEOUT);
+	if (ret) {
+		dev_warn(snfi->dev, "nfi active in reset [0x%x] = 0x%x\n",
+			 NFI_STA, val);
+		return ret;
+	}
+
+	return 0;
+}
+
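+/*
+ * Pick the largest spare size supported by the controller that still fits
+ * in the chip's actual OOB-per-sector budget.
+ */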
+static int mtk_snfi_set_spare_per_sector(struct spinand_device *spinand,
+					 const struct mtk_snfi_caps *caps,
+					 u32 *sps)
+{
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	const u8 *spare = caps->spare_size;
+	u32 sectors, i, closest_spare = 0;
+
+	sectors = mtd->writesize / caps->nand_sec_size;
+	*sps = mtd->oobsize / sectors;
+
+	if (*sps < MTK_SNFC_MIN_SPARE)
+		return -EINVAL;
+
+	for (i = 0; i < caps->num_spare_size; i++) {
+		if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
+			closest_spare = i;
+			if (*sps == spare[i])
+				break;
+		}
+	}
+
+	*sps = spare[closest_spare];
+
+	return 0;
+}
+
+static void mtk_snfi_read_fdm_data(struct spi_mem *mem,
+				   u32 sectors)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	const struct mtk_snfi_caps *caps = snfi->caps;
+	u32 vall, valm;
+	int i, j;
+	u8 *oobptr;
+
+	for (i = 0; i < sectors; i++) {
+		oobptr = oob_ptr(mem, i);
+		vall = readl(snfi->regs + NFI_FDML(i));
+		valm = readl(snfi->regs + NFI_FDMM(i));
+
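+		/* FDM bytes 0..3 unpack from FDML, bytes 4..7 from FDMM */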
+		for (j = 0; j < caps->nand_fdm_size; j++)
+			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+	}
+}
+
+static void mtk_snfi_write_fdm_data(struct spi_mem *mem,
+				    u32 sectors)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	const struct mtk_snfi_caps *caps = snfi->caps;
+	u32 vall, valm;
+	int i, j;
+	u8 *oobptr;
+
+	for (i = 0; i < sectors; i++) {
+		oobptr = oob_ptr(mem, i);
+		vall = 0;
+		valm = 0;
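+		/* pack FDM bytes 0..3 into FDML, 4..7 into FDMM, padding with 0xff */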
+		for (j = 0; j < 8; j++) {
+			if (j < 4)
+				vall |= (j < caps->nand_fdm_size ? oobptr[j] :
+					 0xff) << (j * 8);
+			else
+				valm |= (j < caps->nand_fdm_size ? oobptr[j] :
+					 0xff) << ((j - 4) * 8);
+		}
+		writel(vall, snfi->regs + NFI_FDML(i));
+		writel(valm, snfi->regs + NFI_FDMM(i));
+	}
+}
+
+static int mtk_snfi_update_ecc_stats(struct spi_mem *mem,
+				     u8 *buf, u32 sectors)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_ecc_stats stats;
+	int rc, i;
+
+	rc = readl(snfi->regs + NFI_STA) & STA_EMP_PAGE;
+	if (rc) {
+		memset(buf, 0xff, sectors * snfi->caps->nand_sec_size);
+		for (i = 0; i < sectors; i++)
+			memset(oob_ptr(mem, i), 0xff,
+			       snfi->caps->nand_fdm_size);
+		return 0;
+	}
+
+	mtk_ecc_get_stats(snfi->ecc, &stats, sectors);
+	mtd->ecc_stats.corrected += stats.corrected;
+	mtd->ecc_stats.failed += stats.failed;
+
+	return 0;
+}
+
+static int mtk_snfi_hw_runtime_config(struct spi_mem *mem)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	const struct mtk_snfi_caps *caps = snfi->caps;
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 fmt, spare, i = 0;
+	int ret;
+
+	ret = mtk_snfi_set_spare_per_sector(spinand, caps, &spare);
+	if (ret)
+		return ret;
+
+	/* calculate usable oob bytes for ecc parity data */
+	snfi_nand->spare_per_sector = spare;
+	spare -= caps->nand_fdm_size;
+
+	nand->memorg.oobsize = snfi_nand->spare_per_sector
+		* (mtd->writesize / caps->nand_sec_size);
+	mtd->oobsize = nanddev_per_page_oobsize(nand);
+
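+	/* max strength = available spare bits divided by parity bits per ECC step */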
+	snfi->ecc_cfg.strength = (spare << 3) / caps->ecc_parity_bits;
+	mtk_ecc_adjust_strength(snfi->ecc, &snfi->ecc_cfg.strength);
+
+	switch (mtd->writesize) {
+	case 512:
+		fmt = PAGEFMT_512;
+		break;
+	case KB(2):
+		fmt = PAGEFMT_2K;
+		break;
+	case KB(4):
+		fmt = PAGEFMT_4K;
+		break;
+	default:
+		dev_err(snfi->dev, "invalid page len: %d\n", mtd->writesize);
+		return -EINVAL;
+	}
+
+	/* Setup PageFormat */
+	for (i = 0; i < caps->num_spare_size; i++)
+		if (caps->spare_size[i] == snfi_nand->spare_per_sector)
+			break;
+
+	if (i == caps->num_spare_size) {
+		dev_err(snfi->dev, "invalid spare size %d\n",
+			snfi_nand->spare_per_sector);
+		return -EINVAL;
+	}
+
+	fmt |= i << caps->pageformat_spare_shift;
+	fmt |= caps->nand_fdm_size << PAGEFMT_FDM_SHIFT;
+	fmt |= caps->nand_fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+	writel(fmt, snfi->regs + NFI_PAGEFMT);
+
+	snfi->ecc_cfg.len = caps->nand_sec_size + caps->nand_fdm_ecc_size;
+
+	mtk_snfi_set_bad_mark_ctl(&snfi_nand->bad_mark, mem);
+
+	return 0;
+}
+
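+/*
+ * Read a page (data + spare) into snfi->buffer over the NFI DMA path,
+ * optionally decoding ECC and extracting the FDM bytes on the fly.
+ */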
+static int mtk_snfi_read_from_cache(struct spi_mem *mem,
+				    const struct spi_mem_op *op, int oob_on)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	u32 sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 reg, len, col_addr = 0;
+	int dummy_cycle, ret;
+	dma_addr_t dma_addr;
+
+	len = sectors * (snfi->caps->nand_sec_size
+	      + snfi_nand->spare_per_sector);
+
+	dma_addr = dma_map_single(snfi->dev, snfi->buffer,
+				  len, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(snfi->dev, dma_addr);
+	if (ret) {
+		dev_err(snfi->dev, "dma mapping error\n");
+		return -EINVAL;
+	}
+
+	/* set Read cache command and dummy cycle */
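+	/* convert dummy bytes to clock cycles: (nbytes * 8) / dummy-phase buswidth */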
+	dummy_cycle = (op->dummy.nbytes << 3) >> (ffs(op->dummy.buswidth) - 1);
+	reg = ((op->cmd.opcode & RD_CMD_MASK) |
+	       (dummy_cycle << RD_DUMMY_SHIFT));
+	writel(reg, snfi->regs + SNFI_RD_CTL2);
+
+	writel((col_addr & RD_ADDR_MASK), snfi->regs + SNFI_RD_CTL3);
+
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg |= RD_CUSTOM_EN;
+	reg &= ~(RD_MODE_MASK | WR_X4_EN);
+
+	/* set data and addr buswidth */
+	if (op->data.buswidth == 4)
+		reg |= RD_MODE_X4;
+	else if (op->data.buswidth == 2)
+		reg |= RD_MODE_X2;
+
+	if (op->addr.buswidth == 4 || op->addr.buswidth == 2)
+		reg |= RD_QDUAL_IO;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	writel(len, snfi->regs + SNFI_MISC_CTL2);
+	writew(sectors << CON_SEC_SHIFT, snfi->regs + NFI_CON);
+	reg = readw(snfi->regs + NFI_CNFG);
+	reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_DMA | CNFG_OP_CUST;
+
+	if (!oob_on) {
+		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+		writew(reg, snfi->regs + NFI_CNFG);
+
+		snfi->ecc_cfg.mode = ECC_NFI_MODE;
+		snfi->ecc_cfg.sectors = sectors;
+		snfi->ecc_cfg.op = ECC_DECODE;
+		ret = mtk_ecc_enable(snfi->ecc, &snfi->ecc_cfg);
+		if (ret) {
+			dev_err(snfi->dev, "ecc enable failed\n");
+			/* clear NFI_CNFG */
+			reg &= ~(CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_DMA |
+				 CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+			writew(reg, snfi->regs + NFI_CNFG);
+			goto out;
+		}
+	} else {
+		writew(reg, snfi->regs + NFI_CNFG);
+	}
+
+	writel(lower_32_bits(dma_addr), snfi->regs + NFI_STRADDR);
+	readw(snfi->regs + NFI_INTR_STA);
+	writew(INTR_AHB_DONE_EN, snfi->regs + NFI_INTR_EN);
+
+	init_completion(&snfi->done);
+
+	/* issue a dummy command to make the NFI enter SPI mode */
+	writew(NAND_CMD_DUMMYREAD, snfi->regs + NFI_CMD);
+	reg = readl(snfi->regs + NFI_CON) | CON_BRD;
+	writew(reg, snfi->regs + NFI_CON);
+
+	ret = wait_for_completion_timeout(&snfi->done, msecs_to_jiffies(500));
+	if (!ret) {
+		dev_err(snfi->dev, "read ahb done timeout\n");
+		writew(0, snfi->regs + NFI_INTR_EN);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	ret = readl_poll_timeout_atomic(snfi->regs + NFI_BYTELEN, reg,
+					ADDRCNTR_SEC(reg) >= sectors, 10,
+					MTK_TIMEOUT);
+	if (ret < 0) {
+		dev_err(snfi->dev, "polling read byte len timeout\n");
+		ret = -EIO;
+	} else {
+		if (!oob_on) {
+			ret = mtk_ecc_wait_done(snfi->ecc, ECC_DECODE);
+			if (ret) {
+				dev_warn(snfi->dev, "wait ecc done timeout\n");
+			} else {
+				mtk_snfi_update_ecc_stats(mem, snfi->buffer,
+							  sectors);
+				mtk_snfi_read_fdm_data(mem, sectors);
+			}
+		}
+	}
+
+	if (oob_on)
+		goto out;
+
+	mtk_ecc_disable(snfi->ecc);
+out:
+	dma_unmap_single(snfi->dev, dma_addr, len, DMA_FROM_DEVICE);
+	writel(0, snfi->regs + NFI_CON);
+	writel(0, snfi->regs + NFI_CNFG);
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg &= ~RD_CUSTOM_EN;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	return ret;
+}
+
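+/*
+ * Program a page from snfi->buffer over the NFI DMA path, optionally
+ * generating ECC parity and writing the FDM bytes in hardware.
+ */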
+static int mtk_snfi_write_to_cache(struct spi_mem *mem,
+				   const struct spi_mem_op *op,
+				   int oob_on)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	u32 sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 reg, len, col_addr = 0;
+	dma_addr_t dma_addr;
+	int ret;
+
+	len = sectors * (snfi->caps->nand_sec_size
+	      + snfi_nand->spare_per_sector);
+
+	dma_addr = dma_map_single(snfi->dev, snfi->buffer, len,
+				  DMA_TO_DEVICE);
+	ret = dma_mapping_error(snfi->dev, dma_addr);
+	if (ret) {
+		dev_err(snfi->dev, "dma mapping error\n");
+		return -EINVAL;
+	}
+
+	/* set program load cmd and address */
+	reg = (op->cmd.opcode << WR_LOAD_CMD_SHIFT);
+	writel(reg, snfi->regs + SNFI_PG_CTL1);
+	writel(col_addr & WR_LOAD_ADDR_MASK, snfi->regs + SNFI_PG_CTL2);
+
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg |= WR_CUSTOM_EN;
+	reg &= ~(RD_MODE_MASK | WR_X4_EN);
+
+	if (op->data.buswidth == 4)
+		reg |= WR_X4_EN;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	writel(len << WR_LEN_SHIFT, snfi->regs + SNFI_MISC_CTL2);
+	writew(sectors << CON_SEC_SHIFT, snfi->regs + NFI_CON);
+
+	reg = readw(snfi->regs + NFI_CNFG);
+	reg &= ~(CNFG_READ_EN | CNFG_BYTE_RW);
+	reg |= CNFG_DMA | CNFG_DMA_BURST_EN | CNFG_OP_PROGRAM;
+
+	if (!oob_on) {
+		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+		writew(reg, snfi->regs + NFI_CNFG);
+
+		snfi->ecc_cfg.mode = ECC_NFI_MODE;
+		snfi->ecc_cfg.op = ECC_ENCODE;
+		ret = mtk_ecc_enable(snfi->ecc, &snfi->ecc_cfg);
+		if (ret) {
+			dev_err(snfi->dev, "ecc enable failed\n");
+			/* clear NFI_CNFG */
+			reg &= ~(CNFG_DMA_BURST_EN | CNFG_DMA |
+				 CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+			writew(reg, snfi->regs + NFI_CNFG);
+			goto out;
+		}
+		/* write OOB into the FDM registers (OOB area in MTK NAND) */
+		mtk_snfi_write_fdm_data(mem, sectors);
+	} else {
+		writew(reg, snfi->regs + NFI_CNFG);
+	}
+	writel(lower_32_bits(dma_addr), snfi->regs + NFI_STRADDR);
+	readw(snfi->regs + NFI_INTR_STA);
+	writew(INTR_AHB_DONE_EN, snfi->regs + NFI_INTR_EN);
+
+	init_completion(&snfi->done);
+
+	/* issue a dummy command to make the NFI enter SPI mode */
+	writew(NAND_CMD_DUMMYPROG, snfi->regs + NFI_CMD);
+	reg = readl(snfi->regs + NFI_CON) | CON_BWR;
+	writew(reg, snfi->regs + NFI_CON);
+
+	ret = wait_for_completion_timeout(&snfi->done, msecs_to_jiffies(500));
+	if (!ret) {
+		dev_err(snfi->dev, "custom program done timeout\n");
+		writew(0, snfi->regs + NFI_INTR_EN);
+		ret = -ETIMEDOUT;
+		goto ecc_disable;
+	}
+
+	ret = readl_poll_timeout_atomic(snfi->regs + NFI_ADDRCNTR, reg,
+					ADDRCNTR_SEC(reg) >= sectors,
+					10, MTK_TIMEOUT);
+	if (ret)
+		dev_err(snfi->dev, "hwecc write timeout\n");
+
+ecc_disable:
+	mtk_ecc_disable(snfi->ecc);
+
+out:
+	dma_unmap_single(snfi->dev, dma_addr, len, DMA_TO_DEVICE);
+	writel(0, snfi->regs + NFI_CON);
+	writel(0, snfi->regs + NFI_CNFG);
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg &= ~WR_CUSTOM_EN;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	return ret;
+}
+
+static int mtk_snfi_read(struct spi_mem *mem,
+			 const struct spi_mem_op *op)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 col_addr = op->addr.val;
+	int i, ret, sectors, oob_on = false;
+
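+	/* a column address equal to the page size means an OOB-only (raw) access */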
+	if (col_addr == mtd->writesize)
+		oob_on = true;
+
+	ret = mtk_snfi_read_from_cache(mem, op, oob_on);
+	if (ret) {
+		dev_warn(snfi->dev, "read from cache fail\n");
+		return ret;
+	}
+
+	sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	for (i = 0; i < sectors; i++) {
+		if (oob_on)
+			memcpy(oob_ptr(mem, i),
+			       mtk_oob_ptr(mem, snfi->buffer, i),
+			       snfi->caps->nand_fdm_size);
+
+		if (i == snfi_nand->bad_mark.sec && snfi->caps->bad_mark_swap)
+			snfi_nand->bad_mark.bm_swap(mem, snfi->buffer,
+						    oob_on);
+	}
+
+	if (!oob_on)
+		memcpy(spinand->databuf, snfi->buffer, mtd->writesize);
+
+	return ret;
+}
+
+static int mtk_snfi_write(struct spi_mem *mem,
+			  const struct spi_mem_op *op)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 ret, i, sectors, col_addr = op->addr.val;
+	int oob_on = false;
+
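+	/* as in the read path, a column address at the page size selects OOB-only access */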
+	if (col_addr == mtd->writesize)
+		oob_on = true;
+
+	sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	memset(snfi->buffer, 0xff, mtd->writesize + mtd->oobsize);
+
+	if (!oob_on)
+		memcpy(snfi->buffer, spinand->databuf, mtd->writesize);
+
+	for (i = 0; i < sectors; i++) {
+		if (i == snfi_nand->bad_mark.sec && snfi->caps->bad_mark_swap)
+			snfi_nand->bad_mark.bm_swap(mem, snfi->buffer, oob_on);
+
+		if (oob_on)
+			memcpy(mtk_oob_ptr(mem, snfi->buffer, i),
+			       oob_ptr(mem, i),
+			       snfi->caps->nand_fdm_size);
+	}
+
+	ret = mtk_snfi_write_to_cache(mem, op, oob_on);
+	if (ret)
+		dev_warn(snfi->dev, "write to cache fail\n");
+
+	return ret;
+}
+
+static int mtk_snfi_command_exec(struct mtk_snfi *snfi,
+				 const u8 *txbuf, u8 *rxbuf,
+				 const u32 txlen, const u32 rxlen)
+{
+	u32 tmp, i, j, reg, m;
+	u8 *p_tmp = (u8 *)(&tmp);
+	int ret = 0;
+
+	/* Moving tx data to NFI_SPI GPRAM */
+	for (i = 0, m = 0; i < txlen; ) {
+		for (j = 0, tmp = 0; i < txlen && j < 4; i++, j++)
+			p_tmp[j] = txbuf[i];
+
+		writel(tmp, snfi->regs + SNFI_GPRAM_DATA + m);
+		m += 4;
+	}
+
+	writel(txlen, snfi->regs + SNFI_MAC_OUTL);
+	writel(rxlen, snfi->regs + SNFI_MAC_INL);
+	ret = mtk_snfi_mac_op(snfi);
+	if (ret)
+		return ret;
+
+	/* when there is no rx data (rxlen == 0), this loop is skipped */
+	if (rxlen)
+		for (i = 0, m = 0; i < rxlen; ) {
+			reg = readl(snfi->regs +
+				    SNFI_GPRAM_DATA + m);
+			for (j = 0; i < rxlen && j < 4; i++, j++, rxbuf++) {
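+				/* in the first GPRAM word the rx data starts right after the tx bytes */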
+				if (m == 0 && i == 0)
+					j = i + txlen;
+				*rxbuf = (reg >> (j * 8)) & 0xFF;
+			}
+			m += 4;
+		}
+
+	return ret;
+}
+
+/*
+ * mtk_snfi_exec_op - process a command/data transfer to the SPI NAND
+ * device through the MTK controller
+ */
+static int mtk_snfi_exec_op(struct spi_mem *mem,
+			    const struct spi_mem_op *op)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	const struct spi_mem_op *read_cache;
+	const struct spi_mem_op *write_cache;
+	u32 tmpbufsize, txlen = 0, rxlen = 0;
+	u8 *txbuf, *rxbuf = NULL, *buf;
+	int i, ret = 0;
+
+	ret = mtk_snfi_reset(snfi);
+	if (ret) {
+		dev_warn(snfi->dev, "reset spi memory controller fail\n");
+		return ret;
+	}
+
+	/* if the BBT is initialized, the framework has already detected the NAND info */
+	if (nand->bbt.cache) {
+		read_cache = spinand->op_templates.read_cache;
+		write_cache = spinand->op_templates.write_cache;
+
+		ret = mtk_snfi_hw_runtime_config(mem);
+		if (ret)
+			return ret;
+
+		/* page read/write use the cache path below; erase uses the framework flow */
+		if (op->cmd.opcode == read_cache->cmd.opcode) {
+			ret = mtk_snfi_read(mem, op);
+			if (ret)
+				dev_warn(snfi->dev, "snfi read fail\n");
+			return ret;
+		} else if (op->cmd.opcode == write_cache->cmd.opcode) {
+			ret = mtk_snfi_write(mem, op);
+			if (ret)
+				dev_warn(snfi->dev, "snfi write fail\n");
+			return ret;
+		}
+	}
+
+	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
+		     op->dummy.nbytes + op->data.nbytes;
+
+	txbuf = kzalloc(tmpbufsize, GFP_KERNEL);
+	if (!txbuf)
+		return -ENOMEM;
+
+	txbuf[txlen++] = op->cmd.opcode;
+
+	if (op->addr.nbytes)
+		for (i = 0; i < op->addr.nbytes; i++)
+			txbuf[txlen++] = op->addr.val >>
+					 (8 * (op->addr.nbytes - i - 1));
+
+	txlen += op->dummy.nbytes;
+
+	if (op->data.dir == SPI_MEM_DATA_OUT)
+		for (i = 0; i < op->data.nbytes; i++) {
+			buf = (u8 *)op->data.buf.out;
+			txbuf[txlen++] = buf[i];
+		}
+
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		rxbuf = (u8 *)op->data.buf.in;
+		rxlen += op->data.nbytes;
+	}
+
+	ret = mtk_snfi_command_exec(snfi, txbuf, rxbuf, txlen, rxlen);
+	kfree(txbuf);
+
+	return ret;
+}
+
+static int mtk_snfi_init(struct mtk_snfi *snfi)
+{
+	int ret;
+
+	/* Reset the state machine and data FIFO */
+	ret = mtk_snfi_reset(snfi);
+	if (ret) {
+		dev_warn(snfi->dev, "failed to reset snfi controller\n");
+		return ret;
+	}
+
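+	/* page buffer: a 4KiB page plus 256 bytes of spare covers the largest format this IP supports */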
+	snfi->buffer = devm_kzalloc(snfi->dev, 4096 + 256, GFP_KERNEL);
+	if (!snfi->buffer)
+		return -ENOMEM;
+
+	/* Clear interrupt, read clear. */
+	readw(snfi->regs + NFI_INTR_STA);
+	writew(0, snfi->regs + NFI_INTR_EN);
+
+	writel(0, snfi->regs + NFI_CON);
+	writel(0, snfi->regs + NFI_CNFG);
+
+	/* Change to NFI_SPI mode. */
+	writel(SNFI_MODE_EN, snfi->regs + SNFI_CNFG);
+
+	return 0;
+}
+
+static int mtk_snfi_check_buswidth(u8 width)
+{
+	switch (width) {
+	case 1:
+	case 2:
+	case 4:
+		return 0;
+
+	default:
+		break;
+	}
+
+	return -ENOTSUPP;
+}
+
+static bool mtk_snfi_supports_op(struct spi_mem *mem,
+				 const struct spi_mem_op *op)
+{
+	int ret = 0;
+
+	/* the MTK SPI NAND controller only supports 1-bit command buswidth */
+	if (op->cmd.buswidth != 1)
+		ret = -ENOTSUPP;
+
+	if (op->addr.nbytes)
+		ret |= mtk_snfi_check_buswidth(op->addr.buswidth);
+
+	if (op->dummy.nbytes)
+		ret |= mtk_snfi_check_buswidth(op->dummy.buswidth);
+
+	if (op->data.nbytes)
+		ret |= mtk_snfi_check_buswidth(op->data.buswidth);
+
+	if (ret)
+		return false;
+
+	return true;
+}
+
+static const struct spi_controller_mem_ops mtk_snfi_ops = {
+	.supports_op = mtk_snfi_supports_op,
+	.exec_op = mtk_snfi_exec_op,
+};
+
+static const struct mtk_snfi_caps snfi_mt7622 = {
+	.spare_size = spare_size_mt7622,
+	.num_spare_size = 4,
+	.nand_sec_size = 512,
+	.nand_fdm_size = 8,
+	.nand_fdm_ecc_size = 1,
+	.ecc_parity_bits = 13,
+	.pageformat_spare_shift = 4,
+	.bad_mark_swap = 0,
+};
+
+static const struct of_device_id mtk_snfi_id_table[] = {
+	{ .compatible = "mediatek,mt7622-snfi", .data = &snfi_mt7622, },
+	{ /* sentinel */ }
+};
+
+static int mtk_snfi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct spi_controller *ctlr;
+	struct mtk_snfi *snfi;
+	struct resource *res;
+	int ret = 0, irq;
+
+	ctlr = spi_alloc_master(&pdev->dev, sizeof(*snfi));
+	if (!ctlr)
+		return -ENOMEM;
+
+	snfi = spi_controller_get_devdata(ctlr);
+	snfi->caps = of_device_get_match_data(dev);
+	snfi->dev = dev;
+
+	snfi->ecc = of_mtk_ecc_get(np);
+	if (IS_ERR_OR_NULL(snfi->ecc)) {
+		ret = snfi->ecc ? PTR_ERR(snfi->ecc) : -ENODEV;
+		goto err_put_master;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	snfi->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(snfi->regs)) {
+		ret = PTR_ERR(snfi->regs);
+		goto release_ecc;
+	}
+
+	/* find the clocks */
+	snfi->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+	if (IS_ERR(snfi->clk.nfi_clk)) {
+		dev_err(dev, "no nfi clk\n");
+		ret = PTR_ERR(snfi->clk.nfi_clk);
+		goto release_ecc;
+	}
+
+	snfi->clk.spi_clk = devm_clk_get(dev, "spi_clk");
+	if (IS_ERR(snfi->clk.spi_clk)) {
+		dev_err(dev, "no spi clk\n");
+		ret = PTR_ERR(snfi->clk.spi_clk);
+		goto release_ecc;
+	}
+
+	ret = mtk_snfi_enable_clk(dev, &snfi->clk);
+	if (ret)
+		goto release_ecc;
+
+	/* find the irq */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "no snfi irq resource\n");
+		ret = -EINVAL;
+		goto clk_disable;
+	}
+
+	ret = devm_request_irq(dev, irq, mtk_snfi_irq, 0, "mtk-snfi", snfi);
+	if (ret) {
+		dev_err(dev, "failed to request snfi irq\n");
+		goto clk_disable;
+	}
+
+	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dev, "failed to set dma mask\n");
+		goto clk_disable;
+	}
+
+	ctlr->dev.of_node = np;
+	ctlr->mem_ops = &mtk_snfi_ops;
+
+	platform_set_drvdata(pdev, snfi);
+	ret = mtk_snfi_init(snfi);
+	if (ret) {
+		dev_err(dev, "failed to init snfi\n");
+		goto clk_disable;
+	}
+
+	ret = devm_spi_register_master(dev, ctlr);
+	if (ret)
+		goto clk_disable;
+
+	return 0;
+
+clk_disable:
+	mtk_snfi_disable_clk(&snfi->clk);
+
+release_ecc:
+	mtk_ecc_release(snfi->ecc);
+
+err_put_master:
+	spi_master_put(ctlr);
+
+	dev_err(dev, "MediaTek SPI NAND interface probe failed %d\n", ret);
+	return ret;
+}
+
+static int mtk_snfi_remove(struct platform_device *pdev)
+{
+	struct mtk_snfi *snfi = platform_get_drvdata(pdev);
+
+	mtk_snfi_disable_clk(&snfi->clk);
+
+	return 0;
+}
+
+static int mtk_snfi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct mtk_snfi *snfi = platform_get_drvdata(pdev);
+
+	mtk_snfi_disable_clk(&snfi->clk);
+
+	return 0;
+}
+
+static int mtk_snfi_resume(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_snfi *snfi = dev_get_drvdata(dev);
+	int ret;
+
+	ret = mtk_snfi_enable_clk(dev, &snfi->clk);
+	if (ret)
+		return ret;
+
+	ret = mtk_snfi_init(snfi);
+	if (ret)
+		dev_err(dev, "failed to init snfi controller\n");
+
+	return ret;
+}
+
+static struct platform_driver mtk_snfi_driver = {
+	.driver = {
+		.name	= "mtk-snfi",
+		.of_match_table = mtk_snfi_id_table,
+	},
+	.probe		= mtk_snfi_probe,
+	.remove		= mtk_snfi_remove,
+	.suspend	= mtk_snfi_suspend,
+	.resume		= mtk_snfi_resume,
+};
+
+module_platform_driver(mtk_snfi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SPI Memory Interface Driver");
Index: linux-4.19.48/drivers/spi/Kconfig
===================================================================
--- linux-4.19.48.orig/drivers/spi/Kconfig
+++ linux-4.19.48/drivers/spi/Kconfig
@@ -389,6 +389,15 @@ config SPI_MT65XX
 	  say Y or M here.If you are not sure, say N.
 	  SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
 
+config SPI_MTK_SNFI
+	tristate "MediaTek SPI NAND interface"
+	select MTD_SPI_NAND
+	help
+	  This selects the SPI NAND Flash interface (SNFI), which can be
+	  found on MediaTek SoCs. Say Y or M here; if you are not sure,
+	  say N. Note that parallel NAND and SPI NAND are mutually
+	  exclusive on MediaTek SoCs.
+
 config SPI_NUC900
 	tristate "Nuvoton NUC900 series SPI"
 	depends on ARCH_W90X900
Index: linux-4.19.48/drivers/spi/Makefile
===================================================================
--- linux-4.19.48.orig/drivers/spi/Makefile
+++ linux-4.19.48/drivers/spi/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_SPI_MPC512x_PSC)		+= spi-mp
 obj-$(CONFIG_SPI_MPC52xx_PSC)		+= spi-mpc52xx-psc.o
 obj-$(CONFIG_SPI_MPC52xx)		+= spi-mpc52xx.o
 obj-$(CONFIG_SPI_MT65XX)		+= spi-mt65xx.o
+obj-$(CONFIG_SPI_MTK_SNFI)		+= spi-mtk-snfi.o
 obj-$(CONFIG_SPI_MXS)			+= spi-mxs.o
 obj-$(CONFIG_SPI_NUC900)		+= spi-nuc900.o
 obj-$(CONFIG_SPI_OC_TINY)		+= spi-oc-tiny.o