1 From de8b6cf615be20b25d0f3c817866de2c0d46a704 Mon Sep 17 00:00:00 2001
2 From: Sam Shih <sam.shih@mediatek.com>
3 Date: Mon, 20 Apr 2020 17:10:05 +0800
4 Subject: [PATCH 1/3] nand: add spi nand driver
6 Add spi nand driver support for mt7622 based on nfi controller
8 Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
10 drivers/mtd/Kconfig | 7 +
11 drivers/mtd/Makefile | 4 +
12 drivers/mtd/nand/raw/nand.c | 2 +
13 drivers/mtd/nandx/NOTICE | 52 +
14 drivers/mtd/nandx/Nandx.config | 17 +
15 drivers/mtd/nandx/Nandx.mk | 91 ++
16 drivers/mtd/nandx/README | 31 +
17 drivers/mtd/nandx/core/Nandx.mk | 38 +
18 drivers/mtd/nandx/core/core_io.c | 735 +++++++++
19 drivers/mtd/nandx/core/core_io.h | 39 +
20 drivers/mtd/nandx/core/nand/device_spi.c | 200 +++
21 drivers/mtd/nandx/core/nand/device_spi.h | 132 ++
22 drivers/mtd/nandx/core/nand/nand_spi.c | 526 +++++++
23 drivers/mtd/nandx/core/nand/nand_spi.h | 35 +
24 drivers/mtd/nandx/core/nand_base.c | 304 ++++
25 drivers/mtd/nandx/core/nand_base.h | 71 +
26 drivers/mtd/nandx/core/nand_chip.c | 272 ++++
27 drivers/mtd/nandx/core/nand_chip.h | 103 ++
28 drivers/mtd/nandx/core/nand_device.c | 285 ++++
29 drivers/mtd/nandx/core/nand_device.h | 608 ++++++++
30 drivers/mtd/nandx/core/nfi.h | 51 +
31 drivers/mtd/nandx/core/nfi/nfi_base.c | 1357 +++++++++++++++++
32 drivers/mtd/nandx/core/nfi/nfi_base.h | 95 ++
33 drivers/mtd/nandx/core/nfi/nfi_regs.h | 114 ++
34 drivers/mtd/nandx/core/nfi/nfi_spi.c | 689 +++++++++
35 drivers/mtd/nandx/core/nfi/nfi_spi.h | 44 +
36 drivers/mtd/nandx/core/nfi/nfi_spi_regs.h | 64 +
37 drivers/mtd/nandx/core/nfi/nfiecc.c | 510 +++++++
38 drivers/mtd/nandx/core/nfi/nfiecc.h | 90 ++
39 drivers/mtd/nandx/core/nfi/nfiecc_regs.h | 51 +
40 drivers/mtd/nandx/driver/Nandx.mk | 18 +
41 drivers/mtd/nandx/driver/bbt/bbt.c | 408 +++++
42 drivers/mtd/nandx/driver/uboot/driver.c | 574 +++++++
43 drivers/mtd/nandx/include/Nandx.mk | 16 +
44 drivers/mtd/nandx/include/internal/bbt.h | 62 +
45 .../mtd/nandx/include/internal/nandx_core.h | 250 +++
46 .../mtd/nandx/include/internal/nandx_errno.h | 40 +
47 .../mtd/nandx/include/internal/nandx_util.h | 221 +++
48 drivers/mtd/nandx/include/uboot/nandx_os.h | 78 +
49 include/configs/mt7622.h | 25 +
50 40 files changed, 8309 insertions(+)
51 create mode 100644 drivers/mtd/nandx/NOTICE
52 create mode 100644 drivers/mtd/nandx/Nandx.config
53 create mode 100644 drivers/mtd/nandx/Nandx.mk
54 create mode 100644 drivers/mtd/nandx/README
55 create mode 100644 drivers/mtd/nandx/core/Nandx.mk
56 create mode 100644 drivers/mtd/nandx/core/core_io.c
57 create mode 100644 drivers/mtd/nandx/core/core_io.h
58 create mode 100644 drivers/mtd/nandx/core/nand/device_spi.c
59 create mode 100644 drivers/mtd/nandx/core/nand/device_spi.h
60 create mode 100644 drivers/mtd/nandx/core/nand/nand_spi.c
61 create mode 100644 drivers/mtd/nandx/core/nand/nand_spi.h
62 create mode 100644 drivers/mtd/nandx/core/nand_base.c
63 create mode 100644 drivers/mtd/nandx/core/nand_base.h
64 create mode 100644 drivers/mtd/nandx/core/nand_chip.c
65 create mode 100644 drivers/mtd/nandx/core/nand_chip.h
66 create mode 100644 drivers/mtd/nandx/core/nand_device.c
67 create mode 100644 drivers/mtd/nandx/core/nand_device.h
68 create mode 100644 drivers/mtd/nandx/core/nfi.h
69 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_base.c
70 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_base.h
71 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_regs.h
72 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi.c
73 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi.h
74 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
75 create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc.c
76 create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc.h
77 create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc_regs.h
78 create mode 100644 drivers/mtd/nandx/driver/Nandx.mk
79 create mode 100644 drivers/mtd/nandx/driver/bbt/bbt.c
80 create mode 100644 drivers/mtd/nandx/driver/uboot/driver.c
81 create mode 100644 drivers/mtd/nandx/include/Nandx.mk
82 create mode 100644 drivers/mtd/nandx/include/internal/bbt.h
83 create mode 100644 drivers/mtd/nandx/include/internal/nandx_core.h
84 create mode 100644 drivers/mtd/nandx/include/internal/nandx_errno.h
85 create mode 100644 drivers/mtd/nandx/include/internal/nandx_util.h
86 create mode 100644 drivers/mtd/nandx/include/uboot/nandx_os.h
88 --- a/drivers/mtd/Kconfig
89 +++ b/drivers/mtd/Kconfig
90 @@ -108,6 +108,13 @@ config HBMC_AM654
91 This is the driver for HyperBus controller on TI's AM65x and
95 + tristate "Mediatek SPI Nand"
98 + This option will support SPI Nand device via Mediatek
101 source "drivers/mtd/nand/Kconfig"
103 source "drivers/mtd/spi/Kconfig"
104 --- a/drivers/mtd/Makefile
105 +++ b/drivers/mtd/Makefile
106 @@ -41,3 +41,7 @@ obj-$(CONFIG_$(SPL_TPL_)SPI_FLASH_SUPPOR
107 obj-$(CONFIG_SPL_UBI) += ubispl/
111 +ifeq ($(CONFIG_MTK_SPI_NAND), y)
112 +include $(srctree)/drivers/mtd/nandx/Nandx.mk
114 --- a/drivers/mtd/nand/raw/nand.c
115 +++ b/drivers/mtd/nand/raw/nand.c
116 @@ -91,8 +91,10 @@ static void nand_init_chip(int i)
117 if (board_nand_init(nand))
120 +#ifndef CONFIG_MTK_SPI_NAND
121 if (nand_scan(mtd, maxchips))
125 nand_register(i, mtd);
128 +++ b/drivers/mtd/nandx/NOTICE
132 + * Nandx - Mediatek Common Nand Driver
133 + * Copyright (C) 2017 MediaTek Inc.
135 + * Nandx is dual licensed: you can use it either under the terms of
136 + * the GPL, or the BSD license, at your option.
138 + * a) This program is free software; you can redistribute it and/or modify
139 + * it under the terms of the GNU General Public License version 2 as
140 + * published by the Free Software Foundation.
142 + * This library is distributed in the hope that it will be useful,
143 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
144 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
145 + * GNU General Public License for more details.
147 + * This program is distributed in the hope that it will be useful,
148 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
149 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
150 + * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
154 + * b) Redistribution and use in source and binary forms, with or
155 + * without modification, are permitted provided that the following
156 + * conditions are met:
158 + * 1. Redistributions of source code must retain the above
159 + * copyright notice, this list of conditions and the following
161 + * 2. Redistributions in binary form must reproduce the above
162 + * copyright notice, this list of conditions and the following
163 + * disclaimer in the documentation and/or other materials
164 + * provided with the distribution.
166 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
167 + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
168 + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
169 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
170 + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
171 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
172 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
173 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
174 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
175 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
176 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
177 + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
178 + * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
181 +####################################################################################################
182 \ No newline at end of file
184 +++ b/drivers/mtd/nandx/Nandx.config
186 +NANDX_SIMULATOR_SUPPORT := n
187 +NANDX_CTP_SUPPORT := n
188 +NANDX_DA_SUPPORT := n
189 +NANDX_PRELOADER_SUPPORT := n
190 +NANDX_LK_SUPPORT := n
191 +NANDX_KERNEL_SUPPORT := n
192 +NANDX_BROM_SUPPORT := n
193 +NANDX_UBOOT_SUPPORT := y
194 +NANDX_BBT_SUPPORT := y
204 +++ b/drivers/mtd/nandx/Nandx.mk
207 +# Copyright (C) 2017 MediaTek Inc.
208 +# Licensed under either
209 +# BSD Licence, (see NOTICE for more details)
210 +# GNU General Public License, version 2.0, (see NOTICE for more details)
213 +nandx_dir := $(shell dirname $(lastword $(MAKEFILE_LIST)))
214 +include $(nandx_dir)/Nandx.config
216 +ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
219 +nandx-obj := sim-obj
221 +nandx-postfix := %.o
222 +sim-inc += -I$(nandx-prefix)/include/internal
223 +sim-inc += -I$(nandx-prefix)/include/simulator
226 +ifeq ($(NANDX_CTP_SUPPORT), y)
227 +nandx-obj := C_SRC_FILES
228 +nandx-prefix := $(nandx_dir)
229 +nandx-postfix := %.c
230 +INC_DIRS += $(nandx_dir)/include/internal
231 +INC_DIRS += $(nandx_dir)/include/ctp
234 +ifeq ($(NANDX_DA_SUPPORT), y)
236 +nandx-prefix := $(nandx_dir)
237 +nandx-postfix := %.o
238 +INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/internal
239 +INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/da
242 +ifeq ($(NANDX_PRELOADER_SUPPORT), y)
243 +nandx-obj := MOD_SRC
244 +nandx-prefix := $(nandx_dir)
245 +nandx-postfix := %.c
246 +C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/internal
247 +C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/preloader
250 +ifeq ($(NANDX_LK_SUPPORT), y)
251 +nandx-obj := MODULE_SRCS
252 +nandx-prefix := $(nandx_dir)
253 +nandx-postfix := %.c
254 +GLOBAL_INCLUDES += $(nandx_dir)/include/internal
255 +GLOBAL_INCLUDES += $(nandx_dir)/include/lk
258 +ifeq ($(NANDX_KERNEL_SUPPORT), y)
260 +nandx-prefix := nandx
261 +nandx-postfix := %.o
262 +ccflags-y += -I$(nandx_dir)/include/internal
263 +ccflags-y += -I$(nandx_dir)/include/kernel
266 +ifeq ($(NANDX_UBOOT_SUPPORT), y)
268 +nandx-prefix := nandx
269 +nandx-postfix := %.o
270 +ccflags-y += -I$(nandx_dir)/include/internal
271 +ccflags-y += -I$(nandx_dir)/include/uboot
275 +include $(nandx_dir)/core/Nandx.mk
276 +nandx-target := $(nandx-prefix)/core/$(nandx-postfix)
277 +$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
281 +include $(nandx_dir)/driver/Nandx.mk
282 +nandx-target := $(nandx-prefix)/driver/$(nandx-postfix)
283 +$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
285 +ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
287 +CFLAGS += $(sim-inc)
291 + $(cc) $(sim-obj) -o nandx
295 + rm -rf $(sim-obj) nandx
298 +++ b/drivers/mtd/nandx/README
302 + ===============================
304 + NAND2.0 is a common nand driver which is designed for accessing
305 +different types of NANDs (SLC, SPI-NAND, MLC, TLC) on various OSes.
306 +This driver can work on most Mediatek SoCs.
308 + Although there is already a common nand driver, it doesn't cover
309 +SPI-NAND and doesn't match our IC-Verification requirements. We need
310 +a driver that can be extended or cut down easily.
312 + This driver is based on NANDX & SLC. We try to refactor structures,
313 +and make them inheritable. We also refactor some operations' flow
314 +principally for adding SPI-NAND support.
316 + This driver's architecture is like:
318 + Driver @LK/Uboot/DA... |IC verify/other purposes
319 + ----------------------------------------------------------------
321 + -------------------------------------- | extend_core
322 + nandx_core/core_io |
323 + ----------------------------------------------------------------
324 + nand_chip/nand_base |
325 + -------------------------------------- | extend_nfi
326 + nand_device | nfi/nfi_base |
328 + Any block of the above graph can be extended at will. If you
329 +want to add a new feature to this code, please make sure that your
330 +code follows the framework; we would appreciate it.
332 +++ b/drivers/mtd/nandx/core/Nandx.mk
335 +# Copyright (C) 2017 MediaTek Inc.
336 +# Licensed under either
337 +# BSD Licence, (see NOTICE for more details)
338 +# GNU General Public License, version 2.0, (see NOTICE for more details)
341 +nandx-y += nand_device.c
342 +nandx-y += nand_base.c
343 +nandx-y += nand_chip.c
344 +nandx-y += core_io.c
346 +nandx-header-y += nand_device.h
347 +nandx-header-y += nand_base.h
348 +nandx-header-y += nand_chip.h
349 +nandx-header-y += core_io.h
350 +nandx-header-y += nfi.h
352 +nandx-$(NANDX_NAND_SPI) += nand/device_spi.c
353 +nandx-$(NANDX_NAND_SPI) += nand/nand_spi.c
354 +nandx-$(NANDX_NAND_SLC) += nand/device_slc.c
355 +nandx-$(NANDX_NAND_SLC) += nand/nand_slc.c
357 +nandx-header-$(NANDX_NAND_SPI) += nand/device_spi.h
358 +nandx-header-$(NANDX_NAND_SPI) += nand/nand_spi.h
359 +nandx-header-$(NANDX_NAND_SLC) += nand/device_slc.h
360 +nandx-header-$(NANDX_NAND_SLC) += nand/nand_slc.h
362 +nandx-$(NANDX_NFI_BASE) += nfi/nfi_base.c
363 +nandx-$(NANDX_NFI_ECC) += nfi/nfiecc.c
364 +nandx-$(NANDX_NFI_SPI) += nfi/nfi_spi.c
366 +nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_base.h
367 +nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_regs.h
368 +nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc.h
369 +nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc_regs.h
370 +nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi.h
371 +nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi_regs.h
373 +++ b/drivers/mtd/nandx/core/core_io.c
376 + * Copyright (C) 2017 MediaTek Inc.
377 + * Licensed under either
378 + * BSD Licence, (see NOTICE for more details)
379 + * GNU General Public License, version 2.0, (see NOTICE for more details)
382 +/*NOTE: switch cache/multi*/
383 +#include "nandx_util.h"
384 +#include "nandx_core.h"
385 +#include "nand_chip.h"
386 +#include "core_io.h"
388 +static struct nandx_desc *g_nandx;
390 +static inline bool is_sector_align(u64 val)
392 + return reminder(val, g_nandx->chip->sector_size) ? false : true;
395 +static inline bool is_page_align(u64 val)
397 + return reminder(val, g_nandx->chip->page_size) ? false : true;
400 +static inline bool is_block_align(u64 val)
402 + return reminder(val, g_nandx->chip->block_size) ? false : true;
405 +static inline u32 page_sectors(void)
407 + return div_down(g_nandx->chip->page_size, g_nandx->chip->sector_size);
410 +static inline u32 sector_oob(void)
412 + return div_down(g_nandx->chip->oob_size, page_sectors());
415 +static inline u32 sector_padded_size(void)
417 + return g_nandx->chip->sector_size + g_nandx->chip->sector_spare_size;
420 +static inline u32 page_padded_size(void)
422 + return page_sectors() * sector_padded_size();
425 +static inline u32 offset_to_padded_col(u64 offset)
427 + struct nandx_desc *nandx = g_nandx;
430 + col = reminder(offset, nandx->chip->page_size);
431 + sectors = div_down(col, nandx->chip->sector_size);
433 + return col + sectors * nandx->chip->sector_spare_size;
436 +static inline u32 offset_to_row(u64 offset)
438 + return div_down(offset, g_nandx->chip->page_size);
441 +static inline u32 offset_to_col(u64 offset)
443 + return reminder(offset, g_nandx->chip->page_size);
446 +static inline u32 oob_upper_size(void)
448 + return g_nandx->ecc_en ? g_nandx->chip->oob_size :
449 + g_nandx->chip->sector_spare_size * page_sectors();
452 +static inline bool is_upper_oob_align(u64 val)
454 + return reminder(val, oob_upper_size()) ? false : true;
457 +#define prepare_op(_op, _row, _col, _len, _data, _oob) \
459 + (_op).row = (_row); \
460 + (_op).col = (_col); \
461 + (_op).len = (_len); \
462 + (_op).data = (_data); \
463 + (_op).oob = (_oob); \
466 +static int operation_multi(enum nandx_op_mode mode, u8 *data, u8 *oob,
467 + u64 offset, size_t len)
469 + struct nandx_desc *nandx = g_nandx;
470 + u32 row = offset_to_row(offset);
471 + u32 col = offset_to_padded_col(offset);
473 + if (nandx->mode == NANDX_IDLE) {
474 + nandx->mode = mode;
475 + nandx->ops_current = 0;
476 + } else if (nandx->mode != mode) {
477 + pr_info("forbid mixed operations.\n");
478 + return -EOPNOTSUPP;
481 + prepare_op(nandx->ops[nandx->ops_current], row, col, len, data, oob);
482 + nandx->ops_current++;
484 + if (nandx->ops_current == nandx->ops_multi_len)
485 + return nandx_sync();
487 + return nandx->ops_multi_len - nandx->ops_current;
490 +static int operation_sequent(enum nandx_op_mode mode, u8 *data, u8 *oob,
491 + u64 offset, size_t len)
493 + struct nandx_desc *nandx = g_nandx;
494 + struct nand_chip *chip = nandx->chip;
495 + u32 row = offset_to_row(offset);
496 + func_chip_ops chip_ops;
497 + u8 *ref_data = data, *ref_oob = oob;
498 + int align, ops, row_step;
501 + align = data ? chip->page_size : oob_upper_size();
502 + ops = data ? div_down(len, align) : div_down(len, oob_upper_size());
507 + chip_ops = chip->erase_block;
508 + align = chip->block_size;
509 + ops = div_down(len, align);
510 + row_step = chip->block_pages;
514 + chip_ops = chip->read_page;
518 + chip_ops = chip->write_page;
526 + ref_data = nandx->head_buf;
527 + memset(ref_data, 0xff, chip->page_size);
531 + ref_oob = nandx->head_buf + chip->page_size;
532 + memset(ref_oob, 0xff, oob_upper_size());
535 + for (i = 0; i < ops; i++) {
536 + prepare_op(nandx->ops[nandx->ops_current],
537 + row + i * row_step, 0, align, ref_data, ref_oob);
538 + nandx->ops_current++;
539 + /* if data or oob is null, nandx->head_buf or
540 + * nandx->head_buf + chip->page_size should not been used
541 + * so, here it is safe to use the buf.
543 + ref_data = data ? ref_data + chip->page_size : nandx->head_buf;
544 + ref_oob = oob ? ref_oob + oob_upper_size() :
545 + nandx->head_buf + chip->page_size;
548 + if (nandx->mode == NANDX_WRITE) {
549 + rem = reminder(nandx->ops_current, nandx->min_write_pages);
551 + return nandx->min_write_pages - rem;
554 + nandx->ops_current = 0;
555 + return chip_ops(chip, nandx->ops, ops);
558 +static int read_pages(u8 *data, u8 *oob, u64 offset, size_t len)
560 + struct nandx_desc *nandx = g_nandx;
561 + struct nand_chip *chip = nandx->chip;
562 + struct nandx_split64 split = {0};
563 + u8 *ref_data = data, *ref_oob;
565 + int ret = 0, i, ops;
566 + u32 head_offset = 0;
570 + return operation_sequent(NANDX_READ, NULL, oob, offset, len);
572 + ref_oob = oob ? oob : nandx->head_buf + chip->page_size;
574 + nandx_split(&split, offset, len, val, chip->page_size);
576 + if (split.head_len) {
577 + row = offset_to_row(split.head);
578 + col = offset_to_col(split.head);
579 + prepare_op(nandx->ops[nandx->ops_current], row, 0,
581 + nandx->head_buf, ref_oob);
582 + nandx->ops_current++;
586 + ref_data += split.head_len;
587 + ref_oob = oob ? ref_oob + oob_upper_size() :
588 + nandx->head_buf + chip->page_size;
591 + if (split.body_len) {
592 + ops = div_down(split.body_len, chip->page_size);
593 + row = offset_to_row(split.body);
594 + for (i = 0; i < ops; i++) {
595 + prepare_op(nandx->ops[nandx->ops_current],
596 + row + i, 0, chip->page_size,
597 + ref_data, ref_oob);
598 + nandx->ops_current++;
599 + ref_data += chip->page_size;
600 + ref_oob = oob ? ref_oob + oob_upper_size() :
601 + nandx->head_buf + chip->page_size;
605 + if (split.tail_len) {
606 + row = offset_to_row(split.tail);
607 + prepare_op(nandx->ops[nandx->ops_current], row, 0,
608 + chip->page_size, nandx->tail_buf, ref_oob);
609 + nandx->ops_current++;
612 + ret = chip->read_page(chip, nandx->ops, nandx->ops_current);
614 + if (split.head_len)
615 + memcpy(data, nandx->head_buf + head_offset, split.head_len);
616 + if (split.tail_len)
617 + memcpy(ref_data, nandx->tail_buf, split.tail_len);
619 + nandx->ops_current = 0;
623 +int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len)
625 + struct nandx_desc *nandx = g_nandx;
627 + if (!len || len > nandx->info.total_size)
629 + if (div_up(len, nandx->chip->page_size) > nandx->ops_len)
634 + * as design, oob not support partial read
635 + * and, the length of oob buf should be oob size aligned
637 + if (!data && !is_upper_oob_align(len))
640 + if (g_nandx->multi_en) {
641 + /* as design, there only 2 buf for partial read,
642 + * if partial read allowed for multi read,
643 + * there are not enough buf
645 + if (!is_sector_align(offset))
647 + if (data && !is_sector_align(len))
649 + return operation_multi(NANDX_READ, data, oob, offset, len);
652 + nandx->ops_current = 0;
653 + nandx->mode = NANDX_IDLE;
654 + return read_pages(data, oob, offset, len);
657 +static int write_pages(u8 *data, u8 *oob, u64 offset, size_t len)
659 + struct nandx_desc *nandx = g_nandx;
660 + struct nand_chip *chip = nandx->chip;
661 + struct nandx_split64 split = {0};
662 + int ret, rem, i, ops;
667 + nandx->mode = NANDX_WRITE;
670 + return operation_sequent(NANDX_WRITE, NULL, oob, offset, len);
673 + ref_oob = nandx->head_buf + chip->page_size;
674 + memset(ref_oob, 0xff, oob_upper_size());
677 + nandx_split(&split, offset, len, val, chip->page_size);
679 + /*NOTE: slc can support sector write, here copy too many data.*/
680 + if (split.head_len) {
681 + row = offset_to_row(split.head);
682 + col = offset_to_col(split.head);
683 + memset(nandx->head_buf, 0xff, page_padded_size());
684 + memcpy(nandx->head_buf + col, data, split.head_len);
685 + prepare_op(nandx->ops[nandx->ops_current], row, 0,
686 + chip->page_size, nandx->head_buf, ref_oob);
687 + nandx->ops_current++;
689 + data += split.head_len;
690 + ref_oob = oob ? ref_oob + oob_upper_size() :
691 + nandx->head_buf + chip->page_size;
694 + if (split.body_len) {
695 + row = offset_to_row(split.body);
696 + ops = div_down(split.body_len, chip->page_size);
697 + for (i = 0; i < ops; i++) {
698 + prepare_op(nandx->ops[nandx->ops_current],
699 + row + i, 0, chip->page_size, data, ref_oob);
700 + nandx->ops_current++;
701 + data += chip->page_size;
702 + ref_oob = oob ? ref_oob + oob_upper_size() :
703 + nandx->head_buf + chip->page_size;
707 + if (split.tail_len) {
708 + row = offset_to_row(split.tail);
709 + memset(nandx->tail_buf, 0xff, page_padded_size());
710 + memcpy(nandx->tail_buf, data, split.tail_len);
711 + prepare_op(nandx->ops[nandx->ops_current], row, 0,
712 + chip->page_size, nandx->tail_buf, ref_oob);
713 + nandx->ops_current++;
716 + rem = reminder(nandx->ops_current, nandx->min_write_pages);
718 + return nandx->min_write_pages - rem;
720 + ret = chip->write_page(chip, nandx->ops, nandx->ops_current);
722 + nandx->ops_current = 0;
723 + nandx->mode = NANDX_IDLE;
727 +int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len)
729 + struct nandx_desc *nandx = g_nandx;
731 + if (!len || len > nandx->info.total_size)
733 + if (div_up(len, nandx->chip->page_size) > nandx->ops_len)
737 + if (!data && !is_upper_oob_align(len))
740 + if (nandx->multi_en) {
741 + if (!is_page_align(offset))
743 + if (data && !is_page_align(len))
746 + return operation_multi(NANDX_WRITE, data, oob, offset, len);
749 + return write_pages(data, oob, offset, len);
752 +int nandx_erase(u64 offset, size_t len)
754 + struct nandx_desc *nandx = g_nandx;
756 + if (!len || len > nandx->info.total_size)
758 + if (div_down(len, nandx->chip->block_size) > nandx->ops_len)
760 + if (!is_block_align(offset) || !is_block_align(len))
763 + if (g_nandx->multi_en)
764 + return operation_multi(NANDX_ERASE, NULL, NULL, offset, len);
766 + nandx->ops_current = 0;
767 + nandx->mode = NANDX_IDLE;
768 + return operation_sequent(NANDX_ERASE, NULL, NULL, offset, len);
771 +int nandx_sync(void)
773 + struct nandx_desc *nandx = g_nandx;
774 + struct nand_chip *chip = nandx->chip;
775 + func_chip_ops chip_ops;
778 + if (!nandx->ops_current)
781 + rem = reminder(nandx->ops_current, nandx->ops_multi_len);
782 + if (nandx->multi_en && rem) {
787 + switch (nandx->mode) {
791 + chip_ops = chip->erase_block;
794 + chip_ops = chip->read_page;
797 + chip_ops = chip->write_page;
803 + rem = reminder(nandx->ops_current, nandx->min_write_pages);
804 + if (!nandx->multi_en && nandx->mode == NANDX_WRITE && rem) {
805 + /* in one process of program, only allow 2 pages to do partial
806 + * write, here we supposed 1st buf would be used, and 2nd
807 + * buf should be not used.
809 + memset(nandx->tail_buf, 0xff,
810 + chip->page_size + oob_upper_size());
811 + for (i = 0; i < rem; i++) {
812 + prepare_op(nandx->ops[nandx->ops_current],
813 + nandx->ops[nandx->ops_current - 1].row + 1,
814 + 0, chip->page_size, nandx->tail_buf,
815 + nandx->tail_buf + chip->page_size);
816 + nandx->ops_current++;
820 + ret = chip_ops(nandx->chip, nandx->ops, nandx->ops_current);
823 + nandx->mode = NANDX_IDLE;
824 + nandx->ops_current = 0;
829 +int nandx_ioctl(int cmd, void *arg)
831 + struct nandx_desc *nandx = g_nandx;
832 + struct nand_chip *chip = nandx->chip;
836 + case CORE_CTRL_NAND_INFO:
837 + *(struct nandx_info *)arg = nandx->info;
840 + case CHIP_CTRL_OPS_MULTI:
841 + ret = chip->chip_ctrl(chip, cmd, arg);
843 + nandx->multi_en = *(bool *)arg;
847 + ret = chip->chip_ctrl(chip, cmd, arg);
849 + nandx->ecc_en = *(bool *)arg;
853 + ret = chip->chip_ctrl(chip, cmd, arg);
860 +bool nandx_is_bad_block(u64 offset)
862 + struct nandx_desc *nandx = g_nandx;
864 + prepare_op(nandx->ops[0], offset_to_row(offset), 0,
865 + nandx->chip->page_size, nandx->head_buf,
866 + nandx->head_buf + nandx->chip->page_size);
868 + return nandx->chip->is_bad_block(nandx->chip, nandx->ops, 1);
871 +int nandx_suspend(void)
873 + return g_nandx->chip->suspend(g_nandx->chip);
876 +int nandx_resume(void)
878 + return g_nandx->chip->resume(g_nandx->chip);
881 +int nandx_init(struct nfi_resource *res)
883 + struct nand_chip *chip;
884 + struct nandx_desc *nandx;
890 + chip = nand_chip_init(res);
892 + pr_info("nand chip init fail.\n");
896 + nandx = (struct nandx_desc *)mem_alloc(1, sizeof(struct nandx_desc));
902 + nandx->chip = chip;
903 + nandx->min_write_pages = chip->min_program_pages;
904 + nandx->ops_multi_len = nandx->min_write_pages * chip->plane_num;
905 + nandx->ops_len = chip->block_pages * chip->plane_num;
906 + nandx->ops = mem_alloc(1, sizeof(struct nand_ops) * nandx->ops_len);
912 +#if NANDX_BULK_IO_USE_DRAM
913 + nandx->head_buf = NANDX_CORE_BUF_ADDR;
915 + nandx->head_buf = mem_alloc(2, page_padded_size());
917 + if (!nandx->head_buf) {
921 + nandx->tail_buf = nandx->head_buf + page_padded_size();
922 + memset(nandx->head_buf, 0xff, 2 * page_padded_size());
923 + nandx->multi_en = false;
924 + nandx->ecc_en = false;
925 + nandx->ops_current = 0;
926 + nandx->mode = NANDX_IDLE;
928 + nandx->info.max_io_count = nandx->ops_len;
929 + nandx->info.min_write_pages = nandx->min_write_pages;
930 + nandx->info.plane_num = chip->plane_num;
931 + nandx->info.oob_size = chip->oob_size;
932 + nandx->info.page_parity_size = chip->sector_spare_size * page_sectors();
933 + nandx->info.page_size = chip->page_size;
934 + nandx->info.block_size = chip->block_size;
935 + nandx->info.total_size = chip->block_size * chip->block_num;
936 + nandx->info.fdm_ecc_size = chip->fdm_ecc_size;
937 + nandx->info.fdm_reg_size = chip->fdm_reg_size;
938 + nandx->info.ecc_strength = chip->ecc_strength;
939 + nandx->info.sector_size = chip->sector_size;
944 +#if !NANDX_BULK_IO_USE_DRAM
945 + mem_free(nandx->head_buf);
953 +void nandx_exit(void)
955 + nand_chip_exit(g_nandx->chip);
956 +#if !NANDX_BULK_IO_USE_DRAM
957 + mem_free(g_nandx->head_buf);
959 + mem_free(g_nandx->ops);
963 +#ifdef NANDX_UNIT_TEST
964 +static void dump_buf(u8 *buf, u32 len)
968 + pr_info("dump buf@0x%X start", (u32)buf);
969 + for (i = 0; i < len; i++) {
970 + if (!reminder(i, 16))
972 + pr_info("%x ", buf[i]);
974 + pr_info("\ndump buf done.\n");
977 +int nandx_unit_test(u64 offset, size_t len)
979 + u8 *src_buf, *dst_buf;
983 + if (!len || len > g_nandx->chip->block_size)
986 +#if NANDX_BULK_IO_USE_DRAM
987 + src_buf = NANDX_UT_SRC_ADDR;
988 + dst_buf = NANDX_UT_DST_ADDR;
991 + src_buf = mem_alloc(1, g_nandx->chip->page_size);
994 + dst_buf = mem_alloc(1, g_nandx->chip->page_size);
1001 + pr_info("%s: src_buf address 0x%x, dst_buf address 0x%x\n",
1002 + __func__, (int)((unsigned long)src_buf),
1003 + (int)((unsigned long)dst_buf));
1005 + memset(dst_buf, 0, g_nandx->chip->page_size);
1006 + pr_info("read page 0 data...!\n");
1007 + ret = nandx_read(dst_buf, NULL, 0, g_nandx->chip->page_size);
1009 + pr_info("read fail with ret %d\n", ret);
1011 + pr_info("read page success!\n");
1014 + for (i = 0; i < g_nandx->chip->page_size; i++) {
1015 + src_buf[i] = 0x5a;
1018 + ret = nandx_erase(offset, g_nandx->chip->block_size);
1020 + pr_info("erase fail with ret %d\n", ret);
1024 + for (j = 0; j < g_nandx->chip->block_pages; j++) {
1025 + memset(dst_buf, 0, g_nandx->chip->page_size);
1026 + pr_info("check data after erase...!\n");
1027 + ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
1029 + pr_info("read fail with ret %d\n", ret);
1033 + for (i = 0; i < g_nandx->chip->page_size; i++) {
1034 + if (dst_buf[i] != 0xff) {
1035 + pr_info("read after erase, check fail @%d\n", i);
1036 + pr_info("all data should be 0xff\n");
1037 + ret = -ENANDERASE;
1038 + dump_buf(dst_buf, 128);
1044 + pr_info("write data...!\n");
1045 + ret = nandx_write(src_buf, NULL, offset, g_nandx->chip->page_size);
1047 + pr_info("write fail with ret %d\n", ret);
1051 + memset(dst_buf, 0, g_nandx->chip->page_size);
1052 + pr_info("read data...!\n");
1053 + ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
1055 + pr_info("read fail with ret %d\n", ret);
1059 + for (i = 0; i < g_nandx->chip->page_size; i++) {
1060 + if (dst_buf[i] != src_buf[i]) {
1061 + pr_info("read after write, check fail @%d\n", i);
1062 + pr_info("dst_buf should be same as src_buf\n");
1064 + dump_buf(src_buf + i, 128);
1065 + dump_buf(dst_buf + i, 128);
1070 + pr_err("%s %d %s@%d\n", __func__, __LINE__, ret?"Failed":"OK", j);
1074 + offset += g_nandx->chip->page_size;
1077 + ret = nandx_erase(offset, g_nandx->chip->block_size);
1079 + pr_info("erase fail with ret %d\n", ret);
1083 + memset(dst_buf, 0, g_nandx->chip->page_size);
1084 + ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
1086 + pr_info("read fail with ret %d\n", ret);
1090 + for (i = 0; i < g_nandx->chip->page_size; i++) {
1091 + if (dst_buf[i] != 0xff) {
1092 + pr_info("read after erase, check fail\n");
1093 + pr_info("all data should be 0xff\n");
1094 + ret = -ENANDERASE;
1095 + dump_buf(dst_buf, 128);
1103 +#if !NANDX_BULK_IO_USE_DRAM
1104 + mem_free(src_buf);
1105 + mem_free(dst_buf);
1111 +++ b/drivers/mtd/nandx/core/core_io.h
1114 + * Copyright (C) 2017 MediaTek Inc.
1115 + * Licensed under either
1116 + * BSD Licence, (see NOTICE for more details)
1117 + * GNU General Public License, version 2.0, (see NOTICE for more details)
1120 +#ifndef __CORE_IO_H__
1121 +#define __CORE_IO_H__
1123 +typedef int (*func_chip_ops)(struct nand_chip *, struct nand_ops *,
1126 +enum nandx_op_mode {
1133 +struct nandx_desc {
1134 + struct nand_chip *chip;
1135 + struct nandx_info info;
1136 + enum nandx_op_mode mode;
1141 + struct nand_ops *ops;
1143 + int ops_multi_len;
1145 + int min_write_pages;
1151 +#endif /* __CORE_IO_H__ */
1153 +++ b/drivers/mtd/nandx/core/nand/device_spi.c
1156 + * Copyright (C) 2017 MediaTek Inc.
1157 + * Licensed under either
1158 + * BSD Licence, (see NOTICE for more details)
1159 + * GNU General Public License, version 2.0, (see NOTICE for more details)
1162 +#include "nandx_util.h"
1163 +#include "../nand_device.h"
1164 +#include "device_spi.h"
1166 +/* spi nand basic commands */
1167 +static struct nand_cmds spi_cmds = {
1170 + .read_status = 0x0f,
1171 + .read_param_page = 0x03,
1172 + .set_feature = 0x1f,
1173 + .get_feature = 0x0f,
1176 + .random_out_1st = 0x03,
1177 + .random_out_2nd = -1,
1178 + .program_1st = 0x02,
1179 + .program_2nd = 0x10,
1180 + .erase_1st = 0xd8,
1182 + .read_cache = 0x30,
1183 + .read_cache_last = 0x3f,
1184 + .program_cache = 0x02
1187 +/* spi nand extend commands */
1188 +static struct spi_extend_cmds spi_extend_cmds = {
1189 + .die_select = 0xc2,
1190 + .write_enable = 0x06
1193 +/* means the start bit of addressing type */
1194 +static struct nand_addressing spi_addressing = {
1195 + .row_bit_start = 0,
1196 + .block_bit_start = 0,
1197 + .plane_bit_start = 12,
1198 + .lun_bit_start = 0,
1201 +/* spi nand endurance */
1202 +static struct nand_endurance spi_endurance = {
1203 + .pe_cycle = 100000,
1208 +/* array_busy, write_protect, erase_fail, program_fail */
1209 +static struct nand_status spi_status[] = {
1210 + {.array_busy = BIT(0),
1211 + .write_protect = BIT(1),
1212 + .erase_fail = BIT(2),
1213 + .program_fail = BIT(3)}
1216 +/* measure time by the us */
1217 +static struct nand_array_timing spi_array_timing = {
1229 +/* spi nand device table */
1230 +static struct device_spi spi_nand[] = {
1232 + NAND_DEVICE("W25N01GV",
1233 + NAND_PACK_ID(0xef, 0xaa, 0x21, 0, 0, 0, 0, 0),
1235 + 1, 1, 1, 1024, KB(128), KB(2), 64, 1,
1236 + &spi_cmds, &spi_addressing, &spi_status[0],
1237 + &spi_endurance, &spi_array_timing),
1239 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1240 + NAND_SPI_CONFIG(0xb0, 4, 6, 0),
1241 + NAND_SPI_STATUS(0xc0, 4, 5),
1242 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1244 + &spi_extend_cmds, 0xff, 0xff
1247 + NAND_DEVICE("MX35LF1G",
1248 + NAND_PACK_ID(0xc2, 0x12, 0x21, 0, 0, 0, 0, 0),
1250 + 1, 1, 1, 1024, KB(128), KB(2), 64, 1,
1251 + &spi_cmds, &spi_addressing, &spi_status[0],
1252 + &spi_endurance, &spi_array_timing),
1254 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1255 + NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1256 + NAND_SPI_STATUS(0xc0, 4, 5),
1257 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1259 + &spi_extend_cmds, 0xff, 0xff
1262 + NAND_DEVICE("MT29F4G01ABAFDWB",
1263 + NAND_PACK_ID(0x2c, 0x34, 0, 0, 0, 0, 0, 0),
1265 + 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
1266 + &spi_cmds, &spi_addressing, &spi_status[0],
1267 + &spi_endurance, &spi_array_timing),
1269 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1270 + NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1271 + NAND_SPI_STATUS(0xc0, 4, 5),
1272 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1274 + &spi_extend_cmds, 0xff, 0xff
1277 + NAND_DEVICE("GD5F4GQ4UB",
1278 + NAND_PACK_ID(0xc8, 0xd4, 0, 0, 0, 0, 0, 0),
1280 + 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
1281 + &spi_cmds, &spi_addressing, &spi_status[0],
1282 + &spi_endurance, &spi_array_timing),
1284 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1285 + NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1286 + NAND_SPI_STATUS(0xc0, 4, 5),
1287 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1289 + &spi_extend_cmds, 0xff, 0xff
1292 + NAND_DEVICE("TC58CVG2S0HRAIJ",
1293 + NAND_PACK_ID(0x98, 0xED, 0x51, 0, 0, 0, 0, 0),
1295 + 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
1296 + &spi_cmds, &spi_addressing, &spi_status[0],
1297 + &spi_endurance, &spi_array_timing),
1299 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1300 + NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1301 + NAND_SPI_STATUS(0xc0, 4, 5),
1302 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1304 + &spi_extend_cmds, 0xff, 0xff
1307 + NAND_DEVICE("NO-DEVICE",
1308 + NAND_PACK_ID(0, 0, 0, 0, 0, 0, 0, 0), 0, 0, 0, 0,
1309 + 0, 0, 0, 0, 0, 0, 0, 1,
1310 + &spi_cmds, &spi_addressing, &spi_status[0],
1311 + &spi_endurance, &spi_array_timing),
1313 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1314 + NAND_SPI_CONFIG(0xb0, 4, 6, 0),
1315 + NAND_SPI_STATUS(0xc0, 4, 5),
1316 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1318 + &spi_extend_cmds, 0xff, 0xff
1322 +u8 spi_replace_rx_cmds(u8 mode)
1324 + u8 rx_replace_cmds[] = {0x03, 0x3b, 0x6b, 0xbb, 0xeb};
1326 + return rx_replace_cmds[mode];
1329 +u8 spi_replace_tx_cmds(u8 mode)
1331 + u8 tx_replace_cmds[] = {0x02, 0x32};
1333 + return tx_replace_cmds[mode];
1336 +u8 spi_replace_rx_col_cycle(u8 mode)
1338 + u8 rx_replace_col_cycle[] = {3, 3, 3, 3, 4};
1340 + return rx_replace_col_cycle[mode];
1343 +u8 spi_replace_tx_col_cycle(u8 mode)
1345 + u8 tx_replace_col_cycle[] = {2, 2};
1347 + return tx_replace_col_cycle[mode];
1350 +struct nand_device *nand_get_device(int index)
1352 + return &spi_nand[index].dev;
1356 +++ b/drivers/mtd/nandx/core/nand/device_spi.h
1359 + * Copyright (C) 2017 MediaTek Inc.
1360 + * Licensed under either
1361 + * BSD Licence, (see NOTICE for more details)
1362 + * GNU General Public License, version 2.0, (see NOTICE for more details)
1365 +#ifndef __DEVICE_SPI_H__
1366 +#define __DEVICE_SPI_H__
1370 + * @die_select: select nand device die command
1371 + * @write_enable: enable write command before write data to spi nand
1372 + * spi nand device will auto to be disable after write done
1374 +struct spi_extend_cmds {
1376 + short write_enable;
1380 + * protection feature register
1381 + * @addr: register address
1382 + * @wp_en_bit: write protection enable bit
1383 + * @bp_start_bit: block protection mask start bit
1384 + * @bp_end_bit: block protection mask end bit
1386 +struct feature_protect {
1394 + * configuration feature register
1395 + * @addr: register address
1396 + * @ecc_en_bit: in-die ecc enable bit
1397 + * @otp_en_bit: enter otp access mode bit
1398 + * @need_qe: quad io enable bit
1400 +struct feature_config {
1408 + * status feature register
1409 + * @addr: register address
1410 + * @ecc_start_bit: ecc status mask start bit for error bits number
1411 + * @ecc_end_bit: ecc status mask end bit for error bits number
1413 + * operations status (ex. array busy status) could see on struct nand_status
1415 +struct feature_status {
1422 + * character feature register
1423 + * @addr: register address
1424 + * @die_sel_bit: die select bit
1425 + * @drive_start_bit: drive strength mask start bit
1426 + * @drive_end_bit: drive strength mask end bit
1428 +struct feature_character {
1431 + u8 drive_start_bit;
1437 + * @protect: protection feature register
1438 + * @config: configuration feature register
1439 + * @status: status feature register
1440 + * @character: character feature register
1442 +struct spi_features {
1443 + struct feature_protect protect;
1444 + struct feature_config config;
1445 + struct feature_status status;
1446 + struct feature_character character;
1451 + * configurations of spi nand device table
1452 + * @dev: base information of nand device
1453 + * @feature: feature information for spi nand
1454 + * @extend_cmds: extended the nand base commands
1455 + * @tx_mode_mask: tx mode mask for chip read
1456 + * @rx_mode_mask: rx mode mask for chip write
1458 +struct device_spi {
1459 + struct nand_device dev;
1460 + struct spi_features feature;
1461 + struct spi_extend_cmds *extend_cmds;
1467 +#define NAND_SPI_PROTECT(addr, wp_en_bit, bp_start_bit, bp_end_bit) \
1468 + {addr, wp_en_bit, bp_start_bit, bp_end_bit}
1470 +#define NAND_SPI_CONFIG(addr, ecc_en_bit, otp_en_bit, need_qe) \
1471 + {addr, ecc_en_bit, otp_en_bit, need_qe}
1473 +#define NAND_SPI_STATUS(addr, ecc_start_bit, ecc_end_bit) \
1474 + {addr, ecc_start_bit, ecc_end_bit}
1476 +#define NAND_SPI_CHARACTER(addr, die_sel_bit, drive_start_bit, drive_end_bit) \
1477 + {addr, die_sel_bit, drive_start_bit, drive_end_bit}
1479 +static inline struct device_spi *device_to_spi(struct nand_device *dev)
1481 + return container_of(dev, struct device_spi, dev);
1484 +u8 spi_replace_rx_cmds(u8 mode);
1485 +u8 spi_replace_tx_cmds(u8 mode);
1486 +u8 spi_replace_rx_col_cycle(u8 mode);
1487 +u8 spi_replace_tx_col_cycle(u8 mode);
1489 +#endif /* __DEVICE_SPI_H__ */
1491 +++ b/drivers/mtd/nandx/core/nand/nand_spi.c
1494 + * Copyright (C) 2017 MediaTek Inc.
1495 + * Licensed under either
1496 + * BSD Licence, (see NOTICE for more details)
1497 + * GNU General Public License, version 2.0, (see NOTICE for more details)
1500 +#include "nandx_util.h"
1501 +#include "nandx_core.h"
1502 +#include "../nand_chip.h"
1503 +#include "../nand_device.h"
1504 +#include "../nfi.h"
1505 +#include "../nand_base.h"
1506 +#include "device_spi.h"
1507 +#include "nand_spi.h"
1509 +#define READY_TIMEOUT 500000 /* us */
1511 +static int nand_spi_read_status(struct nand_base *nand)
1513 + struct device_spi *dev = device_to_spi(nand->dev);
1516 + nand->get_feature(nand, dev->feature.status.addr, &status, 1);
1521 +static int nand_spi_wait_ready(struct nand_base *nand, u32 timeout)
1526 + end = get_current_time_us() + timeout;
1529 + status = nand_spi_read_status(nand);
1530 + status &= nand->dev->status->array_busy;
1531 + now = get_current_time_us();
1537 + return status ? -EBUSY : 0;
1540 +static int nand_spi_set_op_mode(struct nand_base *nand, u8 mode)
1542 + struct nand_spi *spi_nand = base_to_spi(nand);
1543 + struct nfi *nfi = nand->nfi;
1546 + if (spi_nand->op_mode != mode) {
1547 + ret = nfi->nfi_ctrl(nfi, SNFI_CTRL_OP_MODE, (void *)&mode);
1548 + spi_nand->op_mode = mode;
1554 +static int nand_spi_set_config(struct nand_base *nand, u8 addr, u8 mask,
1559 + nand->get_feature(nand, addr, &configs, 1);
1566 + nand->set_feature(nand, addr, &configs, 1);
1569 + nand->get_feature(nand, addr, &configs, 1);
1571 + return (configs & mask) == en ? 0 : -EFAULT;
1574 +static int nand_spi_die_select(struct nand_base *nand, int *row)
1576 + struct device_spi *dev = device_to_spi(nand->dev);
1577 + struct nfi *nfi = nand->nfi;
1578 + int lun_blocks, block_pages, lun, blocks;
1579 + int page = *row, ret = 0;
1580 + u8 param = 0, die_sel;
1582 + if (nand->dev->lun_num < 2)
1585 + block_pages = nand_block_pages(nand->dev);
1586 + lun_blocks = nand_lun_blocks(nand->dev);
1587 + blocks = div_down(page, block_pages);
1588 + lun = div_down(blocks, lun_blocks);
1590 + if (dev->extend_cmds->die_select == -1) {
1591 + die_sel = (u8)(lun << dev->feature.character.die_sel_bit);
1592 + nand->get_feature(nand, dev->feature.character.addr, &param, 1);
1594 + nand->set_feature(nand, dev->feature.character.addr, &param, 1);
1596 + nand->get_feature(nand, dev->feature.character.addr, &param, 1);
1597 + ret = (param & die_sel) ? 0 : -EFAULT;
1600 + nfi->send_cmd(nfi, dev->extend_cmds->die_select);
1601 + nfi->send_addr(nfi, lun, 0, 1, 0);
1602 + nfi->trigger(nfi);
1605 + *row = page - (lun_blocks * block_pages) * lun;
1610 +static int nand_spi_select_device(struct nand_base *nand, int cs)
1612 + struct nand_spi *spi = base_to_spi(nand);
1613 + struct nand_base *parent = spi->parent;
1615 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1617 + return parent->select_device(nand, cs);
1620 +static int nand_spi_reset(struct nand_base *nand)
1622 + struct nand_spi *spi = base_to_spi(nand);
1623 + struct nand_base *parent = spi->parent;
1625 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1627 + parent->reset(nand);
1629 + return nand_spi_wait_ready(nand, READY_TIMEOUT);
1632 +static int nand_spi_read_id(struct nand_base *nand, u8 *id, int count)
1634 + struct nand_spi *spi = base_to_spi(nand);
1635 + struct nand_base *parent = spi->parent;
1637 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1639 + return parent->read_id(nand, id, count);
1642 +static int nand_spi_read_param_page(struct nand_base *nand, u8 *data,
1645 + struct device_spi *dev = device_to_spi(nand->dev);
1646 + struct nand_spi *spi = base_to_spi(nand);
1647 + struct nfi *nfi = nand->nfi;
1648 + int sectors, value;
1651 + sectors = div_round_up(count, nfi->sector_size);
1653 + nand->get_feature(nand, dev->feature.config.addr, &param, 1);
1654 + param |= BIT(dev->feature.config.otp_en_bit);
1655 + nand->set_feature(nand, dev->feature.config.addr, &param, 1);
1658 + nand->get_feature(nand, dev->feature.config.addr, &param, 1);
1659 + if (param & BIT(dev->feature.config.otp_en_bit)) {
1661 + nfi->nfi_ctrl(nfi, NFI_CTRL_ECC, &value);
1662 + nand->dev->col_cycle = spi_replace_rx_col_cycle(spi->rx_mode);
1663 + nand->read_page(nand, 0x01);
1664 + nand->read_data(nand, 0x01, 0, sectors, data, NULL);
1667 + param &= ~BIT(dev->feature.config.otp_en_bit);
1668 + nand->set_feature(nand, dev->feature.config.addr, &param, 1);
1673 +static int nand_spi_set_feature(struct nand_base *nand, u8 addr,
1677 + struct nand_spi *spi = base_to_spi(nand);
1678 + struct nand_base *parent = spi->parent;
1680 + nand->write_enable(nand);
1682 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1684 + return parent->set_feature(nand, addr, param, count);
1687 +static int nand_spi_get_feature(struct nand_base *nand, u8 addr,
1691 + struct nand_spi *spi = base_to_spi(nand);
1692 + struct nand_base *parent = spi->parent;
1694 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1696 + return parent->get_feature(nand, addr, param, count);
1699 +static int nand_spi_addressing(struct nand_base *nand, int *row,
1702 + struct nand_device *dev = nand->dev;
1703 + int plane, block, block_pages;
1706 + ret = nand_spi_die_select(nand, row);
1710 + block_pages = nand_block_pages(dev);
1711 + block = div_down(*row, block_pages);
1713 + plane = block % dev->plane_num;
1714 + *col |= (plane << dev->addressing->plane_bit_start);
1719 +static int nand_spi_read_page(struct nand_base *nand, int row)
1721 + struct nand_spi *spi = base_to_spi(nand);
1722 + struct nand_base *parent = spi->parent;
1724 + if (spi->op_mode == SNFI_AUTO_MODE)
1725 + nand_spi_set_op_mode(nand, SNFI_AUTO_MODE);
1727 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1729 + parent->read_page(nand, row);
1731 + return nand_spi_wait_ready(nand, READY_TIMEOUT);
1734 +static int nand_spi_read_data(struct nand_base *nand, int row, int col,
1735 + int sectors, u8 *data, u8 *oob)
1737 + struct device_spi *dev = device_to_spi(nand->dev);
1738 + struct nand_spi *spi = base_to_spi(nand);
1739 + struct nand_base *parent = spi->parent;
1742 + if ((spi->rx_mode == SNFI_RX_114 || spi->rx_mode == SNFI_RX_144) &&
1743 + dev->feature.config.need_qe)
1744 + nand_spi_set_config(nand, dev->feature.config.addr,
1747 + nand->dev->col_cycle = spi_replace_rx_col_cycle(spi->rx_mode);
1749 + nand_spi_set_op_mode(nand, SNFI_CUSTOM_MODE);
1751 + ret = parent->read_data(nand, row, col, sectors, data, oob);
1753 + return -ENANDREAD;
1755 + if (spi->ondie_ecc) {
1756 + ret = nand_spi_read_status(nand);
1757 + ret &= GENMASK(dev->feature.status.ecc_end_bit,
1758 + dev->feature.status.ecc_start_bit);
1759 + ret >>= dev->feature.status.ecc_start_bit;
1760 + if (ret > nand->dev->endurance->ecc_req)
1761 + return -ENANDREAD;
1762 + else if (ret > nand->dev->endurance->max_bitflips)
1763 + return -ENANDFLIPS;
1769 +static int nand_spi_write_enable(struct nand_base *nand)
1771 + struct device_spi *dev = device_to_spi(nand->dev);
1772 + struct nfi *nfi = nand->nfi;
1775 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1778 + nfi->send_cmd(nfi, dev->extend_cmds->write_enable);
1780 + nfi->trigger(nfi);
1782 + status = nand_spi_read_status(nand);
1783 + status &= nand->dev->status->write_protect;
1788 +static int nand_spi_program_data(struct nand_base *nand, int row,
1790 + u8 *data, u8 *oob)
1792 + struct device_spi *dev = device_to_spi(nand->dev);
1793 + struct nand_spi *spi = base_to_spi(nand);
1795 + if (spi->tx_mode == SNFI_TX_114 && dev->feature.config.need_qe)
1796 + nand_spi_set_config(nand, dev->feature.config.addr,
1799 + nand_spi_set_op_mode(nand, SNFI_CUSTOM_MODE);
1801 + nand->dev->col_cycle = spi_replace_tx_col_cycle(spi->tx_mode);
1803 + return spi->parent->program_data(nand, row, col, data, oob);
1806 +static int nand_spi_program_page(struct nand_base *nand, int row)
1808 + struct nand_spi *spi = base_to_spi(nand);
1809 + struct nand_device *dev = nand->dev;
1810 + struct nfi *nfi = nand->nfi;
1812 + if (spi->op_mode == SNFI_AUTO_MODE)
1813 + nand_spi_set_op_mode(nand, SNFI_AUTO_MODE);
1815 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1818 + nfi->send_cmd(nfi, dev->cmds->program_2nd);
1819 + nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
1820 + nfi->trigger(nfi);
1822 + return nand_spi_wait_ready(nand, READY_TIMEOUT);
1825 +static int nand_spi_erase_block(struct nand_base *nand, int row)
1827 + struct nand_spi *spi = base_to_spi(nand);
1828 + struct nand_base *parent = spi->parent;
1830 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1832 + parent->erase_block(nand, row);
1834 + return nand_spi_wait_ready(nand, READY_TIMEOUT);
1837 +static int nand_chip_spi_ctrl(struct nand_chip *chip, int cmd,
1840 + struct nand_base *nand = chip->nand;
1841 + struct device_spi *dev = device_to_spi(nand->dev);
1842 + struct nand_spi *spi = base_to_spi(nand);
1843 + struct nfi *nfi = nand->nfi;
1844 + int ret = 0, value = *(int *)args;
1847 + case CHIP_CTRL_ONDIE_ECC:
1848 + spi->ondie_ecc = (bool)value;
1849 + ret = nand_spi_set_config(nand, dev->feature.config.addr,
1850 + BIT(dev->feature.config.ecc_en_bit),
1854 + case SNFI_CTRL_TX_MODE:
1855 + if (value < 0 || value > SNFI_TX_114)
1856 + return -EOPNOTSUPP;
1858 + if (dev->tx_mode_mask & BIT(value)) {
1859 + spi->tx_mode = value;
1860 + nand->dev->cmds->random_out_1st = spi_replace_tx_cmds(
1862 + ret = nfi->nfi_ctrl(nfi, cmd, args);
1867 + case SNFI_CTRL_RX_MODE:
1868 + if (value < 0 || value > SNFI_RX_144)
1869 + return -EOPNOTSUPP;
1871 + if (dev->rx_mode_mask & BIT(value)) {
1872 + spi->rx_mode = value;
1873 + nand->dev->cmds->program_1st = spi_replace_rx_cmds(
1875 + ret = nfi->nfi_ctrl(nfi, cmd, args);
1880 + case CHIP_CTRL_OPS_CACHE:
1881 + case CHIP_CTRL_OPS_MULTI:
1882 + case CHIP_CTRL_PSLC_MODE:
1883 + case CHIP_CTRL_DDR_MODE:
1884 + case CHIP_CTRL_DRIVE_STRENGTH:
1885 + case CHIP_CTRL_TIMING_MODE:
1886 + ret = -EOPNOTSUPP;
1890 + ret = nfi->nfi_ctrl(nfi, cmd, args);
1897 +int nand_chip_spi_resume(struct nand_chip *chip)
1899 + struct nand_base *nand = chip->nand;
1900 + struct nand_spi *spi = base_to_spi(nand);
1901 + struct device_spi *dev = device_to_spi(nand->dev);
1902 + struct nfi *nfi = nand->nfi;
1903 + struct nfi_format format;
1906 + nand->reset(nand);
1908 + mask = GENMASK(dev->feature.protect.bp_end_bit,
1909 + dev->feature.protect.bp_start_bit);
1910 + nand_spi_set_config(nand, dev->feature.config.addr, mask, false);
1911 + mask = BIT(dev->feature.config.ecc_en_bit);
1912 + nand_spi_set_config(nand, dev->feature.config.addr, mask,
1915 + format.page_size = nand->dev->page_size;
1916 + format.spare_size = nand->dev->spare_size;
1917 + format.ecc_req = nand->dev->endurance->ecc_req;
1919 + return nfi->set_format(nfi, &format);
1922 +static int nand_spi_set_format(struct nand_base *nand)
1924 + struct nfi_format format = {
1925 + nand->dev->page_size,
1926 + nand->dev->spare_size,
1927 + nand->dev->endurance->ecc_req
1930 + return nand->nfi->set_format(nand->nfi, &format);
1933 +struct nand_base *nand_device_init(struct nand_chip *chip)
1935 + struct nand_base *nand;
1936 + struct nand_spi *spi;
1937 + struct device_spi *dev;
1941 + spi = mem_alloc(1, sizeof(struct nand_spi));
1943 + pr_info("alloc nand_spi fail\n");
1947 + spi->ondie_ecc = false;
1948 + spi->op_mode = SNFI_CUSTOM_MODE;
1949 + spi->rx_mode = SNFI_RX_114;
1950 + spi->tx_mode = SNFI_TX_114;
1952 + spi->parent = chip->nand;
1953 + nand = &spi->base;
1954 + nand->dev = spi->parent->dev;
1955 + nand->nfi = spi->parent->nfi;
1957 + nand->select_device = nand_spi_select_device;
1958 + nand->reset = nand_spi_reset;
1959 + nand->read_id = nand_spi_read_id;
1960 + nand->read_param_page = nand_spi_read_param_page;
1961 + nand->set_feature = nand_spi_set_feature;
1962 + nand->get_feature = nand_spi_get_feature;
1963 + nand->read_status = nand_spi_read_status;
1964 + nand->addressing = nand_spi_addressing;
1965 + nand->read_page = nand_spi_read_page;
1966 + nand->read_data = nand_spi_read_data;
1967 + nand->write_enable = nand_spi_write_enable;
1968 + nand->program_data = nand_spi_program_data;
1969 + nand->program_page = nand_spi_program_page;
1970 + nand->erase_block = nand_spi_erase_block;
1972 + chip->chip_ctrl = nand_chip_spi_ctrl;
1973 + chip->nand_type = NAND_SPI;
1974 + chip->resume = nand_chip_spi_resume;
1976 + ret = nand_detect_device(nand);
1980 + nand->select_device(nand, 0);
1982 + ret = nand_spi_set_format(nand);
1986 + dev = (struct device_spi *)nand->dev;
1988 + nand->dev->cmds->random_out_1st =
1989 + spi_replace_rx_cmds(spi->rx_mode);
1990 + nand->dev->cmds->program_1st =
1991 + spi_replace_tx_cmds(spi->tx_mode);
1993 + mask = GENMASK(dev->feature.protect.bp_end_bit,
1994 + dev->feature.protect.bp_start_bit);
1995 + ret = nand_spi_set_config(nand, dev->feature.protect.addr, mask, false);
1999 + mask = BIT(dev->feature.config.ecc_en_bit);
2000 + ret = nand_spi_set_config(nand, dev->feature.config.addr, mask,
2012 +void nand_exit(struct nand_base *nand)
2014 + struct nand_spi *spi = base_to_spi(nand);
2016 + nand_base_exit(spi->parent);
2020 +++ b/drivers/mtd/nandx/core/nand/nand_spi.h
2023 + * Copyright (C) 2017 MediaTek Inc.
2024 + * Licensed under either
2025 + * BSD Licence, (see NOTICE for more details)
2026 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2029 +#ifndef __NAND_SPI_H__
2030 +#define __NAND_SPI_H__
2033 + * spi nand handler
2034 + * @base: spi nand base functions
2035 + * @parent: common parent nand base functions
2036 + * @tx_mode: spi bus width of transfer to device
2037 + * @rx_mode: spi bus width of transfer from device
2038 + * @op_mode: spi nand controller (NFI) operation mode
2039 + * @ondie_ecc: spi nand on-die ecc flag
2043 + struct nand_base base;
2044 + struct nand_base *parent;
2051 +static inline struct nand_spi *base_to_spi(struct nand_base *base)
2053 + return container_of(base, struct nand_spi, base);
2056 +#endif /* __NAND_SPI_H__ */
2058 +++ b/drivers/mtd/nandx/core/nand_base.c
2061 + * Copyright (C) 2017 MediaTek Inc.
2062 + * Licensed under either
2063 + * BSD Licence, (see NOTICE for more details)
2064 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2067 +#include "nandx_util.h"
2068 +#include "nandx_core.h"
2069 +#include "nand_chip.h"
2070 +#include "nand_device.h"
2072 +#include "nand_base.h"
2074 +static int nand_base_select_device(struct nand_base *nand, int cs)
2076 + struct nfi *nfi = nand->nfi;
2080 + return nfi->select_chip(nfi, cs);
2083 +static int nand_base_reset(struct nand_base *nand)
2085 + struct nfi *nfi = nand->nfi;
2086 + struct nand_device *dev = nand->dev;
2089 + nfi->send_cmd(nfi, dev->cmds->reset);
2090 + nfi->trigger(nfi);
2092 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tRST);
2095 +static int nand_base_read_id(struct nand_base *nand, u8 *id, int count)
2097 + struct nfi *nfi = nand->nfi;
2098 + struct nand_device *dev = nand->dev;
2101 + nfi->send_cmd(nfi, dev->cmds->read_id);
2102 + nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tWHR);
2103 + nfi->send_addr(nfi, 0, 0, 1, 0);
2105 + return nfi->read_bytes(nfi, id, count);
2108 +static int nand_base_read_param_page(struct nand_base *nand, u8 *data,
2111 + struct nfi *nfi = nand->nfi;
2112 + struct nand_device *dev = nand->dev;
2115 + nfi->send_cmd(nfi, dev->cmds->read_param_page);
2116 + nfi->send_addr(nfi, 0, 0, 1, 0);
2118 + nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tR);
2120 + return nfi->read_bytes(nfi, data, count);
2123 +static int nand_base_set_feature(struct nand_base *nand, u8 addr,
2127 + struct nfi *nfi = nand->nfi;
2128 + struct nand_device *dev = nand->dev;
2131 + nfi->send_cmd(nfi, dev->cmds->set_feature);
2132 + nfi->send_addr(nfi, addr, 0, 1, 0);
2134 + nfi->write_bytes(nfi, param, count);
2136 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2137 + dev->array_timing->tFEAT);
2140 +static int nand_base_get_feature(struct nand_base *nand, u8 addr,
2144 + struct nfi *nfi = nand->nfi;
2145 + struct nand_device *dev = nand->dev;
2148 + nfi->send_cmd(nfi, dev->cmds->get_feature);
2149 + nfi->send_addr(nfi, addr, 0, 1, 0);
2150 + nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tFEAT);
2152 + return nfi->read_bytes(nfi, param, count);
2155 +static int nand_base_read_status(struct nand_base *nand)
2157 + struct nfi *nfi = nand->nfi;
2158 + struct nand_device *dev = nand->dev;
2162 + nfi->send_cmd(nfi, dev->cmds->read_status);
2163 + nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tWHR);
2164 + nfi->read_bytes(nfi, &status, 1);
2169 +static int nand_base_addressing(struct nand_base *nand, int *row,
2172 + struct nand_device *dev = nand->dev;
2173 + int lun, plane, block, page, cs = 0;
2174 + int block_pages, target_blocks, wl = 0;
2177 + if (dev->target_num > 1) {
2178 + block_pages = nand_block_pages(dev);
2179 + target_blocks = nand_target_blocks(dev);
2180 + cs = div_down(*row, block_pages * target_blocks);
2181 + *row -= cs * block_pages * target_blocks;
2184 + nand->select_device(nand, cs);
2186 + block_pages = nand_block_pages(dev);
2187 + block = div_down(*row, block_pages);
2188 + page = *row - block * block_pages;
2189 + plane = reminder(block, dev->plane_num);
2190 + lun = div_down(block, nand_lun_blocks(dev));
2192 + wl |= (page << dev->addressing->row_bit_start);
2193 + wl |= (block << dev->addressing->block_bit_start);
2194 + wl |= (plane << dev->addressing->plane_bit_start);
2195 + wl |= (lun << dev->addressing->lun_bit_start);
2203 +static int nand_base_read_page(struct nand_base *nand, int row)
2205 + struct nfi *nfi = nand->nfi;
2206 + struct nand_device *dev = nand->dev;
2209 + nfi->send_cmd(nfi, dev->cmds->read_1st);
2210 + nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
2211 + nfi->send_cmd(nfi, dev->cmds->read_2nd);
2212 + nfi->trigger(nfi);
2214 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tR);
2217 +static int nand_base_read_data(struct nand_base *nand, int row, int col,
2218 + int sectors, u8 *data, u8 *oob)
2220 + struct nfi *nfi = nand->nfi;
2221 + struct nand_device *dev = nand->dev;
2224 + nfi->send_cmd(nfi, dev->cmds->random_out_1st);
2225 + nfi->send_addr(nfi, col, row, dev->col_cycle, dev->row_cycle);
2226 + nfi->send_cmd(nfi, dev->cmds->random_out_2nd);
2227 + nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tRCBSY);
2229 + return nfi->read_sectors(nfi, data, oob, sectors);
2232 +static int nand_base_write_enable(struct nand_base *nand)
2234 + struct nand_device *dev = nand->dev;
2237 + status = nand_base_read_status(nand);
2238 + if (status & dev->status->write_protect)
2244 +static int nand_base_program_data(struct nand_base *nand, int row,
2246 + u8 *data, u8 *oob)
2248 + struct nfi *nfi = nand->nfi;
2249 + struct nand_device *dev = nand->dev;
2252 + nfi->send_cmd(nfi, dev->cmds->program_1st);
2253 + nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
2255 + return nfi->write_page(nfi, data, oob);
2258 +static int nand_base_program_page(struct nand_base *nand, int row)
2260 + struct nfi *nfi = nand->nfi;
2261 + struct nand_device *dev = nand->dev;
2264 + nfi->send_cmd(nfi, dev->cmds->program_2nd);
2265 + nfi->trigger(nfi);
2267 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2268 + dev->array_timing->tPROG);
2271 +static int nand_base_erase_block(struct nand_base *nand, int row)
2273 + struct nfi *nfi = nand->nfi;
2274 + struct nand_device *dev = nand->dev;
2277 + nfi->send_cmd(nfi, dev->cmds->erase_1st);
2278 + nfi->send_addr(nfi, 0, row, 0, dev->row_cycle);
2279 + nfi->send_cmd(nfi, dev->cmds->erase_2nd);
2280 + nfi->trigger(nfi);
2282 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2283 + dev->array_timing->tBERS);
2286 +static int nand_base_read_cache(struct nand_base *nand, int row)
2288 + struct nfi *nfi = nand->nfi;
2289 + struct nand_device *dev = nand->dev;
2292 + nfi->send_cmd(nfi, dev->cmds->read_1st);
2293 + nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
2294 + nfi->send_cmd(nfi, dev->cmds->read_cache);
2295 + nfi->trigger(nfi);
2297 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2298 + dev->array_timing->tRCBSY);
2301 +static int nand_base_read_last(struct nand_base *nand)
2303 + struct nfi *nfi = nand->nfi;
2304 + struct nand_device *dev = nand->dev;
2307 + nfi->send_cmd(nfi, dev->cmds->read_cache_last);
2308 + nfi->trigger(nfi);
2310 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2311 + dev->array_timing->tRCBSY);
2314 +static int nand_base_program_cache(struct nand_base *nand)
2316 + struct nfi *nfi = nand->nfi;
2317 + struct nand_device *dev = nand->dev;
2320 + nfi->send_cmd(nfi, dev->cmds->program_cache);
2321 + nfi->trigger(nfi);
2323 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2324 + dev->array_timing->tPCBSY);
2327 +struct nand_base *nand_base_init(struct nand_device *dev,
2330 + struct nand_base *nand;
2332 + nand = mem_alloc(1, sizeof(struct nand_base));
2338 + nand->select_device = nand_base_select_device;
2339 + nand->reset = nand_base_reset;
2340 + nand->read_id = nand_base_read_id;
2341 + nand->read_param_page = nand_base_read_param_page;
2342 + nand->set_feature = nand_base_set_feature;
2343 + nand->get_feature = nand_base_get_feature;
2344 + nand->read_status = nand_base_read_status;
2345 + nand->addressing = nand_base_addressing;
2346 + nand->read_page = nand_base_read_page;
2347 + nand->read_data = nand_base_read_data;
2348 + nand->read_cache = nand_base_read_cache;
2349 + nand->read_last = nand_base_read_last;
2350 + nand->write_enable = nand_base_write_enable;
2351 + nand->program_data = nand_base_program_data;
2352 + nand->program_page = nand_base_program_page;
2353 + nand->program_cache = nand_base_program_cache;
2354 + nand->erase_block = nand_base_erase_block;
2359 +void nand_base_exit(struct nand_base *base)
2361 + nfi_exit(base->nfi);
2365 +++ b/drivers/mtd/nandx/core/nand_base.h
2368 + * Copyright (C) 2017 MediaTek Inc.
2369 + * Licensed under either
2370 + * BSD Licence, (see NOTICE for more details)
2371 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2374 +#ifndef __NAND_BASE_H__
2375 +#define __NAND_BASE_H__
2378 + * nand base functions
2379 + * @dev: nand device information
2380 + * @nfi: nand host controller
2381 + * @select_device: select one nand device of multi nand on chip
2382 + * @reset: reset current nand device
2383 + * @read_id: read current nand id
2384 + * @read_param_page: read current nand parameters page
2385 + * @set_feature: configurate the nand device feature
2386 + * @get_feature: get the nand device feature
2387 + * @read_status: read nand device status
2388 + * @addressing: addressing the address to nand device physical address
2389 + * @read_page: read page data to device cache register
2390 + * @read_data: read data from device cache register by bus protocol
2391 + * @read_cache: nand cache read operation for data output
2392 + * @read_last: nand cache read operation for last page output
2393 + * @write_enable: enable program/erase for nand, especially spi nand
2394 + * @program_data: program data to nand device cache register
2395 + * @program_page: program page data from nand device cache register to array
2396 + * @program_cache: nand cache program operation for data input
2397 + * @erase_block: erase nand block operation
2400 + struct nand_device *dev;
2402 + int (*select_device)(struct nand_base *nand, int cs);
2403 + int (*reset)(struct nand_base *nand);
2404 + int (*read_id)(struct nand_base *nand, u8 *id, int count);
2405 + int (*read_param_page)(struct nand_base *nand, u8 *data, int count);
2406 + int (*set_feature)(struct nand_base *nand, u8 addr, u8 *param,
2408 + int (*get_feature)(struct nand_base *nand, u8 addr, u8 *param,
2410 + int (*read_status)(struct nand_base *nand);
2411 + int (*addressing)(struct nand_base *nand, int *row, int *col);
2413 + int (*read_page)(struct nand_base *nand, int row);
2414 + int (*read_data)(struct nand_base *nand, int row, int col, int sectors,
2415 + u8 *data, u8 *oob);
2416 + int (*read_cache)(struct nand_base *nand, int row);
2417 + int (*read_last)(struct nand_base *nand);
2419 + int (*write_enable)(struct nand_base *nand);
2420 + int (*program_data)(struct nand_base *nand, int row, int col, u8 *data,
2422 + int (*program_page)(struct nand_base *nand, int row);
2423 + int (*program_cache)(struct nand_base *nand);
2425 + int (*erase_block)(struct nand_base *nand, int row);
2428 +struct nand_base *nand_base_init(struct nand_device *device,
2430 +void nand_base_exit(struct nand_base *base);
2432 +struct nand_base *nand_device_init(struct nand_chip *nand);
2433 +void nand_exit(struct nand_base *nand);
2435 +int nand_detect_device(struct nand_base *nand);
2437 +#endif /* __NAND_BASE_H__ */
2439 +++ b/drivers/mtd/nandx/core/nand_chip.c
2442 + * Copyright (C) 2017 MediaTek Inc.
2443 + * Licensed under either
2444 + * BSD Licence, (see NOTICE for more details)
2445 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2448 +#include "nandx_util.h"
2449 +#include "nandx_core.h"
2450 +#include "nand_chip.h"
2451 +#include "nand_device.h"
2453 +#include "nand_base.h"
2455 +static int nand_chip_read_page(struct nand_chip *chip,
2456 + struct nand_ops *ops,
2459 + struct nand_base *nand = chip->nand;
2460 + struct nand_device *dev = nand->dev;
2462 + int row, col, sectors;
2465 + for (i = 0; i < count; i++) {
2469 + nand->addressing(nand, &row, &col);
2470 + ops[i].status = nand->read_page(nand, row);
2471 + if (ops[i].status < 0) {
2472 + ret = ops[i].status;
2476 + data = ops[i].data;
2478 + sectors = ops[i].len / chip->sector_size;
2479 + ops[i].status = nand->read_data(nand, row, col,
2480 + sectors, data, oob);
2481 + if (ops[i].status > 0)
2482 + ops[i].status = ops[i].status >=
2483 + dev->endurance->max_bitflips ?
2486 + ret = min_t(int, ret, ops[i].status);
2492 +static int nand_chip_write_page(struct nand_chip *chip,
2493 + struct nand_ops *ops,
2496 + struct nand_base *nand = chip->nand;
2497 + struct nand_device *dev = nand->dev;
2502 + for (i = 0; i < count; i++) {
2506 + nand->addressing(nand, &row, &col);
2508 + ops[i].status = nand->write_enable(nand);
2509 + if (ops[i].status) {
2510 + pr_debug("Write Protect at %x!\n", row);
2511 + ops[i].status = -ENANDWP;
2515 + data = ops[i].data;
2517 + ops[i].status = nand->program_data(nand, row, col, data, oob);
2518 + if (ops[i].status < 0) {
2519 + ret = ops[i].status;
2523 + ops[i].status = nand->program_page(nand, row);
2524 + if (ops[i].status < 0) {
2525 + ret = ops[i].status;
2529 + ops[i].status = nand->read_status(nand);
2530 + if (ops[i].status & dev->status->program_fail)
2531 + ops[i].status = -ENANDWRITE;
2533 + ret = min_t(int, ret, ops[i].status);
2539 +static int nand_chip_erase_block(struct nand_chip *chip,
2540 + struct nand_ops *ops,
2543 + struct nand_base *nand = chip->nand;
2544 + struct nand_device *dev = nand->dev;
2548 + for (i = 0; i < count; i++) {
2552 + nand->addressing(nand, &row, &col);
2554 + ops[i].status = nand->write_enable(nand);
2555 + if (ops[i].status) {
2556 + pr_debug("Write Protect at %x!\n", row);
2557 + ops[i].status = -ENANDWP;
2561 + ops[i].status = nand->erase_block(nand, row);
2562 + if (ops[i].status < 0) {
2563 + ret = ops[i].status;
2567 + ops[i].status = nand->read_status(nand);
2568 + if (ops[i].status & dev->status->erase_fail)
2569 + ops[i].status = -ENANDERASE;
2571 + ret = min_t(int, ret, ops[i].status);
2577 +/* read first bad mark on spare */
2578 +static int nand_chip_is_bad_block(struct nand_chip *chip,
2579 + struct nand_ops *ops,
2582 + int i, ret, value;
2584 + u8 *data, *tmp_buf;
2586 + tmp_buf = mem_alloc(1, chip->page_size);
2590 + memset(tmp_buf, 0x00, chip->page_size);
2594 + ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
2598 + ret = chip->read_page(chip, ops, count);
2602 + for (i = 0; i < count; i++) {
2603 + data = ops[i].data;
2605 + /* temp solution for mt7622, because of no bad mark swap */
2606 + if (!memcmp(data, tmp_buf, chip->page_size)) {
2607 + ops[i].status = -ENANDBAD;
2608 + status = -ENANDBAD;
2611 + ops[i].status = 0;
2617 + ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
2621 + mem_free(tmp_buf);
2625 + mem_free(tmp_buf);
2629 +static int nand_chip_ctrl(struct nand_chip *chip, int cmd, void *args)
2631 + return -EOPNOTSUPP;
2634 +static int nand_chip_suspend(struct nand_chip *chip)
2639 +static int nand_chip_resume(struct nand_chip *chip)
2644 +struct nand_chip *nand_chip_init(struct nfi_resource *res)
2646 + struct nand_chip *chip;
2647 + struct nand_base *nand;
2650 + chip = mem_alloc(1, sizeof(struct nand_chip));
2652 + pr_info("nand chip alloc fail!\n");
2656 + nfi = nfi_init(res);
2658 + pr_info("nfi init fail!\n");
2662 + nand = nand_base_init(NULL, nfi);
2664 + pr_info("nand base init fail!\n");
2668 + chip->nand = (void *)nand;
2669 + chip->read_page = nand_chip_read_page;
2670 + chip->write_page = nand_chip_write_page;
2671 + chip->erase_block = nand_chip_erase_block;
2672 + chip->is_bad_block = nand_chip_is_bad_block;
2673 + chip->chip_ctrl = nand_chip_ctrl;
2674 + chip->suspend = nand_chip_suspend;
2675 + chip->resume = nand_chip_resume;
2677 + nand = nand_device_init(chip);
2681 + chip->nand = (void *)nand;
2682 + chip->plane_num = nand->dev->plane_num;
2683 + chip->block_num = nand_total_blocks(nand->dev);
2684 + chip->block_size = nand->dev->block_size;
2685 + chip->block_pages = nand_block_pages(nand->dev);
2686 + chip->page_size = nand->dev->page_size;
2687 + chip->oob_size = nfi->fdm_size * div_down(chip->page_size,
2688 + nfi->sector_size);
2689 + chip->sector_size = nfi->sector_size;
2690 + chip->sector_spare_size = nfi->sector_spare_size;
2691 + chip->min_program_pages = nand->dev->min_program_pages;
2692 + chip->ecc_strength = nfi->ecc_strength;
2693 + chip->ecc_parity_size = nfi->ecc_parity_size;
2694 + chip->fdm_ecc_size = nfi->fdm_ecc_size;
2695 + chip->fdm_reg_size = nfi->fdm_size;
2708 +void nand_chip_exit(struct nand_chip *chip)
2710 + nand_exit(chip->nand);
2714 +++ b/drivers/mtd/nandx/core/nand_chip.h
2717 + * Copyright (C) 2017 MediaTek Inc.
2718 + * Licensed under either
2719 + * BSD Licence, (see NOTICE for more details)
2720 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2723 +#ifndef __NAND_CHIP_H__
2724 +#define __NAND_CHIP_H__
2734 + * nand chip operation unit
2735 + * one nand_ops indicates one row operation
2736 + * @row: nand chip row address, like as nand row
2737 + * @col: nand chip column address, like as nand column
2738 + * @len: operate data length, min is sector_size,
2739 + * max is page_size and sector_size aligned
2740 + * @status: one operation result status
2741 + * @data: data buffer for operation
2742 + * @oob: oob buffer for operation, like as nand spare area
2754 + * nand chip descriptions
2755 + * nand chip includes nand controller and the several same nand devices
2756 + * @nand_type: the nand type on this chip,
2757 + * the chip maybe have several nand device and the type must be same
2758 + * @plane_num: the whole plane number on the chip
2759 + * @block_num: the whole block number on the chip
2760 + * @block_size: nand device block size
2761 + * @block_pages: nand device block has page number
2762 + * @page_size: nand device page size
2763 + * @oob_size: chip out of band size, like as nand spare size,
2764 + * but restricts this:
2765 + * the size is provided by nand controller(NFI),
2766 + * because NFI would use some nand spare size
2767 + * @min_program_pages: chip needs min pages per program operations
2768 + * one page as one nand_ops
2769 + * @sector_size: chip min read size
2770 + * @sector_spare_size: spare size for sector, is spare_size/page_sectors
2771 + * @ecc_strength: ecc strength per sector_size, used for ecc calculation
2772 + * @ecc_parity_size: ecc parity size for one sector_size data
2773 + * @nand: pointer to inherited struct nand_base
2774 + * @read_page: read %count pages on chip
2775 + * @write_page: write %count pages on chip
2776 + * @erase_block: erase %count blocks on chip, one block is one nand_ops
2777 + * it is better to set nand_ops.row to block start row
2778 + * @is_bad_block: judge the %count blocks on chip if they are bad
2779 + * by vendor specification
2780 + * @chip_ctrl: control the chip features by nandx_ctrl_cmd
2781 + * @suspend: suspend nand chip
2782 + * @resume: resume nand chip
2793 + int min_program_pages;
2795 + int sector_spare_size;
2797 + int ecc_parity_size;
2803 + int (*read_page)(struct nand_chip *chip, struct nand_ops *ops,
2805 + int (*write_page)(struct nand_chip *chip, struct nand_ops *ops,
2807 + int (*erase_block)(struct nand_chip *chip, struct nand_ops *ops,
2809 + int (*is_bad_block)(struct nand_chip *chip, struct nand_ops *ops,
2811 + int (*chip_ctrl)(struct nand_chip *chip, int cmd, void *args);
2812 + int (*suspend)(struct nand_chip *chip);
2813 + int (*resume)(struct nand_chip *chip);
2816 +struct nand_chip *nand_chip_init(struct nfi_resource *res);
2817 +void nand_chip_exit(struct nand_chip *chip);
2818 +#endif /* __NAND_CHIP_H__ */
2820 +++ b/drivers/mtd/nandx/core/nand_device.c
2823 + * Copyright (C) 2017 MediaTek Inc.
2824 + * Licensed under either
2825 + * BSD Licence, (see NOTICE for more details)
2826 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2829 +#include "nandx_util.h"
2830 +#include "nandx_core.h"
2831 +#include "nand_chip.h"
2832 +#include "nand_device.h"
2833 +#include "nand_base.h"
2835 +#define MAX_CHIP_DEVICE 4
2836 +#define PARAM_PAGE_LEN 2048
2837 +#define ONFI_CRC_BASE 0x4f4e
2839 +static u16 nand_onfi_crc16(u16 crc, u8 const *p, size_t len)
2846 + for (i = 0; i < 8; i++)
2847 + crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
2853 +static inline void decode_addr_cycle(u8 addr_cycle, u8 *row_cycle,
2856 + *row_cycle = addr_cycle & 0xf;
2857 + *col_cycle = (addr_cycle >> 4) & 0xf;
2860 +static int detect_onfi(struct nand_device *dev,
2861 + struct nand_onfi_params *onfi)
2863 + struct nand_endurance *endurance = dev->endurance;
2864 + u16 size, i, crc16;
2867 + size = sizeof(struct nand_onfi_params) - sizeof(u16);
2869 + for (i = 0; i < 3; i++) {
2870 + crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&onfi[i], size);
2872 + if (onfi[i].signature[0] == 'O' &&
2873 + onfi[i].signature[1] == 'N' &&
2874 + onfi[i].signature[2] == 'F' &&
2875 + onfi[i].signature[3] == 'I' &&
2876 + onfi[i].crc16 == crc16)
2879 + /* in some spi nand, the onfi signature may be "NAND" */
2880 + if (onfi[i].signature[0] == 'N' &&
2881 + onfi[i].signature[1] == 'A' &&
2882 + onfi[i].signature[2] == 'N' &&
2883 + onfi[i].signature[3] == 'D' &&
2884 + onfi[i].crc16 == crc16)
2891 + memcpy(dev->name, onfi[i].model, 20);
2892 + id = onfi[i].manufacturer;
2893 + dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
2895 + dev->id_len = MAX_ID_NUM;
2896 + dev->io_width = (onfi[i].features & 1) ? NAND_IO16 : NAND_IO8;
2897 + decode_addr_cycle(onfi[i].addr_cycle, &dev->row_cycle,
2899 + dev->target_num = 1;
2900 + dev->lun_num = onfi[i].lun_num;
2901 + dev->plane_num = BIT(onfi[i].plane_address_bits);
2902 + dev->block_num = onfi[i].lun_blocks / dev->plane_num;
2903 + dev->block_size = onfi[i].block_pages * onfi[i].page_size;
2904 + dev->page_size = onfi[i].page_size;
2905 + dev->spare_size = onfi[i].spare_size;
2907 + endurance->ecc_req = onfi[i].ecc_req;
2908 + endurance->pe_cycle = onfi[i].valid_block_endurance;
2909 + endurance->max_bitflips = endurance->ecc_req >> 1;
2914 +static int detect_jedec(struct nand_device *dev,
2915 + struct nand_jedec_params *jedec)
2917 + struct nand_endurance *endurance = dev->endurance;
2918 + u16 size, i, crc16;
2921 + size = sizeof(struct nand_jedec_params) - sizeof(u16);
2923 + for (i = 0; i < 3; i++) {
2924 + crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&jedec[i], size);
2926 + if (jedec[i].signature[0] == 'J' &&
2927 + jedec[i].signature[1] == 'E' &&
2928 + jedec[i].signature[2] == 'S' &&
2929 + jedec[i].signature[3] == 'D' &&
2930 + jedec[i].crc16 == crc16)
2937 + memcpy(dev->name, jedec[i].model, 20);
2938 + id = jedec[i].manufacturer;
2939 + dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
2941 + dev->id_len = MAX_ID_NUM;
2942 + dev->io_width = (jedec[i].features & 1) ? NAND_IO16 : NAND_IO8;
2943 + decode_addr_cycle(jedec[i].addr_cycle, &dev->row_cycle,
2945 + dev->target_num = 1;
2946 + dev->lun_num = jedec[i].lun_num;
2947 + dev->plane_num = BIT(jedec[i].plane_address_bits);
2948 + dev->block_num = jedec[i].lun_blocks / dev->plane_num;
2949 + dev->block_size = jedec[i].block_pages * jedec[i].page_size;
2950 + dev->page_size = jedec[i].page_size;
2951 + dev->spare_size = jedec[i].spare_size;
2953 + endurance->ecc_req = jedec[i].endurance_block0[0];
2954 + endurance->pe_cycle = jedec[i].valid_block_endurance;
2955 + endurance->max_bitflips = endurance->ecc_req >> 1;
2960 +static struct nand_device *detect_parameters_page(struct nand_base
2963 + struct nand_device *dev = nand->dev;
2967 + params = mem_alloc(1, PARAM_PAGE_LEN);
2971 + memset(params, 0, PARAM_PAGE_LEN);
2972 + ret = nand->read_param_page(nand, params, PARAM_PAGE_LEN);
2974 + pr_info("read parameters page fail!\n");
2978 + ret = detect_onfi(dev, params);
2980 + pr_info("detect onfi device fail! try to detect jedec\n");
2981 + ret = detect_jedec(dev, params);
2983 + pr_info("detect jedec device fail!\n");
2996 +static int read_device_id(struct nand_base *nand, int cs, u8 *id)
3000 + nand->select_device(nand, cs);
3001 + nand->reset(nand);
3002 + nand->read_id(nand, id, MAX_ID_NUM);
3003 + pr_info("device %d ID: ", cs);
3005 + for (i = 0; i < MAX_ID_NUM; i++)
3006 + pr_info("%x ", id[i]);
3013 +static int detect_more_device(struct nand_base *nand, u8 *id)
3015 + u8 id_ext[MAX_ID_NUM];
3016 + int i, j, target_num = 0;
3018 + for (i = 1; i < MAX_CHIP_DEVICE; i++) {
3019 + memset(id_ext, 0xff, MAX_ID_NUM);
3020 + read_device_id(nand, i, id_ext);
3022 + for (j = 0; j < MAX_ID_NUM; j++) {
3023 + if (id_ext[j] != id[j])
3031 + return target_num;
3034 +static struct nand_device *scan_device_table(const u8 *id, int id_len)
3036 + struct nand_device *dev;
3038 + u8 ids[MAX_ID_NUM] = {0};
3041 + dev = nand_get_device(i);
3043 + if (!strcmp(dev->name, "NO-DEVICE"))
3046 + if (id_len < dev->id_len) {
3051 + NAND_UNPACK_ID(dev->id, ids, MAX_ID_NUM);
3052 + for (j = 0; j < dev->id_len; j++) {
3053 + if (ids[j] != id[j])
3057 + if (j == dev->id_len)
3066 +int nand_detect_device(struct nand_base *nand)
3068 + struct nand_device *dev;
3069 + u8 id[MAX_ID_NUM] = { 0 };
3070 + int target_num = 0;
3072 + /* Get nand device default setting for reset/read_id */
3073 + nand->dev = scan_device_table(NULL, -1);
3075 + read_device_id(nand, 0, id);
3076 + dev = scan_device_table(id, MAX_ID_NUM);
3078 + if (!strcmp(dev->name, "NO-DEVICE")) {
3079 + pr_info("device scan fail\n");
3083 + /* TobeFix: has null pointer issue in this function */
3084 + if (!strcmp(dev->name, "NO-DEVICE")) {
3085 + pr_info("device scan fail, detect parameters page\n");
3086 + dev = detect_parameters_page(nand);
3088 + pr_info("detect parameters fail\n");
3093 + if (dev->target_num > 1)
3094 + target_num = detect_more_device(nand, id);
3097 + pr_debug("chip has target device num: %d\n", target_num);
3099 + if (dev->target_num != target_num)
3100 + dev->target_num = target_num;
3108 +++ b/drivers/mtd/nandx/core/nand_device.h
3111 + * Copyright (C) 2017 MediaTek Inc.
3112 + * Licensed under either
3113 + * BSD Licence, (see NOTICE for more details)
3114 + * GNU General Public License, version 2.0, (see NOTICE for more details)
3117 +#ifndef __NAND_DEVICE_H__
3118 +#define __NAND_DEVICE_H__
3121 +struct nand_onfi_params {
3122 + /* Revision information and features block. 0 */
3131 + * 9-15 Reserved (0)
3132 + * 8 1 = supports ONFI version 3.2
3133 + * 7 1 = supports ONFI version 3.1
3134 + * 6 1 = supports ONFI version 3.0
3135 + * 5 1 = supports ONFI version 2.3
3136 + * 4 1 = supports ONFI version 2.2
3137 + * 3 1 = supports ONFI version 2.1
3138 + * 2 1 = supports ONFI version 2.0
3139 + * 1 1 = supports ONFI version 1.0
3144 + * 13-15 Reserved (0)
3145 + * 12 1 = supports external Vpp
3146 + * 11 1 = supports Volume addressing
3147 + * 10 1 = supports NV-DDR2
3148 + * 9 1 = supports EZ NAND
3149 + * 8 1 = supports program page register clear enhancement
3150 + * 7 1 = supports extended parameter page
3151 + * 6 1 = supports multi-plane read operations
3152 + * 5 1 = supports NV-DDR
3153 + * 4 1 = supports odd to even page Copyback
3154 + * 3 1 = supports multi-plane program and erase operations
3155 + * 2 1 = supports non-sequential page programming
3156 + * 1 1 = supports multiple LUN operations
3157 + * 0 1 = supports 16-bit data bus width
3161 + * 13-15 Reserved (0)
3162 + * 12 1 = supports LUN Get and LUN Set Features
3163 + * 11 1 = supports ODT Configure
3164 + * 10 1 = supports Volume Select
3165 + * 9 1 = supports Reset LUN
3166 + * 8 1 = supports Small Data Move
3167 + * 7 1 = supports Change Row Address
3168 + * 6 1 = supports Change Read Column Enhanced
3169 + * 5 1 = supports Read Unique ID
3170 + * 4 1 = supports Copyback
3171 + * 3 1 = supports Read Status Enhanced
3172 + * 2 1 = supports Get Features and Set Features
3173 + * 1 1 = supports Read Cache commands
3174 + * 0 1 = supports Page Cache Program command
3178 + * 4-7 Reserved (0)
3179 + * 3 1 = supports Multi-plane Block Erase
3180 + * 2 1 = supports Multi-plane Copyback Program
3181 + * 1 1 = supports Multi-plane Page Program
3182 + * 0 1 = supports Random Data Out
3186 + u16 extend_param_len;
3187 + u8 param_page_num;
3190 + /* Manufacturer information block. 32 */
3191 + u8 manufacturer[12];
3197 + /* Memory organization block. 80 */
3200 + u32 partial_page_size; /* obsolete */
3201 + u16 partial_spare_size; /* obsolete */
3206 + * 4-7 Column address cycles
3207 + * 0-3 Row address cycles
3211 + u16 lun_max_bad_blocks;
3212 + u16 block_endurance;
3213 + u8 target_begin_valid_blocks;
3214 + u16 valid_block_endurance;
3215 + u8 page_program_num;
3216 + u8 partial_program_attr; /* obsolete */
3219 + * 4-7 Reserved (0)
3220 + * 0-3 Number of plane address bits
3222 + u8 plane_address_bits;
3224 + * 6-7 Reserved (0)
3225 + * 5 1 = lower bit XNOR block address restriction
3226 + * 4 1 = read cache supported
3227 + * 3 Address restrictions for cache operations
3228 + * 2 1 = program cache supported
3229 + * 1 1 = no block address restrictions
3230 + * 0 Overlapped / concurrent multi-plane support
3232 + u8 multi_plane_attr;
3233 + u8 ez_nand_support;
3236 + /* Electrical parameters block. 128 */
3237 + u8 io_pin_max_capacitance;
3239 + * 6-15 Reserved (0)
3240 + * 5 1 = supports timing mode 5
3241 + * 4 1 = supports timing mode 4
3242 + * 3 1 = supports timing mode 3
3243 + * 2 1 = supports timing mode 2
3244 + * 1 1 = supports timing mode 1
3245 + * 0 1 = supports timing mode 0, shall be 1
3247 + u16 sdr_timing_mode;
3248 + u16 sdr_program_cache_timing_mode; /* obsolete */
3255 + * 6 1 = supports NV-DDR2 timing mode 8
3256 + * 5 1 = supports NV-DDR timing mode 5
3257 + * 4 1 = supports NV-DDR timing mode 4
3258 + * 3 1 = supports NV-DDR timing mode 3
3259 + * 2 1 = supports NV-DDR timing mode 2
3260 + * 1 1 = supports NV-DDR timing mode 1
3261 + * 0 1 = supports NV-DDR timing mode 0
3263 + u8 nvddr_timing_mode;
3265 + * 7 1 = supports timing mode 7
3266 + * 6 1 = supports timing mode 6
3267 + * 5 1 = supports timing mode 5
3268 + * 4 1 = supports timing mode 4
3269 + * 3 1 = supports timing mode 3
3270 + * 2 1 = supports timing mode 2
3271 + * 1 1 = supports timing mode 1
3272 + * 0 1 = supports timing mode 0
3274 + u8 nvddr2_timing_mode;
3276 + * 4-7 Reserved (0)
3277 + * 3 1 = device requires Vpp enablement sequence
3278 + * 2 1 = device supports CLK stopped for data input
3279 + * 1 1 = typical capacitance
3280 + * 0 tCAD value to use
3283 + u16 clk_pin_capacitance;
3284 + u16 io_pin_capacitance;
3285 + u16 input_pin_capacitance;
3286 + u8 input_pin_max_capacitance;
3288 + * 3-7 Reserved (0)
3289 + * 2 1 = supports 18 Ohm drive strength
3290 + * 1 1 = supports 25 Ohm drive strength
3291 + * 0 1 = supports driver strength settings
3293 + u8 drive_strength;
3294 + u16 tR_multi_plane;
3298 + * 6-7 Reserved (0)
3299 + * 5 1 = external VREFQ required for >= 200 MT/s
3300 + * 4 1 = supports differential signaling for DQS
3301 + * 3 1 = supports differential signaling for RE_n
3302 + * 2 1 = supports ODT value of 30 Ohms
3303 + * 1 1 = supports matrix termination ODT
3304 + * 0 1 = supports self-termination ODT
3306 + u8 nvddr2_features;
3307 + u8 nvddr2_warmup_cycles;
3310 + /* vendor block. 164 */
3311 + u16 vendor_revision;
3312 + u8 vendor_spec[88];
3314 + /* CRC for Parameter Page. 254 */
3319 +struct nand_jedec_params {
3320 + /* Revision information and features block. 0 */
3329 + * 3-15: Reserved (0)
3330 + * 2: 1 = supports parameter page revision 1.0 and standard revision 1.0
3331 + * 1: 1 = supports vendor specific parameter page
3336 + * 9-15 Reserved (0)
3337 + * 8: 1 = supports program page register clear enhancement
3338 + * 7: 1 = supports external Vpp
3339 + * 6: 1 = supports Toggle Mode DDR
3340 + * 5: 1 = supports Synchronous DDR
3341 + * 4: 1 = supports multi-plane read operations
3342 + * 3: 1 = supports multi-plane program and erase operations
3343 + * 2: 1 = supports non-sequential page programming
3344 + * 1: 1 = supports multiple LUN operations
3345 + * 0: 1 = supports 16-bit data bus width
3349 + * 11-23: Reserved (0)
3350 + * 10: 1 = supports Synchronous Reset
3351 + * 9: 1 = supports Reset LUN (Primary)
3352 + * 8: 1 = supports Small Data Move
3353 + * 7: 1 = supports Multi-plane Copyback Program (Primary)
3354 + * 6: 1 = supports Random Data Out (Primary)
3355 + * 5: 1 = supports Read Unique ID
3356 + * 4: 1 = supports Copyback
3357 + * 3: 1 = supports Read Status Enhanced (Primary)
3358 + * 2: 1 = supports Get Features and Set Features
3359 + * 1: 1 = supports Read Cache commands
3360 + * 0: 1 = supports Page Cache Program command
3364 + * 8-15: Reserved (0)
3365 + * 7: 1 = supports secondary Read Status Enhanced
3366 + * 6: 1 = supports secondary Multi-plane Block Erase
3367 + * 5: 1 = supports secondary Multi-plane Copyback Program
3368 + * 4: 1 = supports secondary Multi-plane Program
3369 + * 3: 1 = supports secondary Random Data Out
3370 + * 2: 1 = supports secondary Multi-plane Copyback Read
3371 + * 1: 1 = supports secondary Multi-plane Read Cache Random
3372 + * 0: 1 = supports secondary Multi-plane Read
3374 + u16 secondary_cmds;
3375 + u8 param_page_num;
3378 + /* Manufacturer information block. 32*/
3379 + u8 manufacturer[12];
3384 + /* Memory organization block. 80 */
3392 + * 4-7 Column address cycles
3393 + * 0-3 Row address cycles
3397 + u8 page_program_num;
3399 + * 4-7 Reserved (0)
3400 + * 0-3 Number of plane address bits
3402 + u8 plane_address_bits;
3404 + * 3-7: Reserved (0)
3405 + * 2: 1= read cache supported
3406 + * 1: 1 = program cache supported
3407 + * 0: 1= No multi-plane block address restrictions
3409 + u8 multi_plane_attr;
3412 + /* Electrical parameters block. 144 */
3414 + * 6-15: Reserved (0)
3415 + * 5: 1 = supports 20 ns speed grade (50 MHz)
3416 + * 4: 1 = supports 25 ns speed grade (40 MHz)
3417 + * 3: 1 = supports 30 ns speed grade (~33 MHz)
3418 + * 2: 1 = supports 35 ns speed grade (~28 MHz)
3419 + * 1: 1 = supports 50 ns speed grade (20 MHz)
3420 + * 0: 1 = supports 100 ns speed grade (10 MHz)
3424 + * 8-15: Reserved (0)
3425 + * 7: 1 = supports 5 ns speed grade (200 MHz)
3426 + * 6: 1 = supports 6 ns speed grade (~166 MHz)
3427 + * 5: 1 = supports 7.5 ns speed grade (~133 MHz)
3428 + * 4: 1 = supports 10 ns speed grade (100 MHz)
3429 + * 3: 1 = supports 12 ns speed grade (~83 MHz)
3430 + * 2: 1 = supports 15 ns speed grade (~66 MHz)
3431 + * 1: 1 = supports 25 ns speed grade (40 MHz)
3432 + * 0: 1 = supports 30 ns speed grade (~33 MHz)
3434 + u16 toggle_ddr_speed;
3436 + * 6-15: Reserved (0)
3437 + * 5: 1 = supports 10 ns speed grade (100 MHz)
3438 + * 4: 1 = supports 12 ns speed grade (~83 MHz)
3439 + * 3: 1 = supports 15 ns speed grade (~66 MHz)
3440 + * 2: 1 = supports 20 ns speed grade (50 MHz)
3441 + * 1: 1 = supports 30 ns speed grade (~33 MHz)
3442 + * 0: 1 = supports 50 ns speed grade (20 MHz)
3444 + u16 sync_ddr_speed;
3446 + u8 toggle_ddr_features;
3448 + * 2-7: Reserved (0)
3449 + * 1: Device supports CK stopped for data input
3450 + * 0: tCAD value to use
3452 + u8 sync_ddr_features;
3456 + u16 tR_multi_plane;
3458 + u16 io_pin_capacitance;
3459 + u16 input_pin_capacitance;
3460 + u16 ck_pin_capacitance;
3462 + * 3-7: Reserved (0)
3463 + * 2: 1 = supports 18 ohm drive strength
3464 + * 1: 1 = supports 25 ohm drive strength
3465 + * 0: 1 = supports 35ohm/50ohm drive strength
3467 + u8 drive_strength;
3471 + /* ECC and endurance block. 208 */
3472 + u8 target_begin_valid_blocks;
3473 + u16 valid_block_endurance;
3475 + * Byte 0: Number of bits ECC correctability
3476 + * Byte 1: Codeword size
3477 + * Byte 2-3: Bad blocks maximum per LUN
3478 + * Byte 4-5: Block endurance
3479 + * Byte 6-7: Reserved (0)
3481 + u8 endurance_block0[8];
3482 + u8 endurance_block1[8];
3483 + u8 endurance_block2[8];
3484 + u8 endurance_block3[8];
3487 + /* Reserved. 272 */
3488 + u8 reserved6[148];
3490 + /* Vendor specific block. 420 */
3491 + u16 vendor_revision;
3492 + u8 vendor_spec[88];
3494 + /* CRC for Parameter Page. 510 */
3498 +/* parallel nand io width */
3499 +enum nand_io_width {
3504 +/* all supported nand timing types */
3505 +enum nand_timing_type {
3507 + NAND_TIMING_SYNC_DDR,
3508 + NAND_TIMING_TOGGLE_DDR,
3509 + NAND_TIMING_NVDDR2
3512 +/* nand basic commands */
3516 + short read_status;
3517 + short read_param_page;
3518 + short set_feature;
3519 + short get_feature;
3522 + short random_out_1st;
3523 + short random_out_2nd;
3524 + short program_1st;
3525 + short program_2nd;
3529 + short read_cache_last;
3530 + short program_cache;
3534 + * addressing for nand physical address
3535 + * @row_bit_start: row address start bit
3536 + * @block_bit_start: block address start bit
3537 + * @plane_bit_start: plane address start bit
3538 + * @lun_bit_start: lun address start bit
3540 +struct nand_addressing {
3542 + u8 block_bit_start;
3543 + u8 plane_bit_start;
3548 + * nand operations status
3549 + * @array_busy: indicates device array operation busy
3550 + * @write_protect: indicates the device cannot be wrote or erased
3551 + * @erase_fail: indicates erase operation fail
3552 + * @program_fail: indicates program operation fail
3554 +struct nand_status {
3562 + * nand endurance information
3563 + * @pe_cycle: max program/erase cycle for nand stored data stability
3564 + * @ecc_req: ecc strength required for the nand, measured per 1KB
3565 + * @max_bitflips: bitflips is ecc corrected bits,
3566 + * max_bitflips is the threshold for nand stored data stability
3567 + * if corrected bits is over max_bitflips, stored data must be moved
3568 + * to another good block
3570 +struct nand_endurance {
3576 +/* wait for nand busy type */
3577 +enum nand_wait_type {
3579 + NAND_WAIT_POLLING,
3583 +/* each nand array operations time */
3584 +struct nand_array_timing {
3596 +/* nand sdr interface timing required */
3597 +struct nand_sdr_timing {
3613 +/* nand onfi ddr (nvddr) interface timing required */
3614 +struct nand_onfi_timing {
3623 +/* nand toggle ddr (toggle 1.0) interface timing required */
3624 +struct nand_toggle_timing {
3643 +/* nand basic device information */
3644 +struct nand_device {
3658 + int min_program_pages;
3659 + struct nand_cmds *cmds;
3660 + struct nand_addressing *addressing;
3661 + struct nand_status *status;
3662 + struct nand_endurance *endurance;
3663 + struct nand_array_timing *array_timing;
3666 +#define NAND_DEVICE(_name, _id, _id_len, _io_width, _row_cycle, \
3667 + _col_cycle, _target_num, _lun_num, _plane_num, \
3668 + _block_num, _block_size, _page_size, _spare_size, \
3669 + _min_program_pages, _cmds, _addressing, _status, \
3670 + _endurance, _array_timing) \
3672 + _name, _id, _id_len, _io_width, _row_cycle, \
3673 + _col_cycle, _target_num, _lun_num, _plane_num, \
3674 + _block_num, _block_size, _page_size, _spare_size, \
3675 + _min_program_pages, _cmds, _addressing, _status, \
3676 + _endurance, _array_timing \
3679 +#define MAX_ID_NUM sizeof(u64)
3681 +#define NAND_PACK_ID(id0, id1, id2, id3, id4, id5, id6, id7) \
3683 + id0 | id1 << 8 | id2 << 16 | id3 << 24 | \
3684 + (u64)id4 << 32 | (u64)id5 << 40 | \
3685 + (u64)id6 << 48 | (u64)id7 << 56 \
3688 +#define NAND_UNPACK_ID(id, ids, len) \
3691 + for (_i = 0; _i < len; _i++) \
3692 + ids[_i] = id >> (_i << 3) & 0xff; \
3695 +static inline int nand_block_pages(struct nand_device *device)
3697 + return div_down(device->block_size, device->page_size);
3700 +static inline int nand_lun_blocks(struct nand_device *device)
3702 + return device->plane_num * device->block_num;
3705 +static inline int nand_target_blocks(struct nand_device *device)
3707 + return device->lun_num * device->plane_num * device->block_num;
3710 +static inline int nand_total_blocks(struct nand_device *device)
3712 + return device->target_num * device->lun_num * device->plane_num *
3713 + device->block_num;
3716 +struct nand_device *nand_get_device(int index);
3717 +#endif /* __NAND_DEVICE_H__ */
3719 +++ b/drivers/mtd/nandx/core/nfi.h
3722 + * Copyright (C) 2017 MediaTek Inc.
3723 + * Licensed under either
3724 + * BSD Licence, (see NOTICE for more details)
3725 + * GNU General Public License, version 2.0, (see NOTICE for more details)
3731 +struct nfi_format {
3739 + int sector_spare_size;
3740 + int fdm_size; /*for sector*/
3743 + int ecc_parity_size; /*for sector*/
3745 + int (*select_chip)(struct nfi *nfi, int cs);
3746 + int (*set_format)(struct nfi *nfi, struct nfi_format *format);
3747 + int (*set_timing)(struct nfi *nfi, void *timing, int type);
3748 + int (*nfi_ctrl)(struct nfi *nfi, int cmd, void *args);
3750 + int (*reset)(struct nfi *nfi);
3751 + int (*send_cmd)(struct nfi *nfi, short cmd);
3752 + int (*send_addr)(struct nfi *nfi, int col, int row,
3753 + int col_cycle, int row_cycle);
3754 + int (*trigger)(struct nfi *nfi);
3756 + int (*write_page)(struct nfi *nfi, u8 *data, u8 *fdm);
3757 + int (*write_bytes)(struct nfi *nfi, u8 *data, int count);
3758 + int (*read_sectors)(struct nfi *nfi, u8 *data, u8 *fdm,
3760 + int (*read_bytes)(struct nfi *nfi, u8 *data, int count);
3762 + int (*wait_ready)(struct nfi *nfi, int type, u32 timeout);
3764 + int (*enable_randomizer)(struct nfi *nfi, u32 row, bool encode);
3765 + int (*disable_randomizer)(struct nfi *nfi);
3768 +struct nfi *nfi_init(struct nfi_resource *res);
3769 +void nfi_exit(struct nfi *nfi);
3771 +#endif /* __NFI_H__ */
3773 +++ b/drivers/mtd/nandx/core/nfi/nfi_base.c
3776 + * Copyright (C) 2017 MediaTek Inc.
3777 + * Licensed under either
3778 + * BSD Licence, (see NOTICE for more details)
3779 + * GNU General Public License, version 2.0, (see NOTICE for more details)
3783 + * nfi_base.c - the base logic for nfi to access nand flash
3785 + * slc/mlc/tlc could use same code to access nand
3786 + * of course, there is still some work to do
3787 + * even for spi nand, there should be a chance to integrate code together
3790 +#include "nandx_util.h"
3791 +#include "nandx_core.h"
3792 +#include "../nfi.h"
3793 +#include "../nand_device.h"
3794 +#include "nfi_regs.h"
3795 +#include "nfiecc.h"
3796 +#include "nfi_base.h"
3798 +static const int spare_size_mt7622[] = {
3802 +#define RAND_SEED_SHIFT(op) \
3803 + ((op) == RAND_ENCODE ? ENCODE_SEED_SHIFT : DECODE_SEED_SHIFT)
3804 +#define RAND_EN(op) \
3805 + ((op) == RAND_ENCODE ? RAN_ENCODE_EN : RAN_DECODE_EN)
3807 +#define SS_SEED_NUM 128
3808 +static u16 ss_randomizer_seed[SS_SEED_NUM] = {
3809 + 0x576A, 0x05E8, 0x629D, 0x45A3, 0x649C, 0x4BF0, 0x2342, 0x272E,
3810 + 0x7358, 0x4FF3, 0x73EC, 0x5F70, 0x7A60, 0x1AD8, 0x3472, 0x3612,
3811 + 0x224F, 0x0454, 0x030E, 0x70A5, 0x7809, 0x2521, 0x484F, 0x5A2D,
3812 + 0x492A, 0x043D, 0x7F61, 0x3969, 0x517A, 0x3B42, 0x769D, 0x0647,
3813 + 0x7E2A, 0x1383, 0x49D9, 0x07B8, 0x2578, 0x4EEC, 0x4423, 0x352F,
3814 + 0x5B22, 0x72B9, 0x367B, 0x24B6, 0x7E8E, 0x2318, 0x6BD0, 0x5519,
3815 + 0x1783, 0x18A7, 0x7B6E, 0x7602, 0x4B7F, 0x3648, 0x2C53, 0x6B99,
3816 + 0x0C23, 0x67CF, 0x7E0E, 0x4D8C, 0x5079, 0x209D, 0x244A, 0x747B,
3817 + 0x350B, 0x0E4D, 0x7004, 0x6AC3, 0x7F3E, 0x21F5, 0x7A15, 0x2379,
3818 + 0x1517, 0x1ABA, 0x4E77, 0x15A1, 0x04FA, 0x2D61, 0x253A, 0x1302,
3819 + 0x1F63, 0x5AB3, 0x049A, 0x5AE8, 0x1CD7, 0x4A00, 0x30C8, 0x3247,
3820 + 0x729C, 0x5034, 0x2B0E, 0x57F2, 0x00E4, 0x575B, 0x6192, 0x38F8,
3821 + 0x2F6A, 0x0C14, 0x45FC, 0x41DF, 0x38DA, 0x7AE1, 0x7322, 0x62DF,
3822 + 0x5E39, 0x0E64, 0x6D85, 0x5951, 0x5937, 0x6281, 0x33A1, 0x6A32,
3823 + 0x3A5A, 0x2BAC, 0x743A, 0x5E74, 0x3B2E, 0x7EC7, 0x4FD2, 0x5D28,
3824 + 0x751F, 0x3EF8, 0x39B1, 0x4E49, 0x746B, 0x6EF6, 0x44BE, 0x6DB7
3828 +static void dump_register(void *regs)
3832 + pr_info("registers:\n");
3833 + for (i = 0; i < 0x600; i += 0x10) {
3834 + pr_info(" address 0x%X : %X %X %X %X\n",
3835 + (u32)((unsigned long)regs + i),
3836 + (u32)readl(regs + i),
3837 + (u32)readl(regs + i + 0x4),
3838 + (u32)readl(regs + i + 0x8),
3839 + (u32)readl(regs + i + 0xC));
3844 +static int nfi_enable_randomizer(struct nfi *nfi, u32 row, bool encode)
+/* Program and enable the HW randomizer for page @row.
+ * The per-page seed is taken from ss_randomizer_seed[row % SS_SEED_NUM];
+ * encode/decode direction selects the seed shift and enable bit.
+ */
3846 +	struct nfi_base *nb = nfi_to_base(nfi);
3847 +	enum randomizer_op op = RAND_ENCODE;
3848 +	void *regs = nb->res.nfi_regs;
3854 +	/* randomizer type and reseed type setup */
3855 +	val = readl(regs + NFI_CNFG);
3856 +	val |= CNFG_RAND_SEL | CNFG_RESEED_SEC_EN;
3857 +	writel(val, regs + NFI_CNFG);
3859 +	/* randomizer seed and type setup */
3860 +	val = ss_randomizer_seed[row % SS_SEED_NUM] & RAN_SEED_MASK;
3861 +	val <<= RAND_SEED_SHIFT(op);
3862 +	val |= RAND_EN(op);
3863 +	writel(val, regs + NFI_RANDOM_CNFG);
3868 +static int nfi_disable_randomizer(struct nfi *nfi)
3870 + struct nfi_base *nb = nfi_to_base(nfi);
3872 + writel(0, nb->res.nfi_regs + NFI_RANDOM_CNFG);
3877 +static int nfi_irq_handler(int irq, void *data)
+/* NFI interrupt handler: if a pending status bit is also enabled, mask the
+ * serviced source(s) in NFI_INTR_EN and wake the waiter via nb->done.
+ */
3879 +	struct nfi_base *nb = (struct nfi_base *) data;
3880 +	void *regs = nb->res.nfi_regs;
3883 +	status = readw(regs + NFI_INTR_STA);
3884 +	en = readw(regs + NFI_INTR_EN);
3886 +	if (!(status & en))
3887 +		return NAND_IRQ_NONE;
+	/* disable the interrupt sources that just fired; keep the others enabled */
3889 +	writew(~status & en, regs + NFI_INTR_EN);
3891 +	nandx_event_complete(nb->done);
3893 +	return NAND_IRQ_HANDLED;
3896 +static int nfi_select_chip(struct nfi *nfi, int cs)
3898 + struct nfi_base *nb = nfi_to_base(nfi);
3900 + writel(cs, nb->res.nfi_regs + NFI_CSEL);
3905 +static inline void set_op_mode(void *regs, u32 mode)
3907 + u32 val = readl(regs + NFI_CNFG);
3909 + val &= ~CNFG_OP_MODE_MASK;
3912 + writel(val, regs + NFI_CNFG);
3915 +static int nfi_reset(struct nfi *nfi)
3917 +	struct nfi_base *nb = nfi_to_base(nfi);
3918 +	void *regs = nb->res.nfi_regs;
3921 +	/* Issue an NFI reset to reset all registers and force the NFI
3922 +	 * master to be terminated early
3924 +	writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
3926 +	/* check state of NFI internal FSM and NAND interface FSM */
3927 +	ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA, val,
3928 +					!(val & MASTER_BUS_BUSY),
3931 +		pr_info("nfi reset timeout...\n");
+	/* reset again after the bus settles, then release STRDATA */
3933 +	writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
3934 +	writew(STAR_DE, regs + NFI_STRDATA);
3939 +static void bad_mark_swap(struct nfi *nfi, u8 *buf, u8 *fdm)
3941 + struct nfi_base *nb = nfi_to_base(nfi);
3942 + u32 start_sector = div_down(nb->col, nfi->sector_size);
3943 + u32 data_mark_pos;
3946 + /* raw access, no need to do swap. */
3953 + if (nb->bad_mark_ctrl.sector < start_sector ||
3954 + nb->bad_mark_ctrl.sector > start_sector + nb->rw_sectors)
3957 + data_mark_pos = nb->bad_mark_ctrl.position +
3958 + (nb->bad_mark_ctrl.sector - start_sector) *
3962 + *fdm = *(buf + data_mark_pos);
3963 + *(buf + data_mark_pos) = temp;
3966 +static u8 *fdm_shift(struct nfi *nfi, u8 *fdm, int sector)
3968 + struct nfi_base *nb = nfi_to_base(nfi);
3974 + /* map the sector's FDM data to free oob:
3975 + * the beginning of the oob area stores the FDM data of bad mark sectors
3977 + if (sector < nb->bad_mark_ctrl.sector)
3978 + pos = fdm + (sector + 1) * nfi->fdm_size;
3979 + else if (sector == nb->bad_mark_ctrl.sector)
3982 + pos = fdm + sector * nfi->fdm_size;
3988 +static void set_bad_mark_ctrl(struct nfi_base *nb)
3990 + int temp, page_size = nb->format.page_size;
3992 + nb->bad_mark_ctrl.bad_mark_swap = bad_mark_swap;
3993 + nb->bad_mark_ctrl.fdm_shift = fdm_shift;
3995 + temp = nb->nfi.sector_size + nb->nfi.sector_spare_size;
3996 + nb->bad_mark_ctrl.sector = div_down(page_size, temp);
3997 + nb->bad_mark_ctrl.position = reminder(page_size, temp);
4000 +/* NOTE: check if page_size valid future */
4001 +static int setup_format(struct nfi_base *nb, int spare_idx)
4003 + struct nfi *nfi = &nb->nfi;
4004 + u32 page_size = nb->format.page_size;
4007 + switch (page_size) {
4009 + val = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
4013 + if (nfi->sector_size == 512)
4014 + val = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
4016 + val = PAGEFMT_512_2K;
4021 + if (nfi->sector_size == 512)
4022 + val = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
4024 + val = PAGEFMT_2K_4K;
4029 + if (nfi->sector_size == 512)
4030 + val = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
4032 + val = PAGEFMT_4K_8K;
4037 + val = PAGEFMT_8K_16K;
4041 + pr_info("invalid page len: %d\n", page_size);
4045 + val |= spare_idx << PAGEFMT_SPARE_SHIFT;
4046 + val |= nfi->fdm_size << PAGEFMT_FDM_SHIFT;
4047 + val |= nfi->fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
4048 + writel(val, nb->res.nfi_regs + NFI_PAGEFMT);
4050 + if (nb->custom_sector_en) {
4051 + val = nfi->sector_spare_size + nfi->sector_size;
4052 + val |= SECCUS_SIZE_EN;
4053 + writel(val, nb->res.nfi_regs + NFI_SECCUS_SIZE);
4059 +static int adjust_spare(struct nfi_base *nb, int *spare)
4061 + int multi = nb->nfi.sector_size == 512 ? 1 : 2;
4062 + int i, count = nb->caps->spare_size_num;
4064 + if (*spare >= nb->caps->spare_size[count - 1] * multi) {
4065 + *spare = nb->caps->spare_size[count - 1] * multi;
4069 + if (*spare < nb->caps->spare_size[0] * multi)
4072 + for (i = 1; i < count; i++) {
4073 + if (*spare < nb->caps->spare_size[i] * multi) {
4074 + *spare = nb->caps->spare_size[i - 1] * multi;
4082 +static int nfi_set_format(struct nfi *nfi, struct nfi_format *format)
+/* Apply a page/spare layout: derive sector spare size, ECC strength and
+ * FDM size from @format and the controller caps, clamp the requested ECC
+ * strength into [min_ecc, max_ecc], then program the page format registers
+ * via setup_format().
+ */
4084 +	struct nfi_base *nb = nfi_to_base(nfi);
4085 +	struct nfiecc *ecc = nb->ecc;
4086 +	int ecc_strength = format->ecc_req;
4087 +	int min_fdm, min_ecc, max_ecc;
4088 +	u32 temp, page_sectors;
4089 +	int spare_idx = 0;
4092 +#if NANDX_BULK_IO_USE_DRAM
4093 +	nb->buf = NANDX_NFI_BUF_ADDR;
4095 +	nb->buf = mem_alloc(1, format->page_size + format->spare_size);
4101 +	nb->format = *format;
4103 +	/* ToBeFixed: for spi nand, now sector size is 512,
4104 +	 * it should be same with slc.
4106 +	nfi->sector_size = 512;
4107 +	/* format->ecc_req is the requirement per 1KB */
4108 +	ecc_strength >>= 1;
4110 +	page_sectors = div_down(format->page_size, nfi->sector_size);
4111 +	nfi->sector_spare_size = div_down(format->spare_size, page_sectors);
4113 +	if (!nb->custom_sector_en) {
4114 +		spare_idx = adjust_spare(nb, &nfi->sector_spare_size);
4115 +		if (spare_idx < 0)
4119 +	/* calculate ecc strength and fdm size */
4120 +	temp = (nfi->sector_spare_size - nb->caps->max_fdm_size) * 8;
4121 +	min_ecc = div_down(temp, nb->caps->ecc_parity_bits);
4122 +	min_ecc = ecc->adjust_strength(ecc, min_ecc);
4126 +	temp = div_up(nb->res.min_oob_req, page_sectors);
4127 +	temp = (nfi->sector_spare_size - temp) * 8;
4128 +	max_ecc = div_down(temp, nb->caps->ecc_parity_bits);
4129 +	max_ecc = ecc->adjust_strength(ecc, max_ecc);
4133 +	temp = div_up(temp * nb->caps->ecc_parity_bits, 8);
4134 +	temp = nfi->sector_spare_size - temp;
4135 +	min_fdm = min_t(u32, temp, (u32)nb->caps->max_fdm_size);
4137 +	if (ecc_strength > max_ecc) {
4138 +		pr_info("required ecc strength %d, max supported %d\n",
4139 +			ecc_strength, max_ecc);
4140 +		nfi->ecc_strength = max_ecc;
4141 +		nfi->fdm_size = min_fdm;
4142 +	} else if (format->ecc_req < min_ecc) {
4143 +		nfi->ecc_strength = min_ecc;
4144 +		nfi->fdm_size = nb->caps->max_fdm_size;
4146 +		ecc_strength = ecc->adjust_strength(ecc, ecc_strength);
4147 +		if (ecc_strength < 0)
4150 +		nfi->ecc_strength = ecc_strength;
4151 +		temp = div_up(ecc_strength * nb->caps->ecc_parity_bits, 8);
4152 +		nfi->fdm_size = nfi->sector_spare_size - temp;
4155 +	nb->page_sectors = div_down(format->page_size, nfi->sector_size);
4157 +	/* some IC has fixed fdm_ecc_size; if not assigned, set to fdm_size */
4158 +	nfi->fdm_ecc_size = nb->caps->fdm_ecc_size ? : nfi->fdm_size;
4160 +	nfi->ecc_parity_size = div_up(nfi->ecc_strength *
4161 +				      nb->caps->ecc_parity_bits,
4163 +	set_bad_mark_ctrl(nb);
4165 +	pr_debug("sector_size: %d\n", nfi->sector_size);
4166 +	pr_debug("sector_spare_size: %d\n", nfi->sector_spare_size);
4167 +	pr_debug("fdm_size: %d\n", nfi->fdm_size);
4168 +	pr_debug("fdm_ecc_size: %d\n", nfi->fdm_ecc_size);
4169 +	pr_debug("ecc_strength: %d\n", nfi->ecc_strength);
4170 +	pr_debug("ecc_parity_size: %d\n", nfi->ecc_parity_size);
4172 +	return setup_format(nb, spare_idx);
4175 +static int nfi_ctrl(struct nfi *nfi, int cmd, void *args)
+/* Runtime control knob dispatcher: @cmd selects which nfi_base flag/mode to
+ * set from @args (bool or enum, depending on the command). Unknown commands
+ * return -EOPNOTSUPP.
+ */
4177 +	struct nfi_base *nb = nfi_to_base(nfi);
4181 +	case NFI_CTRL_DMA:
4182 +		nb->dma_en = *(bool *)args;
4185 +	case NFI_CTRL_AUTOFORMAT:
4186 +		nb->auto_format = *(bool *)args;
4189 +	case NFI_CTRL_NFI_IRQ:
4190 +		nb->nfi_irq_en = *(bool *)args;
4193 +	case NFI_CTRL_PAGE_IRQ:
4194 +		nb->page_irq_en = *(bool *)args;
4197 +	case NFI_CTRL_BAD_MARK_SWAP:
4198 +		nb->bad_mark_swap_en = *(bool *)args;
4201 +	case NFI_CTRL_ECC:
4202 +		nb->ecc_en = *(bool *)args;
4205 +	case NFI_CTRL_ECC_MODE:
4206 +		nb->ecc_mode = *(enum nfiecc_mode *)args;
4209 +	case NFI_CTRL_ECC_CLOCK:
4210 +		/* NOTE: it seems there is nothing to do here;
4211 +		 * if a new IC needs it, add that logic
4213 +		nb->ecc_clk_en = *(bool *)args;
4216 +	case NFI_CTRL_ECC_IRQ:
4217 +		nb->ecc_irq_en = *(bool *)args;
4220 +	case NFI_CTRL_ECC_DECODE_MODE:
4221 +		nb->ecc_deccon = *(enum nfiecc_deccon *)args;
4225 +		pr_info("invalid arguments.\n");
4226 +		ret = -EOPNOTSUPP;
4230 +	pr_debug("%s: set cmd(%d) to %d\n", __func__, cmd, *(int *)args);
4234 +static int nfi_send_cmd(struct nfi *nfi, short cmd)
4236 + struct nfi_base *nb = nfi_to_base(nfi);
4237 + void *regs = nb->res.nfi_regs;
4241 + pr_debug("%s: cmd 0x%x\n", __func__, cmd);
4246 + set_op_mode(regs, nb->op_mode);
4248 + writel(cmd, regs + NFI_CMD);
4250 + ret = readl_poll_timeout_atomic(regs + NFI_STA,
4251 + val, !(val & STA_CMD),
4254 + pr_info("send cmd 0x%x timeout\n", cmd);
4259 +static int nfi_send_addr(struct nfi *nfi, int col, int row,
4260 + int col_cycle, int row_cycle)
4262 + struct nfi_base *nb = nfi_to_base(nfi);
4263 + void *regs = nb->res.nfi_regs;
4267 + pr_debug("%s: col 0x%x, row 0x%x, col_cycle 0x%x, row_cycle 0x%x\n",
4268 + __func__, col, row, col_cycle, row_cycle);
4273 + writel(col, regs + NFI_COLADDR);
4274 + writel(row, regs + NFI_ROWADDR);
4275 + writel(col_cycle | (row_cycle << ROW_SHIFT), regs + NFI_ADDRNOB);
4277 + ret = readl_poll_timeout_atomic(regs + NFI_STA,
4278 + val, !(val & STA_ADDR),
4281 + pr_info("send address timeout\n");
4286 +static int nfi_trigger(struct nfi *nfi)
4288 + /* Nothing need to do. */
4292 +static inline int wait_io_ready(void *regs)
4297 + ret = readl_poll_timeout_atomic(regs + NFI_PIO_DIRDY,
4298 + val, val & PIO_DI_RDY,
4301 + pr_info("wait io ready timeout\n");
4306 +static int wait_ready_irq(struct nfi_base *nb, u32 timeout)
+/* Wait for the NAND busy-to-ready transition using the BUSY_RETURN
+ * interrupt; falls back to a direct NFI_STA check to avoid a lost-event
+ * race before sleeping on nb->done.
+ */
4308 +	void *regs = nb->res.nfi_regs;
4312 +	writel(0xf1, regs + NFI_CNRNB);
4313 +	nandx_event_init(nb->done);
4315 +	writel(INTR_BUSY_RETURN_EN, (void *)(regs + NFI_INTR_EN));
4318 +	 * check if nand has already been ready,
4319 +	 * to avoid an issue caused by a missing irq-event.
4321 +	val = readl(regs + NFI_STA);
4322 +	if (val & STA_BUSY2READY) {
4323 +		readl(regs + NFI_INTR_STA);
4324 +		writel(0, (void *)(regs + NFI_INTR_EN));
4328 +	ret = nandx_event_wait_complete(nb->done, timeout);
4330 +	writew(0, regs + NFI_CNRNB);
4334 +static void wait_ready_twhr2(struct nfi_base *nb, u32 timeout)
4336 + /* NOTE: this for tlc */
4339 +static int wait_ready_poll(struct nfi_base *nb, u32 timeout)
4341 + void *regs = nb->res.nfi_regs;
4345 + writel(0x21, regs + NFI_CNRNB);
4346 + ret = readl_poll_timeout_atomic(regs + NFI_STA, val,
4347 + val & STA_BUSY2READY,
4349 + writew(0, regs + NFI_CNRNB);
4354 +static int nfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
4356 + struct nfi_base *nb = nfi_to_base(nfi);
4360 + case NAND_WAIT_IRQ:
4361 + if (nb->nfi_irq_en)
4362 + ret = wait_ready_irq(nb, timeout);
4368 + case NAND_WAIT_POLLING:
4369 + ret = wait_ready_poll(nb, timeout);
4372 + case NAND_WAIT_TWHR2:
4373 + wait_ready_twhr2(nb, timeout);
4383 + pr_info("%s: type 0x%x, timeout 0x%x\n",
4384 + __func__, type, timeout);
4389 +static int enable_ecc_decode(struct nfi_base *nb, int sectors)
4391 + struct nfi *nfi = &nb->nfi;
4392 + struct nfiecc *ecc = nb->ecc;
4394 + ecc->config.op = ECC_DECODE;
4395 + ecc->config.mode = nb->ecc_mode;
4396 + ecc->config.deccon = nb->ecc_deccon;
4397 + ecc->config.sectors = sectors;
4398 + ecc->config.len = nfi->sector_size + nfi->fdm_ecc_size;
4399 + ecc->config.strength = nfi->ecc_strength;
4401 + return ecc->enable(ecc);
4404 +static int enable_ecc_encode(struct nfi_base *nb)
4406 + struct nfiecc *ecc = nb->ecc;
4407 + struct nfi *nfi = &nb->nfi;
4409 + ecc->config.op = ECC_ENCODE;
4410 + ecc->config.mode = nb->ecc_mode;
4411 + ecc->config.len = nfi->sector_size + nfi->fdm_ecc_size;
4412 + ecc->config.strength = nfi->ecc_strength;
4414 + return ecc->enable(ecc);
4417 +static void read_fdm(struct nfi_base *nb, u8 *fdm, int start_sector,
4420 + void *regs = nb->res.nfi_regs;
4421 + int j, i = start_sector;
4425 + for (; i < start_sector + sectors; i++) {
4426 + if (nb->bad_mark_swap_en)
4427 + buf = nb->bad_mark_ctrl.fdm_shift(&nb->nfi, fdm, i);
4429 + vall = readl(regs + NFI_FDML(i));
4430 + valm = readl(regs + NFI_FDMM(i));
4432 + for (j = 0; j < nb->nfi.fdm_size; j++)
4433 + *buf++ = (j >= 4 ? valm : vall) >> ((j & 3) << 3);
4437 +static void write_fdm(struct nfi_base *nb, u8 *fdm)
4439 + struct nfi *nfi = &nb->nfi;
4440 + void *regs = nb->res.nfi_regs;
4445 + for (i = 0; i < nb->page_sectors; i++) {
4446 + if (nb->bad_mark_swap_en)
4447 + buf = nb->bad_mark_ctrl.fdm_shift(nfi, fdm, i);
4450 + for (j = 0; j < 4; j++)
4451 + vall |= (j < nfi->fdm_size ? *buf++ : 0xff) << (j * 8);
4452 + writel(vall, regs + NFI_FDML(i));
4455 + for (j = 0; j < 4; j++)
4456 + valm |= (j < nfi->fdm_size ? *buf++ : 0xff) << (j * 8);
4457 + writel(valm, regs + NFI_FDMM(i));
4461 +/* NOTE: pio not use auto format */
4462 +static int pio_rx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
4465 + struct nfiecc_status ecc_status;
4466 + struct nfi *nfi = &nb->nfi;
4467 + void *regs = nb->res.nfi_regs;
4468 + u32 val, bitflips = 0;
4472 + val = readl(regs + NFI_CNFG) | CNFG_BYTE_RW;
4473 + writel(val, regs + NFI_CNFG);
4475 + len = nfi->sector_size + nfi->sector_spare_size;
4478 + for (i = 0; i < len; i++) {
4479 + ret = wait_io_ready(regs);
4483 + nb->buf[i] = readb(regs + NFI_DATAR);
4486 + /* TODO: do error handle for autoformat setting of pio */
4488 + for (i = 0; i < sectors; i++) {
4489 + buf = nb->buf + i * (nfi->sector_size +
4490 + nfi->sector_spare_size);
4491 + ret = nb->ecc->correct_data(nb->ecc, &ecc_status,
4494 + memcpy(data + i * nfi->sector_size,
4495 + buf, nfi->sector_size);
4497 + memcpy(fdm + i * nfi->fdm_size,
4498 + buf + nfi->sector_size, nfi->fdm_size);
4500 + ret = nb->ecc->decode_status(nb->ecc, i, 1);
4504 + bitflips = max_t(int, (int)bitflips, ret);
4511 + /* raw read, only data not null, and its length should be $len */
4513 + memcpy(data, nb->buf, len);
4518 +static int pio_tx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
4521 + struct nfi *nfi = &nb->nfi;
4522 + void *regs = nb->res.nfi_regs;
4526 + val = readw(regs + NFI_CNFG) | CNFG_BYTE_RW;
4527 + writew(val, regs + NFI_CNFG);
4529 + len = nb->ecc_en ? nfi->sector_size :
4530 + nfi->sector_size + nfi->sector_spare_size;
4533 + /* data shouldn't null,
4534 + * and if ecc enable ,fdm been written in prepare process
4536 + for (i = 0; i < len; i++) {
4537 + ret = wait_io_ready(regs);
4540 + writeb(data[i], regs + NFI_DATAW);
4546 +static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
4549 + u32 empty = readl(nb->res.nfi_regs + NFI_STA) & STA_EMP_PAGE;
4552 + pr_info("empty page!\n");
4559 +static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
4560 + u8 *fdm, bool read)
4562 + void *regs = nb->res.nfi_regs;
4563 + u32 len = nb->nfi.sector_size * sectors;
4564 + bool irq_en = nb->dma_en && nb->nfi_irq_en;
4569 + nb->rw_sectors = sectors;
4572 + nandx_event_init(nb->done);
4573 + writel(INTR_AHB_DONE_EN, regs + NFI_INTR_EN);
4576 + val = readw(regs + NFI_CNFG);
4578 + val |= CNFG_READ_EN;
4580 + val &= ~CNFG_READ_EN;
4582 + /* as design, now, auto format enabled when ecc enabled */
4584 + val |= CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
4587 + ret = enable_ecc_decode(nb, sectors);
4589 + ret = enable_ecc_encode(nb);
4592 + pr_info("%s: ecc enable %s fail!\n", __func__,
4593 + read ? "decode" : "encode");
4597 + val &= ~(CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN);
4600 + if (!read && nb->bad_mark_swap_en)
4601 + nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
4603 + if (!nb->ecc_en && read)
4604 + len += sectors * nb->nfi.sector_spare_size;
4607 + val |= CNFG_DMA_BURST_EN | CNFG_AHB;
4610 + dma_addr = (void *)(unsigned long)nandx_dma_map(
4611 + nb->res.dev, nb->buf,
4612 + (u64)len, NDMA_FROM_DEV);
4614 + memcpy(nb->buf, data, len);
4615 + dma_addr = (void *)(unsigned long)nandx_dma_map(
4616 + nb->res.dev, nb->buf,
4617 + (u64)len, NDMA_TO_DEV);
4620 + writel((unsigned long)dma_addr, (void *)regs + NFI_STRADDR);
4622 + nb->access_len = len;
4623 + nb->dma_addr = dma_addr;
4626 + if (nb->ecc_en && !read && fdm)
4627 + write_fdm(nb, fdm);
4629 + writew(val, regs + NFI_CNFG);
4630 + /* setup R/W sector number */
4631 + writel(sectors << CON_SEC_SHIFT, regs + NFI_CON);
4636 +static void rw_trigger(struct nfi_base *nb, bool read)
4638 + void *regs = nb->res.nfi_regs;
4641 + val = read ? CON_BRD : CON_BWR;
4642 + val |= readl(regs + NFI_CON);
4643 + writel(val, regs + NFI_CON);
4645 + writel(STAR_EN, regs + NFI_STRDATA);
4648 +static int rw_wait_done(struct nfi_base *nb, int sectors, bool read)
4650 + void *regs = nb->res.nfi_regs;
4651 + bool irq_en = nb->dma_en && nb->nfi_irq_en;
4656 + ret = nandx_event_wait_complete(nb->done, NFI_TIMEOUT);
4658 + writew(0, regs + NFI_INTR_EN);
4664 + ret = readl_poll_timeout_atomic(regs + NFI_BYTELEN, val,
4665 + ADDRCNTR_SEC(val) >=
4668 + /* HW issue: if not wait ahb done, need polling bus busy */
4669 + if (!ret && !irq_en)
4670 + ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA,
4676 + ret = readl_poll_timeout_atomic(regs + NFI_ADDRCNTR, val,
4677 + ADDRCNTR_SEC(val) >=
4683 + pr_info("do page %s timeout\n", read ? "read" : "write");
4687 + if (read && nb->ecc_en) {
4688 + ret = nb->ecc->wait_done(nb->ecc);
4692 + return nb->ecc->decode_status(nb->ecc, 0, sectors);
4698 +static int rw_data(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
4701 + if (read && nb->dma_en && nb->ecc_en && fdm)
4702 + read_fdm(nb, fdm, 0, sectors);
4704 + if (!nb->dma_en) {
4706 + return pio_rx_data(nb, data, fdm, sectors);
4708 + return pio_tx_data(nb, data, fdm, sectors);
4714 +static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
+/* Post-transfer teardown: unmap the DMA buffer (copying data/FDM back to the
+ * caller on reads), handle the empty-page case, undo the bad-mark swap,
+ * disable ECC and clear NFI_CNFG/NFI_CON.
+ */
4722 +		nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
4723 +				(u64)nb->access_len, NDMA_FROM_DEV);
4726 +			data_len = nb->rw_sectors * nb->nfi.sector_size;
4727 +			memcpy(data, nb->buf, data_len);
4731 +				memcpy(fdm, nb->buf + data_len,
4732 +				       nb->access_len - data_len);
4734 +		if (nb->read_status == -ENANDREAD) {
4735 +			is_empty = nb->is_page_empty(nb, data, fdm,
4738 +				nb->read_status = 0;
4741 +		nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
4742 +				(u64)nb->access_len, NDMA_TO_DEV);
4746 +	/* whether reading or writing, check if a bad-mark swap is needed;
4747 +	 * for write, we need to restore data
4749 +	if (nb->bad_mark_swap_en)
4750 +		nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
4753 +		nb->ecc->disable(nb->ecc);
4755 +	writel(0, nb->res.nfi_regs + NFI_CNFG);
4756 +	writel(0, nb->res.nfi_regs + NFI_CON);
4759 +static int nfi_read_sectors(struct nfi *nfi, u8 *data, u8 *fdm,
4762 + struct nfi_base *nb = nfi_to_base(nfi);
4763 + int bitflips = 0, ret;
4765 + pr_debug("%s: read page#%d\n", __func__, nb->row);
4766 + pr_debug("%s: data address 0x%x, fdm address 0x%x, sectors 0x%x\n",
4767 + __func__, (u32)((unsigned long)data),
4768 + (u32)((unsigned long)fdm), sectors);
4770 + nb->read_status = 0;
4772 + ret = nb->rw_prepare(nb, sectors, data, fdm, true);
4776 + nb->rw_trigger(nb, true);
4779 + ret = nb->rw_wait_done(nb, sectors, true);
4782 + else if (ret == -ENANDREAD)
4783 + nb->read_status = -ENANDREAD;
4789 + ret = nb->rw_data(nb, data, fdm, sectors, true);
4791 + ret = max_t(int, ret, bitflips);
4794 + nb->rw_complete(nb, data, fdm, true);
4796 + if (nb->read_status == -ENANDREAD)
4797 + return -ENANDREAD;
4802 +int nfi_write_page(struct nfi *nfi, u8 *data, u8 *fdm)
4804 + struct nfi_base *nb = nfi_to_base(nfi);
4805 + u32 sectors = div_down(nb->format.page_size, nfi->sector_size);
4808 + pr_debug("%s: data address 0x%x, fdm address 0x%x\n",
4809 + __func__, (int)((unsigned long)data),
4810 + (int)((unsigned long)fdm));
4812 + ret = nb->rw_prepare(nb, sectors, data, fdm, false);
4816 + nb->rw_trigger(nb, false);
4818 + ret = nb->rw_data(nb, data, fdm, sectors, false);
4822 + ret = nb->rw_wait_done(nb, sectors, false);
4824 + nb->rw_complete(nb, data, fdm, false);
4829 +static int nfi_rw_bytes(struct nfi *nfi, u8 *data, int count, bool read)
4831 + struct nfi_base *nb = nfi_to_base(nfi);
4832 + void *regs = nb->res.nfi_regs;
4836 + for (i = 0; i < count; i++) {
4837 + val = readl(regs + NFI_STA) & NFI_FSM_MASK;
4838 + if (val != NFI_FSM_CUSTDATA) {
4839 + val = readw(regs + NFI_CNFG) | CNFG_BYTE_RW;
4841 + val |= CNFG_READ_EN;
4842 + writew(val, regs + NFI_CNFG);
4844 + val = div_up(count, nfi->sector_size);
4845 + val = (val << CON_SEC_SHIFT) | CON_BRD | CON_BWR;
4846 + writel(val, regs + NFI_CON);
4848 + writew(STAR_EN, regs + NFI_STRDATA);
4851 + ret = wait_io_ready(regs);
4856 + data[i] = readb(regs + NFI_DATAR);
4858 + writeb(data[i], regs + NFI_DATAW);
4861 + writel(0, nb->res.nfi_regs + NFI_CNFG);
4866 +static int nfi_read_bytes(struct nfi *nfi, u8 *data, int count)
4868 + return nfi_rw_bytes(nfi, data, count, true);
4871 +static int nfi_write_bytes(struct nfi *nfi, u8 *data, int count)
4873 + return nfi_rw_bytes(nfi, data, count, false);
4876 +/* As register map says, only when flash macro is idle,
4877 + * sw reset or nand interface change can be issued
4879 +static inline int wait_flash_macro_idle(void *regs)
4883 + return readl_poll_timeout_atomic(regs + NFI_STA, val,
4884 + val & FLASH_MACRO_IDLE, 2,
4888 +#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
4889 + ((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
4890 + (tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))
4892 +static int nfi_set_sdr_timing(struct nfi *nfi, void *timing, u8 type)
4894 + struct nand_sdr_timing *sdr = (struct nand_sdr_timing *) timing;
4895 + struct nfi_base *nb = nfi_to_base(nfi);
4896 + void *regs = nb->res.nfi_regs;
4897 + u32 tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt, tstrobe;
4901 + ret = wait_flash_macro_idle(regs);
4905 + /* turn clock rate into KHZ */
4906 + rate = nb->res.clock_1x / 1000;
4908 + tpoecs = max_t(u16, sdr->tALH, sdr->tCLH);
4909 + tpoecs = div_up(tpoecs * rate, 1000000);
4912 + tprecs = max_t(u16, sdr->tCLS, sdr->tALS);
4913 + tprecs = div_up(tprecs * rate, 1000000);
4916 + /* tc2r is in unit of 2T */
4917 + tc2r = div_up(sdr->tCR * rate, 1000000);
4918 + tc2r = div_down(tc2r, 2);
4921 + tw2r = div_up(sdr->tWHR * rate, 1000000);
4922 + tw2r = div_down(tw2r, 2);
4925 + twh = max_t(u16, sdr->tREH, sdr->tWH);
4926 + twh = div_up(twh * rate, 1000000) - 1;
4929 + twst = div_up(sdr->tWP * rate, 1000000) - 1;
4932 + trlt = div_up(sdr->tRP * rate, 1000000) - 1;
4935 + /* If tREA is bigger than tRP, setup strobe sel here */
4936 + if ((trlt + 1) * 1000000 / rate < sdr->tREA) {
4937 + tstrobe = sdr->tREA - (trlt + 1) * 1000000 / rate;
4938 + tstrobe = div_up(tstrobe * rate, 1000000);
4939 + val = readl(regs + NFI_DEBUG_CON1);
4940 + val &= ~STROBE_MASK;
4941 + val |= tstrobe << STROBE_SHIFT;
4942 + writel(val, regs + NFI_DEBUG_CON1);
4946 + * ACCON: access timing control register
4947 + * -------------------------------------
4948 + * 31:28: tpoecs, minimum required time for CS post pulling down after
4949 + * accessing the device
4950 + * 27:22: tprecs, minimum required time for CS pre pulling down before
4951 + * accessing the device
4952 + * 21:16: tc2r, minimum required time from NCEB low to NREB low
4953 + * 15:12: tw2r, minimum required time from NWEB high to NREB low.
4954 + * 11:08: twh, write enable hold time
4955 + * 07:04: twst, write wait states
4956 + * 03:00: trlt, read wait states
4958 + val = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
4959 + pr_info("acctiming: 0x%x\n", val);
4960 + writel(val, regs + NFI_ACCCON);
4962 + /* set NAND type */
4963 + writel(NAND_TYPE_ASYNC, regs + NFI_NAND_TYPE_CNFG);
4968 +static int nfi_set_timing(struct nfi *nfi, void *timing, int type)
4971 + case NAND_TIMING_SDR:
4972 + return nfi_set_sdr_timing(nfi, timing, type);
4974 + /* NOTE: for mlc/tlc */
4975 + case NAND_TIMING_SYNC_DDR:
4976 + case NAND_TIMING_TOGGLE_DDR:
4977 + case NAND_TIMING_NVDDR2:
4985 +static void set_nfi_funcs(struct nfi *nfi)
4987 + nfi->select_chip = nfi_select_chip;
4988 + nfi->set_format = nfi_set_format;
4989 + nfi->nfi_ctrl = nfi_ctrl;
4990 + nfi->set_timing = nfi_set_timing;
4992 + nfi->reset = nfi_reset;
4993 + nfi->send_cmd = nfi_send_cmd;
4994 + nfi->send_addr = nfi_send_addr;
4995 + nfi->trigger = nfi_trigger;
4997 + nfi->write_page = nfi_write_page;
4998 + nfi->write_bytes = nfi_write_bytes;
4999 + nfi->read_sectors = nfi_read_sectors;
5000 + nfi->read_bytes = nfi_read_bytes;
5002 + nfi->wait_ready = nfi_wait_ready;
5004 + nfi->enable_randomizer = nfi_enable_randomizer;
5005 + nfi->disable_randomizer = nfi_disable_randomizer;
5008 +static struct nfi_caps nfi_caps_mt7622 = {
5009 + .max_fdm_size = 8,
5010 + .fdm_ecc_size = 1,
5011 + .ecc_parity_bits = 13,
5012 + .spare_size = spare_size_mt7622,
5013 + .spare_size_num = 4,
5016 +static struct nfi_caps *nfi_get_match_data(enum mtk_ic_version ic)
5018 + /* NOTE: add other IC's data */
5019 + return &nfi_caps_mt7622;
5022 +static void set_nfi_base_params(struct nfi_base *nb)
5024 + nb->ecc_en = false;
5025 + nb->dma_en = false;
5026 + nb->nfi_irq_en = false;
5027 + nb->ecc_irq_en = false;
5028 + nb->page_irq_en = false;
5029 + nb->ecc_clk_en = false;
5030 + nb->randomize_en = false;
5031 + nb->custom_sector_en = false;
5032 + nb->bad_mark_swap_en = false;
5034 + nb->op_mode = CNFG_CUSTOM_MODE;
5035 + nb->ecc_deccon = ECC_DEC_CORRECT;
5036 + nb->ecc_mode = ECC_NFI_MODE;
5038 + nb->done = nandx_event_create();
5039 + nb->caps = nfi_get_match_data(nb->res.ic_ver);
5041 + nb->set_op_mode = set_op_mode;
5042 + nb->is_page_empty = is_page_empty;
5044 + nb->rw_prepare = rw_prepare;
5045 + nb->rw_trigger = rw_trigger;
5046 + nb->rw_wait_done = rw_wait_done;
5047 + nb->rw_data = rw_data;
5048 + nb->rw_complete = rw_complete;
5051 +struct nfi *__weak nfi_extend_init(struct nfi_base *nb)
5056 +void __weak nfi_extend_exit(struct nfi_base *nb)
5061 +struct nfi *nfi_init(struct nfi_resource *res)
5063 + struct nfiecc_resource ecc_res;
5064 + struct nfi_base *nb;
5065 + struct nfiecc *ecc;
5069 + nb = mem_alloc(1, sizeof(struct nfi_base));
5071 + pr_info("nfi alloc memory fail @%s.\n", __func__);
5077 + ret = nandx_irq_register(res->dev, res->nfi_irq_id, nfi_irq_handler,
5080 + pr_info("nfi irq register failed!\n");
5084 + /* fill ecc paras and init ecc */
5085 + ecc_res.ic_ver = nb->res.ic_ver;
5086 + ecc_res.dev = nb->res.dev;
5087 + ecc_res.irq_id = nb->res.ecc_irq_id;
5088 + ecc_res.regs = nb->res.ecc_regs;
5089 + ecc = nfiecc_init(&ecc_res);
5091 + pr_info("nfiecc init fail.\n");
5097 + set_nfi_base_params(nb);
5098 + set_nfi_funcs(&nb->nfi);
5100 + /* Assign a temp sector size for reading ID & para page.
5101 + * We may assign new value later.
5103 + nb->nfi.sector_size = 512;
5105 + /* give a default timing, and as discuss
5106 + * this is the only thing what we need do for nfi init
5107 + * if need do more, then we can add a function
5109 + writel(0x30C77FFF, nb->res.nfi_regs + NFI_ACCCON);
5111 + nfi = nfi_extend_init(nb);
5120 +void nfi_exit(struct nfi *nfi)
5122 + struct nfi_base *nb = nfi_to_base(nfi);
5124 + nandx_event_destroy(nb->done);
5125 + nfiecc_exit(nb->ecc);
5126 +#if !NANDX_BULK_IO_USE_DRAM
5127 + mem_free(nb->buf);
5129 + nfi_extend_exit(nb);
5133 +++ b/drivers/mtd/nandx/core/nfi/nfi_base.h
5136 + * Copyright (C) 2017 MediaTek Inc.
5137 + * Licensed under either
5138 + * BSD Licence, (see NOTICE for more details)
5139 + * GNU General Public License, version 2.0, (see NOTICE for more details)
5142 +#ifndef __NFI_BASE_H__
5143 +#define __NFI_BASE_H__
5145 +#define NFI_TIMEOUT 1000000
5147 +enum randomizer_op {
5152 +struct bad_mark_ctrl {
5153 + void (*bad_mark_swap)(struct nfi *nfi, u8 *buf, u8 *fdm);
5154 + u8 *(*fdm_shift)(struct nfi *nfi, u8 *fdm, int sector);
5162 + u8 ecc_parity_bits;
5163 + const int *spare_size;
5164 + u32 spare_size_num;
5169 + struct nfi_resource res;
5170 + struct nfiecc *ecc;
5171 + struct nfi_format format;
5172 + struct nfi_caps *caps;
5173 + struct bad_mark_ctrl bad_mark_ctrl;
5175 + /* page_size + spare_size */
5178 + /* used for spi nand */
5186 + /* for read/write */
5201 + bool randomize_en;
5202 + bool custom_sector_en;
5203 + bool bad_mark_swap_en;
5205 + enum nfiecc_deccon ecc_deccon;
5206 + enum nfiecc_mode ecc_mode;
5208 + void (*set_op_mode)(void *regs, u32 mode);
5209 + bool (*is_page_empty)(struct nfi_base *nb, u8 *data, u8 *fdm,
5212 + int (*rw_prepare)(struct nfi_base *nb, int sectors, u8 *data, u8 *fdm,
5214 + void (*rw_trigger)(struct nfi_base *nb, bool read);
5215 + int (*rw_wait_done)(struct nfi_base *nb, int sectors, bool read);
5216 + int (*rw_data)(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
5218 + void (*rw_complete)(struct nfi_base *nb, u8 *data, u8 *fdm, bool read);
5221 +static inline struct nfi_base *nfi_to_base(struct nfi *nfi)
5223 + return container_of(nfi, struct nfi_base, nfi);
5226 +struct nfi *nfi_extend_init(struct nfi_base *nb);
5227 +void nfi_extend_exit(struct nfi_base *nb);
5229 +#endif /* __NFI_BASE_H__ */
5231 +++ b/drivers/mtd/nandx/core/nfi/nfi_regs.h
5234 + * Copyright (C) 2017 MediaTek Inc.
5235 + * Licensed under either
5236 + * BSD Licence, (see NOTICE for more details)
5237 + * GNU General Public License, version 2.0, (see NOTICE for more details)
5240 +#ifndef __NFI_REGS_H__
5241 +#define __NFI_REGS_H__
5243 +#define NFI_CNFG 0x000
5244 +#define CNFG_AHB BIT(0)
5245 +#define CNFG_READ_EN BIT(1)
5246 +#define CNFG_DMA_BURST_EN BIT(2)
5247 +#define CNFG_RESEED_SEC_EN BIT(4)
5248 +#define CNFG_RAND_SEL BIT(5)
5249 +#define CNFG_BYTE_RW BIT(6)
5250 +#define CNFG_HW_ECC_EN BIT(8)
5251 +#define CNFG_AUTO_FMT_EN BIT(9)
5252 +#define CNFG_RAND_MASK GENMASK(5, 4)
5253 +#define CNFG_OP_MODE_MASK GENMASK(14, 12)
5254 +#define CNFG_IDLE_MOD 0
5255 +#define CNFG_READ_MODE (1 << 12)
5256 +#define CNFG_SINGLE_READ_MODE (2 << 12)
5257 +#define CNFG_PROGRAM_MODE (3 << 12)
5258 +#define CNFG_ERASE_MODE (4 << 12)
5259 +#define CNFG_RESET_MODE (5 << 12)
5260 +#define CNFG_CUSTOM_MODE (6 << 12)
5261 +#define NFI_PAGEFMT 0x004
5262 +#define PAGEFMT_SPARE_SHIFT 4
5263 +#define PAGEFMT_FDM_ECC_SHIFT 12
5264 +#define PAGEFMT_FDM_SHIFT 8
5265 +#define PAGEFMT_SEC_SEL_512 BIT(2)
5266 +#define PAGEFMT_512_2K 0
5267 +#define PAGEFMT_2K_4K 1
5268 +#define PAGEFMT_4K_8K 2
5269 +#define PAGEFMT_8K_16K 3
5270 +#define NFI_CON 0x008
5271 +#define CON_FIFO_FLUSH BIT(0)
5272 +#define CON_NFI_RST BIT(1)
5273 +#define CON_BRD BIT(8)
5274 +#define CON_BWR BIT(9)
5275 +#define CON_SEC_SHIFT 12
5276 +#define NFI_ACCCON 0x00c
5277 +#define NFI_INTR_EN 0x010
5278 +#define INTR_BUSY_RETURN_EN BIT(4)
5279 +#define INTR_AHB_DONE_EN BIT(6)
5280 +#define NFI_INTR_STA 0x014
5281 +#define NFI_CMD 0x020
5282 +#define NFI_ADDRNOB 0x030
5283 +#define ROW_SHIFT 4
5284 +#define NFI_COLADDR 0x034
5285 +#define NFI_ROWADDR 0x038
5286 +#define NFI_STRDATA 0x040
5289 +#define NFI_CNRNB 0x044
5290 +#define NFI_DATAW 0x050
5291 +#define NFI_DATAR 0x054
5292 +#define NFI_PIO_DIRDY 0x058
5293 +#define PIO_DI_RDY 1
5294 +#define NFI_STA 0x060
5295 +#define STA_CMD BIT(0)
5296 +#define STA_ADDR BIT(1)
5297 +#define FLASH_MACRO_IDLE BIT(5)
5298 +#define STA_BUSY BIT(8)
5299 +#define STA_BUSY2READY BIT(9)
5300 +#define STA_EMP_PAGE BIT(12)
5301 +#define NFI_FSM_CUSTDATA (0xe << 16)
5302 +#define NFI_FSM_MASK GENMASK(19, 16)
5303 +#define NAND_FSM_MASK GENMASK(29, 23)
5304 +#define NFI_ADDRCNTR 0x070
5305 +#define CNTR_VALID_MASK GENMASK(16, 0)
5306 +#define CNTR_MASK GENMASK(15, 12)
5307 +#define ADDRCNTR_SEC_SHIFT 12
5308 +#define ADDRCNTR_SEC(val) \
5309 + (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
5310 +#define NFI_STRADDR 0x080
5311 +#define NFI_BYTELEN 0x084
5312 +#define NFI_CSEL 0x090
5313 +#define NFI_FDML(x) (0x0a0 + (x) * 8)
5314 +#define NFI_FDMM(x) (0x0a4 + (x) * 8)
5315 +#define NFI_DEBUG_CON1 0x220
5316 +#define STROBE_MASK GENMASK(4, 3)
5317 +#define STROBE_SHIFT 3
5318 +#define ECC_CLK_EN BIT(11)
5319 +#define AUTOC_SRAM_MODE BIT(12)
5320 +#define BYPASS_MASTER_EN BIT(15)
5321 +#define NFI_MASTER_STA 0x224
5322 +#define MASTER_BUS_BUSY 0x3
5323 +#define NFI_SECCUS_SIZE 0x22c
5324 +#define SECCUS_SIZE_EN BIT(17)
5325 +#define NFI_RANDOM_CNFG 0x238
5326 +#define RAN_ENCODE_EN BIT(0)
5327 +#define ENCODE_SEED_SHIFT 1
5328 +#define RAN_DECODE_EN BIT(16)
5329 +#define DECODE_SEED_SHIFT 17
5330 +#define RAN_SEED_MASK 0x7fff
5331 +#define NFI_EMPTY_THRESH 0x23c
5332 +#define NFI_NAND_TYPE_CNFG 0x240
5333 +#define NAND_TYPE_ASYNC 0
5334 +#define NAND_TYPE_TOGGLE 1
5335 +#define NAND_TYPE_SYNC 2
5336 +#define NFI_ACCCON1 0x244
5337 +#define NFI_DELAY_CTRL 0x248
5338 +#define NFI_TLC_RD_WHR2 0x300
5339 +#define TLC_RD_WHR2_EN BIT(12)
5340 +#define TLC_RD_WHR2_MASK GENMASK(11, 0)
5341 +#define SNF_SNF_CNFG 0x55c
5342 +#define SPI_MODE_EN 1
5343 +#define SPI_MODE_DIS 0
5345 +#endif /* __NFI_REGS_H__ */
5348 +++ b/drivers/mtd/nandx/core/nfi/nfi_spi.c
5351 + * Copyright (C) 2017 MediaTek Inc.
5352 + * Licensed under either
5353 + * BSD Licence, (see NOTICE for more details)
5354 + * GNU General Public License, version 2.0, (see NOTICE for more details)
5357 +#include "nandx_util.h"
5358 +#include "nandx_core.h"
5359 +#include "../nfi.h"
5360 +#include "nfiecc.h"
5361 +#include "nfi_regs.h"
5362 +#include "nfi_base.h"
5363 +#include "nfi_spi_regs.h"
5364 +#include "nfi_spi.h"
5366 +#define NFI_CMD_DUMMY_RD 0x00
5367 +#define NFI_CMD_DUMMY_WR 0x80
5369 +static struct nfi_spi_delay spi_delay[SPI_NAND_MAX_DELAY] = {
5371 + * tCLK_SAM_DLY, tCLK_OUT_DLY, tCS_DLY, tWR_EN_DLY,
5372 + * tIO_IN_DLY[4], tIO_OUT_DLY[4], tREAD_LATCH_LATENCY
5374 + {0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
5375 + {21, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
5376 + {63, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
5377 + {0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
5378 + {21, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
5379 + {63, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1}
5382 +static inline struct nfi_spi *base_to_snfi(struct nfi_base *nb)
5384 + return container_of(nb, struct nfi_spi, base);
5387 +static void snfi_mac_enable(struct nfi_base *nb)
5389 + void *regs = nb->res.nfi_regs;
5392 + val = readl(regs + SNF_MAC_CTL);
5393 + val &= ~MAC_XIO_SEL;
5396 + writel(val, regs + SNF_MAC_CTL);
5399 +static void snfi_mac_disable(struct nfi_base *nb)
5401 + void *regs = nb->res.nfi_regs;
5404 + val = readl(regs + SNF_MAC_CTL);
5405 + val &= ~(SF_TRIG | SF_MAC_EN);
5406 + writel(val, regs + SNF_MAC_CTL);
5409 +static int snfi_mac_trigger(struct nfi_base *nb)
5411 + void *regs = nb->res.nfi_regs;
5415 + val = readl(regs + SNF_MAC_CTL);
5417 + writel(val, regs + SNF_MAC_CTL);
5419 + ret = readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
5420 + val & WIP_READY, 10,
5423 + pr_info("polling wip ready for read timeout\n");
5427 + return readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
5432 +static int snfi_mac_op(struct nfi_base *nb)
5436 + snfi_mac_enable(nb);
5437 + ret = snfi_mac_trigger(nb);
5438 + snfi_mac_disable(nb);
5443 +static void snfi_write_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
5445 + struct nandx_split32 split = {0};
5446 + u32 reg_offset = round_down(nfi_spi->tx_count, 4);
5447 + void *regs = nfi_spi->base.res.nfi_regs;
5448 + u32 data_offset = 0, i, val;
5449 + u8 *p_val = (u8 *)(&val);
5451 + nandx_split(&split, nfi_spi->tx_count, count, val, 4);
5453 + if (split.head_len) {
5454 + val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5456 + for (i = 0; i < split.head_len; i++)
5457 + p_val[split.head + i] = data[i];
5459 + writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
5462 + if (split.body_len) {
5463 + reg_offset = split.body;
5464 + data_offset = split.head_len;
5466 + for (i = 0; i < split.body_len; i++) {
5467 + p_val[i & 3] = data[data_offset + i];
5469 + if ((i & 3) == 3) {
5470 + writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
5476 + if (split.tail_len) {
5477 + reg_offset = split.tail;
5478 + data_offset += split.body_len;
5480 + for (i = 0; i < split.tail_len; i++) {
5481 + p_val[i] = data[data_offset + i];
5483 + if (i == split.tail_len - 1)
5484 + writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
5489 +static void snfi_read_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
5491 + void *regs = nfi_spi->base.res.nfi_regs;
5492 + u32 reg_offset = round_down(nfi_spi->tx_count, 4);
5493 + struct nandx_split32 split = {0};
5494 + u32 data_offset = 0, i, val;
5495 + u8 *p_val = (u8 *)&val;
5497 + nandx_split(&split, nfi_spi->tx_count, count, val, 4);
5499 + if (split.head_len) {
5500 + val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5502 + for (i = 0; i < split.head_len; i++)
5503 + data[data_offset + i] = p_val[split.head + i];
5506 + if (split.body_len) {
5507 + reg_offset = split.body;
5508 + data_offset = split.head_len;
5510 + for (i = 0; i < split.body_len; i++) {
5511 + if ((i & 3) == 0) {
5512 + val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5516 + data[data_offset + i] = p_val[i % 4];
5520 + if (split.tail_len) {
5521 + reg_offset = split.tail;
5522 + data_offset += split.body_len;
5523 + val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5525 + for (i = 0; i < split.tail_len; i++)
5526 + data[data_offset + i] = p_val[i];
5530 +static int snfi_send_command(struct nfi *nfi, short cmd)
5532 + struct nfi_base *nb = nfi_to_base(nfi);
5533 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5538 + if (nfi_spi->snfi_mode == SNFI_MAC_MODE) {
5539 + snfi_write_mac(nfi_spi, (u8 *)&cmd, 1);
5540 + nfi_spi->tx_count++;
5544 + nfi_spi->cmd[nfi_spi->cur_cmd_idx++] = cmd;
5548 +static int snfi_send_address(struct nfi *nfi, int col, int row,
5552 + struct nfi_base *nb = nfi_to_base(nfi);
5553 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5554 + u32 addr, cycle, temp;
5559 + if (nfi_spi->snfi_mode == SNFI_MAC_MODE) {
5561 + cycle = row_cycle;
5565 + cycle = col_cycle;
5568 + temp = nandx_cpu_to_be32(addr) >> ((4 - cycle) << 3);
5569 + snfi_write_mac(nfi_spi, (u8 *)&temp, cycle);
5570 + nfi_spi->tx_count += cycle;
5572 + nfi_spi->row_addr[nfi_spi->cur_addr_idx++] = row;
5573 + nfi_spi->col_addr[nfi_spi->cur_addr_idx++] = col;
5579 +static int snfi_trigger(struct nfi *nfi)
5581 + struct nfi_base *nb = nfi_to_base(nfi);
5582 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5583 + void *regs = nb->res.nfi_regs;
5585 + writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
5586 + writel(0, regs + SNF_MAC_INL);
5588 + nfi_spi->tx_count = 0;
5589 + nfi_spi->cur_cmd_idx = 0;
5590 + nfi_spi->cur_addr_idx = 0;
5592 + return snfi_mac_op(nb);
5595 +static int snfi_select_chip(struct nfi *nfi, int cs)
5597 + struct nfi_base *nb = nfi_to_base(nfi);
5598 + void *regs = nb->res.nfi_regs;
5601 + val = readl(regs + SNF_MISC_CTL);
5604 + val &= ~SF2CS_SEL;
5606 + } else if (cs == 1) {
5613 + writel(val, regs + SNF_MISC_CTL);
5618 +static int snfi_set_delay(struct nfi_base *nb, u8 delay_mode)
5620 + void *regs = nb->res.nfi_regs;
5621 + struct nfi_spi_delay *delay;
5624 + if (delay_mode < 0 || delay_mode > SPI_NAND_MAX_DELAY)
5627 + delay = &spi_delay[delay_mode];
5629 + val = delay->tIO_OUT_DLY[0] | delay->tIO_OUT_DLY[1] << 8 |
5630 + delay->tIO_OUT_DLY[2] << 16 |
5631 + delay->tIO_OUT_DLY[3] << 24;
5632 + writel(val, regs + SNF_DLY_CTL1);
5634 + val = delay->tIO_IN_DLY[0] | (delay->tIO_IN_DLY[1] << 8) |
5635 + delay->tIO_IN_DLY[2] << 16 |
5636 + delay->tIO_IN_DLY[3] << 24;
5637 + writel(val, regs + SNF_DLY_CTL2);
5639 + val = delay->tCLK_SAM_DLY | delay->tCLK_OUT_DLY << 8 |
5640 + delay->tCS_DLY << 16 |
5641 + delay->tWR_EN_DLY << 24;
5642 + writel(val, regs + SNF_DLY_CTL3);
5644 + writel(delay->tCS_DLY, regs + SNF_DLY_CTL4);
5646 + val = readl(regs + SNF_MISC_CTL);
5647 + val |= (delay->tREAD_LATCH_LATENCY) <<
5649 + writel(val, regs + SNF_MISC_CTL);
5654 +static int snfi_set_timing(struct nfi *nfi, void *timing, int type)
 5656 + /* Nothing needs to be done. */
5660 +static int snfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
 5662 + /* Nothing needs to be done. */
5666 +static int snfi_ctrl(struct nfi *nfi, int cmd, void *args)
5668 + struct nfi_base *nb = nfi_to_base(nfi);
5669 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5676 + case NFI_CTRL_DMA:
5677 + nb->dma_en = *(bool *)args;
5680 + case NFI_CTRL_NFI_IRQ:
5681 + nb->nfi_irq_en = *(bool *)args;
5684 + case NFI_CTRL_ECC_IRQ:
5685 + nb->ecc_irq_en = *(bool *)args;
5688 + case NFI_CTRL_PAGE_IRQ:
5689 + nb->page_irq_en = *(bool *)args;
5692 + case NFI_CTRL_ECC:
5693 + nb->ecc_en = *(bool *)args;
5696 + case NFI_CTRL_BAD_MARK_SWAP:
5697 + nb->bad_mark_swap_en = *(bool *)args;
5700 + case NFI_CTRL_ECC_CLOCK:
5701 + nb->ecc_clk_en = *(bool *)args;
5704 + case SNFI_CTRL_OP_MODE:
5705 + nfi_spi->snfi_mode = *(u8 *)args;
5708 + case SNFI_CTRL_RX_MODE:
5709 + nfi_spi->read_cache_mode = *(u8 *)args;
5712 + case SNFI_CTRL_TX_MODE:
5713 + nfi_spi->write_cache_mode = *(u8 *)args;
5716 + case SNFI_CTRL_DELAY_MODE:
5717 + ret = snfi_set_delay(nb, *(u8 *)args);
5721 + pr_info("operation not support.\n");
5722 + ret = -EOPNOTSUPP;
5729 +static int snfi_read_bytes(struct nfi *nfi, u8 *data, int count)
5731 + struct nfi_base *nb = nfi_to_base(nfi);
5732 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5733 + void *regs = nb->res.nfi_regs;
5736 + writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
5737 + writel(count, regs + SNF_MAC_INL);
5739 + ret = snfi_mac_op(nb);
5743 + snfi_read_mac(nfi_spi, data, count);
5745 + nfi_spi->tx_count = 0;
5750 +static int snfi_write_bytes(struct nfi *nfi, u8 *data, int count)
5752 + struct nfi_base *nb = nfi_to_base(nfi);
5753 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5754 + void *regs = nb->res.nfi_regs;
5756 + snfi_write_mac(nfi_spi, data, count);
5757 + nfi_spi->tx_count += count;
5759 + writel(0, regs + SNF_MAC_INL);
5760 + writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
5762 + nfi_spi->tx_count = 0;
5764 + return snfi_mac_op(nb);
5767 +static int snfi_reset(struct nfi *nfi)
5769 + struct nfi_base *nb = nfi_to_base(nfi);
5770 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5771 + void *regs = nb->res.nfi_regs;
5775 + ret = nfi_spi->parent->nfi.reset(nfi);
5779 + val = readl(regs + SNF_MISC_CTL);
5781 + writel(val, regs + SNF_MISC_CTL);
5783 + ret = readx_poll_timeout_atomic(readw, regs + SNF_STA_CTL1, val,
5784 + !(val & SPI_STATE), 50,
5787 + pr_info("spi state active in reset [0x%x] = 0x%x\n",
5788 + SNF_STA_CTL1, val);
5792 + val = readl(regs + SNF_MISC_CTL);
5794 + writel(val, regs + SNF_MISC_CTL);
5799 +static int snfi_config_for_write(struct nfi_base *nb, int count)
5801 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5802 + void *regs = nb->res.nfi_regs;
5805 + nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
5807 + val = readl(regs + SNF_MISC_CTL);
5809 + if (nfi_spi->write_cache_mode == SNFI_TX_114)
5810 + val |= PG_LOAD_X4_EN;
5812 + if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
5813 + val |= PG_LOAD_CUSTOM_EN;
5815 + writel(val, regs + SNF_MISC_CTL);
5817 + val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
5818 + writel(val << PG_LOAD_SHIFT, regs + SNF_MISC_CTL2);
5820 + val = readl(regs + SNF_PG_CTL1);
5822 + if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
5823 + val |= nfi_spi->cmd[0] << PG_LOAD_CMD_SHIFT;
5825 + val |= nfi_spi->cmd[0] | nfi_spi->cmd[1] << PG_LOAD_CMD_SHIFT |
5826 + nfi_spi->cmd[2] << PG_EXE_CMD_SHIFT;
5828 + writel(nfi_spi->row_addr[1], regs + SNF_PG_CTL3);
5829 + writel(nfi_spi->cmd[3] << GF_CMD_SHIFT | nfi_spi->col_addr[2] <<
5830 + GF_ADDR_SHIFT, regs + SNF_GF_CTL1);
5833 + writel(val, regs + SNF_PG_CTL1);
5834 + writel(nfi_spi->col_addr[1], regs + SNF_PG_CTL2);
5836 + writel(NFI_CMD_DUMMY_WR, regs + NFI_CMD);
5841 +static int snfi_config_for_read(struct nfi_base *nb, int count)
5843 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5844 + void *regs = nb->res.nfi_regs;
5848 + nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
5850 + val = readl(regs + SNF_MISC_CTL);
5851 + val &= ~DARA_READ_MODE_MASK;
5853 + switch (nfi_spi->read_cache_mode) {
5859 + val |= X2_DATA_MODE << READ_MODE_SHIFT;
5863 + val |= X4_DATA_MODE << READ_MODE_SHIFT;
5867 + val |= DUAL_IO_MODE << READ_MODE_SHIFT;
5871 + val |= QUAD_IO_MODE << READ_MODE_SHIFT;
5875 + pr_info("Not support this read operarion: %d!\n",
5876 + nfi_spi->read_cache_mode);
5881 + if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
5882 + val |= DATARD_CUSTOM_EN;
5884 + writel(val, regs + SNF_MISC_CTL);
5886 + val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
5887 + writel(val, regs + SNF_MISC_CTL2);
5889 + val = readl(regs + SNF_RD_CTL2);
5891 + if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE) {
5892 + val |= nfi_spi->cmd[0];
5893 + writel(nfi_spi->col_addr[1], regs + SNF_RD_CTL3);
5895 + val |= nfi_spi->cmd[2];
5896 + writel(nfi_spi->cmd[0] << PAGE_READ_CMD_SHIFT |
5897 + nfi_spi->row_addr[0], regs + SNF_RD_CTL1);
5898 + writel(nfi_spi->cmd[1] << GF_CMD_SHIFT |
5899 + nfi_spi->col_addr[1] << GF_ADDR_SHIFT,
5900 + regs + SNF_GF_CTL1);
5901 + writel(nfi_spi->col_addr[2], regs + SNF_RD_CTL3);
5904 + writel(val, regs + SNF_RD_CTL2);
5906 + writel(NFI_CMD_DUMMY_RD, regs + NFI_CMD);
5911 +static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
5914 + u32 *data32 = (u32 *)data;
5915 + u32 *fdm32 = (u32 *)fdm;
5918 + for (i = 0; i < nb->format.page_size >> 2; i++) {
5919 + if (data32[i] != 0xffff) {
5920 + count += zero_popcount(data32[i]);
5922 + pr_info("%s %d %d count:%d\n",
5923 + __func__, __LINE__, i, count);
5930 + for (i = 0; i < (nb->nfi.fdm_size * sectors >> 2); i++)
5931 + if (fdm32[i] != 0xffff) {
5932 + count += zero_popcount(fdm32[i]);
5934 + pr_info("%s %d %d count:%d\n",
5935 + __func__, __LINE__, i, count);
5944 +static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
5948 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5951 + ret = nfi_spi->parent->rw_prepare(nb, sectors, data, fdm, read);
5956 + ret = snfi_config_for_read(nb, sectors);
5958 + ret = snfi_config_for_write(nb, sectors);
5963 +static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
5966 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5967 + void *regs = nb->res.nfi_regs;
5970 + nfi_spi->parent->rw_complete(nb, data, fdm, read);
5972 + val = readl(regs + SNF_MISC_CTL);
5975 + val &= ~DATARD_CUSTOM_EN;
5977 + val &= ~PG_LOAD_CUSTOM_EN;
5979 + writel(val, regs + SNF_MISC_CTL);
5981 + nfi_spi->tx_count = 0;
5982 + nfi_spi->cur_cmd_idx = 0;
5983 + nfi_spi->cur_addr_idx = 0;
5986 +static void set_nfi_base_funcs(struct nfi_base *nb)
5988 + nb->nfi.reset = snfi_reset;
5989 + nb->nfi.set_timing = snfi_set_timing;
5990 + nb->nfi.wait_ready = snfi_wait_ready;
5992 + nb->nfi.send_cmd = snfi_send_command;
5993 + nb->nfi.send_addr = snfi_send_address;
5994 + nb->nfi.trigger = snfi_trigger;
5995 + nb->nfi.nfi_ctrl = snfi_ctrl;
5996 + nb->nfi.select_chip = snfi_select_chip;
5998 + nb->nfi.read_bytes = snfi_read_bytes;
5999 + nb->nfi.write_bytes = snfi_write_bytes;
6001 + nb->rw_prepare = rw_prepare;
6002 + nb->rw_complete = rw_complete;
6003 + nb->is_page_empty = is_page_empty;
6007 +struct nfi *nfi_extend_init(struct nfi_base *nb)
6009 + struct nfi_spi *nfi_spi;
6011 + nfi_spi = mem_alloc(1, sizeof(struct nfi_spi));
6013 + pr_info("snfi alloc memory fail @%s.\n", __func__);
6017 + memcpy(&nfi_spi->base, nb, sizeof(struct nfi_base));
6018 + nfi_spi->parent = nb;
6020 + nfi_spi->read_cache_mode = SNFI_RX_114;
6021 + nfi_spi->write_cache_mode = SNFI_TX_114;
6023 + set_nfi_base_funcs(&nfi_spi->base);
6025 + /* Change nfi to spi mode */
6026 + writel(SPI_MODE, nb->res.nfi_regs + SNF_SNF_CNFG);
6028 + return &(nfi_spi->base.nfi);
6031 +void nfi_extend_exit(struct nfi_base *nb)
6033 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
6035 + mem_free(nfi_spi->parent);
6036 + mem_free(nfi_spi);
6040 +++ b/drivers/mtd/nandx/core/nfi/nfi_spi.h
6043 + * Copyright (C) 2017 MediaTek Inc.
6044 + * Licensed under either
6045 + * BSD Licence, (see NOTICE for more details)
6046 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6049 +#ifndef __NFI_SPI_H__
6050 +#define __NFI_SPI_H__
6052 +#define SPI_NAND_MAX_DELAY 6
6053 +#define SPI_NAND_MAX_OP 4
6055 +/*TODO - add comments */
6056 +struct nfi_spi_delay {
6062 + u8 tIO_OUT_DLY[4];
6063 + u8 tREAD_LATCH_LATENCY;
6066 +/* SPI Nand structure */
6068 + struct nfi_base base;
6069 + struct nfi_base *parent;
6074 + u8 cmd[SPI_NAND_MAX_OP];
6077 + u32 row_addr[SPI_NAND_MAX_OP];
6078 + u32 col_addr[SPI_NAND_MAX_OP];
6081 + u8 read_cache_mode;
6082 + u8 write_cache_mode;
6085 +#endif /* __NFI_SPI_H__ */
6087 +++ b/drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
6090 + * Copyright (C) 2017 MediaTek Inc.
6091 + * Licensed under either
6092 + * BSD Licence, (see NOTICE for more details)
6093 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6096 +#ifndef __NFI_SPI_REGS_H__
6097 +#define __NFI_SPI_REGS_H__
6099 +#define SNF_MAC_CTL 0x500
6101 +#define WIP_READY BIT(1)
6102 +#define SF_TRIG BIT(2)
6103 +#define SF_MAC_EN BIT(3)
6104 +#define MAC_XIO_SEL BIT(4)
6105 +#define SNF_MAC_OUTL 0x504
6106 +#define SNF_MAC_INL 0x508
6107 +#define SNF_RD_CTL1 0x50c
6108 +#define PAGE_READ_CMD_SHIFT 24
6109 +#define SNF_RD_CTL2 0x510
6110 +#define SNF_RD_CTL3 0x514
6111 +#define SNF_GF_CTL1 0x518
6112 +#define GF_ADDR_SHIFT 16
6113 +#define GF_CMD_SHIFT 24
6114 +#define SNF_GF_CTL3 0x520
6115 +#define SNF_PG_CTL1 0x524
6116 +#define PG_EXE_CMD_SHIFT 16
6117 +#define PG_LOAD_CMD_SHIFT 8
6118 +#define SNF_PG_CTL2 0x528
6119 +#define SNF_PG_CTL3 0x52c
6120 +#define SNF_ER_CTL 0x530
6121 +#define SNF_ER_CTL2 0x534
6122 +#define SNF_MISC_CTL 0x538
6123 +#define SW_RST BIT(28)
6124 +#define PG_LOAD_X4_EN BIT(20)
6125 +#define X2_DATA_MODE 1
6126 +#define X4_DATA_MODE 2
6127 +#define DUAL_IO_MODE 5
6128 +#define QUAD_IO_MODE 6
6129 +#define READ_MODE_SHIFT 16
6130 +#define LATCH_LAT_SHIFT 8
6131 +#define LATCH_LAT_MASK GENMASK(9, 8)
6132 +#define DARA_READ_MODE_MASK GENMASK(18, 16)
6133 +#define SF2CS_SEL BIT(13)
6134 +#define SF2CS_EN BIT(12)
6135 +#define PG_LOAD_CUSTOM_EN BIT(7)
6136 +#define DATARD_CUSTOM_EN BIT(6)
6137 +#define SNF_MISC_CTL2 0x53c
6138 +#define PG_LOAD_SHIFT 16
6139 +#define SNF_DLY_CTL1 0x540
6140 +#define SNF_DLY_CTL2 0x544
6141 +#define SNF_DLY_CTL3 0x548
6142 +#define SNF_DLY_CTL4 0x54c
6143 +#define SNF_STA_CTL1 0x550
6144 +#define SPI_STATE GENMASK(3, 0)
6145 +#define SNF_STA_CTL2 0x554
6146 +#define SNF_STA_CTL3 0x558
6147 +#define SNF_SNF_CNFG 0x55c
6148 +#define SPI_MODE BIT(0)
6149 +#define SNF_DEBUG_SEL 0x560
6150 +#define SPI_GPRAM_ADDR 0x800
6152 +#endif /* __NFI_SPI_REGS_H__ */
6154 +++ b/drivers/mtd/nandx/core/nfi/nfiecc.c
6157 + * Copyright (C) 2017 MediaTek Inc.
6158 + * Licensed under either
6159 + * BSD Licence, (see NOTICE for more details)
6160 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6163 +#include "nandx_util.h"
6164 +#include "nandx_core.h"
6165 +#include "nfiecc_regs.h"
6166 +#include "nfiecc.h"
6168 +#define NFIECC_IDLE_REG(op) \
6169 + ((op) == ECC_ENCODE ? NFIECC_ENCIDLE : NFIECC_DECIDLE)
6170 +#define IDLE_MASK 1
6171 +#define NFIECC_CTL_REG(op) \
6172 + ((op) == ECC_ENCODE ? NFIECC_ENCCON : NFIECC_DECCON)
6173 +#define NFIECC_IRQ_REG(op) \
6174 + ((op) == ECC_ENCODE ? NFIECC_ENCIRQEN : NFIECC_DECIRQEN)
6175 +#define NFIECC_ADDR(op) \
6176 + ((op) == ECC_ENCODE ? NFIECC_ENCDIADDR : NFIECC_DECDIADDR)
6178 +#define ECC_TIMEOUT 500000
6180 +/* ecc strength that each IP supports */
6181 +static const int ecc_strength_mt7622[] = {
6182 + 4, 6, 8, 10, 12, 14, 16
6185 +static int nfiecc_irq_handler(void *data)
6187 + struct nfiecc *ecc = data;
6188 + void *regs = ecc->res.regs;
6191 + status = readl(regs + NFIECC_DECIRQSTA) & DEC_IRQSTA_GEN;
6193 + status = readl(regs + NFIECC_DECDONE);
6194 + if (!(status & ecc->config.sectors))
6195 + return NAND_IRQ_NONE;
6198 + * Clear decode IRQ status once again to ensure that
6199 + * there will be no extra IRQ.
6201 + readl(regs + NFIECC_DECIRQSTA);
6202 + ecc->config.sectors = 0;
6203 + nandx_event_complete(ecc->done);
6205 + status = readl(regs + NFIECC_ENCIRQSTA) & ENC_IRQSTA_GEN;
6207 + return NAND_IRQ_NONE;
6209 + nandx_event_complete(ecc->done);
6212 + return NAND_IRQ_HANDLED;
6215 +static inline int nfiecc_wait_idle(struct nfiecc *ecc)
6217 + int op = ecc->config.op;
6220 + ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_IDLE_REG(op),
6221 + val, val & IDLE_MASK,
6224 + pr_info("%s not idle\n",
6225 + op == ECC_ENCODE ? "encoder" : "decoder");
6230 +static int nfiecc_wait_encode_done(struct nfiecc *ecc)
6234 + if (ecc->ecc_irq_en) {
6235 + /* poll one time to avoid missing irq event */
6236 + ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
6237 + val, val & ENC_FSM_IDLE, 1, 1);
6241 + /* irq done, if not, we can go on to poll status for a while */
6242 + ret = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
6247 + ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
6248 + val, val & ENC_FSM_IDLE,
6251 + pr_info("encode timeout\n");
6257 +static int nfiecc_wait_decode_done(struct nfiecc *ecc)
6259 + u32 secbit = BIT(ecc->config.sectors - 1);
6260 + void *regs = ecc->res.regs;
6263 + if (ecc->ecc_irq_en) {
6264 + ret = readl_poll_timeout_atomic(regs + NFIECC_DECDONE,
6265 + val, val & secbit, 1, 1);
6269 + ret = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
6274 + ret = readl_poll_timeout_atomic(regs + NFIECC_DECDONE,
6275 + val, val & secbit,
6278 + pr_info("decode timeout\n");
 6282 + /* decode done does not mean that all ECC work is done.
 6283 + * we need to check that syn, bma, chien and autoc are all idle.
6284 + * just check it when ECC_DECCNFG[13:12] is 3,
6285 + * which means auto correct.
6287 + ret = readl_poll_timeout_atomic(regs + NFIECC_DECFSM,
6288 + val, (val & FSM_MASK) == FSM_IDLE,
6291 + pr_info("decode fsm(0x%x) is not idle\n",
6292 + readl(regs + NFIECC_DECFSM));
6297 +static int nfiecc_wait_done(struct nfiecc *ecc)
6299 + if (ecc->config.op == ECC_ENCODE)
6300 + return nfiecc_wait_encode_done(ecc);
6302 + return nfiecc_wait_decode_done(ecc);
6305 +static void nfiecc_encode_config(struct nfiecc *ecc, u32 ecc_idx)
6307 + struct nfiecc_config *config = &ecc->config;
6310 + val = ecc_idx | (config->mode << ecc->caps->ecc_mode_shift);
6312 + if (config->mode == ECC_DMA_MODE)
6313 + val |= ENC_BURST_EN;
6315 + val |= (config->len << 3) << ENCCNFG_MS_SHIFT;
6316 + writel(val, ecc->res.regs + NFIECC_ENCCNFG);
6319 +static void nfiecc_decode_config(struct nfiecc *ecc, u32 ecc_idx)
6321 + struct nfiecc_config *config = &ecc->config;
6322 + u32 dec_sz = (config->len << 3) +
6323 + config->strength * ecc->caps->parity_bits;
6326 + val = ecc_idx | (config->mode << ecc->caps->ecc_mode_shift);
6328 + if (config->mode == ECC_DMA_MODE)
6329 + val |= DEC_BURST_EN;
6331 + val |= (dec_sz << DECCNFG_MS_SHIFT) |
6332 + (config->deccon << DEC_CON_SHIFT);
6333 + val |= DEC_EMPTY_EN;
6334 + writel(val, ecc->res.regs + NFIECC_DECCNFG);
6337 +static void nfiecc_config(struct nfiecc *ecc)
6341 + for (idx = 0; idx < ecc->caps->ecc_strength_num; idx++) {
6342 + if (ecc->config.strength == ecc->caps->ecc_strength[idx])
6346 + if (ecc->config.op == ECC_ENCODE)
6347 + nfiecc_encode_config(ecc, idx);
6349 + nfiecc_decode_config(ecc, idx);
6352 +static int nfiecc_enable(struct nfiecc *ecc)
6354 + enum nfiecc_operation op = ecc->config.op;
6355 + void *regs = ecc->res.regs;
6357 + nfiecc_config(ecc);
6359 + writel(ECC_OP_EN, regs + NFIECC_CTL_REG(op));
6361 + if (ecc->ecc_irq_en) {
6362 + writel(ECC_IRQEN, regs + NFIECC_IRQ_REG(op));
6364 + if (ecc->page_irq_en)
6365 + writel(ECC_IRQEN | ECC_PG_IRQ_SEL,
6366 + regs + NFIECC_IRQ_REG(op));
6368 + nandx_event_init(ecc->done);
6374 +static int nfiecc_disable(struct nfiecc *ecc)
6376 + enum nfiecc_operation op = ecc->config.op;
6377 + void *regs = ecc->res.regs;
6379 + nfiecc_wait_idle(ecc);
6381 + writel(0, regs + NFIECC_IRQ_REG(op));
6382 + writel(~ECC_OP_EN, regs + NFIECC_CTL_REG(op));
6387 +static int nfiecc_correct_data(struct nfiecc *ecc,
6388 + struct nfiecc_status *status,
6389 + u8 *data, u32 sector)
6391 + u32 err, offset, i;
6392 + u32 loc, byteloc, bitloc;
6394 + status->corrected = 0;
6395 + status->failed = 0;
6397 + offset = (sector >> 2);
6398 + err = readl(ecc->res.regs + NFIECC_DECENUM(offset));
6399 + err >>= (sector % 4) * 8;
6400 + err &= ecc->caps->err_mask;
6402 + if (err == ecc->caps->err_mask) {
6404 + return -ENANDREAD;
6407 + status->corrected += err;
6408 + status->bitflips = max_t(u32, status->bitflips, err);
6410 + for (i = 0; i < err; i++) {
6411 + loc = readl(ecc->res.regs + NFIECC_DECEL(i >> 1));
6412 + loc >>= ((i & 0x1) << 4);
6413 + byteloc = loc >> 3;
6414 + bitloc = loc & 0x7;
6415 + data[byteloc] ^= (1 << bitloc);
6421 +static int nfiecc_fill_data(struct nfiecc *ecc, u8 *data)
6423 + struct nfiecc_config *config = &ecc->config;
6424 + void *regs = ecc->res.regs;
6428 + if (config->mode == ECC_DMA_MODE) {
6429 + if ((unsigned long)config->dma_addr & 0x3)
6430 + pr_info("encode address is not 4B aligned: 0x%x\n",
6431 + (u32)(unsigned long)config->dma_addr);
6433 + writel((unsigned long)config->dma_addr,
6434 + regs + NFIECC_ADDR(config->op));
6435 + } else if (config->mode == ECC_PIO_MODE) {
6436 + if (config->op == ECC_ENCODE) {
6437 + size = (config->len + 3) >> 2;
6439 + size = config->strength * ecc->caps->parity_bits;
6440 + size = (size + 7) >> 3;
6441 + size += config->len;
6445 + for (i = 0; i < size; i++) {
6446 + ret = readl_poll_timeout_atomic(regs + NFIECC_PIO_DIRDY,
6447 + val, val & PIO_DI_RDY,
6452 + writel(*((u32 *)data + i), regs + NFIECC_PIO_DI);
6459 +static int nfiecc_encode(struct nfiecc *ecc, u8 *data)
6461 + struct nfiecc_config *config = &ecc->config;
6462 + u32 len, i, val = 0;
 6466 + /* Under NFI mode, nothing needs to be done */
6467 + if (config->mode == ECC_NFI_MODE)
6470 + ret = nfiecc_fill_data(ecc, data);
6474 + ret = nfiecc_wait_encode_done(ecc);
6478 + ret = nfiecc_wait_idle(ecc);
6482 + /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
6483 + len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
6484 + p = data + config->len;
6486 + /* Write the parity bytes generated by the ECC back to the OOB region */
6487 + for (i = 0; i < len; i++) {
6489 + val = readl(ecc->res.regs + NFIECC_ENCPAR(i / 4));
6491 + p[i] = (val >> ((i % 4) * 8)) & 0xff;
6497 +static int nfiecc_decode(struct nfiecc *ecc, u8 *data)
 6501 + /* Under NFI mode, nothing needs to be done */
6502 + if (ecc->config.mode == ECC_NFI_MODE)
6505 + ret = nfiecc_fill_data(ecc, data);
6509 + return nfiecc_wait_decode_done(ecc);
6512 +static int nfiecc_decode_status(struct nfiecc *ecc, u32 start_sector,
6515 + void *regs = ecc->res.regs;
6516 + u32 i, val = 0, err;
6519 + for (i = start_sector; i < start_sector + sectors; i++) {
6521 + val = readl(regs + NFIECC_DECENUM(i / 4));
6523 + err = val >> ((i % 4) * 5);
6524 + err &= ecc->caps->err_mask;
6526 + if (err == ecc->caps->err_mask)
6527 + pr_err("sector %d is uncorrect\n", i);
6529 + bitflips = max_t(u32, bitflips, err);
6532 + if (bitflips == ecc->caps->err_mask)
6533 + return -ENANDREAD;
6536 + pr_info("bitflips %d is corrected\n", bitflips);
6541 +static int nfiecc_adjust_strength(struct nfiecc *ecc, int strength)
6543 + struct nfiecc_caps *caps = ecc->caps;
6544 + int i, count = caps->ecc_strength_num;
6546 + if (strength >= caps->ecc_strength[count - 1])
6547 + return caps->ecc_strength[count - 1];
6549 + if (strength < caps->ecc_strength[0])
6552 + for (i = 1; i < count; i++) {
6553 + if (strength < caps->ecc_strength[i])
6554 + return caps->ecc_strength[i - 1];
6560 +static int nfiecc_ctrl(struct nfiecc *ecc, int cmd, void *args)
6565 + case NFI_CTRL_ECC_IRQ:
6566 + ecc->ecc_irq_en = *(bool *)args;
6569 + case NFI_CTRL_ECC_PAGE_IRQ:
6570 + ecc->page_irq_en = *(bool *)args;
6574 + pr_info("invalid arguments.\n");
6582 +static int nfiecc_hw_init(struct nfiecc *ecc)
6586 + ret = nfiecc_wait_idle(ecc);
6590 + writel(~ECC_OP_EN, ecc->res.regs + NFIECC_ENCCON);
6592 + ret = nfiecc_wait_idle(ecc);
6596 + writel(~ECC_OP_EN, ecc->res.regs + NFIECC_DECCON);
6601 +static struct nfiecc_caps nfiecc_caps_mt7622 = {
6603 + .ecc_mode_shift = 4,
6604 + .parity_bits = 13,
6605 + .ecc_strength = ecc_strength_mt7622,
6606 + .ecc_strength_num = 7,
6609 +static struct nfiecc_caps *nfiecc_get_match_data(enum mtk_ic_version ic)
6611 + /* NOTE: add other IC's data */
6612 + return &nfiecc_caps_mt7622;
6615 +struct nfiecc *nfiecc_init(struct nfiecc_resource *res)
6617 + struct nfiecc *ecc;
6620 + ecc = mem_alloc(1, sizeof(struct nfiecc));
6626 + ret = nandx_irq_register(res->dev, res->irq_id, nfiecc_irq_handler,
6629 + pr_info("ecc irq register failed!\n");
6633 + ecc->ecc_irq_en = false;
6634 + ecc->page_irq_en = false;
6635 + ecc->done = nandx_event_create();
6636 + ecc->caps = nfiecc_get_match_data(res->ic_ver);
6638 + ecc->adjust_strength = nfiecc_adjust_strength;
6639 + ecc->enable = nfiecc_enable;
6640 + ecc->disable = nfiecc_disable;
6641 + ecc->decode = nfiecc_decode;
6642 + ecc->encode = nfiecc_encode;
6643 + ecc->wait_done = nfiecc_wait_done;
6644 + ecc->decode_status = nfiecc_decode_status;
6645 + ecc->correct_data = nfiecc_correct_data;
6646 + ecc->nfiecc_ctrl = nfiecc_ctrl;
6648 + ret = nfiecc_hw_init(ecc);
6660 +void nfiecc_exit(struct nfiecc *ecc)
6662 + nandx_event_destroy(ecc->done);
6667 +++ b/drivers/mtd/nandx/core/nfi/nfiecc.h
6670 + * Copyright (C) 2017 MediaTek Inc.
6671 + * Licensed under either
6672 + * BSD Licence, (see NOTICE for more details)
6673 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6676 +#ifndef __NFIECC_H__
6677 +#define __NFIECC_H__
6685 +enum nfiecc_operation {
6690 +enum nfiecc_deccon {
6692 + ECC_DEC_LOCATE = 2,
6693 + ECC_DEC_CORRECT = 3
6696 +struct nfiecc_resource {
6704 +struct nfiecc_status {
6710 +struct nfiecc_caps {
6712 + u32 ecc_mode_shift;
6714 + const int *ecc_strength;
6715 + u32 ecc_strength_num;
6718 +struct nfiecc_config {
6719 + enum nfiecc_operation op;
6720 + enum nfiecc_mode mode;
6721 + enum nfiecc_deccon deccon;
6723 + void *dma_addr; /* DMA use only */
6730 + struct nfiecc_resource res;
6731 + struct nfiecc_config config;
6732 + struct nfiecc_caps *caps;
6739 + int (*adjust_strength)(struct nfiecc *ecc, int strength);
6740 + int (*enable)(struct nfiecc *ecc);
6741 + int (*disable)(struct nfiecc *ecc);
6743 + int (*decode)(struct nfiecc *ecc, u8 *data);
6744 + int (*encode)(struct nfiecc *ecc, u8 *data);
6746 + int (*decode_status)(struct nfiecc *ecc, u32 start_sector, u32 sectors);
6747 + int (*correct_data)(struct nfiecc *ecc,
6748 + struct nfiecc_status *status,
6749 + u8 *data, u32 sector);
6750 + int (*wait_done)(struct nfiecc *ecc);
6752 + int (*nfiecc_ctrl)(struct nfiecc *ecc, int cmd, void *args);
6755 +struct nfiecc *nfiecc_init(struct nfiecc_resource *res);
6756 +void nfiecc_exit(struct nfiecc *ecc);
6758 +#endif /* __NFIECC_H__ */
6760 +++ b/drivers/mtd/nandx/core/nfi/nfiecc_regs.h
6763 + * Copyright (C) 2017 MediaTek Inc.
6764 + * Licensed under either
6765 + * BSD Licence, (see NOTICE for more details)
6766 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6769 +#ifndef __NFIECC_REGS_H__
6770 +#define __NFIECC_REGS_H__
6772 +#define NFIECC_ENCCON 0x000
 6773 +/* NFIECC_DECCON has the same bit definitions */
6774 +#define ECC_OP_EN BIT(0)
6775 +#define NFIECC_ENCCNFG 0x004
6776 +#define ENCCNFG_MS_SHIFT 16
6777 +#define ENC_BURST_EN BIT(8)
6778 +#define NFIECC_ENCDIADDR 0x008
6779 +#define NFIECC_ENCIDLE 0x00c
6780 +#define NFIECC_ENCSTA 0x02c
6781 +#define ENC_FSM_IDLE 1
6782 +#define NFIECC_ENCIRQEN 0x030
 6783 +/* NFIECC_DECIRQEN has the same bit definitions */
6784 +#define ECC_IRQEN BIT(0)
6785 +#define ECC_PG_IRQ_SEL BIT(1)
6786 +#define NFIECC_ENCIRQSTA 0x034
6787 +#define ENC_IRQSTA_GEN BIT(0)
6788 +#define NFIECC_PIO_DIRDY 0x080
6789 +#define PIO_DI_RDY BIT(0)
6790 +#define NFIECC_PIO_DI 0x084
6791 +#define NFIECC_DECCON 0x100
6792 +#define NFIECC_DECCNFG 0x104
6793 +#define DEC_BURST_EN BIT(8)
6794 +#define DEC_EMPTY_EN BIT(31)
6795 +#define DEC_CON_SHIFT 12
6796 +#define DECCNFG_MS_SHIFT 16
6797 +#define NFIECC_DECDIADDR 0x108
6798 +#define NFIECC_DECIDLE 0x10c
6799 +#define NFIECC_DECENUM(x) (0x114 + (x) * 4)
6800 +#define NFIECC_DECDONE 0x11c
6801 +#define NFIECC_DECIRQEN 0x140
6802 +#define NFIECC_DECIRQSTA 0x144
6803 +#define DEC_IRQSTA_GEN BIT(0)
6804 +#define NFIECC_DECFSM 0x14c
6805 +#define FSM_MASK 0x7f0f0f0f
6806 +#define FSM_IDLE 0x01010101
6807 +#define NFIECC_BYPASS 0x20c
6808 +#define NFIECC_BYPASS_EN BIT(0)
6809 +#define NFIECC_ENCPAR(x) (0x010 + (x) * 4)
6810 +#define NFIECC_DECEL(x) (0x120 + (x) * 4)
6812 +#endif /* __NFIECC_REGS_H__ */
6814 +++ b/drivers/mtd/nandx/driver/Nandx.mk
6817 +# Copyright (C) 2017 MediaTek Inc.
6818 +# Licensed under either
6819 +# BSD Licence, (see NOTICE for more details)
6820 +# GNU General Public License, version 2.0, (see NOTICE for more details)
6823 +nandx-$(NANDX_SIMULATOR_SUPPORT) += simulator/driver.c
6825 +nandx-$(NANDX_CTP_SUPPORT) += ctp/ts_nand.c
6826 +nandx-$(NANDX_CTP_SUPPORT) += ctp/nand_test.c
6827 +nandx-header-$(NANDX_CTP_SUPPORT) += ctp/nand_test.h
6829 +nandx-$(NANDX_BBT_SUPPORT) += bbt/bbt.c
6830 +nandx-$(NANDX_BROM_SUPPORT) += brom/driver.c
6831 +nandx-$(NANDX_KERNEL_SUPPORT) += kernel/driver.c
6832 +nandx-$(NANDX_LK_SUPPORT) += lk/driver.c
6833 +nandx-$(NANDX_UBOOT_SUPPORT) += uboot/driver.c
6835 +++ b/drivers/mtd/nandx/driver/bbt/bbt.c
6838 + * Copyright (C) 2017 MediaTek Inc.
6839 + * Licensed under either
6840 + * BSD Licence, (see NOTICE for more details)
6841 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6844 +#include "nandx_util.h"
6845 +#include "nandx_core.h"
6848 +/* Not supported: multi-chip */
6849 +static u8 main_bbt_pattern[] = {'B', 'b', 't', '0' };
6850 +static u8 mirror_bbt_pattern[] = {'1', 't', 'b', 'B' };
6852 +static struct bbt_manager g_bbt_manager = {
6853 + { {{main_bbt_pattern, 4}, 0, BBT_INVALID_ADDR},
6854 + {{mirror_bbt_pattern, 4}, 0, BBT_INVALID_ADDR}
6856 + NAND_BBT_SCAN_MAXBLOCKS, NULL
6859 +static inline void set_bbt_mark(u8 *bbt, int block, u8 mark)
6861 + int index, offset;
6863 + index = GET_ENTRY(block);
6864 + offset = GET_POSITION(block);
6866 + bbt[index] &= ~(BBT_ENTRY_MASK << offset);
6867 + bbt[index] |= (mark & BBT_ENTRY_MASK) << offset;
6868 + pr_info("%s %d block:%d, bbt[%d]:0x%x, offset:%d, mark:%d\n",
6869 + __func__, __LINE__, block, index, bbt[index], offset, mark);
6872 +static inline u8 get_bbt_mark(u8 *bbt, int block)
6874 + int offset = GET_POSITION(block);
6875 + int index = GET_ENTRY(block);
6876 + u8 value = bbt[index];
6878 + return (value >> offset) & BBT_ENTRY_MASK;
6881 +static void mark_nand_bad(struct nandx_info *nand, int block)
6885 + buf = mem_alloc(1, nand->page_size + nand->oob_size);
6887 + pr_info("%s, %d, memory alloc fail, pagesize:%d, oobsize:%d\n",
6888 + __func__, __LINE__, nand->page_size, nand->oob_size);
6891 + memset(buf, 0, nand->page_size + nand->oob_size);
6892 + nandx_erase(block * nand->block_size, nand->block_size);
6893 + nandx_write(buf, buf + nand->page_size, block * nand->block_size,
6898 +static inline bool is_bbt_data(u8 *buf, struct bbt_pattern *pattern)
6902 + for (i = 0; i < pattern->len; i++) {
6903 + if (buf[i] != pattern->data[i])
6910 +static u64 get_bbt_address(struct nandx_info *nand, u8 *bbt,
6914 + u64 addr, end_addr;
6917 + addr = nand->total_size;
6918 + end_addr = nand->total_size - nand->block_size * max_blocks;
6920 + while (addr > end_addr) {
6921 + addr -= nand->block_size;
6922 + mark = get_bbt_mark(bbt, div_down(addr, nand->block_size));
6924 + if (mark == BBT_BLOCK_WORN || mark == BBT_BLOCK_FACTORY_BAD)
6926 + if (addr != mirror_addr)
6930 + return BBT_INVALID_ADDR;
6933 +static int read_bbt(struct bbt_desc *desc, u8 *bbt, u32 len)
6937 + ret = nandx_read(bbt, NULL, desc->bbt_addr + desc->pattern.len + 1,
6940 + pr_info("nand_bbt: error reading BBT page, ret:-%x\n", ret);
6945 +static void create_bbt(struct nandx_info *nand, u8 *bbt)
6947 + u32 offset = 0, block = 0;
6950 + if (nandx_is_bad_block(offset)) {
6951 + pr_info("Create bbt at bad block:%d\n", block);
6952 + set_bbt_mark(bbt, block, BBT_BLOCK_FACTORY_BAD);
6955 + offset += nand->block_size;
6956 + } while (offset < nand->total_size);
6959 +static int search_bbt(struct nandx_info *nand, struct bbt_desc *desc,
6962 + u64 addr, end_addr;
6966 + buf = mem_alloc(1, nand->page_size);
6968 + pr_info("%s, %d, mem alloc fail!!! len:%d\n",
6969 + __func__, __LINE__, nand->page_size);
6973 + addr = nand->total_size;
6974 + end_addr = nand->total_size - max_blocks * nand->block_size;
6975 + while (addr > end_addr) {
6976 + addr -= nand->block_size;
6978 + nandx_read(buf, NULL, addr, nand->page_size);
6980 + if (is_bbt_data(buf, &desc->pattern)) {
6981 + desc->bbt_addr = addr;
6982 + desc->version = buf[desc->pattern.len];
6983 + pr_info("BBT is found at addr 0x%llx, version %d\n",
6984 + desc->bbt_addr, desc->version);
6995 +static int save_bbt(struct nandx_info *nand, struct bbt_desc *desc,
6998 + u32 page_size_mask, total_block;
7003 + ret = nandx_erase(desc->bbt_addr, nand->block_size);
7005 + pr_info("erase addr 0x%llx fail !!!, ret %d\n",
7006 + desc->bbt_addr, ret);
7010 + total_block = div_down(nand->total_size, nand->block_size);
7011 + write_len = GET_BBT_LENGTH(total_block) + desc->pattern.len + 1;
7012 + page_size_mask = nand->page_size - 1;
7013 + write_len = (write_len + page_size_mask) & (~page_size_mask);
7015 + buf = (u8 *)mem_alloc(1, write_len);
7017 + pr_info("%s, %d, mem alloc fail!!! len:%d\n",
7018 + __func__, __LINE__, write_len);
7021 + memset(buf, 0xFF, write_len);
7023 + memcpy(buf, desc->pattern.data, desc->pattern.len);
7024 + buf[desc->pattern.len] = desc->version;
7026 + memcpy(buf + desc->pattern.len + 1, bbt, GET_BBT_LENGTH(total_block));
7028 + ret = nandx_write(buf, NULL, desc->bbt_addr, write_len);
7031 + pr_info("nandx_write fail(%d), offset:0x%llx, len(%d)\n",
7032 + ret, desc->bbt_addr, write_len);
7038 +static int write_bbt(struct nandx_info *nand, struct bbt_desc *main,
7039 + struct bbt_desc *mirror, u8 *bbt, int max_blocks)
7045 + if (main->bbt_addr == BBT_INVALID_ADDR) {
7046 + main->bbt_addr = get_bbt_address(nand, bbt,
7047 + mirror->bbt_addr, max_blocks);
7048 + if (main->bbt_addr == BBT_INVALID_ADDR)
7052 + ret = save_bbt(nand, main, bbt);
7056 + block = div_down(main->bbt_addr, nand->block_size);
7057 + set_bbt_mark(bbt, block, BBT_BLOCK_WORN);
7059 + mark_nand_bad(nand, block);
7060 + main->bbt_addr = BBT_INVALID_ADDR;
7066 +static void mark_bbt_region(struct nandx_info *nand, u8 *bbt, int bbt_blocks)
7072 + total_block = div_down(nand->total_size, nand->block_size);
7073 + block = total_block - bbt_blocks;
7075 + while (bbt_blocks) {
7076 + mark = get_bbt_mark(bbt, block);
7077 + if (mark == BBT_BLOCK_GOOD)
7078 + set_bbt_mark(bbt, block, BBT_BLOCK_RESERVED);
7084 +static void unmark_bbt_region(struct nandx_info *nand, u8 *bbt, int bbt_blocks)
7090 + total_block = div_down(nand->total_size, nand->block_size);
7091 + block = total_block - bbt_blocks;
7093 + while (bbt_blocks) {
7094 + mark = get_bbt_mark(bbt, block);
7095 + if (mark == BBT_BLOCK_RESERVED)
7096 + set_bbt_mark(bbt, block, BBT_BLOCK_GOOD);
7102 +static int update_bbt(struct nandx_info *nand, struct bbt_desc *desc,
7108 +	/* The reserved info is not stored in NAND */
7109 + unmark_bbt_region(nand, bbt, max_blocks);
7111 + desc[0].version++;
7112 + for (i = 0; i < 2; i++) {
7114 + desc[i].version = desc[i - 1].version;
7116 + ret = write_bbt(nand, &desc[i], &desc[1 - i], bbt, max_blocks);
7120 + mark_bbt_region(nand, bbt, max_blocks);
7125 +int scan_bbt(struct nandx_info *nand)
7127 + struct bbt_manager *manager = &g_bbt_manager;
7128 + struct bbt_desc *pdesc;
7129 + int total_block, len, i;
7130 + int valid_desc = 0;
7134 + total_block = div_down(nand->total_size, nand->block_size);
7135 + len = GET_BBT_LENGTH(total_block);
7137 + if (!manager->bbt) {
7138 + manager->bbt = (u8 *)mem_alloc(1, len);
7139 + if (!manager->bbt) {
7140 + pr_info("%s, %d, mem alloc fail!!! len:%d\n",
7141 + __func__, __LINE__, len);
7145 + bbt = manager->bbt;
7146 + memset(bbt, 0xFF, len);
7149 + for (i = 0; i < 2; i++) {
7150 + pdesc = &manager->desc[i];
7151 + pdesc->bbt_addr = BBT_INVALID_ADDR;
7152 + pdesc->version = 0;
7153 + ret = search_bbt(nand, pdesc, manager->max_blocks);
7154 + if (!ret && (pdesc->bbt_addr != BBT_INVALID_ADDR))
7155 + valid_desc += 1 << i;
7158 + pdesc = &manager->desc[0];
7159 + if ((valid_desc == 0x3) && (pdesc[0].version != pdesc[1].version))
7160 + valid_desc = (pdesc[0].version > pdesc[1].version) ? 1 : 2;
7163 + for (i = 0; i < 2; i++) {
7164 + if (!(valid_desc & (1 << i)))
7166 + ret = read_bbt(&pdesc[i], bbt, len);
7168 + pdesc->bbt_addr = BBT_INVALID_ADDR;
7169 + pdesc->version = 0;
7170 + valid_desc &= ~(1 << i);
7172 +		/* If the two BBT versions are the same, only need to read the first bbt */
7173 + if ((valid_desc == 0x3) &&
7174 + (pdesc[0].version == pdesc[1].version))
7178 + if (!valid_desc) {
7179 + create_bbt(nand, bbt);
7180 + pdesc[0].version = 1;
7181 + pdesc[1].version = 1;
7184 + pdesc[0].version = max_t(u8, pdesc[0].version, pdesc[1].version);
7185 + pdesc[1].version = pdesc[0].version;
7187 + for (i = 0; i < 2; i++) {
7188 + if (valid_desc & (1 << i))
7191 + ret = write_bbt(nand, &pdesc[i], &pdesc[1 - i], bbt,
7192 + manager->max_blocks);
7194 + pr_info("write bbt(%d) fail, ret:%d\n", i, ret);
7195 + manager->bbt = NULL;
7200 + /* Prevent the bbt regions from erasing / writing */
7201 + mark_bbt_region(nand, manager->bbt, manager->max_blocks);
7203 + for (i = 0; i < total_block; i++) {
7204 + if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_WORN)
7205 + pr_info("Checked WORN bad blk: %d\n", i);
7206 + else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_FACTORY_BAD)
7207 + pr_info("Checked Factory bad blk: %d\n", i);
7208 + else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_RESERVED)
7209 + pr_info("Checked Reserved blk: %d\n", i);
7210 + else if (get_bbt_mark(manager->bbt, i) != BBT_BLOCK_GOOD)
7211 + pr_info("Checked unknown blk: %d\n", i);
7217 +int bbt_mark_bad(struct nandx_info *nand, off_t offset)
7219 + struct bbt_manager *manager = &g_bbt_manager;
7220 + int block = div_down(offset, nand->block_size);
7223 + mark_nand_bad(nand, block);
7226 + set_bbt_mark(manager->bbt, block, BBT_BLOCK_WORN);
7228 + /* Update flash-based bad block table */
7229 + ret = update_bbt(nand, manager->desc, manager->bbt,
7230 + manager->max_blocks);
7232 + pr_info("block %d, update result %d.\n", block, ret);
7237 +int bbt_is_bad(struct nandx_info *nand, off_t offset)
7241 + block = div_down(offset, nand->block_size);
7243 + return get_bbt_mark(g_bbt_manager.bbt, block) != BBT_BLOCK_GOOD;
7246 +++ b/drivers/mtd/nandx/driver/uboot/driver.c
7249 + * Copyright (C) 2017 MediaTek Inc.
7250 + * Licensed under either
7251 + * BSD Licence, (see NOTICE for more details)
7252 + * GNU General Public License, version 2.0, (see NOTICE for more details)
7255 +#include <common.h>
7256 +#include <linux/io.h>
7260 +#include <linux/iopoll.h>
7261 +#include <linux/delay.h>
7262 +#include <linux/mtd/nand.h>
7263 +#include <linux/mtd/mtd.h>
7264 +#include <linux/mtd/partitions.h>
7265 +#include "nandx_core.h"
7266 +#include "nandx_util.h"
7269 +typedef int (*func_nandx_operation)(u8 *, u8 *, u64, size_t);
7272 + struct clk *nfi_clk;
7273 + struct clk *ecc_clk;
7274 + struct clk *snfi_clk;
7275 + struct clk *snfi_clk_sel;
7276 + struct clk *snfi_parent_50m;
7280 + struct nandx_info info;
7281 + struct nandx_clk clk;
7282 + struct nfi_resource *res;
7284 + struct nand_chip *nand;
7288 +/* Default flash layout for MTK nand controller
7289 + * 64Bytes oob format.
7291 +static struct nand_ecclayout eccoob = {
7294 + 17, 18, 19, 20, 21, 22, 23, 24, 25,
7295 + 26, 27, 28, 29, 30, 31, 32, 33, 34,
7296 + 35, 36, 37, 38, 39, 40, 41
7307 +static struct nandx_nfc *mtd_to_nfc(struct mtd_info *mtd)
7309 + struct nand_chip *nand = mtd_to_nand(mtd);
7311 + return (struct nandx_nfc *)nand_get_controller_data(nand);
7314 +static int nandx_enable_clk(struct nandx_clk *clk)
7318 + ret = clk_enable(clk->nfi_clk);
7320 + pr_info("failed to enable nfi clk\n");
7324 + ret = clk_enable(clk->ecc_clk);
7326 + pr_info("failed to enable ecc clk\n");
7327 + goto disable_nfi_clk;
7330 + ret = clk_enable(clk->snfi_clk);
7332 + pr_info("failed to enable snfi clk\n");
7333 + goto disable_ecc_clk;
7336 + ret = clk_enable(clk->snfi_clk_sel);
7338 + pr_info("failed to enable snfi clk sel\n");
7339 + goto disable_snfi_clk;
7342 + ret = clk_set_parent(clk->snfi_clk_sel, clk->snfi_parent_50m);
7344 + pr_info("failed to set snfi parent 50MHz\n");
7345 + goto disable_snfi_clk;
7351 + clk_disable(clk->snfi_clk);
7353 + clk_disable(clk->ecc_clk);
7355 + clk_disable(clk->nfi_clk);
7360 +static void nandx_disable_clk(struct nandx_clk *clk)
7362 + clk_disable(clk->ecc_clk);
7363 + clk_disable(clk->nfi_clk);
7364 + clk_disable(clk->snfi_clk);
7367 +static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
7368 + struct mtd_oob_region *oob_region)
7370 + struct nandx_nfc *nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7373 + eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
7375 + if (section >= eccsteps)
7378 + oob_region->length = nfc->info.fdm_reg_size - nfc->info.fdm_ecc_size;
7379 + oob_region->offset = section * nfc->info.fdm_reg_size
7380 + + nfc->info.fdm_ecc_size;
7385 +static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
7386 + struct mtd_oob_region *oob_region)
7388 + struct nandx_nfc *nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7394 + eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
7395 + oob_region->offset = nfc->info.fdm_reg_size * eccsteps;
7396 + oob_region->length = mtd->oobsize - oob_region->offset;
7401 +static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
7402 + .rfree = mtk_nfc_ooblayout_free,
7403 + .ecc = mtk_nfc_ooblayout_ecc,
7406 +struct nfc_compatible {
7407 + enum mtk_ic_version ic_ver;
7416 +static const struct nfc_compatible nfc_compats_mt7622 = {
7417 + .ic_ver = NANDX_MT7622,
7418 + .clock_1x = 26000000,
7420 + .clock_2x_num = 8,
7424 +static const struct udevice_id ic_of_match[] = {
7425 + {.compatible = "mediatek,mt7622-nfc", .data = &nfc_compats_mt7622},
7429 +static int nand_operation(struct mtd_info *mtd, loff_t addr, size_t len,
7430 + size_t *retlen, uint8_t *data, uint8_t *oob, bool read)
7432 + struct nandx_split64 split = {0};
7433 + func_nandx_operation operation;
7434 + u64 block_oobs, val, align;
7435 + uint8_t *databuf, *oobbuf;
7436 + struct nandx_nfc *nfc;
7440 + nfc = (struct nandx_nfc *)nand_get_controller_data;
7441 + spin_lock(&nfc->lock);
7446 + readoob = data ? false : true;
7447 + block_oobs = div_up(mtd->erasesize, mtd->writesize) * mtd->oobavail;
7448 + align = readoob ? block_oobs : mtd->erasesize;
7450 + operation = read ? nandx_read : nandx_write;
7452 + nandx_split(&split, addr, len, val, align);
7454 + if (split.head_len) {
7455 + ret = operation((u8 *) databuf, oobbuf, addr, split.head_len);
7458 + databuf += split.head_len;
7461 + oobbuf += split.head_len;
7463 + addr += split.head_len;
7464 + *retlen += split.head_len;
7467 + if (split.body_len) {
7468 + while (div_up(split.body_len, align)) {
7469 + ret = operation((u8 *) databuf, oobbuf, addr, align);
7472 + databuf += mtd->erasesize;
7473 + split.body_len -= mtd->erasesize;
7474 + *retlen += mtd->erasesize;
7478 + oobbuf += block_oobs;
7479 + split.body_len -= block_oobs;
7480 + *retlen += block_oobs;
7483 + addr += mtd->erasesize;
7488 + if (split.tail_len) {
7489 + ret = operation((u8 *) databuf, oobbuf, addr, split.tail_len);
7490 + *retlen += split.tail_len;
7493 + spin_unlock(&nfc->lock);
7498 +static int mtk_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
7499 + size_t *retlen, u_char *buf)
7501 + return nand_operation(mtd, from, len, retlen, buf, NULL, true);
7504 +static int mtk_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
7505 + size_t *retlen, const u_char *buf)
7507 + return nand_operation(mtd, to, len, retlen, (uint8_t *)buf,
7511 +int mtk_nand_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
7515 + return nand_operation(mtd, from, ops->ooblen, &retlen, NULL,
7516 + ops->oobbuf, true);
7519 +int mtk_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
7523 + return nand_operation(mtd, to, ops->ooblen, &retlen, NULL,
7524 + ops->oobbuf, false);
7527 +static int mtk_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
7529 + struct nandx_nfc *nfc;
7530 + u64 erase_len, erase_addr;
7534 + nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7535 + block_size = nfc->info.block_size;
7536 + erase_len = instr->len;
7537 + erase_addr = instr->addr;
7538 + spin_lock(&nfc->lock);
7539 + instr->state = MTD_ERASING;
7541 + while (erase_len) {
7542 + if (mtk_nand_is_bad(mtd, erase_addr)) {
7543 + pr_info("block(0x%llx) is bad, not erase\n",
7545 + instr->state = MTD_ERASE_FAILED;
7548 + ret = nandx_erase(erase_addr, block_size);
7550 + instr->state = MTD_ERASE_FAILED;
7552 + pr_info("erase fail at blk %llu, ret:%d\n",
7556 + erase_addr += block_size;
7557 + erase_len -= block_size;
7560 + instr->state = MTD_ERASE_DONE;
7563 + ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
7565 + spin_unlock(&nfc->lock);
7566 + /* Do mtd call back function */
7568 + mtd_erase_callback(instr);
7573 +int mtk_nand_is_bad(struct mtd_info *mtd, loff_t ofs)
7575 + struct nandx_nfc *nfc;
7578 + nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7579 + spin_lock(&nfc->lock);
7581 + /*ret = bbt_is_bad(&nfc->info, ofs);*/
7582 + ret = nandx_is_bad_block(ofs);
7583 + spin_unlock(&nfc->lock);
7586 + pr_info("nand block 0x%x is bad, ret %d!\n", ofs, ret);
7593 +int mtk_nand_mark_bad(struct mtd_info *mtd, loff_t ofs)
7595 + struct nandx_nfc *nfc;
7598 + nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7599 + spin_lock(&nfc->lock);
7600 + pr_info("%s, %d\n", __func__, __LINE__);
7601 + ret = bbt_mark_bad(&nfc->info, ofs);
7603 + spin_unlock(&nfc->lock);
7608 +void mtk_nand_sync(struct mtd_info *mtd)
7613 +static struct mtd_info *mtd_info_create(struct udevice *pdev,
7614 + struct nandx_nfc *nfc, struct nand_chip *nand)
7616 + struct mtd_info *mtd = nand_to_mtd(nand);
7619 + nand_set_controller_data(nand, nfc);
7621 + nand->flash_node = dev_of_offset(pdev);
7622 + nand->ecc.layout = &eccoob;
7624 + ret = nandx_ioctl(CORE_CTRL_NAND_INFO, &nfc->info);
7626 + pr_info("fail to get nand info (%d)!\n", ret);
7631 + mtd->owner = THIS_MODULE;
7633 + mtd->name = "MTK-SNand";
7634 + mtd->writesize = nfc->info.page_size;
7635 + mtd->erasesize = nfc->info.block_size;
7636 + mtd->oobsize = nfc->info.oob_size;
7637 + mtd->size = nfc->info.total_size;
7638 + mtd->type = MTD_NANDFLASH;
7639 + mtd->flags = MTD_CAP_NANDFLASH;
7640 + mtd->_erase = mtk_nand_erase;
7641 + mtd->_read = mtk_nand_read;
7642 + mtd->_write = mtk_nand_write;
7643 + mtd->_read_oob = mtk_nand_read_oob;
7644 + mtd->_write_oob = mtk_nand_write_oob;
7645 + mtd->_sync = mtk_nand_sync;
7646 + mtd->_lock = NULL;
7647 + mtd->_unlock = NULL;
7648 + mtd->_block_isbad = mtk_nand_is_bad;
7649 + mtd->_block_markbad = mtk_nand_mark_bad;
7650 + mtd->writebufsize = mtd->writesize;
7652 + mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
7654 + mtd->ecc_strength = nfc->info.ecc_strength;
7655 + mtd->ecc_step_size = nfc->info.sector_size;
7657 + if (!mtd->bitflip_threshold)
7658 + mtd->bitflip_threshold = mtd->ecc_strength;
7663 +int board_nand_init(struct nand_chip *nand)
7665 + struct udevice *dev;
7666 + struct mtd_info *mtd;
7667 + struct nandx_nfc *nfc;
7671 + ret = uclass_get_device_by_driver(UCLASS_MTD,
7672 + DM_GET_DRIVER(mtk_snand_drv),
7675 + pr_err("Failed to get mtk_nand_drv. (error %d)\n", ret);
7679 + nfc = dev_get_priv(dev);
7681 + ret = nandx_enable_clk(&nfc->clk);
7683 + pr_err("failed to enable nfi clk (error %d)\n", ret);
7687 + ret = nandx_init(nfc->res);
7689 + pr_err("nandx init error (%d)!\n", ret);
7694 + nandx_ioctl(NFI_CTRL_DMA, &arg);
7695 + nandx_ioctl(NFI_CTRL_ECC, &arg);
7697 +#ifdef NANDX_UNIT_TEST
7698 + nandx_unit_test(0x780000, 0x800);
7701 + mtd = mtd_info_create(dev, nfc, nand);
7707 + spin_lock_init(&nfc->lock);
7709 + ret = scan_bbt(&nfc->info);
7711 + pr_info("bbt init error (%d)!\n", ret);
7718 + nandx_disable_clk(&nfc->clk);
7723 +static int mtk_snand_ofdata_to_platdata(struct udevice *dev)
7725 + struct nandx_nfc *nfc = dev_get_priv(dev);
7726 + struct nfc_compatible *compat;
7727 + struct nfi_resource *res;
7731 + res = mem_alloc(1, sizeof(struct nfi_resource));
7737 + res->nfi_regs = (void *)dev_read_addr_index(dev, 0);
7738 + res->ecc_regs = (void *)dev_read_addr_index(dev, 1);
7739 + pr_debug("mtk snand nfi_regs:0x%x ecc_regs:0x%x\n",
7740 + res->nfi_regs, res->ecc_regs);
7742 + compat = (struct nfc_compatible *)dev_get_driver_data(dev);
7744 + res->ic_ver = (enum mtk_ic_version)(compat->ic_ver);
7745 + res->clock_1x = compat->clock_1x;
7746 + res->clock_2x = compat->clock_2x;
7747 + res->clock_2x_num = compat->clock_2x_num;
7749 + memset(&nfc->clk, 0, sizeof(struct nandx_clk));
7750 + nfc->clk.nfi_clk =
7751 + kmalloc(sizeof(*nfc->clk.nfi_clk), GFP_KERNEL);
7752 + nfc->clk.ecc_clk =
7753 + kmalloc(sizeof(*nfc->clk.ecc_clk), GFP_KERNEL);
7754 + nfc->clk.snfi_clk=
7755 + kmalloc(sizeof(*nfc->clk.snfi_clk), GFP_KERNEL);
7756 + nfc->clk.snfi_clk_sel =
7757 + kmalloc(sizeof(*nfc->clk.snfi_clk_sel), GFP_KERNEL);
7758 + nfc->clk.snfi_parent_50m =
7759 + kmalloc(sizeof(*nfc->clk.snfi_parent_50m), GFP_KERNEL);
7761 + if (!nfc->clk.nfi_clk || !nfc->clk.ecc_clk || !nfc->clk.snfi_clk ||
7762 + !nfc->clk.snfi_clk_sel || !nfc->clk.snfi_parent_50m) {
7767 + ret = clk_get_by_name(dev, "nfi_clk", nfc->clk.nfi_clk);
7768 + if (IS_ERR(nfc->clk.nfi_clk)) {
7769 + ret = PTR_ERR(nfc->clk.nfi_clk);
7773 + ret = clk_get_by_name(dev, "ecc_clk", nfc->clk.ecc_clk);
7774 + if (IS_ERR(nfc->clk.ecc_clk)) {
7775 + ret = PTR_ERR(nfc->clk.ecc_clk);
7779 + ret = clk_get_by_name(dev, "snfi_clk", nfc->clk.snfi_clk);
7780 + if (IS_ERR(nfc->clk.snfi_clk)) {
7781 + ret = PTR_ERR(nfc->clk.snfi_clk);
7785 + ret = clk_get_by_name(dev, "spinfi_sel", nfc->clk.snfi_clk_sel);
7786 + if (IS_ERR(nfc->clk.snfi_clk_sel)) {
7787 + ret = PTR_ERR(nfc->clk.snfi_clk_sel);
7791 + ret = clk_get_by_name(dev, "spinfi_parent_50m", nfc->clk.snfi_parent_50m);
7792 + if (IS_ERR(nfc->clk.snfi_parent_50m))
7793 + pr_info("spinfi parent 50MHz is not configed\n");
7797 + if (nfc->clk.nfi_clk)
7798 + kfree(nfc->clk.nfi_clk);
7799 + if (nfc->clk.snfi_clk)
7800 + kfree(nfc->clk.snfi_clk);
7801 + if (nfc->clk.ecc_clk)
7802 + kfree(nfc->clk.ecc_clk);
7803 + if (nfc->clk.snfi_clk_sel)
7804 + kfree(nfc->clk.snfi_clk_sel);
7805 + if (nfc->clk.snfi_parent_50m)
7806 + kfree(nfc->clk.snfi_parent_50m);
7811 +U_BOOT_DRIVER(mtk_snand_drv) = {
7812 + .name = "mtk_snand",
7814 + .of_match = ic_of_match,
7815 + .ofdata_to_platdata = mtk_snand_ofdata_to_platdata,
7816 + .priv_auto_alloc_size = sizeof(struct nandx_nfc),
7819 +MODULE_LICENSE("GPL v2");
7820 +MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
7821 +MODULE_AUTHOR("MediaTek");
7823 +++ b/drivers/mtd/nandx/include/Nandx.mk
7826 +# Copyright (C) 2017 MediaTek Inc.
7827 +# Licensed under either
7828 +# BSD Licence, (see NOTICE for more details)
7829 +# GNU General Public License, version 2.0, (see NOTICE for more details)
7832 +nandx-header-y += internal/nandx_core.h
7833 +nandx-header-y += internal/nandx_errno.h
7834 +nandx-header-y += internal/nandx_util.h
7835 +nandx-header-$(NANDX_BBT_SUPPORT) += internal/bbt.h
7836 +nandx-header-$(NANDX_SIMULATOR_SUPPORT) += simulator/nandx_os.h
7837 +nandx-header-$(NANDX_CTP_SUPPORT) += ctp/nandx_os.h
7838 +nandx-header-$(NANDX_LK_SUPPORT) += lk/nandx_os.h
7839 +nandx-header-$(NANDX_KERNEL_SUPPORT) += kernel/nandx_os.h
7840 +nandx-header-$(NANDX_UBOOT_SUPPORT) += uboot/nandx_os.h
7842 +++ b/drivers/mtd/nandx/include/internal/bbt.h
7845 + * Copyright (C) 2017 MediaTek Inc.
7846 + * Licensed under either
7847 + * BSD Licence, (see NOTICE for more details)
7848 + * GNU General Public License, version 2.0, (see NOTICE for more details)
7854 +#define BBT_BLOCK_GOOD 0x03
7855 +#define BBT_BLOCK_WORN 0x02
7856 +#define BBT_BLOCK_RESERVED 0x01
7857 +#define BBT_BLOCK_FACTORY_BAD 0x00
7859 +#define BBT_INVALID_ADDR 0
7860 +/* The maximum number of blocks to scan for a bbt */
7861 +#define NAND_BBT_SCAN_MAXBLOCKS 4
7862 +#define NAND_BBT_USE_FLASH 0x00020000
7863 +#define NAND_BBT_NO_OOB 0x00040000
7865 +/* Search good / bad pattern on the first and the second page */
7866 +#define NAND_BBT_SCAN2NDPAGE 0x00008000
7867 +/* Search good / bad pattern on the last page of the eraseblock */
7868 +#define NAND_BBT_SCANLASTPAGE 0x00010000
7870 +#define NAND_DRAM_BUF_DATABUF_ADDR (NAND_BUF_ADDR)
7872 +struct bbt_pattern {
7878 + struct bbt_pattern pattern;
7880 + u64 bbt_addr;/*0: invalid value; otherwise, valid value*/
7883 +struct bbt_manager {
7884 + /* main bbt descriptor and mirror descriptor */
7885 + struct bbt_desc desc[2];/* 0: main bbt; 1: mirror bbt */
7890 +#define BBT_ENTRY_MASK 0x03
7891 +#define BBT_ENTRY_SHIFT 2
7893 +#define GET_BBT_LENGTH(blocks) (blocks >> 2)
7894 +#define GET_ENTRY(block) ((block) >> BBT_ENTRY_SHIFT)
7895 +#define GET_POSITION(block) (((block) & BBT_ENTRY_MASK) * 2)
7896 +#define GET_MARK_VALUE(block, mark) \
7897 + (((mark) & BBT_ENTRY_MASK) << GET_POSITION(block))
7899 +int scan_bbt(struct nandx_info *nand);
7901 +int bbt_mark_bad(struct nandx_info *nand, off_t offset);
7903 +int bbt_is_bad(struct nandx_info *nand, off_t offset);
7905 +#endif /*__BBT_H__*/
7907 +++ b/drivers/mtd/nandx/include/internal/nandx_core.h
7910 + * Copyright (C) 2017 MediaTek Inc.
7911 + * Licensed under either
7912 + * BSD Licence, (see NOTICE for more details)
7913 + * GNU General Public License, version 2.0, (see NOTICE for more details)
7916 +#ifndef __NANDX_CORE_H__
7917 +#define __NANDX_CORE_H__
7920 + * mtk_ic_version - indicates a specific IC; the IP needs this to load some info
7922 +enum mtk_ic_version {
7927 + * nandx_ioctl_cmd - operations supported by nandx
7929 + * @NFI_CTRL_DMA dma enable or not
7930 + * @NFI_CTRL_NFI_MODE customer/read/program/erase...
7931 + * @NFI_CTRL_ECC ecc enable or not
7932 + * @NFI_CTRL_ECC_MODE nfi/dma/pio
7933 + * @CHIP_CTRL_DRIVE_STRENGTH enum chip_ctrl_drive_strength
7935 +enum nandx_ctrl_cmd {
7936 + CORE_CTRL_NAND_INFO,
7939 + NFI_CTRL_NFI_MODE,
7940 + NFI_CTRL_AUTOFORMAT,
7942 + NFI_CTRL_PAGE_IRQ,
7943 + NFI_CTRL_RANDOMIZE,
7944 + NFI_CTRL_BAD_MARK_SWAP,
7947 + NFI_CTRL_ECC_MODE,
7948 + NFI_CTRL_ECC_CLOCK,
7950 + NFI_CTRL_ECC_PAGE_IRQ,
7951 + NFI_CTRL_ECC_DECODE_MODE,
7953 + SNFI_CTRL_OP_MODE,
7954 + SNFI_CTRL_RX_MODE,
7955 + SNFI_CTRL_TX_MODE,
7956 + SNFI_CTRL_DELAY_MODE,
7958 + CHIP_CTRL_OPS_CACHE,
7959 + CHIP_CTRL_OPS_MULTI,
7960 + CHIP_CTRL_PSLC_MODE,
7961 + CHIP_CTRL_DRIVE_STRENGTH,
7962 + CHIP_CTRL_DDR_MODE,
7963 + CHIP_CTRL_ONDIE_ECC,
7964 + CHIP_CTRL_TIMING_MODE
7967 +enum snfi_ctrl_op_mode {
7973 +enum snfi_ctrl_rx_mode {
7981 +enum snfi_ctrl_tx_mode {
7986 +enum chip_ctrl_drive_strength {
7987 + CHIP_DRIVE_NORMAL,
7989 + CHIP_DRIVE_MIDDLE,
7993 +enum chip_ctrl_timing_mode {
7994 + CHIP_TIMING_MODE0,
7995 + CHIP_TIMING_MODE1,
7996 + CHIP_TIMING_MODE2,
7997 + CHIP_TIMING_MODE3,
7998 + CHIP_TIMING_MODE4,
7999 + CHIP_TIMING_MODE5,
8003 + * nandx_info - basic information
8005 +struct nandx_info {
8007 + u32 min_write_pages;
8010 + u32 page_parity_size;
8021 + * nfi_resource - the resource needed by nfi & ecc to do initialization
8023 +struct nfi_resource {
8041 + * nandx_init - init all related modules below
8043 + * @res: basic resource of the project
8045 + * return 0 if init success, otherwise return negative error code
8047 +int nandx_init(struct nfi_resource *res);
8050 + * nandx_exit - release resource those that obtained in init flow
8052 +void nandx_exit(void);
8055 + * nandx_read - read data from nand this function can read data and related
8056 + * oob from a specific address
8057 + * if do multi_ops, set one operation per time, and call nandx_sync at last
8058 + * in multi mode, not support page partial read
8059 + * oob not support partial read
8061 + * @data: buf to receive data from nand
8062 + * @oob: buf to receive oob data from nand which related to data page
8063 + * length of @oob should oob size aligned, oob not support partial read
8064 + * @offset: offset address on the whole flash
8065 + * @len: the length of @data that need to read
8067 + * if read success return 0, otherwise return negative error code
8069 +int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len);
8072 + * nandx_write - write data to nand
8073 + * this function can write data and related oob to a specific address
8074 + * if do multi_ops, set one operation per time, and call nandx_sync at last
8076 + * @data: source data to be written to nand,
8077 + * for multi operation, the length of @data should be page size aligned
8078 + * @oob: source oob which related to data page to be written to nand,
8079 + * length of @oob should oob size aligned
8080 + * @offset: offset address on the whole flash, the value should be start address
8082 + * @len: the length of @data that need to write,
8083 + * for multi operation, the len should be page size aligned
8085 + * if write success return 0, otherwise return negative error code
8086 + * if return value > 0, it indicates that how many pages still need to write,
8087 + * and data has not been written to nand
8088 + * please call nandx_sync after pages aligned $nandx_info.min_write_pages
8090 +int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len);
8093 + * nandx_erase - erase an area of nand
8094 + * if do multi_ops, set one operation per time, and call nandx_sync at last
8096 + * @offset: offset address on the flash
8097 + * @len: erase length which should be block size aligned
8099 + * if erase success return 0, otherwise return negative error code
8101 +int nandx_erase(u64 offset, size_t len);
8104 + * nandx_sync - sync all operations to nand
8105 + * when do multi_ops, this function will be called at last operation
8106 + * when writing data, if the number of pages is not aligned
8107 + * by $nandx_info.min_write_pages, this interface could be called to do
8108 + * force write, 0xff will be padded to blanked pages.
8110 +int nandx_sync(void);
8113 + * nandx_is_bad_block - check if the block is bad
8114 + * only check the flag that marked by the flash vendor
8116 + * @offset: offset address on the whole flash
8118 + * return true if the block is bad, otherwise return false
8120 +bool nandx_is_bad_block(u64 offset);
8123 + * nandx_ioctl - set/get property of nand chip
8125 + * @cmd: parameter that defined in enum nandx_ioctl_cmd
8126 + * @arg: operate parameter
8128 + * return 0 if operate success, otherwise return negative error code
8130 +int nandx_ioctl(int cmd, void *arg);
8133 + * nandx_suspend - suspend nand, and store some data
8135 + * return 0 if suspend success, otherwise return negative error code
8137 +int nandx_suspend(void);
8140 + * nandx_resume - resume nand, and replay some data
8142 + * return 0 if resume success, otherwise return negative error code
8144 +int nandx_resume(void);
8146 +#ifdef NANDX_UNIT_TEST
8148 + * nandx_unit_test - unit test
8150 + * @offset: offset address on the whole flash
8151 + * @len: should be not larger than a block size, we only test a block per time
8153 + * return 0 if test success, otherwise return negative error code
8155 +int nandx_unit_test(u64 offset, size_t len);
8158 +#endif /* __NANDX_CORE_H__ */
8160 +++ b/drivers/mtd/nandx/include/internal/nandx_errno.h
8163 + * Copyright (C) 2017 MediaTek Inc.
8164 + * Licensed under either
8165 + * BSD Licence, (see NOTICE for more details)
8166 + * GNU General Public License, version 2.0, (see NOTICE for more details)
8169 +#ifndef __NANDX_ERRNO_H__
8170 +#define __NANDX_ERRNO_H__
8173 +#define EIO 5 /* I/O error */
8174 +#define ENOMEM 12 /* Out of memory */
8175 +#define EFAULT 14 /* Bad address */
8176 +#define EBUSY 16 /* Device or resource busy */
8177 +#define ENODEV 19 /* No such device */
8178 +#define EINVAL 22 /* Invalid argument */
8179 +#define ENOSPC 28 /* No space left on device */
8180 +/* Operation not supported on transport endpoint */
8181 +#define EOPNOTSUPP 95
8182 +#define ETIMEDOUT 110 /* Connection timed out */
8185 +#define ENANDFLIPS 1024 /* Too many bitflips, uncorrected */
8186 +#define ENANDREAD 1025 /* Read fail, can't correct */
8187 +#define ENANDWRITE 1026 /* Write fail */
8188 +#define ENANDERASE 1027 /* Erase fail */
8189 +#define ENANDBAD 1028 /* Bad block */
8190 +#define ENANDWP 1029
8192 +#define IS_NAND_ERR(err) ((err) >= -ENANDBAD && (err) <= -ENANDFLIPS)
8195 +#define MAX_ERRNO 4096
8196 +#define ERR_PTR(errno) ((void *)((long)errno))
8197 +#define PTR_ERR(ptr) ((long)(ptr))
8198 +#define IS_ERR(ptr) ((unsigned long)(ptr) > (unsigned long)-MAX_ERRNO)
8201 +#endif /* __NANDX_ERRNO_H__ */
8203 +++ b/drivers/mtd/nandx/include/internal/nandx_util.h
8206 + * Copyright (C) 2017 MediaTek Inc.
8207 + * Licensed under either
8208 + * BSD Licence, (see NOTICE for more details)
8209 + * GNU General Public License, version 2.0, (see NOTICE for more details)
8212 +#ifndef __NANDX_UTIL_H__
8213 +#define __NANDX_UTIL_H__
8215 +typedef unsigned char u8;
8216 +typedef unsigned short u16;
8217 +typedef unsigned int u32;
8218 +typedef unsigned long long u64;
8220 +enum nand_irq_return {
8225 +enum nand_dma_operation {
8232 + * Compatible function
8233 + * used for preloader/lk/kernel environment
8235 +#include "nandx_os.h"
8236 +#include "nandx_errno.h"
8239 +#define BIT(a) (1 << (a))
8243 +#define min_t(type, x, y) ({ \
8244 + type __min1 = (x); \
8245 + type __min2 = (y); \
8246 + __min1 < __min2 ? __min1 : __min2; })
8248 +#define max_t(type, x, y) ({ \
8249 + type __max1 = (x); \
8250 + type __max2 = (y); \
8251 + __max1 > __max2 ? __max1 : __max2; })
8255 +#define GENMASK(h, l) \
8256 + (((~0UL) << (l)) & (~0UL >> ((sizeof(unsigned long) * 8) - 1 - (h))))
8260 +#define __weak __attribute__((__weak__))
8264 +#define __packed __attribute__((__packed__))
8268 +#define KB(x) ((x) << 10)
8269 +#define MB(x) (KB(x) << 10)
8270 +#define GB(x) (MB(x) << 10)
8274 +#define offsetof(type, member) ((size_t)&((type *)0)->member)
8278 +#define NULL (void *)0
8280 +static inline u32 nandx_popcount(u32 x)
8282 + x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
8283 + x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
8284 + x = (x & 0x0F0F0F0F) + ((x >> 4) & 0x0F0F0F0F);
8285 + x = (x & 0x00FF00FF) + ((x >> 8) & 0x00FF00FF);
8286 + x = (x & 0x0000FFFF) + ((x >> 16) & 0x0000FFFF);
8291 +#ifndef zero_popcount
8292 +#define zero_popcount(x) (32 - nandx_popcount(x))
8296 +#define do_div(n, base) \
8298 + u32 __base = (base); \
8300 + __rem = ((u64)(n)) % __base; \
8301 + (n) = ((u64)(n)) / __base; \
8306 +#define div_up(x, y) \
8308 + u64 __temp = ((x) + (y) - 1); \
8309 + do_div(__temp, (y)); \
8313 +#define div_down(x, y) \
8315 + u64 __temp = (x); \
8316 + do_div(__temp, (y)); \
8320 +#define div_round_up(x, y) (div_up(x, y) * (y))
8321 +#define div_round_down(x, y) (div_down(x, y) * (y))
8323 +#define reminder(x, y) \
8325 + u64 __temp = (x); \
8326 + do_div(__temp, (y)); \
8330 +#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)
8331 +#define round_down(x, y) ((x) & ~((y) - 1))
8334 +#ifndef readx_poll_timeout_atomic
8335 +#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
8337 + u64 end = get_current_time_us() + timeout_us; \
8339 + u64 now = get_current_time_us(); \
8340 + (val) = op(addr); \
8343 + if (now > end) { \
8344 + (val) = op(addr); \
8348 + (cond) ? 0 : -ETIMEDOUT; \
8351 +#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
8352 + readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
8353 +#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
8354 + readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
8355 +#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
8356 + readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
8359 +struct nandx_split64 {
8368 +struct nandx_split32 {
8377 +#define nandx_split(split, offset, len, val, align) \
8379 + (split)->head = (offset); \
8380 + (val) = div_round_down((offset), (align)); \
8381 + (val) = (align) - ((offset) - (val)); \
8382 + if ((val) == (align)) \
8383 + (split)->head_len = 0; \
8384 + else if ((val) > (len)) \
8385 + (split)->head_len = len; \
8387 + (split)->head_len = val; \
8388 + (split)->body = (offset) + (split)->head_len; \
8389 + (split)->body_len = div_round_down((len) - \
8390 + (split)->head_len,\
8392 + (split)->tail = (split)->body + (split)->body_len; \
8393 + (split)->tail_len = (len) - (split)->head_len - \
8394 + (split)->body_len; \
8397 +#ifndef container_of
8398 +#define container_of(ptr, type, member) \
8399 + ({const __typeof__(((type *)0)->member) * __mptr = (ptr); \
8400 + (type *)((char *)__mptr - offsetof(type, member)); })
8403 +static inline u32 nandx_cpu_to_be32(u32 val)
8406 + u8 *p_temp = (u8 *)&temp;
8409 + return ((val & 0xff) << 24) | ((val & 0xff00) << 8) |
8410 + ((val >> 8) & 0xff00) | ((val >> 24) & 0xff);
8415 +static inline void nandx_set_bits32(unsigned long addr, u32 mask,
8418 + u32 temp = readl((void *)addr);
8422 + writel(temp, (void *)addr);
8425 +#endif /* __NANDX_UTIL_H__ */
8427 +++ b/drivers/mtd/nandx/include/uboot/nandx_os.h
8430 + * Copyright (C) 2017 MediaTek Inc.
8431 + * Licensed under either
8432 + * BSD Licence, (see NOTICE for more details)
8433 + * GNU General Public License, version 2.0, (see NOTICE for more details)
8436 +#ifndef __NANDX_OS_H__
8437 +#define __NANDX_OS_H__
8439 +#include <common.h>
8442 +#include <asm/dma-mapping.h>
8443 +#include <linux/io.h>
8444 +#include <linux/err.h>
8445 +#include <linux/errno.h>
8446 +#include <linux/bitops.h>
8447 +#include <linux/kernel.h>
8448 +#include <linux/compiler-gcc.h>
8450 +#define NANDX_BULK_IO_USE_DRAM 0
8452 +#define nandx_event_create() NULL
8453 +#define nandx_event_destroy(event)
8454 +#define nandx_event_complete(event)
8455 +#define nandx_event_init(event)
8456 +#define nandx_event_wait_complete(event, timeout) true
8458 +#define nandx_irq_register(dev, irq, irq_handler, name, data) NULL
8460 +static inline void *mem_alloc(u32 count, u32 size)
8462 + return kmalloc(count * size, GFP_KERNEL | __GFP_ZERO);
8465 +static inline void mem_free(void *mem)
8470 +static inline u64 get_current_time_us(void)
8472 + return timer_get_us();
8475 +static inline u32 nandx_dma_map(void *dev, void *buf, u64 len,
8476 + enum nand_dma_operation op)
8478 + unsigned long addr = (unsigned long)buf;
8481 + size = ALIGN(len, ARCH_DMA_MINALIGN);
8483 + if (op == NDMA_FROM_DEV)
8484 + invalidate_dcache_range(addr, addr + size);
8486 + flush_dcache_range(addr, addr + size);
8491 +static inline void nandx_dma_unmap(void *dev, void *buf, void *addr,
8492 + u64 len, enum nand_dma_operation op)
8496 + size = ALIGN(len, ARCH_DMA_MINALIGN);
8498 + if (op != NDMA_FROM_DEV)
8499 + invalidate_dcache_range((unsigned long)addr, addr + size);
8501 + flush_dcache_range((unsigned long)addr, addr + size);
8506 +#endif /* __NANDX_OS_H__ */
8507 --- a/include/configs/mt7622.h
8508 +++ b/include/configs/mt7622.h
8511 #include <linux/sizes.h>
8514 +#if defined(CONFIG_MTD_RAW_NAND)
8515 +#define CONFIG_SYS_MAX_NAND_DEVICE 1
8516 +#define CONFIG_SYS_NAND_BASE 0x1100d000
8518 +#define ENV_BOOT_READ_IMAGE \
8520 + "nand read 0x4007ff28 0x380000 0x1400000" \
8521 + ";iminfo 0x4007ff28 \0"
8523 +#define ENV_BOOT_WRITE_IMAGE \
8525 + "nand write 0x4007ff28 0x380000 0x1400000" \
8526 + ";iminfo 0x4007ff28 \0"
8528 +#define ENV_BOOT_CMD \
8529 + "mtk_boot=run boot_rd_img;bootm;\0"
8531 +#define CONFIG_EXTRA_ENV_SETTINGS \
8532 + ENV_BOOT_READ_IMAGE \
8534 + "bootcmd=run mtk_boot;\0"
8538 #define CONFIG_SYS_MAXARGS 8
8539 #define CONFIG_SYS_BOOTM_LEN SZ_64M
8540 #define CONFIG_SYS_CBSIZE SZ_1K