arm-trusted-firmware-mediatek: update to 2021-03-10
[openwrt/staging/rmilecki.git] / package / boot / uboot-mediatek / patches / 002-nand-add-spi-nand-driver.patch
1 From de8b6cf615be20b25d0f3c817866de2c0d46a704 Mon Sep 17 00:00:00 2001
2 From: Sam Shih <sam.shih@mediatek.com>
3 Date: Mon, 20 Apr 2020 17:10:05 +0800
4 Subject: [PATCH 1/3] nand: add spi nand driver
5
6 Add spi nand driver support for mt7622 based on nfi controller
7
8 Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
9 ---
10 drivers/mtd/Kconfig | 7 +
11 drivers/mtd/Makefile | 4 +
12 drivers/mtd/nand/raw/nand.c | 2 +
13 drivers/mtd/nandx/NOTICE | 52 +
14 drivers/mtd/nandx/Nandx.config | 17 +
15 drivers/mtd/nandx/Nandx.mk | 91 ++
16 drivers/mtd/nandx/README | 31 +
17 drivers/mtd/nandx/core/Nandx.mk | 38 +
18 drivers/mtd/nandx/core/core_io.c | 735 +++++++++
19 drivers/mtd/nandx/core/core_io.h | 39 +
20 drivers/mtd/nandx/core/nand/device_spi.c | 200 +++
21 drivers/mtd/nandx/core/nand/device_spi.h | 132 ++
22 drivers/mtd/nandx/core/nand/nand_spi.c | 526 +++++++
23 drivers/mtd/nandx/core/nand/nand_spi.h | 35 +
24 drivers/mtd/nandx/core/nand_base.c | 304 ++++
25 drivers/mtd/nandx/core/nand_base.h | 71 +
26 drivers/mtd/nandx/core/nand_chip.c | 272 ++++
27 drivers/mtd/nandx/core/nand_chip.h | 103 ++
28 drivers/mtd/nandx/core/nand_device.c | 285 ++++
29 drivers/mtd/nandx/core/nand_device.h | 608 ++++++++
30 drivers/mtd/nandx/core/nfi.h | 51 +
31 drivers/mtd/nandx/core/nfi/nfi_base.c | 1357 +++++++++++++++++
32 drivers/mtd/nandx/core/nfi/nfi_base.h | 95 ++
33 drivers/mtd/nandx/core/nfi/nfi_regs.h | 114 ++
34 drivers/mtd/nandx/core/nfi/nfi_spi.c | 689 +++++++++
35 drivers/mtd/nandx/core/nfi/nfi_spi.h | 44 +
36 drivers/mtd/nandx/core/nfi/nfi_spi_regs.h | 64 +
37 drivers/mtd/nandx/core/nfi/nfiecc.c | 510 +++++++
38 drivers/mtd/nandx/core/nfi/nfiecc.h | 90 ++
39 drivers/mtd/nandx/core/nfi/nfiecc_regs.h | 51 +
40 drivers/mtd/nandx/driver/Nandx.mk | 18 +
41 drivers/mtd/nandx/driver/bbt/bbt.c | 408 +++++
42 drivers/mtd/nandx/driver/uboot/driver.c | 574 +++++++
43 drivers/mtd/nandx/include/Nandx.mk | 16 +
44 drivers/mtd/nandx/include/internal/bbt.h | 62 +
45 .../mtd/nandx/include/internal/nandx_core.h | 250 +++
46 .../mtd/nandx/include/internal/nandx_errno.h | 40 +
47 .../mtd/nandx/include/internal/nandx_util.h | 221 +++
48 drivers/mtd/nandx/include/uboot/nandx_os.h | 78 +
49 include/configs/mt7622.h | 25 +
50 40 files changed, 8309 insertions(+)
51 create mode 100644 drivers/mtd/nandx/NOTICE
52 create mode 100644 drivers/mtd/nandx/Nandx.config
53 create mode 100644 drivers/mtd/nandx/Nandx.mk
54 create mode 100644 drivers/mtd/nandx/README
55 create mode 100644 drivers/mtd/nandx/core/Nandx.mk
56 create mode 100644 drivers/mtd/nandx/core/core_io.c
57 create mode 100644 drivers/mtd/nandx/core/core_io.h
58 create mode 100644 drivers/mtd/nandx/core/nand/device_spi.c
59 create mode 100644 drivers/mtd/nandx/core/nand/device_spi.h
60 create mode 100644 drivers/mtd/nandx/core/nand/nand_spi.c
61 create mode 100644 drivers/mtd/nandx/core/nand/nand_spi.h
62 create mode 100644 drivers/mtd/nandx/core/nand_base.c
63 create mode 100644 drivers/mtd/nandx/core/nand_base.h
64 create mode 100644 drivers/mtd/nandx/core/nand_chip.c
65 create mode 100644 drivers/mtd/nandx/core/nand_chip.h
66 create mode 100644 drivers/mtd/nandx/core/nand_device.c
67 create mode 100644 drivers/mtd/nandx/core/nand_device.h
68 create mode 100644 drivers/mtd/nandx/core/nfi.h
69 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_base.c
70 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_base.h
71 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_regs.h
72 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi.c
73 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi.h
74 create mode 100644 drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
75 create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc.c
76 create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc.h
77 create mode 100644 drivers/mtd/nandx/core/nfi/nfiecc_regs.h
78 create mode 100644 drivers/mtd/nandx/driver/Nandx.mk
79 create mode 100644 drivers/mtd/nandx/driver/bbt/bbt.c
80 create mode 100644 drivers/mtd/nandx/driver/uboot/driver.c
81 create mode 100644 drivers/mtd/nandx/include/Nandx.mk
82 create mode 100644 drivers/mtd/nandx/include/internal/bbt.h
83 create mode 100644 drivers/mtd/nandx/include/internal/nandx_core.h
84 create mode 100644 drivers/mtd/nandx/include/internal/nandx_errno.h
85 create mode 100644 drivers/mtd/nandx/include/internal/nandx_util.h
86 create mode 100644 drivers/mtd/nandx/include/uboot/nandx_os.h
87
88 --- a/drivers/mtd/Kconfig
89 +++ b/drivers/mtd/Kconfig
90 @@ -108,6 +108,13 @@ config HBMC_AM654
91 This is the driver for HyperBus controller on TI's AM65x and
92 other SoCs
93
94 +config MTK_SPI_NAND
95 + tristate "Mediatek SPI Nand"
96 + depends on DM_MTD
97 + help
98 + This option will support SPI Nand device via Mediatek
99 + NFI controller.
100 +
101 source "drivers/mtd/nand/Kconfig"
102
103 source "drivers/mtd/spi/Kconfig"
104 --- a/drivers/mtd/Makefile
105 +++ b/drivers/mtd/Makefile
106 @@ -41,3 +41,7 @@ obj-$(CONFIG_$(SPL_TPL_)SPI_FLASH_SUPPOR
107 obj-$(CONFIG_SPL_UBI) += ubispl/
108
109 endif
110 +
111 +ifeq ($(CONFIG_MTK_SPI_NAND), y)
112 +include $(srctree)/drivers/mtd/nandx/Nandx.mk
113 +endif
114 --- a/drivers/mtd/nand/raw/nand.c
115 +++ b/drivers/mtd/nand/raw/nand.c
116 @@ -91,8 +91,10 @@ static void nand_init_chip(int i)
117 if (board_nand_init(nand))
118 return;
119
120 +#ifndef CONFIG_MTK_SPI_NAND
121 if (nand_scan(mtd, maxchips))
122 return;
123 +#endif
124
125 nand_register(i, mtd);
126 }
127 --- /dev/null
128 +++ b/drivers/mtd/nandx/NOTICE
129 @@ -0,0 +1,52 @@
130 +
131 +/*
132 + * Nandx - Mediatek Common Nand Driver
133 + * Copyright (C) 2017 MediaTek Inc.
134 + *
135 + * Nandx is dual licensed: you can use it either under the terms of
136 + * the GPL, or the BSD license, at your option.
137 + *
138 + * a) This program is free software; you can redistribute it and/or modify
139 + * it under the terms of the GNU General Public License version 2 as
140 + * published by the Free Software Foundation.
141 + *
142 + * This library is distributed in the hope that it will be useful,
143 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
144 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
145 + * GNU General Public License for more details.
146 + *
147 + * This program is distributed in the hope that it will be useful,
148 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
149 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
150 + * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
151 + *
152 + * Alternatively,
153 + *
154 + * b) Redistribution and use in source and binary forms, with or
155 + * without modification, are permitted provided that the following
156 + * conditions are met:
157 + *
158 + * 1. Redistributions of source code must retain the above
159 + * copyright notice, this list of conditions and the following
160 + * disclaimer.
161 + * 2. Redistributions in binary form must reproduce the above
162 + * copyright notice, this list of conditions and the following
163 + * disclaimer in the documentation and/or other materials
164 + * provided with the distribution.
165 + *
166 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
167 + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
168 + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
169 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
170 + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
171 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
172 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
173 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
174 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
175 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
176 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
177 + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
178 + * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
179 + */
180 +
181 +####################################################################################################
182 \ No newline at end of file
183 --- /dev/null
184 +++ b/drivers/mtd/nandx/Nandx.config
185 @@ -0,0 +1,17 @@
186 +NANDX_SIMULATOR_SUPPORT := n
187 +NANDX_CTP_SUPPORT := n
188 +NANDX_DA_SUPPORT := n
189 +NANDX_PRELOADER_SUPPORT := n
190 +NANDX_LK_SUPPORT := n
191 +NANDX_KERNEL_SUPPORT := n
192 +NANDX_BROM_SUPPORT := n
193 +NANDX_UBOOT_SUPPORT := y
194 +NANDX_BBT_SUPPORT := y
195 +
196 +NANDX_NAND_SPI := y
197 +NANDX_NAND_SLC := n
198 +NANDX_NAND_MLC := n
199 +NANDX_NAND_TLC := n
200 +NANDX_NFI_BASE := y
201 +NANDX_NFI_ECC := y
202 +NANDX_NFI_SPI := y
203 --- /dev/null
204 +++ b/drivers/mtd/nandx/Nandx.mk
205 @@ -0,0 +1,91 @@
206 +#
207 +# Copyright (C) 2017 MediaTek Inc.
208 +# Licensed under either
209 +# BSD Licence, (see NOTICE for more details)
210 +# GNU General Public License, version 2.0, (see NOTICE for more details)
211 +#
212 +
213 +nandx_dir := $(shell dirname $(lastword $(MAKEFILE_LIST)))
214 +include $(nandx_dir)/Nandx.config
215 +
216 +ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
217 +sim-obj :=
218 +sim-inc :=
219 +nandx-obj := sim-obj
220 +nandx-prefix := .
221 +nandx-postfix := %.o
222 +sim-inc += -I$(nandx-prefix)/include/internal
223 +sim-inc += -I$(nandx-prefix)/include/simulator
224 +endif
225 +
226 +ifeq ($(NANDX_CTP_SUPPORT), y)
227 +nandx-obj := C_SRC_FILES
228 +nandx-prefix := $(nandx_dir)
229 +nandx-postfix := %.c
230 +INC_DIRS += $(nandx_dir)/include/internal
231 +INC_DIRS += $(nandx_dir)/include/ctp
232 +endif
233 +
234 +ifeq ($(NANDX_DA_SUPPORT), y)
235 +nandx-obj := obj-y
236 +nandx-prefix := $(nandx_dir)
237 +nandx-postfix := %.o
238 +INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/internal
239 +INCLUDE_PATH += $(TOPDIR)/platform/$(CODE_BASE)/dev/nand/nandx/include/da
240 +endif
241 +
242 +ifeq ($(NANDX_PRELOADER_SUPPORT), y)
243 +nandx-obj := MOD_SRC
244 +nandx-prefix := $(nandx_dir)
245 +nandx-postfix := %.c
246 +C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/internal
247 +C_OPTION += -I$(MTK_PATH_PLATFORM)/src/drivers/nandx/include/preloader
248 +endif
249 +
250 +ifeq ($(NANDX_LK_SUPPORT), y)
251 +nandx-obj := MODULE_SRCS
252 +nandx-prefix := $(nandx_dir)
253 +nandx-postfix := %.c
254 +GLOBAL_INCLUDES += $(nandx_dir)/include/internal
255 +GLOBAL_INCLUDES += $(nandx_dir)/include/lk
256 +endif
257 +
258 +ifeq ($(NANDX_KERNEL_SUPPORT), y)
259 +nandx-obj := obj-y
260 +nandx-prefix := nandx
261 +nandx-postfix := %.o
262 +ccflags-y += -I$(nandx_dir)/include/internal
263 +ccflags-y += -I$(nandx_dir)/include/kernel
264 +endif
265 +
266 +ifeq ($(NANDX_UBOOT_SUPPORT), y)
267 +nandx-obj := obj-y
268 +nandx-prefix := nandx
269 +nandx-postfix := %.o
270 +ccflags-y += -I$(nandx_dir)/include/internal
271 +ccflags-y += -I$(nandx_dir)/include/uboot
272 +endif
273 +
274 +nandx-y :=
275 +include $(nandx_dir)/core/Nandx.mk
276 +nandx-target := $(nandx-prefix)/core/$(nandx-postfix)
277 +$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
278 +
279 +
280 +nandx-y :=
281 +include $(nandx_dir)/driver/Nandx.mk
282 +nandx-target := $(nandx-prefix)/driver/$(nandx-postfix)
283 +$(nandx-obj) += $(patsubst %.c, $(nandx-target), $(nandx-y))
284 +
285 +ifeq ($(NANDX_SIMULATOR_SUPPORT), y)
286 +cc := gcc
287 +CFLAGS += $(sim-inc)
288 +
289 +.PHONY:nandx
290 +nandx: $(sim-obj)
291 + $(cc) $(sim-obj) -o nandx
292 +
293 +.PHONY:clean
294 +clean:
295 + rm -rf $(sim-obj) nandx
296 +endif
297 --- /dev/null
298 +++ b/drivers/mtd/nandx/README
299 @@ -0,0 +1,31 @@
300 +
301 + NAND2.0
302 + ===============================
303 +
304 +   NAND2.0 is a common nand driver which is designed for accessing
305 +different types of NANDs (SLC, SPI-NAND, MLC, TLC) on various OSes. This
306 +driver can work on most SoCs of Mediatek.
307 +
308 +   Although there is already a common nand driver, it doesn't cover
309 +SPI-NAND, and doesn't match our IC-Verification's requirement. We need
310 +a driver that can be extended or cut easily.
311 +
312 +   This driver is based on NANDX & SLC. We try to refactor structures,
313 +and make them inheritable. We also refactor some operations' flow
314 +principally for adding SPI-NAND support.
315 +
316 + This driver's architecture is like:
317 +
318 + Driver @LK/Uboot/DA... |IC verify/other purposes
319 + ----------------------------------------------------------------
320 + partition | BBM |
321 + -------------------------------------- | extend_core
322 + nandx_core/core_io |
323 + ----------------------------------------------------------------
324 + nand_chip/nand_base |
325 + -------------------------------------- | extend_nfi
326 + nand_device | nfi/nfi_base |
327 +
328 +   Any block of the above graph can be extended at will. If you
329 +want to add a new feature to this code, please make sure that your code
330 +follows the framework, and we will appreciate it.
331 --- /dev/null
332 +++ b/drivers/mtd/nandx/core/Nandx.mk
333 @@ -0,0 +1,38 @@
334 +#
335 +# Copyright (C) 2017 MediaTek Inc.
336 +# Licensed under either
337 +# BSD Licence, (see NOTICE for more details)
338 +# GNU General Public License, version 2.0, (see NOTICE for more details)
339 +#
340 +
341 +nandx-y += nand_device.c
342 +nandx-y += nand_base.c
343 +nandx-y += nand_chip.c
344 +nandx-y += core_io.c
345 +
346 +nandx-header-y += nand_device.h
347 +nandx-header-y += nand_base.h
348 +nandx-header-y += nand_chip.h
349 +nandx-header-y += core_io.h
350 +nandx-header-y += nfi.h
351 +
352 +nandx-$(NANDX_NAND_SPI) += nand/device_spi.c
353 +nandx-$(NANDX_NAND_SPI) += nand/nand_spi.c
354 +nandx-$(NANDX_NAND_SLC) += nand/device_slc.c
355 +nandx-$(NANDX_NAND_SLC) += nand/nand_slc.c
356 +
357 +nandx-header-$(NANDX_NAND_SPI) += nand/device_spi.h
358 +nandx-header-$(NANDX_NAND_SPI) += nand/nand_spi.h
359 +nandx-header-$(NANDX_NAND_SLC) += nand/device_slc.h
360 +nandx-header-$(NANDX_NAND_SLC) += nand/nand_slc.h
361 +
362 +nandx-$(NANDX_NFI_BASE) += nfi/nfi_base.c
363 +nandx-$(NANDX_NFI_ECC) += nfi/nfiecc.c
364 +nandx-$(NANDX_NFI_SPI) += nfi/nfi_spi.c
365 +
366 +nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_base.h
367 +nandx-header-$(NANDX_NFI_BASE) += nfi/nfi_regs.h
368 +nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc.h
369 +nandx-header-$(NANDX_NFI_ECC) += nfi/nfiecc_regs.h
370 +nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi.h
371 +nandx-header-$(NANDX_NFI_SPI) += nfi/nfi_spi_regs.h
372 --- /dev/null
373 +++ b/drivers/mtd/nandx/core/core_io.c
374 @@ -0,0 +1,735 @@
375 +/*
376 + * Copyright (C) 2017 MediaTek Inc.
377 + * Licensed under either
378 + * BSD Licence, (see NOTICE for more details)
379 + * GNU General Public License, version 2.0, (see NOTICE for more details)
380 + */
381 +
382 +/*NOTE: switch cache/multi*/
383 +#include "nandx_util.h"
384 +#include "nandx_core.h"
385 +#include "nand_chip.h"
386 +#include "core_io.h"
387 +
388 +static struct nandx_desc *g_nandx;
389 +
390 +static inline bool is_sector_align(u64 val)
391 +{
392 + return reminder(val, g_nandx->chip->sector_size) ? false : true;
393 +}
394 +
395 +static inline bool is_page_align(u64 val)
396 +{
397 + return reminder(val, g_nandx->chip->page_size) ? false : true;
398 +}
399 +
400 +static inline bool is_block_align(u64 val)
401 +{
402 + return reminder(val, g_nandx->chip->block_size) ? false : true;
403 +}
404 +
405 +static inline u32 page_sectors(void)
406 +{
407 + return div_down(g_nandx->chip->page_size, g_nandx->chip->sector_size);
408 +}
409 +
410 +static inline u32 sector_oob(void)
411 +{
412 + return div_down(g_nandx->chip->oob_size, page_sectors());
413 +}
414 +
415 +static inline u32 sector_padded_size(void)
416 +{
417 + return g_nandx->chip->sector_size + g_nandx->chip->sector_spare_size;
418 +}
419 +
420 +static inline u32 page_padded_size(void)
421 +{
422 + return page_sectors() * sector_padded_size();
423 +}
424 +
425 +static inline u32 offset_to_padded_col(u64 offset)
426 +{
427 + struct nandx_desc *nandx = g_nandx;
428 + u32 col, sectors;
429 +
430 + col = reminder(offset, nandx->chip->page_size);
431 + sectors = div_down(col, nandx->chip->sector_size);
432 +
433 + return col + sectors * nandx->chip->sector_spare_size;
434 +}
435 +
436 +static inline u32 offset_to_row(u64 offset)
437 +{
438 + return div_down(offset, g_nandx->chip->page_size);
439 +}
440 +
441 +static inline u32 offset_to_col(u64 offset)
442 +{
443 + return reminder(offset, g_nandx->chip->page_size);
444 +}
445 +
446 +static inline u32 oob_upper_size(void)
447 +{
448 + return g_nandx->ecc_en ? g_nandx->chip->oob_size :
449 + g_nandx->chip->sector_spare_size * page_sectors();
450 +}
451 +
452 +static inline bool is_upper_oob_align(u64 val)
453 +{
454 + return reminder(val, oob_upper_size()) ? false : true;
455 +}
456 +
457 +#define prepare_op(_op, _row, _col, _len, _data, _oob) \
458 + do { \
459 + (_op).row = (_row); \
460 + (_op).col = (_col); \
461 + (_op).len = (_len); \
462 + (_op).data = (_data); \
463 + (_op).oob = (_oob); \
464 + } while (0)
465 +
466 +static int operation_multi(enum nandx_op_mode mode, u8 *data, u8 *oob,
467 + u64 offset, size_t len)
468 +{
469 + struct nandx_desc *nandx = g_nandx;
470 + u32 row = offset_to_row(offset);
471 + u32 col = offset_to_padded_col(offset);
472 +
473 + if (nandx->mode == NANDX_IDLE) {
474 + nandx->mode = mode;
475 + nandx->ops_current = 0;
476 + } else if (nandx->mode != mode) {
477 + pr_info("forbid mixed operations.\n");
478 + return -EOPNOTSUPP;
479 + }
480 +
481 + prepare_op(nandx->ops[nandx->ops_current], row, col, len, data, oob);
482 + nandx->ops_current++;
483 +
484 + if (nandx->ops_current == nandx->ops_multi_len)
485 + return nandx_sync();
486 +
487 + return nandx->ops_multi_len - nandx->ops_current;
488 +}
489 +
490 +static int operation_sequent(enum nandx_op_mode mode, u8 *data, u8 *oob,
491 + u64 offset, size_t len)
492 +{
493 + struct nandx_desc *nandx = g_nandx;
494 + struct nand_chip *chip = nandx->chip;
495 + u32 row = offset_to_row(offset);
496 + func_chip_ops chip_ops;
497 + u8 *ref_data = data, *ref_oob = oob;
498 + int align, ops, row_step;
499 + int i, rem;
500 +
501 + align = data ? chip->page_size : oob_upper_size();
502 + ops = data ? div_down(len, align) : div_down(len, oob_upper_size());
503 + row_step = 1;
504 +
505 + switch (mode) {
506 + case NANDX_ERASE:
507 + chip_ops = chip->erase_block;
508 + align = chip->block_size;
509 + ops = div_down(len, align);
510 + row_step = chip->block_pages;
511 + break;
512 +
513 + case NANDX_READ:
514 + chip_ops = chip->read_page;
515 + break;
516 +
517 + case NANDX_WRITE:
518 + chip_ops = chip->write_page;
519 + break;
520 +
521 + default:
522 + return -EINVAL;
523 + }
524 +
525 + if (!data) {
526 + ref_data = nandx->head_buf;
527 + memset(ref_data, 0xff, chip->page_size);
528 + }
529 +
530 + if (!oob) {
531 + ref_oob = nandx->head_buf + chip->page_size;
532 + memset(ref_oob, 0xff, oob_upper_size());
533 + }
534 +
535 + for (i = 0; i < ops; i++) {
536 + prepare_op(nandx->ops[nandx->ops_current],
537 + row + i * row_step, 0, align, ref_data, ref_oob);
538 + nandx->ops_current++;
539 +		/* if data or oob is null, nandx->head_buf or
540 +		 * nandx->head_buf + chip->page_size should not be used,
541 +		 * so here it is safe to use the buf.
542 + */
543 + ref_data = data ? ref_data + chip->page_size : nandx->head_buf;
544 + ref_oob = oob ? ref_oob + oob_upper_size() :
545 + nandx->head_buf + chip->page_size;
546 + }
547 +
548 + if (nandx->mode == NANDX_WRITE) {
549 + rem = reminder(nandx->ops_current, nandx->min_write_pages);
550 + if (rem)
551 + return nandx->min_write_pages - rem;
552 + }
553 +
554 + nandx->ops_current = 0;
555 + return chip_ops(chip, nandx->ops, ops);
556 +}
557 +
558 +static int read_pages(u8 *data, u8 *oob, u64 offset, size_t len)
559 +{
560 + struct nandx_desc *nandx = g_nandx;
561 + struct nand_chip *chip = nandx->chip;
562 + struct nandx_split64 split = {0};
563 + u8 *ref_data = data, *ref_oob;
564 + u32 row, col;
565 + int ret = 0, i, ops;
566 + u32 head_offset = 0;
567 + u64 val;
568 +
569 + if (!data)
570 + return operation_sequent(NANDX_READ, NULL, oob, offset, len);
571 +
572 + ref_oob = oob ? oob : nandx->head_buf + chip->page_size;
573 +
574 + nandx_split(&split, offset, len, val, chip->page_size);
575 +
576 + if (split.head_len) {
577 + row = offset_to_row(split.head);
578 + col = offset_to_col(split.head);
579 + prepare_op(nandx->ops[nandx->ops_current], row, 0,
580 + chip->page_size,
581 + nandx->head_buf, ref_oob);
582 + nandx->ops_current++;
583 +
584 + head_offset = col;
585 +
586 + ref_data += split.head_len;
587 + ref_oob = oob ? ref_oob + oob_upper_size() :
588 + nandx->head_buf + chip->page_size;
589 + }
590 +
591 + if (split.body_len) {
592 + ops = div_down(split.body_len, chip->page_size);
593 + row = offset_to_row(split.body);
594 + for (i = 0; i < ops; i++) {
595 + prepare_op(nandx->ops[nandx->ops_current],
596 + row + i, 0, chip->page_size,
597 + ref_data, ref_oob);
598 + nandx->ops_current++;
599 + ref_data += chip->page_size;
600 + ref_oob = oob ? ref_oob + oob_upper_size() :
601 + nandx->head_buf + chip->page_size;
602 + }
603 + }
604 +
605 + if (split.tail_len) {
606 + row = offset_to_row(split.tail);
607 + prepare_op(nandx->ops[nandx->ops_current], row, 0,
608 + chip->page_size, nandx->tail_buf, ref_oob);
609 + nandx->ops_current++;
610 + }
611 +
612 + ret = chip->read_page(chip, nandx->ops, nandx->ops_current);
613 +
614 + if (split.head_len)
615 + memcpy(data, nandx->head_buf + head_offset, split.head_len);
616 + if (split.tail_len)
617 + memcpy(ref_data, nandx->tail_buf, split.tail_len);
618 +
619 + nandx->ops_current = 0;
620 + return ret;
621 +}
622 +
623 +int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len)
624 +{
625 + struct nandx_desc *nandx = g_nandx;
626 +
627 + if (!len || len > nandx->info.total_size)
628 + return -EINVAL;
629 + if (div_up(len, nandx->chip->page_size) > nandx->ops_len)
630 + return -EINVAL;
631 + if (!data && !oob)
632 + return -EINVAL;
633 +	/**
634 +	 * by design, oob does not support partial read,
635 +	 * and the length of the oob buf should be oob-size aligned
636 +	 */
637 + if (!data && !is_upper_oob_align(len))
638 + return -EINVAL;
639 +
640 + if (g_nandx->multi_en) {
641 +		/* by design, there are only 2 bufs for partial read;
642 +		 * if partial read were allowed for multi read,
643 +		 * there would not be enough bufs
644 +		 */
645 + if (!is_sector_align(offset))
646 + return -EINVAL;
647 + if (data && !is_sector_align(len))
648 + return -EINVAL;
649 + return operation_multi(NANDX_READ, data, oob, offset, len);
650 + }
651 +
652 + nandx->ops_current = 0;
653 + nandx->mode = NANDX_IDLE;
654 + return read_pages(data, oob, offset, len);
655 +}
656 +
657 +static int write_pages(u8 *data, u8 *oob, u64 offset, size_t len)
658 +{
659 + struct nandx_desc *nandx = g_nandx;
660 + struct nand_chip *chip = nandx->chip;
661 + struct nandx_split64 split = {0};
662 + int ret, rem, i, ops;
663 + u32 row, col;
664 + u8 *ref_oob = oob;
665 + u64 val;
666 +
667 + nandx->mode = NANDX_WRITE;
668 +
669 + if (!data)
670 + return operation_sequent(NANDX_WRITE, NULL, oob, offset, len);
671 +
672 + if (!oob) {
673 + ref_oob = nandx->head_buf + chip->page_size;
674 + memset(ref_oob, 0xff, oob_upper_size());
675 + }
676 +
677 + nandx_split(&split, offset, len, val, chip->page_size);
678 +
679 + /*NOTE: slc can support sector write, here copy too many data.*/
680 + if (split.head_len) {
681 + row = offset_to_row(split.head);
682 + col = offset_to_col(split.head);
683 + memset(nandx->head_buf, 0xff, page_padded_size());
684 + memcpy(nandx->head_buf + col, data, split.head_len);
685 + prepare_op(nandx->ops[nandx->ops_current], row, 0,
686 + chip->page_size, nandx->head_buf, ref_oob);
687 + nandx->ops_current++;
688 +
689 + data += split.head_len;
690 + ref_oob = oob ? ref_oob + oob_upper_size() :
691 + nandx->head_buf + chip->page_size;
692 + }
693 +
694 + if (split.body_len) {
695 + row = offset_to_row(split.body);
696 + ops = div_down(split.body_len, chip->page_size);
697 + for (i = 0; i < ops; i++) {
698 + prepare_op(nandx->ops[nandx->ops_current],
699 + row + i, 0, chip->page_size, data, ref_oob);
700 + nandx->ops_current++;
701 + data += chip->page_size;
702 + ref_oob = oob ? ref_oob + oob_upper_size() :
703 + nandx->head_buf + chip->page_size;
704 + }
705 + }
706 +
707 + if (split.tail_len) {
708 + row = offset_to_row(split.tail);
709 + memset(nandx->tail_buf, 0xff, page_padded_size());
710 + memcpy(nandx->tail_buf, data, split.tail_len);
711 + prepare_op(nandx->ops[nandx->ops_current], row, 0,
712 + chip->page_size, nandx->tail_buf, ref_oob);
713 + nandx->ops_current++;
714 + }
715 +
716 + rem = reminder(nandx->ops_current, nandx->min_write_pages);
717 + if (rem)
718 + return nandx->min_write_pages - rem;
719 +
720 + ret = chip->write_page(chip, nandx->ops, nandx->ops_current);
721 +
722 + nandx->ops_current = 0;
723 + nandx->mode = NANDX_IDLE;
724 + return ret;
725 +}
726 +
727 +int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len)
728 +{
729 + struct nandx_desc *nandx = g_nandx;
730 +
731 + if (!len || len > nandx->info.total_size)
732 + return -EINVAL;
733 + if (div_up(len, nandx->chip->page_size) > nandx->ops_len)
734 + return -EINVAL;
735 + if (!data && !oob)
736 + return -EINVAL;
737 + if (!data && !is_upper_oob_align(len))
738 + return -EINVAL;
739 +
740 + if (nandx->multi_en) {
741 + if (!is_page_align(offset))
742 + return -EINVAL;
743 + if (data && !is_page_align(len))
744 + return -EINVAL;
745 +
746 + return operation_multi(NANDX_WRITE, data, oob, offset, len);
747 + }
748 +
749 + return write_pages(data, oob, offset, len);
750 +}
751 +
752 +int nandx_erase(u64 offset, size_t len)
753 +{
754 + struct nandx_desc *nandx = g_nandx;
755 +
756 + if (!len || len > nandx->info.total_size)
757 + return -EINVAL;
758 + if (div_down(len, nandx->chip->block_size) > nandx->ops_len)
759 + return -EINVAL;
760 + if (!is_block_align(offset) || !is_block_align(len))
761 + return -EINVAL;
762 +
763 + if (g_nandx->multi_en)
764 + return operation_multi(NANDX_ERASE, NULL, NULL, offset, len);
765 +
766 + nandx->ops_current = 0;
767 + nandx->mode = NANDX_IDLE;
768 + return operation_sequent(NANDX_ERASE, NULL, NULL, offset, len);
769 +}
770 +
771 +int nandx_sync(void)
772 +{
773 + struct nandx_desc *nandx = g_nandx;
774 + struct nand_chip *chip = nandx->chip;
775 + func_chip_ops chip_ops;
776 + int ret, i, rem;
777 +
778 + if (!nandx->ops_current)
779 + return 0;
780 +
781 + rem = reminder(nandx->ops_current, nandx->ops_multi_len);
782 + if (nandx->multi_en && rem) {
783 + ret = -EIO;
784 + goto error;
785 + }
786 +
787 + switch (nandx->mode) {
788 + case NANDX_IDLE:
789 + return 0;
790 + case NANDX_ERASE:
791 + chip_ops = chip->erase_block;
792 + break;
793 + case NANDX_READ:
794 + chip_ops = chip->read_page;
795 + break;
796 + case NANDX_WRITE:
797 + chip_ops = chip->write_page;
798 + break;
799 + default:
800 + return -EINVAL;
801 + }
802 +
803 + rem = reminder(nandx->ops_current, nandx->min_write_pages);
804 + if (!nandx->multi_en && nandx->mode == NANDX_WRITE && rem) {
805 +		/* in one program operation, only 2 pages are allowed to do
806 +		 * partial write; here we assume the 1st buf is used, and the
807 +		 * 2nd buf should not be used.
808 +		 */
809 + memset(nandx->tail_buf, 0xff,
810 + chip->page_size + oob_upper_size());
811 + for (i = 0; i < rem; i++) {
812 + prepare_op(nandx->ops[nandx->ops_current],
813 + nandx->ops[nandx->ops_current - 1].row + 1,
814 + 0, chip->page_size, nandx->tail_buf,
815 + nandx->tail_buf + chip->page_size);
816 + nandx->ops_current++;
817 + }
818 + }
819 +
820 + ret = chip_ops(nandx->chip, nandx->ops, nandx->ops_current);
821 +
822 +error:
823 + nandx->mode = NANDX_IDLE;
824 + nandx->ops_current = 0;
825 +
826 + return ret;
827 +}
828 +
829 +int nandx_ioctl(int cmd, void *arg)
830 +{
831 + struct nandx_desc *nandx = g_nandx;
832 + struct nand_chip *chip = nandx->chip;
833 + int ret = 0;
834 +
835 + switch (cmd) {
836 + case CORE_CTRL_NAND_INFO:
837 + *(struct nandx_info *)arg = nandx->info;
838 + break;
839 +
840 + case CHIP_CTRL_OPS_MULTI:
841 + ret = chip->chip_ctrl(chip, cmd, arg);
842 + if (!ret)
843 + nandx->multi_en = *(bool *)arg;
844 + break;
845 +
846 + case NFI_CTRL_ECC:
847 + ret = chip->chip_ctrl(chip, cmd, arg);
848 + if (!ret)
849 + nandx->ecc_en = *(bool *)arg;
850 + break;
851 +
852 + default:
853 + ret = chip->chip_ctrl(chip, cmd, arg);
854 + break;
855 + }
856 +
857 + return ret;
858 +}
859 +
860 +bool nandx_is_bad_block(u64 offset)
861 +{
862 + struct nandx_desc *nandx = g_nandx;
863 +
864 + prepare_op(nandx->ops[0], offset_to_row(offset), 0,
865 + nandx->chip->page_size, nandx->head_buf,
866 + nandx->head_buf + nandx->chip->page_size);
867 +
868 + return nandx->chip->is_bad_block(nandx->chip, nandx->ops, 1);
869 +}
870 +
871 +int nandx_suspend(void)
872 +{
873 + return g_nandx->chip->suspend(g_nandx->chip);
874 +}
875 +
876 +int nandx_resume(void)
877 +{
878 + return g_nandx->chip->resume(g_nandx->chip);
879 +}
880 +
881 +int nandx_init(struct nfi_resource *res)
882 +{
883 + struct nand_chip *chip;
884 + struct nandx_desc *nandx;
885 + int ret = 0;
886 +
887 + if (!res)
888 + return -EINVAL;
889 +
890 + chip = nand_chip_init(res);
891 + if (!chip) {
892 + pr_info("nand chip init fail.\n");
893 + return -EFAULT;
894 + }
895 +
896 + nandx = (struct nandx_desc *)mem_alloc(1, sizeof(struct nandx_desc));
897 + if (!nandx)
898 + return -ENOMEM;
899 +
900 + g_nandx = nandx;
901 +
902 + nandx->chip = chip;
903 + nandx->min_write_pages = chip->min_program_pages;
904 + nandx->ops_multi_len = nandx->min_write_pages * chip->plane_num;
905 + nandx->ops_len = chip->block_pages * chip->plane_num;
906 + nandx->ops = mem_alloc(1, sizeof(struct nand_ops) * nandx->ops_len);
907 + if (!nandx->ops) {
908 + ret = -ENOMEM;
909 + goto ops_error;
910 + }
911 +
912 +#if NANDX_BULK_IO_USE_DRAM
913 + nandx->head_buf = NANDX_CORE_BUF_ADDR;
914 +#else
915 + nandx->head_buf = mem_alloc(2, page_padded_size());
916 +#endif
917 + if (!nandx->head_buf) {
918 + ret = -ENOMEM;
919 + goto buf_error;
920 + }
921 + nandx->tail_buf = nandx->head_buf + page_padded_size();
922 + memset(nandx->head_buf, 0xff, 2 * page_padded_size());
923 + nandx->multi_en = false;
924 + nandx->ecc_en = false;
925 + nandx->ops_current = 0;
926 + nandx->mode = NANDX_IDLE;
927 +
928 + nandx->info.max_io_count = nandx->ops_len;
929 + nandx->info.min_write_pages = nandx->min_write_pages;
930 + nandx->info.plane_num = chip->plane_num;
931 + nandx->info.oob_size = chip->oob_size;
932 + nandx->info.page_parity_size = chip->sector_spare_size * page_sectors();
933 + nandx->info.page_size = chip->page_size;
934 + nandx->info.block_size = chip->block_size;
935 + nandx->info.total_size = chip->block_size * chip->block_num;
936 + nandx->info.fdm_ecc_size = chip->fdm_ecc_size;
937 + nandx->info.fdm_reg_size = chip->fdm_reg_size;
938 + nandx->info.ecc_strength = chip->ecc_strength;
939 + nandx->info.sector_size = chip->sector_size;
940 +
941 + return 0;
942 +
943 +buf_error:
944 +#if !NANDX_BULK_IO_USE_DRAM
945 + mem_free(nandx->head_buf);
946 +#endif
947 +ops_error:
948 + mem_free(nandx);
949 +
950 + return ret;
951 +}
952 +
953 +void nandx_exit(void)
954 +{
955 + nand_chip_exit(g_nandx->chip);
956 +#if !NANDX_BULK_IO_USE_DRAM
957 + mem_free(g_nandx->head_buf);
958 +#endif
959 + mem_free(g_nandx->ops);
960 + mem_free(g_nandx);
961 +}
962 +
963 +#ifdef NANDX_UNIT_TEST
964 +static void dump_buf(u8 *buf, u32 len)
965 +{
966 + u32 i;
967 +
968 + pr_info("dump buf@0x%X start", (u32)buf);
969 + for (i = 0; i < len; i++) {
970 + if (!reminder(i, 16))
971 + pr_info("\n0x");
972 + pr_info("%x ", buf[i]);
973 + }
974 + pr_info("\ndump buf done.\n");
975 +}
976 +
977 +int nandx_unit_test(u64 offset, size_t len)
978 +{
979 + u8 *src_buf, *dst_buf;
980 + u32 i, j;
981 + int ret;
982 +
983 + if (!len || len > g_nandx->chip->block_size)
984 + return -EINVAL;
985 +
986 +#if NANDX_BULK_IO_USE_DRAM
987 + src_buf = NANDX_UT_SRC_ADDR;
988 + dst_buf = NANDX_UT_DST_ADDR;
989 +
990 +#else
991 + src_buf = mem_alloc(1, g_nandx->chip->page_size);
992 + if (!src_buf)
993 + return -ENOMEM;
994 + dst_buf = mem_alloc(1, g_nandx->chip->page_size);
995 + if (!dst_buf) {
996 + mem_free(src_buf);
997 + return -ENOMEM;
998 + }
999 +#endif
1000 +
1001 + pr_info("%s: src_buf address 0x%x, dst_buf address 0x%x\n",
1002 + __func__, (int)((unsigned long)src_buf),
1003 + (int)((unsigned long)dst_buf));
1004 +
1005 + memset(dst_buf, 0, g_nandx->chip->page_size);
1006 + pr_info("read page 0 data...!\n");
1007 + ret = nandx_read(dst_buf, NULL, 0, g_nandx->chip->page_size);
1008 + if (ret < 0) {
1009 + pr_info("read fail with ret %d\n", ret);
1010 + } else {
1011 + pr_info("read page success!\n");
1012 + }
1013 +
1014 + for (i = 0; i < g_nandx->chip->page_size; i++) {
1015 + src_buf[i] = 0x5a;
1016 + }
1017 +
1018 + ret = nandx_erase(offset, g_nandx->chip->block_size);
1019 + if (ret < 0) {
1020 + pr_info("erase fail with ret %d\n", ret);
1021 + goto error;
1022 + }
1023 +
1024 + for (j = 0; j < g_nandx->chip->block_pages; j++) {
1025 + memset(dst_buf, 0, g_nandx->chip->page_size);
1026 + pr_info("check data after erase...!\n");
1027 + ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
1028 + if (ret < 0) {
1029 + pr_info("read fail with ret %d\n", ret);
1030 + goto error;
1031 + }
1032 +
1033 + for (i = 0; i < g_nandx->chip->page_size; i++) {
1034 + if (dst_buf[i] != 0xff) {
1035 + pr_info("read after erase, check fail @%d\n", i);
1036 + pr_info("all data should be 0xff\n");
1037 + ret = -ENANDERASE;
1038 + dump_buf(dst_buf, 128);
1039 + //goto error;
1040 + break;
1041 + }
1042 + }
1043 +
1044 + pr_info("write data...!\n");
1045 + ret = nandx_write(src_buf, NULL, offset, g_nandx->chip->page_size);
1046 + if (ret < 0) {
1047 + pr_info("write fail with ret %d\n", ret);
1048 + goto error;
1049 + }
1050 +
1051 + memset(dst_buf, 0, g_nandx->chip->page_size);
1052 + pr_info("read data...!\n");
1053 + ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
1054 + if (ret < 0) {
1055 + pr_info("read fail with ret %d\n", ret);
1056 + goto error;
1057 + }
1058 +
1059 + for (i = 0; i < g_nandx->chip->page_size; i++) {
1060 + if (dst_buf[i] != src_buf[i]) {
1061 + pr_info("read after write, check fail @%d\n", i);
1062 + pr_info("dst_buf should be same as src_buf\n");
1063 + ret = -EIO;
1064 + dump_buf(src_buf + i, 128);
1065 + dump_buf(dst_buf + i, 128);
1066 + break;
1067 + }
1068 + }
1069 +
1070 + pr_err("%s %d %s@%d\n", __func__, __LINE__, ret?"Failed":"OK", j);
1071 + if (ret)
1072 + break;
1073 +
1074 + offset += g_nandx->chip->page_size;
1075 + }
1076 +
1077 + ret = nandx_erase(offset, g_nandx->chip->block_size);
1078 + if (ret < 0) {
1079 + pr_info("erase fail with ret %d\n", ret);
1080 + goto error;
1081 + }
1082 +
1083 + memset(dst_buf, 0, g_nandx->chip->page_size);
1084 + ret = nandx_read(dst_buf, NULL, offset, g_nandx->chip->page_size);
1085 + if (ret < 0) {
1086 + pr_info("read fail with ret %d\n", ret);
1087 + goto error;
1088 + }
1089 +
1090 + for (i = 0; i < g_nandx->chip->page_size; i++) {
1091 + if (dst_buf[i] != 0xff) {
1092 + pr_info("read after erase, check fail\n");
1093 + pr_info("all data should be 0xff\n");
1094 + ret = -ENANDERASE;
1095 + dump_buf(dst_buf, 128);
1096 + goto error;
1097 + }
1098 +		}
1099 +	}
1100 +	ret = 0;	/* fall through: free src_buf/dst_buf before returning */
1101 +
1102 +error:
1103 +#if !NANDX_BULK_IO_USE_DRAM
1104 +	mem_free(src_buf);
1105 +	mem_free(dst_buf);
1106 +#endif
1107 +	return ret;
1108 +}
1109 +#endif
1110 --- /dev/null
1111 +++ b/drivers/mtd/nandx/core/core_io.h
1112 @@ -0,0 +1,39 @@
1113 +/*
1114 + * Copyright (C) 2017 MediaTek Inc.
1115 + * Licensed under either
1116 + * BSD Licence, (see NOTICE for more details)
1117 + * GNU General Public License, version 2.0, (see NOTICE for more details)
1118 + */
1119 +
1120 +#ifndef __CORE_IO_H__
1121 +#define __CORE_IO_H__
1122 +
1123 +typedef int (*func_chip_ops)(struct nand_chip *, struct nand_ops *,
1124 + int);
1125 +
1126 +enum nandx_op_mode {
1127 + NANDX_IDLE,
1128 + NANDX_WRITE,
1129 + NANDX_READ,
1130 + NANDX_ERASE
1131 +};
1132 +
1133 +struct nandx_desc {
1134 + struct nand_chip *chip;
1135 + struct nandx_info info;
1136 + enum nandx_op_mode mode;
1137 +
1138 + bool multi_en;
1139 + bool ecc_en;
1140 +
1141 + struct nand_ops *ops;
1142 + int ops_len;
1143 + int ops_multi_len;
1144 + int ops_current;
1145 + int min_write_pages;
1146 +
1147 + u8 *head_buf;
1148 + u8 *tail_buf;
1149 +};
1150 +
1151 +#endif /* __CORE_IO_H__ */
1152 --- /dev/null
1153 +++ b/drivers/mtd/nandx/core/nand/device_spi.c
1154 @@ -0,0 +1,200 @@
1155 +/*
1156 + * Copyright (C) 2017 MediaTek Inc.
1157 + * Licensed under either
1158 + * BSD Licence, (see NOTICE for more details)
1159 + * GNU General Public License, version 2.0, (see NOTICE for more details)
1160 + */
1161 +
1162 +#include "nandx_util.h"
1163 +#include "../nand_device.h"
1164 +#include "device_spi.h"
1165 +
1166 +/* spi nand basic commands */
1167 +static struct nand_cmds spi_cmds = {
1168 + .reset = 0xff,
1169 + .read_id = 0x9f,
1170 + .read_status = 0x0f,
1171 + .read_param_page = 0x03,
1172 + .set_feature = 0x1f,
1173 + .get_feature = 0x0f,
1174 + .read_1st = 0x13,
1175 + .read_2nd = -1,
1176 + .random_out_1st = 0x03,
1177 + .random_out_2nd = -1,
1178 + .program_1st = 0x02,
1179 + .program_2nd = 0x10,
1180 + .erase_1st = 0xd8,
1181 + .erase_2nd = -1,
1182 + .read_cache = 0x30,
1183 + .read_cache_last = 0x3f,
1184 + .program_cache = 0x02
1185 +};
1186 +
1187 +/* spi nand extend commands */
1188 +static struct spi_extend_cmds spi_extend_cmds = {
1189 + .die_select = 0xc2,
1190 + .write_enable = 0x06
1191 +};
1192 +
1193 +/* means the start bit of addressing type */
1194 +static struct nand_addressing spi_addressing = {
1195 + .row_bit_start = 0,
1196 + .block_bit_start = 0,
1197 + .plane_bit_start = 12,
1198 + .lun_bit_start = 0,
1199 +};
1200 +
1201 +/* spi nand endurance */
1202 +static struct nand_endurance spi_endurance = {
1203 + .pe_cycle = 100000,
1204 + .ecc_req = 1,
1205 + .max_bitflips = 1
1206 +};
1207 +
1208 +/* array_busy, write_protect, erase_fail, program_fail */
1209 +static struct nand_status spi_status[] = {
1210 + {.array_busy = BIT(0),
1211 + .write_protect = BIT(1),
1212 + .erase_fail = BIT(2),
1213 + .program_fail = BIT(3)}
1214 +};
1215 +
1216 +/* measure time by the us */
1217 +static struct nand_array_timing spi_array_timing = {
1218 + .tRST = 500,
1219 + .tWHR = 1,
1220 + .tR = 25,
1221 + .tRCBSY = 25,
1222 + .tFEAT = 1,
1223 + .tPROG = 600,
1224 + .tPCBSY = 600,
1225 + .tBERS = 10000,
1226 + .tDBSY = 1
1227 +};
1228 +
1229 +/* spi nand device table */
1230 +static struct device_spi spi_nand[] = {
1231 + {
1232 + NAND_DEVICE("W25N01GV",
1233 + NAND_PACK_ID(0xef, 0xaa, 0x21, 0, 0, 0, 0, 0),
1234 + 3, 0, 3, 3,
1235 + 1, 1, 1, 1024, KB(128), KB(2), 64, 1,
1236 + &spi_cmds, &spi_addressing, &spi_status[0],
1237 + &spi_endurance, &spi_array_timing),
1238 + {
1239 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1240 + NAND_SPI_CONFIG(0xb0, 4, 6, 0),
1241 + NAND_SPI_STATUS(0xc0, 4, 5),
1242 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1243 + },
1244 + &spi_extend_cmds, 0xff, 0xff
1245 + },
1246 + {
1247 + NAND_DEVICE("MX35LF1G",
1248 + NAND_PACK_ID(0xc2, 0x12, 0x21, 0, 0, 0, 0, 0),
1249 + 2, 0, 3, 3,
1250 + 1, 1, 1, 1024, KB(128), KB(2), 64, 1,
1251 + &spi_cmds, &spi_addressing, &spi_status[0],
1252 + &spi_endurance, &spi_array_timing),
1253 + {
1254 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1255 + NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1256 + NAND_SPI_STATUS(0xc0, 4, 5),
1257 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1258 + },
1259 + &spi_extend_cmds, 0xff, 0xff
1260 + },
1261 + {
1262 + NAND_DEVICE("MT29F4G01ABAFDWB",
1263 + NAND_PACK_ID(0x2c, 0x34, 0, 0, 0, 0, 0, 0),
1264 + 2, 0, 3, 3,
1265 + 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
1266 + &spi_cmds, &spi_addressing, &spi_status[0],
1267 + &spi_endurance, &spi_array_timing),
1268 + {
1269 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1270 + NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1271 + NAND_SPI_STATUS(0xc0, 4, 5),
1272 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1273 + },
1274 + &spi_extend_cmds, 0xff, 0xff
1275 + },
1276 + {
1277 + NAND_DEVICE("GD5F4GQ4UB",
1278 + NAND_PACK_ID(0xc8, 0xd4, 0, 0, 0, 0, 0, 0),
1279 + 2, 0, 3, 3,
1280 + 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
1281 + &spi_cmds, &spi_addressing, &spi_status[0],
1282 + &spi_endurance, &spi_array_timing),
1283 + {
1284 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1285 + NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1286 + NAND_SPI_STATUS(0xc0, 4, 5),
1287 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1288 + },
1289 + &spi_extend_cmds, 0xff, 0xff
1290 + },
1291 + {
1292 + NAND_DEVICE("TC58CVG2S0HRAIJ",
1293 + NAND_PACK_ID(0x98, 0xED, 0x51, 0, 0, 0, 0, 0),
1294 + 3, 0, 3, 3,
1295 + 1, 1, 1, 2048, KB(256), KB(4), 256, 1,
1296 + &spi_cmds, &spi_addressing, &spi_status[0],
1297 + &spi_endurance, &spi_array_timing),
1298 + {
1299 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1300 + NAND_SPI_CONFIG(0xb0, 4, 6, 1),
1301 + NAND_SPI_STATUS(0xc0, 4, 5),
1302 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1303 + },
1304 + &spi_extend_cmds, 0xff, 0xff
1305 + },
1306 + {
1307 + NAND_DEVICE("NO-DEVICE",
1308 + NAND_PACK_ID(0, 0, 0, 0, 0, 0, 0, 0), 0, 0, 0, 0,
1309 + 0, 0, 0, 0, 0, 0, 0, 1,
1310 + &spi_cmds, &spi_addressing, &spi_status[0],
1311 + &spi_endurance, &spi_array_timing),
1312 + {
1313 + NAND_SPI_PROTECT(0xa0, 1, 2, 6),
1314 + NAND_SPI_CONFIG(0xb0, 4, 6, 0),
1315 + NAND_SPI_STATUS(0xc0, 4, 5),
1316 + NAND_SPI_CHARACTER(0xff, 0xff, 0xff, 0xff)
1317 + },
1318 + &spi_extend_cmds, 0xff, 0xff
1319 + }
1320 +};
1321 +
1322 +u8 spi_replace_rx_cmds(u8 mode)
1323 +{
1324 + u8 rx_replace_cmds[] = {0x03, 0x3b, 0x6b, 0xbb, 0xeb};
1325 +
1326 + return rx_replace_cmds[mode];
1327 +}
1328 +
1329 +u8 spi_replace_tx_cmds(u8 mode)
1330 +{
1331 + u8 tx_replace_cmds[] = {0x02, 0x32};
1332 +
1333 + return tx_replace_cmds[mode];
1334 +}
1335 +
1336 +u8 spi_replace_rx_col_cycle(u8 mode)
1337 +{
1338 + u8 rx_replace_col_cycle[] = {3, 3, 3, 3, 4};
1339 +
1340 + return rx_replace_col_cycle[mode];
1341 +}
1342 +
1343 +u8 spi_replace_tx_col_cycle(u8 mode)
1344 +{
1345 + u8 tx_replace_col_cycle[] = {2, 2};
1346 +
1347 + return tx_replace_col_cycle[mode];
1348 +}
1349 +
1350 +struct nand_device *nand_get_device(int index)
1351 +{
1352 + return &spi_nand[index].dev;
1353 +}
1354 +
1355 --- /dev/null
1356 +++ b/drivers/mtd/nandx/core/nand/device_spi.h
1357 @@ -0,0 +1,132 @@
1358 +/*
1359 + * Copyright (C) 2017 MediaTek Inc.
1360 + * Licensed under either
1361 + * BSD Licence, (see NOTICE for more details)
1362 + * GNU General Public License, version 2.0, (see NOTICE for more details)
1363 + */
1364 +
1365 +#ifndef __DEVICE_SPI_H__
1366 +#define __DEVICE_SPI_H__
1367 +
1368 +/*
1369 + * extend commands
1370 + * @die_select: select nand device die command
1371 + * @write_enable: enable write command before write data to spi nand
1372 + * spi nand device will auto to be disable after write done
1373 + */
1374 +struct spi_extend_cmds {
1375 + short die_select;
1376 + short write_enable;
1377 +};
1378 +
1379 +/*
1380 + * protection feature register
1381 + * @addr: register address
1382 + * @wp_en_bit: write protection enable bit
1383 + * @bp_start_bit: block protection mask start bit
1384 + * @bp_end_bit: block protection mask end bit
1385 + */
1386 +struct feature_protect {
1387 + u8 addr;
1388 + u8 wp_en_bit;
1389 + u8 bp_start_bit;
1390 + u8 bp_end_bit;
1391 +};
1392 +
1393 +/*
1394 + * configuration feature register
1395 + * @addr: register address
1396 + * @ecc_en_bit: in-die ecc enable bit
1397 + * @otp_en_bit: enter otp access mode bit
1398 + * @need_qe: quad io enable bit
1399 + */
1400 +struct feature_config {
1401 + u8 addr;
1402 + u8 ecc_en_bit;
1403 + u8 otp_en_bit;
1404 + u8 need_qe;
1405 +};
1406 +
1407 +/*
1408 + * status feature register
1409 + * @addr: register address
1410 + * @ecc_start_bit: ecc status mask start bit for error bits number
1411 + * @ecc_end_bit: ecc status mask end bit for error bits number
1412 + * note that:
1413 + * operations status (ex. array busy status) could see on struct nand_status
1414 + */
1415 +struct feature_status {
1416 + u8 addr;
1417 + u8 ecc_start_bit;
1418 + u8 ecc_end_bit;
1419 +};
1420 +
1421 +/*
1422 + * character feature register
1423 + * @addr: register address
1424 + * @die_sel_bit: die select bit
1425 + * @drive_start_bit: drive strength mask start bit
1426 + * @drive_end_bit: drive strength mask end bit
1427 + */
1428 +struct feature_character {
1429 + u8 addr;
1430 + u8 die_sel_bit;
1431 + u8 drive_start_bit;
1432 + u8 drive_end_bit;
1433 +};
1434 +
1435 +/*
1436 + * spi features
1437 + * @protect: protection feature register
1438 + * @config: configuration feature register
1439 + * @status: status feature register
1440 + * @character: character feature register
1441 + */
1442 +struct spi_features {
1443 + struct feature_protect protect;
1444 + struct feature_config config;
1445 + struct feature_status status;
1446 + struct feature_character character;
1447 +};
1448 +
1449 +/*
1450 + * device_spi
1451 + * configurations of spi nand device table
1452 + * @dev: base information of nand device
1453 + * @feature: feature information for spi nand
1454 + * @extend_cmds: extended the nand base commands
1455 + * @tx_mode_mask: tx mode mask for chip read
1456 + * @rx_mode_mask: rx mode mask for chip write
1457 + */
1458 +struct device_spi {
1459 + struct nand_device dev;
1460 + struct spi_features feature;
1461 + struct spi_extend_cmds *extend_cmds;
1462 +
1463 + u8 tx_mode_mask;
1464 + u8 rx_mode_mask;
1465 +};
1466 +
1467 +#define NAND_SPI_PROTECT(addr, wp_en_bit, bp_start_bit, bp_end_bit) \
1468 + {addr, wp_en_bit, bp_start_bit, bp_end_bit}
1469 +
1470 +#define NAND_SPI_CONFIG(addr, ecc_en_bit, otp_en_bit, need_qe) \
1471 + {addr, ecc_en_bit, otp_en_bit, need_qe}
1472 +
1473 +#define NAND_SPI_STATUS(addr, ecc_start_bit, ecc_end_bit) \
1474 + {addr, ecc_start_bit, ecc_end_bit}
1475 +
1476 +#define NAND_SPI_CHARACTER(addr, die_sel_bit, drive_start_bit, drive_end_bit) \
1477 + {addr, die_sel_bit, drive_start_bit, drive_end_bit}
1478 +
1479 +static inline struct device_spi *device_to_spi(struct nand_device *dev)
1480 +{
1481 + return container_of(dev, struct device_spi, dev);
1482 +}
1483 +
1484 +u8 spi_replace_rx_cmds(u8 mode);
1485 +u8 spi_replace_tx_cmds(u8 mode);
1486 +u8 spi_replace_rx_col_cycle(u8 mode);
1487 +u8 spi_replace_tx_col_cycle(u8 mode);
1488 +
1489 +#endif /* __DEVICE_SPI_H__ */
1490 --- /dev/null
1491 +++ b/drivers/mtd/nandx/core/nand/nand_spi.c
1492 @@ -0,0 +1,526 @@
1493 +/*
1494 + * Copyright (C) 2017 MediaTek Inc.
1495 + * Licensed under either
1496 + * BSD Licence, (see NOTICE for more details)
1497 + * GNU General Public License, version 2.0, (see NOTICE for more details)
1498 + */
1499 +
1500 +#include "nandx_util.h"
1501 +#include "nandx_core.h"
1502 +#include "../nand_chip.h"
1503 +#include "../nand_device.h"
1504 +#include "../nfi.h"
1505 +#include "../nand_base.h"
1506 +#include "device_spi.h"
1507 +#include "nand_spi.h"
1508 +
1509 +#define READY_TIMEOUT 500000 /* us */
1510 +
1511 +static int nand_spi_read_status(struct nand_base *nand)
1512 +{
1513 + struct device_spi *dev = device_to_spi(nand->dev);
1514 + u8 status;
1515 +
1516 + nand->get_feature(nand, dev->feature.status.addr, &status, 1);
1517 +
1518 + return status;
1519 +}
1520 +
1521 +static int nand_spi_wait_ready(struct nand_base *nand, u32 timeout)
1522 +{
1523 + u64 now, end;
1524 + int status;
1525 +
1526 + end = get_current_time_us() + timeout;
1527 +
1528 + do {
1529 + status = nand_spi_read_status(nand);
1530 + status &= nand->dev->status->array_busy;
1531 + now = get_current_time_us();
1532 +
1533 + if (now > end)
1534 + break;
1535 + } while (status);
1536 +
1537 + return status ? -EBUSY : 0;
1538 +}
1539 +
1540 +static int nand_spi_set_op_mode(struct nand_base *nand, u8 mode)
1541 +{
1542 + struct nand_spi *spi_nand = base_to_spi(nand);
1543 + struct nfi *nfi = nand->nfi;
1544 + int ret = 0;
1545 +
1546 + if (spi_nand->op_mode != mode) {
1547 + ret = nfi->nfi_ctrl(nfi, SNFI_CTRL_OP_MODE, (void *)&mode);
1548 + spi_nand->op_mode = mode;
1549 + }
1550 +
1551 + return ret;
1552 +}
1553 +
1554 +static int nand_spi_set_config(struct nand_base *nand, u8 addr, u8 mask,
1555 +			       bool en)
1556 +{
1557 +	u8 configs = 0;
1558 +
1559 +	nand->get_feature(nand, addr, &configs, 1);
1560 +
1561 +	if (en)
1562 +		configs |= mask;
1563 +	else
1564 +		configs &= ~mask;
1565 +
1566 +	nand->set_feature(nand, addr, &configs, 1);
1567 +
1568 +	configs = 0;
1569 +	nand->get_feature(nand, addr, &configs, 1);
1570 +
1571 +	return !!(configs & mask) == en ? 0 : -EFAULT;
1572 +}
1573 +
1574 +static int nand_spi_die_select(struct nand_base *nand, int *row)
1575 +{
1576 +	struct device_spi *dev = device_to_spi(nand->dev);
1577 +	struct nfi *nfi = nand->nfi;
1578 +	int lun_blocks, block_pages, lun, blocks;
1579 +	int page = *row, ret = 0;
1580 +	u8 param = 0, die_sel;
1581 +
1582 +	if (nand->dev->lun_num < 2)
1583 +		return 0;
1584 +
1585 +	block_pages = nand_block_pages(nand->dev);
1586 +	lun_blocks = nand_lun_blocks(nand->dev);
1587 +	blocks = div_down(page, block_pages);
1588 +	lun = div_down(blocks, lun_blocks);
1589 +
1590 +	if (dev->extend_cmds->die_select == -1) {
1591 +		die_sel = (u8)(lun << dev->feature.character.die_sel_bit);
1592 +		nand->get_feature(nand, dev->feature.character.addr, &param, 1);
1593 +		param = (param & ~BIT(dev->feature.character.die_sel_bit)) | die_sel;
1594 +		nand->set_feature(nand, dev->feature.character.addr, &param, 1);
1595 +		param = 0;
1596 +		nand->get_feature(nand, dev->feature.character.addr, &param, 1);
1597 +		ret = ((param & BIT(dev->feature.character.die_sel_bit)) == die_sel) ? 0 : -EFAULT;
1598 +	} else {
1599 +		nfi->reset(nfi);
1600 +		nfi->send_cmd(nfi, dev->extend_cmds->die_select);
1601 +		nfi->send_addr(nfi, lun, 0, 1, 0);
1602 +		nfi->trigger(nfi);
1603 +	}
1604 +
1605 +	*row = page - (lun_blocks * block_pages) * lun;
1606 +
1607 +	return ret;
1608 +}
1609 +
1610 +static int nand_spi_select_device(struct nand_base *nand, int cs)
1611 +{
1612 + struct nand_spi *spi = base_to_spi(nand);
1613 + struct nand_base *parent = spi->parent;
1614 +
1615 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1616 +
1617 + return parent->select_device(nand, cs);
1618 +}
1619 +
1620 +static int nand_spi_reset(struct nand_base *nand)
1621 +{
1622 + struct nand_spi *spi = base_to_spi(nand);
1623 + struct nand_base *parent = spi->parent;
1624 +
1625 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1626 +
1627 + parent->reset(nand);
1628 +
1629 + return nand_spi_wait_ready(nand, READY_TIMEOUT);
1630 +}
1631 +
1632 +static int nand_spi_read_id(struct nand_base *nand, u8 *id, int count)
1633 +{
1634 + struct nand_spi *spi = base_to_spi(nand);
1635 + struct nand_base *parent = spi->parent;
1636 +
1637 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1638 +
1639 + return parent->read_id(nand, id, count);
1640 +}
1641 +
1642 +static int nand_spi_read_param_page(struct nand_base *nand, u8 *data,
1643 + int count)
1644 +{
1645 + struct device_spi *dev = device_to_spi(nand->dev);
1646 + struct nand_spi *spi = base_to_spi(nand);
1647 + struct nfi *nfi = nand->nfi;
1648 + int sectors, value;
1649 + u8 param = 0;
1650 +
1651 + sectors = div_round_up(count, nfi->sector_size);
1652 +
1653 + nand->get_feature(nand, dev->feature.config.addr, &param, 1);
1654 + param |= BIT(dev->feature.config.otp_en_bit);
1655 + nand->set_feature(nand, dev->feature.config.addr, &param, 1);
1656 +
1657 + param = 0;
1658 + nand->get_feature(nand, dev->feature.config.addr, &param, 1);
1659 + if (param & BIT(dev->feature.config.otp_en_bit)) {
1660 + value = 0;
1661 + nfi->nfi_ctrl(nfi, NFI_CTRL_ECC, &value);
1662 + nand->dev->col_cycle = spi_replace_rx_col_cycle(spi->rx_mode);
1663 + nand->read_page(nand, 0x01);
1664 + nand->read_data(nand, 0x01, 0, sectors, data, NULL);
1665 + }
1666 +
1667 + param &= ~BIT(dev->feature.config.otp_en_bit);
1668 + nand->set_feature(nand, dev->feature.config.addr, &param, 1);
1669 +
1670 + return 0;
1671 +}
1672 +
1673 +static int nand_spi_set_feature(struct nand_base *nand, u8 addr,
1674 + u8 *param,
1675 + int count)
1676 +{
1677 + struct nand_spi *spi = base_to_spi(nand);
1678 + struct nand_base *parent = spi->parent;
1679 +
1680 + nand->write_enable(nand);
1681 +
1682 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1683 +
1684 + return parent->set_feature(nand, addr, param, count);
1685 +}
1686 +
1687 +static int nand_spi_get_feature(struct nand_base *nand, u8 addr,
1688 + u8 *param,
1689 + int count)
1690 +{
1691 + struct nand_spi *spi = base_to_spi(nand);
1692 + struct nand_base *parent = spi->parent;
1693 +
1694 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1695 +
1696 + return parent->get_feature(nand, addr, param, count);
1697 +}
1698 +
1699 +static int nand_spi_addressing(struct nand_base *nand, int *row,
1700 + int *col)
1701 +{
1702 + struct nand_device *dev = nand->dev;
1703 + int plane, block, block_pages;
1704 + int ret;
1705 +
1706 + ret = nand_spi_die_select(nand, row);
1707 + if (ret)
1708 + return ret;
1709 +
1710 + block_pages = nand_block_pages(dev);
1711 + block = div_down(*row, block_pages);
1712 +
1713 + plane = block % dev->plane_num;
1714 + *col |= (plane << dev->addressing->plane_bit_start);
1715 +
1716 + return 0;
1717 +}
1718 +
1719 +static int nand_spi_read_page(struct nand_base *nand, int row)
1720 +{
1721 + struct nand_spi *spi = base_to_spi(nand);
1722 + struct nand_base *parent = spi->parent;
1723 +
1724 + if (spi->op_mode == SNFI_AUTO_MODE)
1725 + nand_spi_set_op_mode(nand, SNFI_AUTO_MODE);
1726 + else
1727 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1728 +
1729 + parent->read_page(nand, row);
1730 +
1731 + return nand_spi_wait_ready(nand, READY_TIMEOUT);
1732 +}
1733 +
1734 +static int nand_spi_read_data(struct nand_base *nand, int row, int col,
1735 + int sectors, u8 *data, u8 *oob)
1736 +{
1737 + struct device_spi *dev = device_to_spi(nand->dev);
1738 + struct nand_spi *spi = base_to_spi(nand);
1739 + struct nand_base *parent = spi->parent;
1740 + int ret;
1741 +
1742 + if ((spi->rx_mode == SNFI_RX_114 || spi->rx_mode == SNFI_RX_144) &&
1743 + dev->feature.config.need_qe)
1744 + nand_spi_set_config(nand, dev->feature.config.addr,
1745 + BIT(0), true);
1746 +
1747 + nand->dev->col_cycle = spi_replace_rx_col_cycle(spi->rx_mode);
1748 +
1749 + nand_spi_set_op_mode(nand, SNFI_CUSTOM_MODE);
1750 +
1751 + ret = parent->read_data(nand, row, col, sectors, data, oob);
1752 + if (ret)
1753 + return -ENANDREAD;
1754 +
1755 + if (spi->ondie_ecc) {
1756 + ret = nand_spi_read_status(nand);
1757 + ret &= GENMASK(dev->feature.status.ecc_end_bit,
1758 + dev->feature.status.ecc_start_bit);
1759 + ret >>= dev->feature.status.ecc_start_bit;
1760 + if (ret > nand->dev->endurance->ecc_req)
1761 + return -ENANDREAD;
1762 + else if (ret > nand->dev->endurance->max_bitflips)
1763 + return -ENANDFLIPS;
1764 + }
1765 +
1766 + return 0;
1767 +}
1768 +
1769 +static int nand_spi_write_enable(struct nand_base *nand)
1770 +{
1771 + struct device_spi *dev = device_to_spi(nand->dev);
1772 + struct nfi *nfi = nand->nfi;
1773 + int status;
1774 +
1775 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1776 +
1777 + nfi->reset(nfi);
1778 + nfi->send_cmd(nfi, dev->extend_cmds->write_enable);
1779 +
1780 + nfi->trigger(nfi);
1781 +
1782 + status = nand_spi_read_status(nand);
1783 + status &= nand->dev->status->write_protect;
1784 +
1785 + return !status;
1786 +}
1787 +
1788 +static int nand_spi_program_data(struct nand_base *nand, int row,
1789 + int col,
1790 + u8 *data, u8 *oob)
1791 +{
1792 + struct device_spi *dev = device_to_spi(nand->dev);
1793 + struct nand_spi *spi = base_to_spi(nand);
1794 +
1795 + if (spi->tx_mode == SNFI_TX_114 && dev->feature.config.need_qe)
1796 + nand_spi_set_config(nand, dev->feature.config.addr,
1797 + BIT(0), true);
1798 +
1799 + nand_spi_set_op_mode(nand, SNFI_CUSTOM_MODE);
1800 +
1801 + nand->dev->col_cycle = spi_replace_tx_col_cycle(spi->tx_mode);
1802 +
1803 + return spi->parent->program_data(nand, row, col, data, oob);
1804 +}
1805 +
1806 +static int nand_spi_program_page(struct nand_base *nand, int row)
1807 +{
1808 + struct nand_spi *spi = base_to_spi(nand);
1809 + struct nand_device *dev = nand->dev;
1810 + struct nfi *nfi = nand->nfi;
1811 +
1812 + if (spi->op_mode == SNFI_AUTO_MODE)
1813 + nand_spi_set_op_mode(nand, SNFI_AUTO_MODE);
1814 + else
1815 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1816 +
1817 + nfi->reset(nfi);
1818 + nfi->send_cmd(nfi, dev->cmds->program_2nd);
1819 + nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
1820 + nfi->trigger(nfi);
1821 +
1822 + return nand_spi_wait_ready(nand, READY_TIMEOUT);
1823 +}
1824 +
1825 +static int nand_spi_erase_block(struct nand_base *nand, int row)
1826 +{
1827 + struct nand_spi *spi = base_to_spi(nand);
1828 + struct nand_base *parent = spi->parent;
1829 +
1830 + nand_spi_set_op_mode(nand, SNFI_MAC_MODE);
1831 +
1832 + parent->erase_block(nand, row);
1833 +
1834 + return nand_spi_wait_ready(nand, READY_TIMEOUT);
1835 +}
1836 +
1837 +static int nand_chip_spi_ctrl(struct nand_chip *chip, int cmd,
1838 +			      void *args)
1839 +{
1840 +	struct nand_base *nand = chip->nand;
1841 +	struct device_spi *dev = device_to_spi(nand->dev);
1842 +	struct nand_spi *spi = base_to_spi(nand);
1843 +	struct nfi *nfi = nand->nfi;
1844 +	int ret = 0, value = *(int *)args;
1845 +
1846 +	switch (cmd) {
1847 +	case CHIP_CTRL_ONDIE_ECC:
1848 +		spi->ondie_ecc = (bool)value;
1849 +		ret = nand_spi_set_config(nand, dev->feature.config.addr,
1850 +					  BIT(dev->feature.config.ecc_en_bit),
1851 +					  spi->ondie_ecc);
1852 +		break;
1853 +
1854 +	case SNFI_CTRL_TX_MODE:
1855 +		if (value < 0 || value > SNFI_TX_114)
1856 +			return -EOPNOTSUPP;
1857 +
1858 +		if (dev->tx_mode_mask & BIT(value)) {
1859 +			spi->tx_mode = value;
1860 +			nand->dev->cmds->program_1st = spi_replace_tx_cmds(
1861 +						spi->tx_mode);
1862 +			ret = nfi->nfi_ctrl(nfi, cmd, args);
1863 +		}
1864 +
1865 +		break;
1866 +
1867 +	case SNFI_CTRL_RX_MODE:
1868 +		if (value < 0 || value > SNFI_RX_144)
1869 +			return -EOPNOTSUPP;
1870 +
1871 +		if (dev->rx_mode_mask & BIT(value)) {
1872 +			spi->rx_mode = value;
1873 +			nand->dev->cmds->random_out_1st = spi_replace_rx_cmds(
1874 +						spi->rx_mode);
1875 +			ret = nfi->nfi_ctrl(nfi, cmd, args);
1876 +		}
1877 +
1878 +		break;
1879 +
1880 +	case CHIP_CTRL_OPS_CACHE:
1881 +	case CHIP_CTRL_OPS_MULTI:
1882 +	case CHIP_CTRL_PSLC_MODE:
1883 +	case CHIP_CTRL_DDR_MODE:
1884 +	case CHIP_CTRL_DRIVE_STRENGTH:
1885 +	case CHIP_CTRL_TIMING_MODE:
1886 +		ret = -EOPNOTSUPP;
1887 +		break;
1888 +
1889 +	default:
1890 +		ret = nfi->nfi_ctrl(nfi, cmd, args);
1891 +		break;
1892 +	}
1893 +
1894 +	return ret;
1895 +}
1896 +
1897 +int nand_chip_spi_resume(struct nand_chip *chip)
1898 +{
1899 +	struct nand_base *nand = chip->nand;
1900 +	struct nand_spi *spi = base_to_spi(nand);
1901 +	struct device_spi *dev = device_to_spi(nand->dev);
1902 +	struct nfi *nfi = nand->nfi;
1903 +	struct nfi_format format;
1904 +	u8 mask;
1905 +
1906 +	nand->reset(nand);
1907 +
1908 +	mask = GENMASK(dev->feature.protect.bp_end_bit,
1909 +		       dev->feature.protect.bp_start_bit);
1910 +	nand_spi_set_config(nand, dev->feature.protect.addr, mask, false);
1911 +	mask = BIT(dev->feature.config.ecc_en_bit);
1912 +	nand_spi_set_config(nand, dev->feature.config.addr, mask,
1913 +			    spi->ondie_ecc);
1914 +
1915 +	format.page_size = nand->dev->page_size;
1916 +	format.spare_size = nand->dev->spare_size;
1917 +	format.ecc_req = nand->dev->endurance->ecc_req;
1918 +
1919 +	return nfi->set_format(nfi, &format);
1920 +}
1921 +
1922 +static int nand_spi_set_format(struct nand_base *nand)
1923 +{
1924 + struct nfi_format format = {
1925 + nand->dev->page_size,
1926 + nand->dev->spare_size,
1927 + nand->dev->endurance->ecc_req
1928 + };
1929 +
1930 + return nand->nfi->set_format(nand->nfi, &format);
1931 +}
1932 +
1933 +struct nand_base *nand_device_init(struct nand_chip *chip)
1934 +{
1935 + struct nand_base *nand;
1936 + struct nand_spi *spi;
1937 + struct device_spi *dev;
1938 + int ret;
1939 + u8 mask;
1940 +
1941 + spi = mem_alloc(1, sizeof(struct nand_spi));
1942 + if (!spi) {
1943 + pr_info("alloc nand_spi fail\n");
1944 + return NULL;
1945 + }
1946 +
1947 + spi->ondie_ecc = false;
1948 + spi->op_mode = SNFI_CUSTOM_MODE;
1949 + spi->rx_mode = SNFI_RX_114;
1950 + spi->tx_mode = SNFI_TX_114;
1951 +
1952 + spi->parent = chip->nand;
1953 + nand = &spi->base;
1954 + nand->dev = spi->parent->dev;
1955 + nand->nfi = spi->parent->nfi;
1956 +
1957 + nand->select_device = nand_spi_select_device;
1958 + nand->reset = nand_spi_reset;
1959 + nand->read_id = nand_spi_read_id;
1960 + nand->read_param_page = nand_spi_read_param_page;
1961 + nand->set_feature = nand_spi_set_feature;
1962 + nand->get_feature = nand_spi_get_feature;
1963 + nand->read_status = nand_spi_read_status;
1964 + nand->addressing = nand_spi_addressing;
1965 + nand->read_page = nand_spi_read_page;
1966 + nand->read_data = nand_spi_read_data;
1967 + nand->write_enable = nand_spi_write_enable;
1968 + nand->program_data = nand_spi_program_data;
1969 + nand->program_page = nand_spi_program_page;
1970 + nand->erase_block = nand_spi_erase_block;
1971 +
1972 + chip->chip_ctrl = nand_chip_spi_ctrl;
1973 + chip->nand_type = NAND_SPI;
1974 + chip->resume = nand_chip_spi_resume;
1975 +
1976 + ret = nand_detect_device(nand);
1977 + if (ret)
1978 + goto err;
1979 +
1980 + nand->select_device(nand, 0);
1981 +
1982 + ret = nand_spi_set_format(nand);
1983 + if (ret)
1984 + goto err;
1985 +
1986 + dev = (struct device_spi *)nand->dev;
1987 +
1988 + nand->dev->cmds->random_out_1st =
1989 + spi_replace_rx_cmds(spi->rx_mode);
1990 + nand->dev->cmds->program_1st =
1991 + spi_replace_tx_cmds(spi->tx_mode);
1992 +
1993 + mask = GENMASK(dev->feature.protect.bp_end_bit,
1994 + dev->feature.protect.bp_start_bit);
1995 + ret = nand_spi_set_config(nand, dev->feature.protect.addr, mask, false);
1996 + if (ret)
1997 + goto err;
1998 +
1999 + mask = BIT(dev->feature.config.ecc_en_bit);
2000 + ret = nand_spi_set_config(nand, dev->feature.config.addr, mask,
2001 + spi->ondie_ecc);
2002 + if (ret)
2003 + goto err;
2004 +
2005 + return nand;
2006 +
2007 +err:
2008 + mem_free(spi);
2009 + return NULL;
2010 +}
2011 +
2012 +void nand_exit(struct nand_base *nand)
2013 +{
2014 + struct nand_spi *spi = base_to_spi(nand);
2015 +
2016 + nand_base_exit(spi->parent);
2017 + mem_free(spi);
2018 +}
2019 --- /dev/null
2020 +++ b/drivers/mtd/nandx/core/nand/nand_spi.h
2021 @@ -0,0 +1,35 @@
2022 +/*
2023 + * Copyright (C) 2017 MediaTek Inc.
2024 + * Licensed under either
2025 + * BSD Licence, (see NOTICE for more details)
2026 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2027 + */
2028 +
2029 +#ifndef __NAND_SPI_H__
2030 +#define __NAND_SPI_H__
2031 +
2032 +/*
2033 + * spi nand handler
2034 + * @base: spi nand base functions
2035 + * @parent: common parent nand base functions
2036 + * @tx_mode: spi bus width of transfer to device
2037 + * @rx_mode: spi bus width of transfer from device
2038 + * @op_mode: spi nand controller (NFI) operation mode
2039 + * @ondie_ecc: spi nand on-die ecc flag
2040 + */
2041 +
2042 +struct nand_spi {
2043 + struct nand_base base;
2044 + struct nand_base *parent;
2045 + u8 tx_mode;
2046 + u8 rx_mode;
2047 + u8 op_mode;
2048 + bool ondie_ecc;
2049 +};
2050 +
2051 +static inline struct nand_spi *base_to_spi(struct nand_base *base)
2052 +{
2053 + return container_of(base, struct nand_spi, base);
2054 +}
2055 +
2056 +#endif /* __NAND_SPI_H__ */
2057 --- /dev/null
2058 +++ b/drivers/mtd/nandx/core/nand_base.c
2059 @@ -0,0 +1,304 @@
2060 +/*
2061 + * Copyright (C) 2017 MediaTek Inc.
2062 + * Licensed under either
2063 + * BSD Licence, (see NOTICE for more details)
2064 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2065 + */
2066 +
2067 +#include "nandx_util.h"
2068 +#include "nandx_core.h"
2069 +#include "nand_chip.h"
2070 +#include "nand_device.h"
2071 +#include "nfi.h"
2072 +#include "nand_base.h"
2073 +
2074 +static int nand_base_select_device(struct nand_base *nand, int cs)
2075 +{
2076 + struct nfi *nfi = nand->nfi;
2077 +
2078 + nfi->reset(nfi);
2079 +
2080 + return nfi->select_chip(nfi, cs);
2081 +}
2082 +
2083 +static int nand_base_reset(struct nand_base *nand)
2084 +{
2085 + struct nfi *nfi = nand->nfi;
2086 + struct nand_device *dev = nand->dev;
2087 +
2088 + nfi->reset(nfi);
2089 + nfi->send_cmd(nfi, dev->cmds->reset);
2090 + nfi->trigger(nfi);
2091 +
2092 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tRST);
2093 +}
2094 +
2095 +static int nand_base_read_id(struct nand_base *nand, u8 *id, int count)
2096 +{
2097 + struct nfi *nfi = nand->nfi;
2098 + struct nand_device *dev = nand->dev;
2099 +
2100 + nfi->reset(nfi);
2101 + nfi->send_cmd(nfi, dev->cmds->read_id);
2102 + nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tWHR);
2103 + nfi->send_addr(nfi, 0, 0, 1, 0);
2104 +
2105 + return nfi->read_bytes(nfi, id, count);
2106 +}
2107 +
2108 +static int nand_base_read_param_page(struct nand_base *nand, u8 *data,
2109 + int count)
2110 +{
2111 + struct nfi *nfi = nand->nfi;
2112 + struct nand_device *dev = nand->dev;
2113 +
2114 + nfi->reset(nfi);
2115 + nfi->send_cmd(nfi, dev->cmds->read_param_page);
2116 + nfi->send_addr(nfi, 0, 0, 1, 0);
2117 +
2118 + nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tR);
2119 +
2120 + return nfi->read_bytes(nfi, data, count);
2121 +}
2122 +
2123 +static int nand_base_set_feature(struct nand_base *nand, u8 addr,
2124 + u8 *param,
2125 + int count)
2126 +{
2127 + struct nfi *nfi = nand->nfi;
2128 + struct nand_device *dev = nand->dev;
2129 +
2130 + nfi->reset(nfi);
2131 + nfi->send_cmd(nfi, dev->cmds->set_feature);
2132 + nfi->send_addr(nfi, addr, 0, 1, 0);
2133 +
2134 + nfi->write_bytes(nfi, param, count);
2135 +
2136 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2137 + dev->array_timing->tFEAT);
2138 +}
2139 +
2140 +static int nand_base_get_feature(struct nand_base *nand, u8 addr,
2141 + u8 *param,
2142 + int count)
2143 +{
2144 + struct nfi *nfi = nand->nfi;
2145 + struct nand_device *dev = nand->dev;
2146 +
2147 + nfi->reset(nfi);
2148 + nfi->send_cmd(nfi, dev->cmds->get_feature);
2149 + nfi->send_addr(nfi, addr, 0, 1, 0);
2150 + nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tFEAT);
2151 +
2152 + return nfi->read_bytes(nfi, param, count);
2153 +}
2154 +
2155 +static int nand_base_read_status(struct nand_base *nand)
2156 +{
2157 + struct nfi *nfi = nand->nfi;
2158 + struct nand_device *dev = nand->dev;
2159 + u8 status = 0;
2160 +
2161 + nfi->reset(nfi);
2162 + nfi->send_cmd(nfi, dev->cmds->read_status);
2163 + nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tWHR);
2164 + nfi->read_bytes(nfi, &status, 1);
2165 +
2166 + return status;
2167 +}
2168 +
2169 +static int nand_base_addressing(struct nand_base *nand, int *row,
2170 + int *col)
2171 +{
2172 + struct nand_device *dev = nand->dev;
2173 + int lun, plane, block, page, cs = 0;
2174 + int block_pages, target_blocks, wl = 0;
2175 + int icol = *col;
2176 +
2177 + if (dev->target_num > 1) {
2178 + block_pages = nand_block_pages(dev);
2179 + target_blocks = nand_target_blocks(dev);
2180 + cs = div_down(*row, block_pages * target_blocks);
2181 + *row -= cs * block_pages * target_blocks;
2182 + }
2183 +
2184 + nand->select_device(nand, cs);
2185 +
2186 + block_pages = nand_block_pages(dev);
2187 + block = div_down(*row, block_pages);
2188 + page = *row - block * block_pages;
2189 + plane = reminder(block, dev->plane_num);
2190 + lun = div_down(block, nand_lun_blocks(dev));
2191 +
2192 + wl |= (page << dev->addressing->row_bit_start);
2193 + wl |= (block << dev->addressing->block_bit_start);
2194 + wl |= (plane << dev->addressing->plane_bit_start);
2195 + wl |= (lun << dev->addressing->lun_bit_start);
2196 +
2197 + *row = wl;
2198 + *col = icol;
2199 +
2200 + return 0;
2201 +}
2202 +
2203 +static int nand_base_read_page(struct nand_base *nand, int row)
2204 +{
2205 + struct nfi *nfi = nand->nfi;
2206 + struct nand_device *dev = nand->dev;
2207 +
2208 + nfi->reset(nfi);
2209 + nfi->send_cmd(nfi, dev->cmds->read_1st);
2210 + nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
2211 + nfi->send_cmd(nfi, dev->cmds->read_2nd);
2212 + nfi->trigger(nfi);
2213 +
2214 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tR);
2215 +}
2216 +
2217 +static int nand_base_read_data(struct nand_base *nand, int row, int col,
2218 + int sectors, u8 *data, u8 *oob)
2219 +{
2220 + struct nfi *nfi = nand->nfi;
2221 + struct nand_device *dev = nand->dev;
2222 +
2223 + nfi->reset(nfi);
2224 + nfi->send_cmd(nfi, dev->cmds->random_out_1st);
2225 + nfi->send_addr(nfi, col, row, dev->col_cycle, dev->row_cycle);
2226 + nfi->send_cmd(nfi, dev->cmds->random_out_2nd);
2227 + nfi->wait_ready(nfi, NAND_WAIT_POLLING, dev->array_timing->tRCBSY);
2228 +
2229 + return nfi->read_sectors(nfi, data, oob, sectors);
2230 +}
2231 +
2232 +static int nand_base_write_enable(struct nand_base *nand)
2233 +{
2234 + struct nand_device *dev = nand->dev;
2235 + int status;
2236 +
2237 + status = nand_base_read_status(nand);
2238 + if (status & dev->status->write_protect)
2239 + return 0;
2240 +
2241 + return -ENANDWP;
2242 +}
2243 +
2244 +static int nand_base_program_data(struct nand_base *nand, int row,
2245 + int col,
2246 + u8 *data, u8 *oob)
2247 +{
2248 + struct nfi *nfi = nand->nfi;
2249 + struct nand_device *dev = nand->dev;
2250 +
2251 + nfi->reset(nfi);
2252 + nfi->send_cmd(nfi, dev->cmds->program_1st);
2253 + nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
2254 +
2255 + return nfi->write_page(nfi, data, oob);
2256 +}
2257 +
2258 +static int nand_base_program_page(struct nand_base *nand, int row)
2259 +{
2260 + struct nfi *nfi = nand->nfi;
2261 + struct nand_device *dev = nand->dev;
2262 +
2263 + nfi->reset(nfi);
2264 + nfi->send_cmd(nfi, dev->cmds->program_2nd);
2265 + nfi->trigger(nfi);
2266 +
2267 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2268 + dev->array_timing->tPROG);
2269 +}
2270 +
2271 +static int nand_base_erase_block(struct nand_base *nand, int row)
2272 +{
2273 + struct nfi *nfi = nand->nfi;
2274 + struct nand_device *dev = nand->dev;
2275 +
2276 + nfi->reset(nfi);
2277 + nfi->send_cmd(nfi, dev->cmds->erase_1st);
2278 + nfi->send_addr(nfi, 0, row, 0, dev->row_cycle);
2279 + nfi->send_cmd(nfi, dev->cmds->erase_2nd);
2280 + nfi->trigger(nfi);
2281 +
2282 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2283 + dev->array_timing->tBERS);
2284 +}
2285 +
2286 +static int nand_base_read_cache(struct nand_base *nand, int row)
2287 +{
2288 + struct nfi *nfi = nand->nfi;
2289 + struct nand_device *dev = nand->dev;
2290 +
2291 + nfi->reset(nfi);
2292 + nfi->send_cmd(nfi, dev->cmds->read_1st);
2293 + nfi->send_addr(nfi, 0, row, dev->col_cycle, dev->row_cycle);
2294 + nfi->send_cmd(nfi, dev->cmds->read_cache);
2295 + nfi->trigger(nfi);
2296 +
2297 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2298 + dev->array_timing->tRCBSY);
2299 +}
2300 +
2301 +static int nand_base_read_last(struct nand_base *nand)
2302 +{
2303 + struct nfi *nfi = nand->nfi;
2304 + struct nand_device *dev = nand->dev;
2305 +
2306 + nfi->reset(nfi);
2307 + nfi->send_cmd(nfi, dev->cmds->read_cache_last);
2308 + nfi->trigger(nfi);
2309 +
2310 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2311 + dev->array_timing->tRCBSY);
2312 +}
2313 +
2314 +static int nand_base_program_cache(struct nand_base *nand)
2315 +{
2316 + struct nfi *nfi = nand->nfi;
2317 + struct nand_device *dev = nand->dev;
2318 +
2319 + nfi->reset(nfi);
2320 + nfi->send_cmd(nfi, dev->cmds->program_cache);
2321 + nfi->trigger(nfi);
2322 +
2323 + return nfi->wait_ready(nfi, NAND_WAIT_POLLING,
2324 + dev->array_timing->tPCBSY);
2325 +}
2326 +
2327 +struct nand_base *nand_base_init(struct nand_device *dev,
2328 + struct nfi *nfi)
2329 +{
2330 + struct nand_base *nand;
2331 +
2332 + nand = mem_alloc(1, sizeof(struct nand_base));
2333 + if (!nand)
2334 + return NULL;
2335 +
2336 + nand->dev = dev;
2337 + nand->nfi = nfi;
2338 + nand->select_device = nand_base_select_device;
2339 + nand->reset = nand_base_reset;
2340 + nand->read_id = nand_base_read_id;
2341 + nand->read_param_page = nand_base_read_param_page;
2342 + nand->set_feature = nand_base_set_feature;
2343 + nand->get_feature = nand_base_get_feature;
2344 + nand->read_status = nand_base_read_status;
2345 + nand->addressing = nand_base_addressing;
2346 + nand->read_page = nand_base_read_page;
2347 + nand->read_data = nand_base_read_data;
2348 + nand->read_cache = nand_base_read_cache;
2349 + nand->read_last = nand_base_read_last;
2350 + nand->write_enable = nand_base_write_enable;
2351 + nand->program_data = nand_base_program_data;
2352 + nand->program_page = nand_base_program_page;
2353 + nand->program_cache = nand_base_program_cache;
2354 + nand->erase_block = nand_base_erase_block;
2355 +
2356 + return nand;
2357 +}
2358 +
2359 +void nand_base_exit(struct nand_base *base)
2360 +{
2361 + nfi_exit(base->nfi);
2362 + mem_free(base);
2363 +}
2364 --- /dev/null
2365 +++ b/drivers/mtd/nandx/core/nand_base.h
2366 @@ -0,0 +1,71 @@
2367 +/*
2368 + * Copyright (C) 2017 MediaTek Inc.
2369 + * Licensed under either
2370 + * BSD Licence, (see NOTICE for more details)
2371 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2372 + */
2373 +
2374 +#ifndef __NAND_BASE_H__
2375 +#define __NAND_BASE_H__
2376 +
2377 +/*
2378 + * nand base functions
2379 + * @dev: nand device information
2380 + * @nfi: nand host controller
2381 + * @select_device: select one nand device of multi nand on chip
2382 + * @reset: reset current nand device
2383 + * @read_id: read current nand id
2384 + * @read_param_page: read current nand parameters page
2385 + * @set_feature: configure the nand device feature
2386 + * @get_feature: get the nand device feature
2387 + * @read_status: read nand device status
2388 + * @addressing: addressing the address to nand device physical address
2389 + * @read_page: read page data to device cache register
2390 + * @read_data: read data from device cache register by bus protocol
2391 + * @read_cache: nand cache read operation for data output
2392 + * @read_last: nand cache read operation for last page output
2393 + * @write_enable: enable program/erase for nand, especially spi nand
2394 + * @program_data: program data to nand device cache register
2395 + * @program_page: program page data from nand device cache register to array
2396 + * @program_cache: nand cache program operation for data input
2397 + * @erase_block: erase nand block operation
2398 + */
2399 +struct nand_base {
2400 + struct nand_device *dev;
2401 + struct nfi *nfi;
2402 + int (*select_device)(struct nand_base *nand, int cs);
2403 + int (*reset)(struct nand_base *nand);
2404 + int (*read_id)(struct nand_base *nand, u8 *id, int count);
2405 + int (*read_param_page)(struct nand_base *nand, u8 *data, int count);
2406 + int (*set_feature)(struct nand_base *nand, u8 addr, u8 *param,
2407 + int count);
2408 + int (*get_feature)(struct nand_base *nand, u8 addr, u8 *param,
2409 + int count);
2410 + int (*read_status)(struct nand_base *nand);
2411 + int (*addressing)(struct nand_base *nand, int *row, int *col);
2412 +
2413 + int (*read_page)(struct nand_base *nand, int row);
2414 + int (*read_data)(struct nand_base *nand, int row, int col, int sectors,
2415 + u8 *data, u8 *oob);
2416 + int (*read_cache)(struct nand_base *nand, int row);
2417 + int (*read_last)(struct nand_base *nand);
2418 +
2419 + int (*write_enable)(struct nand_base *nand);
2420 + int (*program_data)(struct nand_base *nand, int row, int col, u8 *data,
2421 + u8 *oob);
2422 + int (*program_page)(struct nand_base *nand, int row);
2423 + int (*program_cache)(struct nand_base *nand);
2424 +
2425 + int (*erase_block)(struct nand_base *nand, int row);
2426 +};
2427 +
2428 +struct nand_base *nand_base_init(struct nand_device *device,
2429 + struct nfi *nfi);
2430 +void nand_base_exit(struct nand_base *base);
2431 +
2432 +struct nand_base *nand_device_init(struct nand_chip *nand);
2433 +void nand_exit(struct nand_base *nand);
2434 +
2435 +int nand_detect_device(struct nand_base *nand);
2436 +
2437 +#endif /* __NAND_BASE_H__ */
2438 --- /dev/null
2439 +++ b/drivers/mtd/nandx/core/nand_chip.c
2440 @@ -0,0 +1,272 @@
2441 +/*
2442 + * Copyright (C) 2017 MediaTek Inc.
2443 + * Licensed under either
2444 + * BSD Licence, (see NOTICE for more details)
2445 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2446 + */
2447 +
2448 +#include "nandx_util.h"
2449 +#include "nandx_core.h"
2450 +#include "nand_chip.h"
2451 +#include "nand_device.h"
2452 +#include "nfi.h"
2453 +#include "nand_base.h"
2454 +
2455 +static int nand_chip_read_page(struct nand_chip *chip,
2456 + struct nand_ops *ops,
2457 + int count)
2458 +{
2459 + struct nand_base *nand = chip->nand;
2460 + struct nand_device *dev = nand->dev;
2461 + int i, ret = 0;
2462 + int row, col, sectors;
2463 + u8 *data, *oob;
2464 +
2465 + for (i = 0; i < count; i++) {
2466 + row = ops[i].row;
2467 + col = ops[i].col;
2468 +
2469 + nand->addressing(nand, &row, &col);
2470 + ops[i].status = nand->read_page(nand, row);
2471 + if (ops[i].status < 0) {
2472 + ret = ops[i].status;
2473 + continue;
2474 + }
2475 +
2476 + data = ops[i].data;
2477 + oob = ops[i].oob;
2478 + sectors = ops[i].len / chip->sector_size;
2479 + ops[i].status = nand->read_data(nand, row, col,
2480 + sectors, data, oob);
2481 + if (ops[i].status > 0)
2482 + ops[i].status = ops[i].status >=
2483 + dev->endurance->max_bitflips ?
2484 + -ENANDFLIPS : 0;
2485 +
2486 + ret = min_t(int, ret, ops[i].status);
2487 + }
2488 +
2489 + return ret;
2490 +}
2491 +
2492 +static int nand_chip_write_page(struct nand_chip *chip,
2493 + struct nand_ops *ops,
2494 + int count)
2495 +{
2496 + struct nand_base *nand = chip->nand;
2497 + struct nand_device *dev = nand->dev;
2498 + int i, ret = 0;
2499 + int row, col;
2500 + u8 *data, *oob;
2501 +
2502 + for (i = 0; i < count; i++) {
2503 + row = ops[i].row;
2504 + col = ops[i].col;
2505 +
2506 + nand->addressing(nand, &row, &col);
2507 +
2508 + ops[i].status = nand->write_enable(nand);
2509 + if (ops[i].status) {
2510 + pr_debug("Write Protect at %x!\n", row);
2511 + ops[i].status = -ENANDWP;
2512 + return -ENANDWP;
2513 + }
2514 +
2515 + data = ops[i].data;
2516 + oob = ops[i].oob;
2517 + ops[i].status = nand->program_data(nand, row, col, data, oob);
2518 + if (ops[i].status < 0) {
2519 + ret = ops[i].status;
2520 + continue;
2521 + }
2522 +
2523 + ops[i].status = nand->program_page(nand, row);
2524 + if (ops[i].status < 0) {
2525 + ret = ops[i].status;
2526 + continue;
2527 + }
2528 +
2529 + ops[i].status = nand->read_status(nand);
2530 + if (ops[i].status & dev->status->program_fail)
2531 + ops[i].status = -ENANDWRITE;
2532 +
2533 + ret = min_t(int, ret, ops[i].status);
2534 + }
2535 +
2536 + return ret;
2537 +}
2538 +
2539 +static int nand_chip_erase_block(struct nand_chip *chip,
2540 + struct nand_ops *ops,
2541 + int count)
2542 +{
2543 + struct nand_base *nand = chip->nand;
2544 + struct nand_device *dev = nand->dev;
2545 + int i, ret = 0;
2546 + int row, col;
2547 +
2548 + for (i = 0; i < count; i++) {
2549 + row = ops[i].row;
2550 + col = ops[i].col;
2551 +
2552 + nand->addressing(nand, &row, &col);
2553 +
2554 + ops[i].status = nand->write_enable(nand);
2555 + if (ops[i].status) {
2556 + pr_debug("Write Protect at %x!\n", row);
2557 + ops[i].status = -ENANDWP;
2558 + return -ENANDWP;
2559 + }
2560 +
2561 + ops[i].status = nand->erase_block(nand, row);
2562 + if (ops[i].status < 0) {
2563 + ret = ops[i].status;
2564 + continue;
2565 + }
2566 +
2567 + ops[i].status = nand->read_status(nand);
2568 + if (ops[i].status & dev->status->erase_fail)
2569 + ops[i].status = -ENANDERASE;
2570 +
2571 + ret = min_t(int, ret, ops[i].status);
2572 + }
2573 +
2574 + return ret;
2575 +}
2576 +
2577 +/* read first bad mark on spare */
2578 +static int nand_chip_is_bad_block(struct nand_chip *chip,
2579 + struct nand_ops *ops,
2580 + int count)
2581 +{
2582 + int i, ret, value;
2583 + int status = 0;
2584 + u8 *data, *tmp_buf;
2585 +
2586 + tmp_buf = mem_alloc(1, chip->page_size);
2587 + if (!tmp_buf)
2588 + return -ENOMEM;
2589 +
2590 + memset(tmp_buf, 0x00, chip->page_size);
2591 +
2592 + /* Disable ECC */
2593 + value = 0;
2594 + ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
2595 + if (ret)
2596 + goto out;
2597 +
2598 + ret = chip->read_page(chip, ops, count);
2599 + if (ret)
2600 + goto out;
2601 +
2602 + for (i = 0; i < count; i++) {
2603 + data = ops[i].data;
2604 +
2605 + /* temp solution for mt7622, because of no bad mark swap */
2606 + if (!memcmp(data, tmp_buf, chip->page_size)) {
2607 + ops[i].status = -ENANDBAD;
2608 + status = -ENANDBAD;
2609 +
2610 + } else {
2611 + ops[i].status = 0;
2612 + }
2613 + }
2614 +
2615 + /* Enable ECC */
2616 + value = 1;
2617 + ret = chip->chip_ctrl(chip, NFI_CTRL_ECC, &value);
2618 + if (ret)
2619 + goto out;
2620 +
2621 + mem_free(tmp_buf);
2622 + return status;
2623 +
2624 +out:
2625 + mem_free(tmp_buf);
2626 + return ret;
2627 +}
2628 +
2629 +static int nand_chip_ctrl(struct nand_chip *chip, int cmd, void *args)
2630 +{
2631 + return -EOPNOTSUPP;
2632 +}
2633 +
2634 +static int nand_chip_suspend(struct nand_chip *chip)
2635 +{
2636 + return 0;
2637 +}
2638 +
2639 +static int nand_chip_resume(struct nand_chip *chip)
2640 +{
2641 + return 0;
2642 +}
2643 +
2644 +struct nand_chip *nand_chip_init(struct nfi_resource *res)
2645 +{
2646 + struct nand_chip *chip;
2647 + struct nand_base *nand;
2648 + struct nfi *nfi;
2649 +
2650 + chip = mem_alloc(1, sizeof(struct nand_chip));
2651 + if (!chip) {
2652 + pr_info("nand chip alloc fail!\n");
2653 + return NULL;
2654 + }
2655 +
2656 + nfi = nfi_init(res);
2657 + if (!nfi) {
2658 + pr_info("nfi init fail!\n");
2659 + goto nfi_err;
2660 + }
2661 +
2662 + nand = nand_base_init(NULL, nfi);
2663 + if (!nand) {
2664 + pr_info("nand base init fail!\n");
2665 + goto base_err;
2666 + }
2667 +
2668 + chip->nand = (void *)nand;
2669 + chip->read_page = nand_chip_read_page;
2670 + chip->write_page = nand_chip_write_page;
2671 + chip->erase_block = nand_chip_erase_block;
2672 + chip->is_bad_block = nand_chip_is_bad_block;
2673 + chip->chip_ctrl = nand_chip_ctrl;
2674 + chip->suspend = nand_chip_suspend;
2675 + chip->resume = nand_chip_resume;
2676 +
2677 + nand = nand_device_init(chip);
2678 + if (!nand)
2679 + goto nand_err;
2680 +
2681 + chip->nand = (void *)nand;
2682 + chip->plane_num = nand->dev->plane_num;
2683 + chip->block_num = nand_total_blocks(nand->dev);
2684 + chip->block_size = nand->dev->block_size;
2685 + chip->block_pages = nand_block_pages(nand->dev);
2686 + chip->page_size = nand->dev->page_size;
2687 + chip->oob_size = nfi->fdm_size * div_down(chip->page_size,
2688 + nfi->sector_size);
2689 + chip->sector_size = nfi->sector_size;
2690 + chip->sector_spare_size = nfi->sector_spare_size;
2691 + chip->min_program_pages = nand->dev->min_program_pages;
2692 + chip->ecc_strength = nfi->ecc_strength;
2693 + chip->ecc_parity_size = nfi->ecc_parity_size;
2694 + chip->fdm_ecc_size = nfi->fdm_ecc_size;
2695 + chip->fdm_reg_size = nfi->fdm_size;
2696 +
2697 + return chip;
2698 +
2699 +nand_err:
2700 + mem_free(nand);
2701 +base_err:
2702 + nfi_exit(nfi);
2703 +nfi_err:
2704 + mem_free(chip);
2705 + return NULL;
2706 +}
2707 +
2708 +void nand_chip_exit(struct nand_chip *chip)
2709 +{
2710 + nand_exit(chip->nand);
2711 + mem_free(chip);
2712 +}
2713 --- /dev/null
2714 +++ b/drivers/mtd/nandx/core/nand_chip.h
2715 @@ -0,0 +1,103 @@
2716 +/*
2717 + * Copyright (C) 2017 MediaTek Inc.
2718 + * Licensed under either
2719 + * BSD Licence, (see NOTICE for more details)
2720 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2721 + */
2722 +
2723 +#ifndef __NAND_CHIP_H__
2724 +#define __NAND_CHIP_H__
2725 +
2726 +enum nand_type {
2727 + NAND_SPI,
2728 + NAND_SLC,
2729 + NAND_MLC,
2730 + NAND_TLC
2731 +};
2732 +
2733 +/*
2734 + * nand chip operation unit
2735 + * one nand_ops indicates one row operation
2736 + * @row: nand chip row address, like as nand row
2737 + * @col: nand chip column address, like as nand column
2738 + * @len: operate data length, min is sector_size,
2739 + * max is page_size and sector_size aligned
2740 + * @status: one operation result status
2741 + * @data: data buffer for operation
2742 + * @oob: oob buffer for operation, like as nand spare area
2743 + */
2744 +struct nand_ops {
2745 + int row;
2746 + int col;
2747 + int len;
2748 + int status;
2749 + void *data;
2750 + void *oob;
2751 +};
2752 +
2753 +/*
2754 + * nand chip descriptions
2755 + * nand chip includes nand controller and the several same nand devices
2756 + * @nand_type: the nand type on this chip,
2757 + * the chip maybe have several nand device and the type must be same
2758 + * @plane_num: the whole plane number on the chip
2759 + * @block_num: the whole block number on the chip
2760 + * @block_size: nand device block size
2761 + * @block_pages: nand device block has page number
2762 + * @page_size: nand device page size
2763 + * @oob_size: chip out of band size, like as nand spare size,
2764 + * but restricts this:
2765 + * the size is provided by nand controller(NFI),
2766 + * because NFI would use some nand spare size
2767 + * @min_program_pages: chip needs min pages per program operations
2768 + * one page as one nand_ops
2769 + * @sector_size: chip min read size
2770 + * @sector_spare_size: spare size for sector, is spare_size/page_sectors
2771 + * @ecc_strength: ecc strength per sector_size, it would be for calculated ecc
2772 + * @ecc_parity_size: ecc parity size for one sector_size data
2773 + * @nand: pointer to inherited struct nand_base
2774 + * @read_page: read %count pages on chip
2775 + * @write_page: write %count pages on chip
2776 + * @erase_block: erase %count blocks on chip, one block is one nand_ops
2777 + * it is better to set nand_ops.row to block start row
2778 + * @is_bad_block: judge the %count blocks on chip if they are bad
2779 + * by vendor specification
2780 + * @chip_ctrl: control the chip features by nandx_ctrl_cmd
2781 + * @suspend: suspend nand chip
2782 + * @resume: resume nand chip
2783 + */
2784 +struct nand_chip {
2785 + int nand_type;
2786 + int plane_num;
2787 + int block_num;
2788 + int block_size;
2789 + int block_pages;
2790 + int page_size;
2791 + int oob_size;
2792 +
2793 + int min_program_pages;
2794 + int sector_size;
2795 + int sector_spare_size;
2796 + int ecc_strength;
2797 + int ecc_parity_size;
2798 + u32 fdm_ecc_size;
2799 + u32 fdm_reg_size;
2800 +
2801 + void *nand;
2802 +
2803 + int (*read_page)(struct nand_chip *chip, struct nand_ops *ops,
2804 + int count);
2805 + int (*write_page)(struct nand_chip *chip, struct nand_ops *ops,
2806 + int count);
2807 + int (*erase_block)(struct nand_chip *chip, struct nand_ops *ops,
2808 + int count);
2809 + int (*is_bad_block)(struct nand_chip *chip, struct nand_ops *ops,
2810 + int count);
2811 + int (*chip_ctrl)(struct nand_chip *chip, int cmd, void *args);
2812 + int (*suspend)(struct nand_chip *chip);
2813 + int (*resume)(struct nand_chip *chip);
2814 +};
2815 +
2816 +struct nand_chip *nand_chip_init(struct nfi_resource *res);
2817 +void nand_chip_exit(struct nand_chip *chip);
2818 +#endif /* __NAND_CHIP_H__ */
2819 --- /dev/null
2820 +++ b/drivers/mtd/nandx/core/nand_device.c
2821 @@ -0,0 +1,285 @@
2822 +/*
2823 + * Copyright (C) 2017 MediaTek Inc.
2824 + * Licensed under either
2825 + * BSD Licence, (see NOTICE for more details)
2826 + * GNU General Public License, version 2.0, (see NOTICE for more details)
2827 + */
2828 +
2829 +#include "nandx_util.h"
2830 +#include "nandx_core.h"
2831 +#include "nand_chip.h"
2832 +#include "nand_device.h"
2833 +#include "nand_base.h"
2834 +
2835 +#define MAX_CHIP_DEVICE 4
2836 +#define PARAM_PAGE_LEN 2048
2837 +#define ONFI_CRC_BASE 0x4f4e
2838 +
2839 +static u16 nand_onfi_crc16(u16 crc, u8 const *p, size_t len)
2840 +{
2841 + int i;
2842 +
2843 + while (len--) {
2844 + crc ^= *p++ << 8;
2845 +
2846 + for (i = 0; i < 8; i++)
2847 + crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
2848 + }
2849 +
2850 + return crc;
2851 +}
2852 +
2853 +static inline void decode_addr_cycle(u8 addr_cycle, u8 *row_cycle,
2854 + u8 *col_cycle)
2855 +{
2856 + *row_cycle = addr_cycle & 0xf;
2857 + *col_cycle = (addr_cycle >> 4) & 0xf;
2858 +}
2859 +
2860 +static int detect_onfi(struct nand_device *dev,
2861 + struct nand_onfi_params *onfi)
2862 +{
2863 + struct nand_endurance *endurance = dev->endurance;
2864 + u16 size, i, crc16;
2865 + u8 *id;
2866 +
2867 + size = sizeof(struct nand_onfi_params) - sizeof(u16);
2868 +
2869 + for (i = 0; i < 3; i++) {
2870 + crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&onfi[i], size);
2871 +
2872 + if (onfi[i].signature[0] == 'O' &&
2873 + onfi[i].signature[1] == 'N' &&
2874 + onfi[i].signature[2] == 'F' &&
2875 + onfi[i].signature[3] == 'I' &&
2876 + onfi[i].crc16 == crc16)
2877 + break;
2878 +
2879 + /* in some spi nand, onfi signature maybe "NAND" */
2880 + if (onfi[i].signature[0] == 'N' &&
2881 + onfi[i].signature[1] == 'A' &&
2882 + onfi[i].signature[2] == 'N' &&
2883 + onfi[i].signature[3] == 'D' &&
2884 + onfi[i].crc16 == crc16)
2885 + break;
2886 + }
2887 +
2888 + if (i == 3)
2889 + return -ENODEV;
2890 +
2891 + memcpy(dev->name, onfi[i].model, 20);
2892 + id = onfi[i].manufacturer;
2893 + dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
2894 + id[7]);
2895 + dev->id_len = MAX_ID_NUM;
2896 + dev->io_width = (onfi[i].features & 1) ? NAND_IO16 : NAND_IO8;
2897 + decode_addr_cycle(onfi[i].addr_cycle, &dev->row_cycle,
2898 + &dev->col_cycle);
2899 + dev->target_num = 1;
2900 + dev->lun_num = onfi[i].lun_num;
2901 + dev->plane_num = BIT(onfi[i].plane_address_bits);
2902 + dev->block_num = onfi[i].lun_blocks / dev->plane_num;
2903 + dev->block_size = onfi[i].block_pages * onfi[i].page_size;
2904 + dev->page_size = onfi[i].page_size;
2905 + dev->spare_size = onfi[i].spare_size;
2906 +
2907 + endurance->ecc_req = onfi[i].ecc_req;
2908 + endurance->pe_cycle = onfi[i].valid_block_endurance;
2909 + endurance->max_bitflips = endurance->ecc_req >> 1;
2910 +
2911 + return 0;
2912 +}
2913 +
2914 +static int detect_jedec(struct nand_device *dev,
2915 + struct nand_jedec_params *jedec)
2916 +{
2917 + struct nand_endurance *endurance = dev->endurance;
2918 + u16 size, i, crc16;
2919 + u8 *id;
2920 +
2921 + size = sizeof(struct nand_jedec_params) - sizeof(u16);
2922 +
2923 + for (i = 0; i < 3; i++) {
2924 + crc16 = nand_onfi_crc16(ONFI_CRC_BASE, (u8 *)&jedec[i], size);
2925 +
2926 + if (jedec[i].signature[0] == 'J' &&
2927 + jedec[i].signature[1] == 'E' &&
2928 + jedec[i].signature[2] == 'S' &&
2929 + jedec[i].signature[3] == 'D' &&
2930 + jedec[i].crc16 == crc16)
2931 + break;
2932 + }
2933 +
2934 + if (i == 3)
2935 + return -ENODEV;
2936 +
2937 + memcpy(dev->name, jedec[i].model, 20);
2938 + id = jedec[i].manufacturer;
2939 + dev->id = NAND_PACK_ID(id[0], id[1], id[2], id[3], id[4], id[5], id[6],
2940 + id[7]);
2941 + dev->id_len = MAX_ID_NUM;
2942 + dev->io_width = (jedec[i].features & 1) ? NAND_IO16 : NAND_IO8;
2943 + decode_addr_cycle(jedec[i].addr_cycle, &dev->row_cycle,
2944 + &dev->col_cycle);
2945 + dev->target_num = 1;
2946 + dev->lun_num = jedec[i].lun_num;
2947 + dev->plane_num = BIT(jedec[i].plane_address_bits);
2948 + dev->block_num = jedec[i].lun_blocks / dev->plane_num;
2949 + dev->block_size = jedec[i].block_pages * jedec[i].page_size;
2950 + dev->page_size = jedec[i].page_size;
2951 + dev->spare_size = jedec[i].spare_size;
2952 +
2953 + endurance->ecc_req = jedec[i].endurance_block0[0];
2954 + endurance->pe_cycle = jedec[i].valid_block_endurance;
2955 + endurance->max_bitflips = endurance->ecc_req >> 1;
2956 +
2957 + return 0;
2958 +}
2959 +
2960 +static struct nand_device *detect_parameters_page(struct nand_base
2961 + *nand)
2962 +{
2963 + struct nand_device *dev = nand->dev;
2964 + void *params;
2965 + int ret;
2966 +
2967 + params = mem_alloc(1, PARAM_PAGE_LEN);
2968 + if (!params)
2969 + return NULL;
2970 +
2971 + memset(params, 0, PARAM_PAGE_LEN);
2972 + ret = nand->read_param_page(nand, params, PARAM_PAGE_LEN);
2973 + if (ret < 0) {
2974 + pr_info("read parameters page fail!\n");
2975 + goto error;
2976 + }
2977 +
2978 + ret = detect_onfi(dev, params);
2979 + if (ret) {
2980 + pr_info("detect onfi device fail! try to detect jedec\n");
2981 + ret = detect_jedec(dev, params);
2982 + if (ret) {
2983 + pr_info("detect jedec device fail!\n");
2984 + goto error;
2985 + }
2986 + }
2987 +
2988 + mem_free(params);
2989 + return dev;
2990 +
2991 +error:
2992 + mem_free(params);
2993 + return NULL;
2994 +}
2995 +
2996 +static int read_device_id(struct nand_base *nand, int cs, u8 *id)
2997 +{
2998 + int i;
2999 +
3000 + nand->select_device(nand, cs);
3001 + nand->reset(nand);
3002 + nand->read_id(nand, id, MAX_ID_NUM);
3003 + pr_info("device %d ID: ", cs);
3004 +
3005 + for (i = 0; i < MAX_ID_NUM; i++)
3006 + pr_info("%x ", id[i]);
3007 +
3008 + pr_info("\n");
3009 +
3010 + return 0;
3011 +}
3012 +
3013 +static int detect_more_device(struct nand_base *nand, u8 *id)
3014 +{
3015 + u8 id_ext[MAX_ID_NUM];
3016 + int i, j, target_num = 0;
3017 +
3018 + for (i = 1; i < MAX_CHIP_DEVICE; i++) {
3019 + memset(id_ext, 0xff, MAX_ID_NUM);
3020 + read_device_id(nand, i, id_ext);
3021 +
3022 + for (j = 0; j < MAX_ID_NUM; j++) {
3023 + if (id_ext[j] != id[j])
3024 + goto out;
3025 + }
3026 +
3027 + target_num += 1;
3028 + }
3029 +
3030 +out:
3031 + return target_num;
3032 +}
3033 +
3034 +static struct nand_device *scan_device_table(const u8 *id, int id_len)
3035 +{
3036 + struct nand_device *dev;
3037 + int i = 0, j;
3038 + u8 ids[MAX_ID_NUM] = {0};
3039 +
3040 + while (1) {
3041 + dev = nand_get_device(i);
3042 +
3043 + if (!strcmp(dev->name, "NO-DEVICE"))
3044 + break;
3045 +
3046 + if (id_len < dev->id_len) {
3047 + i += 1;
3048 + continue;
3049 + }
3050 +
3051 + NAND_UNPACK_ID(dev->id, ids, MAX_ID_NUM);
3052 + for (j = 0; j < dev->id_len; j++) {
3053 + if (ids[j] != id[j])
3054 + break;
3055 + }
3056 +
3057 + if (j == dev->id_len)
3058 + break;
3059 +
3060 + i += 1;
3061 + }
3062 +
3063 + return dev;
3064 +}
3065 +
3066 +int nand_detect_device(struct nand_base *nand)
3067 +{
3068 + struct nand_device *dev;
3069 + u8 id[MAX_ID_NUM] = { 0 };
3070 + int target_num = 0;
3071 +
3072 + /* Get nand device default setting for reset/read_id */
3073 + nand->dev = scan_device_table(NULL, -1);
3074 +
3075 + read_device_id(nand, 0, id);
3076 + dev = scan_device_table(id, MAX_ID_NUM);
3077 +
3078 + if (!strcmp(dev->name, "NO-DEVICE")) {
3079 + pr_info("device scan fail\n");
3080 + return -ENODEV;
3081 + }
3082 +
3083 + /* TobeFix: has null pointer issue in this function */
3084 + if (!strcmp(dev->name, "NO-DEVICE")) {
3085 + pr_info("device scan fail, detect parameters page\n");
3086 + dev = detect_parameters_page(nand);
3087 + if (!dev) {
3088 + pr_info("detect parameters fail\n");
3089 + return -ENODEV;
3090 + }
3091 + }
3092 +
3093 + if (dev->target_num > 1)
3094 + target_num = detect_more_device(nand, id);
3095 +
3096 + target_num += 1;
3097 + pr_debug("chip has target device num: %d\n", target_num);
3098 +
3099 + if (dev->target_num != target_num)
3100 + dev->target_num = target_num;
3101 +
3102 + nand->dev = dev;
3103 +
3104 + return 0;
3105 +}
3106 +
3107 --- /dev/null
3108 +++ b/drivers/mtd/nandx/core/nand_device.h
3109 @@ -0,0 +1,608 @@
3110 +/*
3111 + * Copyright (C) 2017 MediaTek Inc.
3112 + * Licensed under either
3113 + * BSD Licence, (see NOTICE for more details)
3114 + * GNU General Public License, version 2.0, (see NOTICE for more details)
3115 + */
3116 +
3117 +#ifndef __NAND_DEVICE_H__
3118 +#define __NAND_DEVICE_H__
3119 +
3120 +/* onfi 3.2 */
3121 +struct nand_onfi_params {
3122 + /* Revision information and features block. 0 */
3123 + /*
3124 + * Byte 0: 4Fh,
3125 + * Byte 1: 4Eh,
3126 + * Byte 2: 46h,
3127 + * Byte 3: 49h,
3128 + */
3129 + u8 signature[4];
3130 + /*
3131 + * 9-15 Reserved (0)
3132 + * 8 1 = supports ONFI version 3.2
3133 + * 7 1 = supports ONFI version 3.1
3134 + * 6 1 = supports ONFI version 3.0
3135 + * 5 1 = supports ONFI version 2.3
3136 + * 4 1 = supports ONFI version 2.2
3137 + * 3 1 = supports ONFI version 2.1
3138 + * 2 1 = supports ONFI version 2.0
3139 + * 1 1 = supports ONFI version 1.0
3140 + * 0 Reserved (0)
3141 + */
3142 + u16 revision;
3143 + /*
3144 + * 13-15 Reserved (0)
3145 + * 12 1 = supports external Vpp
3146 + * 11 1 = supports Volume addressing
3147 + * 10 1 = supports NV-DDR2
3148 + * 9 1 = supports EZ NAND
3149 + * 8 1 = supports program page register clear enhancement
3150 + * 7 1 = supports extended parameter page
3151 + * 6 1 = supports multi-plane read operations
3152 + * 5 1 = supports NV-DDR
3153 + * 4 1 = supports odd to even page Copyback
3154 + * 3 1 = supports multi-plane program and erase operations
3155 + * 2 1 = supports non-sequential page programming
3156 + * 1 1 = supports multiple LUN operations
3157 + * 0 1 = supports 16-bit data bus width
3158 + */
3159 + u16 features;
3160 + /*
3161 + * 13-15 Reserved (0)
3162 + * 12 1 = supports LUN Get and LUN Set Features
3163 + * 11 1 = supports ODT Configure
3164 + * 10 1 = supports Volume Select
3165 + * 9 1 = supports Reset LUN
3166 + * 8 1 = supports Small Data Move
3167 + * 7 1 = supports Change Row Address
3168 + * 6 1 = supports Change Read Column Enhanced
3169 + * 5 1 = supports Read Unique ID
3170 + * 4 1 = supports Copyback
3171 + * 3 1 = supports Read Status Enhanced
3172 + * 2 1 = supports Get Features and Set Features
3173 + * 1 1 = supports Read Cache commands
3174 + * 0 1 = supports Page Cache Program command
3175 + */
3176 + u16 opt_cmds;
3177 + /*
3178 + * 4-7 Reserved (0)
3179 + * 3 1 = supports Multi-plane Block Erase
3180 + * 2 1 = supports Multi-plane Copyback Program
3181 + * 1 1 = supports Multi-plane Page Program
3182 + * 0 1 = supports Random Data Out
3183 + */
3184 + u8 advance_cmds;
3185 + u8 reserved0[1];
3186 + u16 extend_param_len;
3187 + u8 param_page_num;
3188 + u8 reserved1[17];
3189 +
3190 + /* Manufacturer information block. 32 */
3191 + u8 manufacturer[12];
3192 + u8 model[20];
3193 + u8 jedec_id;
3194 + u16 data_code;
3195 + u8 reserved2[13];
3196 +
3197 + /* Memory organization block. 80 */
3198 + u32 page_size;
3199 + u16 spare_size;
3200 + u32 partial_page_size; /* obsolete */
3201 + u16 partial_spare_size; /* obsolete */
3202 + u32 block_pages;
3203 + u32 lun_blocks;
3204 + u8 lun_num;
3205 + /*
3206 + * 4-7 Column address cycles
3207 + * 0-3 Row address cycles
3208 + */
3209 + u8 addr_cycle;
3210 + u8 cell_bits;
3211 + u16 lun_max_bad_blocks;
3212 + u16 block_endurance;
3213 + u8 target_begin_valid_blocks;
3214 + u16 valid_block_endurance;
3215 + u8 page_program_num;
3216 + u8 partial_program_attr; /* obsolete */
3217 + u8 ecc_req;
3218 + /*
3219 + * 4-7 Reserved (0)
3220 + * 0-3 Number of plane address bits
3221 + */
3222 + u8 plane_address_bits;
3223 + /*
3224 + * 6-7 Reserved (0)
3225 + * 5 1 = lower bit XNOR block address restriction
3226 + * 4 1 = read cache supported
3227 + * 3 Address restrictions for cache operations
3228 + * 2 1 = program cache supported
3229 + * 1 1 = no block address restrictions
3230 + * 0 Overlapped / concurrent multi-plane support
3231 + */
3232 + u8 multi_plane_attr;
3233 + u8 ez_nand_support;
3234 + u8 reserved3[12];
3235 +
3236 + /* Electrical parameters block. 128 */
3237 + u8 io_pin_max_capacitance;
3238 + /*
3239 + * 6-15 Reserved (0)
3240 + * 5 1 = supports timing mode 5
3241 + * 4 1 = supports timing mode 4
3242 + * 3 1 = supports timing mode 3
3243 + * 2 1 = supports timing mode 2
3244 + * 1 1 = supports timing mode 1
3245 + * 0 1 = supports timing mode 0, shall be 1
3246 + */
3247 + u16 sdr_timing_mode;
3248 + u16 sdr_program_cache_timing_mode; /* obsolete */
3249 + u16 tPROG;
3250 + u16 tBERS;
3251 + u16 tR;
3252 + u16 tCCS;
3253 + /*
3254 + * 7 Reserved (0)
3255 + * 6 1 = supports NV-DDR2 timing mode 8
3256 + * 5 1 = supports NV-DDR timing mode 5
3257 + * 4 1 = supports NV-DDR timing mode 4
3258 + * 3 1 = supports NV-DDR timing mode 3
3259 + * 2 1 = supports NV-DDR timing mode 2
3260 + * 1 1 = supports NV-DDR timing mode 1
3261 + * 0 1 = supports NV-DDR timing mode 0
3262 + */
3263 + u8 nvddr_timing_mode;
3264 + /*
3265 + * 7 1 = supports timing mode 7
3266 + * 6 1 = supports timing mode 6
3267 + * 5 1 = supports timing mode 5
3268 + * 4 1 = supports timing mode 4
3269 + * 3 1 = supports timing mode 3
3270 + * 2 1 = supports timing mode 2
3271 + * 1 1 = supports timing mode 1
3272 + * 0 1 = supports timing mode 0
3273 + */
3274 + u8 nvddr2_timing_mode;
3275 + /*
3276 + * 4-7 Reserved (0)
3277 + * 3 1 = device requires Vpp enablement sequence
3278 + * 2 1 = device supports CLK stopped for data input
3279 + * 1 1 = typical capacitance
3280 + * 0 tCAD value to use
3281 + */
3282 + u8 nvddr_fetures;
3283 + u16 clk_pin_capacitance;
3284 + u16 io_pin_capacitance;
3285 + u16 input_pin_capacitance;
3286 + u8 input_pin_max_capacitance;
3287 + /*
3288 + * 3-7 Reserved (0)
3289 + * 2 1 = supports 18 Ohm drive strength
3290 + * 1 1 = supports 25 Ohm drive strength
3291 + * 0 1 = supports driver strength settings
3292 + */
3293 + u8 drive_strength;
3294 + u16 tR_multi_plane;
3295 + u16 tADL;
3296 + u16 tR_ez_nand;
3297 + /*
3298 + * 6-7 Reserved (0)
3299 + * 5 1 = external VREFQ required for >= 200 MT/s
3300 + * 4 1 = supports differential signaling for DQS
3301 + * 3 1 = supports differential signaling for RE_n
3302 + * 2 1 = supports ODT value of 30 Ohms
3303 + * 1 1 = supports matrix termination ODT
3304 + * 0 1 = supports self-termination ODT
3305 + */
3306 + u8 nvddr2_features;
3307 + u8 nvddr2_warmup_cycles;
3308 + u8 reserved4[4];
3309 +
3310 + /* vendor block. 164 */
3311 + u16 vendor_revision;
3312 + u8 vendor_spec[88];
3313 +
3314 + /* CRC for Parameter Page. 254 */
3315 + u16 crc16;
3316 +} __packed;
3317 +
3318 +/* JESD230-B */
3319 +struct nand_jedec_params {
3320 + /* Revision information and features block. 0 */
3321 + /*
3322 + * Byte 0:4Ah
3323 + * Byte 1:45h
3324 + * Byte 2:53h
3325 + * Byte 3:44h
3326 + */
3327 + u8 signature[4];
3328 + /*
3329 + * 3-15: Reserved (0)
3330 + * 2: 1 = supports parameter page revision 1.0 and standard revision 1.0
3331 + * 1: 1 = supports vendor specific parameter page
3332 + * 0: Reserved (0)
3333 + */
3334 + u16 revision;
3335 + /*
3336 + * 9-15 Reserved (0)
3337 + * 8: 1 = supports program page register clear enhancement
3338 + * 7: 1 = supports external Vpp
3339 + * 6: 1 = supports Toggle Mode DDR
3340 + * 5: 1 = supports Synchronous DDR
3341 + * 4: 1 = supports multi-plane read operations
3342 + * 3: 1 = supports multi-plane program and erase operations
3343 + * 2: 1 = supports non-sequential page programming
3344 + * 1: 1 = supports multiple LUN operations
3345 + * 0: 1 = supports 16-bit data bus width
3346 + */
3347 + u16 features;
3348 + /*
3349 + * 11-23: Reserved (0)
3350 + * 10: 1 = supports Synchronous Reset
3351 + * 9: 1 = supports Reset LUN (Primary)
3352 + * 8: 1 = supports Small Data Move
3353 + * 7: 1 = supports Multi-plane Copyback Program (Primary)
3354 + * 6: 1 = supports Random Data Out (Primary)
3355 + * 5: 1 = supports Read Unique ID
3356 + * 4: 1 = supports Copyback
3357 + * 3: 1 = supports Read Status Enhanced (Primary)
3358 + * 2: 1 = supports Get Features and Set Features
3359 + * 1: 1 = supports Read Cache commands
3360 + * 0: 1 = supports Page Cache Program command
3361 + */
3362 + u8 opt_cmds[3];
3363 + /*
3364 + * 8-15: Reserved (0)
3365 + * 7: 1 = supports secondary Read Status Enhanced
3366 + * 6: 1 = supports secondary Multi-plane Block Erase
3367 + * 5: 1 = supports secondary Multi-plane Copyback Program
3368 + * 4: 1 = supports secondary Multi-plane Program
3369 + * 3: 1 = supports secondary Random Data Out
3370 + * 2: 1 = supports secondary Multi-plane Copyback Read
3371 + * 1: 1 = supports secondary Multi-plane Read Cache Random
3372 + * 0: 1 = supports secondary Multi-plane Read
3373 + */
3374 + u16 secondary_cmds;
3375 + u8 param_page_num;
3376 + u8 reserved0[18];
3377 +
3378 + /* Manufacturer information block. 32*/
3379 + u8 manufacturer[12];
3380 + u8 model[20];
3381 + u8 jedec_id[6];
3382 + u8 reserved1[10];
3383 +
3384 + /* Memory organization block. 80 */
3385 + u32 page_size;
3386 + u16 spare_size;
3387 + u8 reserved2[6];
3388 + u32 block_pages;
3389 + u32 lun_blocks;
3390 + u8 lun_num;
3391 + /*
3392 + * 4-7 Column address cycles
3393 + * 0-3 Row address cycles
3394 + */
3395 + u8 addr_cycle;
3396 + u8 cell_bits;
3397 + u8 page_program_num;
3398 + /*
3399 + * 4-7 Reserved (0)
3400 + * 0-3 Number of plane address bits
3401 + */
3402 + u8 plane_address_bits;
3403 + /*
3404 + * 3-7: Reserved (0)
3405 + * 2: 1= read cache supported
3406 + * 1: 1 = program cache supported
3407 + * 0: 1= No multi-plane block address restrictions
3408 + */
3409 + u8 multi_plane_attr;
3410 + u8 reserved3[38];
3411 +
3412 + /* Electrical parameters block. 144 */
3413 + /*
3414 + * 6-15: Reserved (0)
3415 + * 5: 1 = supports 20 ns speed grade (50 MHz)
3416 + * 4: 1 = supports 25 ns speed grade (40 MHz)
3417 + * 3: 1 = supports 30 ns speed grade (~33 MHz)
3418 + * 2: 1 = supports 35 ns speed grade (~28 MHz)
3419 + * 1: 1 = supports 50 ns speed grade (20 MHz)
3420 + * 0: 1 = supports 100 ns speed grade (10 MHz)
3421 + */
3422 + u16 sdr_speed;
3423 + /*
3424 + * 8-15: Reserved (0)
3425 + * 7: 1 = supports 5 ns speed grade (200 MHz)
3426 + * 6: 1 = supports 6 ns speed grade (~166 MHz)
3427 + * 5: 1 = supports 7.5 ns speed grade (~133 MHz)
3428 + * 4: 1 = supports 10 ns speed grade (100 MHz)
3429 + * 3: 1 = supports 12 ns speed grade (~83 MHz)
3430 + * 2: 1 = supports 15 ns speed grade (~66 MHz)
3431 + * 1: 1 = supports 25 ns speed grade (40 MHz)
3432 + * 0: 1 = supports 30 ns speed grade (~33 MHz)
3433 + */
3434 + u16 toggle_ddr_speed;
3435 + /*
3436 + * 6-15: Reserved (0)
3437 + * 5: 1 = supports 10 ns speed grade (100 MHz)
3438 + * 4: 1 = supports 12 ns speed grade (~83 MHz)
3439 + * 3: 1 = supports 15 ns speed grade (~66 MHz)
3440 + * 2: 1 = supports 20 ns speed grade (50 MHz)
3441 + * 1: 1 = supports 30 ns speed grade (~33 MHz)
3442 + * 0: 1 = supports 50 ns speed grade (20 MHz)
3443 + */
3444 + u16 sync_ddr_speed;
3445 + u8 sdr_features;
3446 + u8 toggle_ddr_features;
3447 + /*
3448 + * 2-7: Reserved (0)
3449 + * 1: Device supports CK stopped for data input
3450 + * 0: tCAD value to use
3451 + */
3452 + u8 sync_ddr_features;
3453 + u16 tPROG;
3454 + u16 tBERS;
3455 + u16 tR;
3456 + u16 tR_multi_plane;
3457 + u16 tCCS;
3458 + u16 io_pin_capacitance;
3459 + u16 input_pin_capacitance;
3460 + u16 ck_pin_capacitance;
3461 + /*
3462 + * 3-7: Reserved (0)
3463 + * 2: 1 = supports 18 ohm drive strength
3464 + * 1: 1 = supports 25 ohm drive strength
3465 + * 0: 1 = supports 35ohm/50ohm drive strength
3466 + */
3467 + u8 drive_strength;
3468 + u16 tADL;
3469 + u8 reserved4[36];
3470 +
3471 + /* ECC and endurance block. 208 */
3472 + u8 target_begin_valid_blocks;
3473 + u16 valid_block_endurance;
3474 + /*
3475 + * Byte 0: Number of bits ECC correctability
3476 + * Byte 1: Codeword size
3477 + * Byte 2-3: Bad blocks maximum per LUN
3478 + * Byte 4-5: Block endurance
3479 + * Byte 6-7: Reserved (0)
3480 + */
3481 + u8 endurance_block0[8];
3482 + u8 endurance_block1[8];
3483 + u8 endurance_block2[8];
3484 + u8 endurance_block3[8];
3485 + u8 reserved5[29];
3486 +
3487 + /* Reserved. 272 */
3488 + u8 reserved6[148];
3489 +
3490 + /* Vendor specific block. 420 */
3491 + u16 vendor_revision;
3492 + u8 vendor_spec[88];
3493 +
3494 + /* CRC for Parameter Page. 510 */
3495 + u16 crc16;
3496 +} __packed;
3497 +
3498 +/* parallel nand io width */
3499 +enum nand_io_width {
3500 + NAND_IO8,
3501 + NAND_IO16
3502 +};
3503 +
3504 +/* all supported nand timing types */
3505 +enum nand_timing_type {
3506 + NAND_TIMING_SDR,
3507 + NAND_TIMING_SYNC_DDR,
3508 + NAND_TIMING_TOGGLE_DDR,
3509 + NAND_TIMING_NVDDR2
3510 +};
3511 +
3512 +/* nand basic commands */
3513 +struct nand_cmds {
3514 + short reset;
3515 + short read_id;
3516 + short read_status;
3517 + short read_param_page;
3518 + short set_feature;
3519 + short get_feature;
3520 + short read_1st;
3521 + short read_2nd;
3522 + short random_out_1st;
3523 + short random_out_2nd;
3524 + short program_1st;
3525 + short program_2nd;
3526 + short erase_1st;
3527 + short erase_2nd;
3528 + short read_cache;
3529 + short read_cache_last;
3530 + short program_cache;
3531 +};
3532 +
3533 +/*
3534 + * addressing for nand physical address
3535 + * @row_bit_start: row address start bit
3536 + * @block_bit_start: block address start bit
3537 + * @plane_bit_start: plane address start bit
3538 + * @lun_bit_start: lun address start bit
3539 + */
3540 +struct nand_addressing {
3541 + u8 row_bit_start;
3542 + u8 block_bit_start;
3543 + u8 plane_bit_start;
3544 + u8 lun_bit_start;
3545 +};
3546 +
3547 +/*
3548 + * nand operations status
3549 + * @array_busy: indicates device array operation busy
3550 + * @write_protect: indicates the device cannot be wrote or erased
3551 + * @erase_fail: indicates erase operation fail
3552 + * @program_fail: indicates program operation fail
3553 + */
3554 +struct nand_status {
3555 + u8 array_busy;
3556 + u8 write_protect;
3557 + u8 erase_fail;
3558 + u8 program_fail;
3559 +};
3560 +
3561 +/*
3562 + * nand endurance information
3563 + * @pe_cycle: max program/erase cycle for nand stored data stability
3564 + * @ecc_req: ecc strength required for the nand, measured per 1KB
3565 + * @max_bitflips: bitflips is ecc corrected bits,
3566 + * max_bitflips is the threshold for nand stored data stability
3567 + * if corrected bits is over max_bitflips, stored data must be moved
3568 + * to another good block
3569 + */
3570 +struct nand_endurance {
3571 + int pe_cycle;
3572 + int ecc_req;
3573 + int max_bitflips;
3574 +};
3575 +
3576 +/* wait for nand busy type */
3577 +enum nand_wait_type {
3578 + NAND_WAIT_IRQ,
3579 + NAND_WAIT_POLLING,
3580 + NAND_WAIT_TWHR2,
3581 +};
3582 +
3583 +/* each nand array operations time */
3584 +struct nand_array_timing {
3585 + u16 tRST;
3586 + u16 tWHR;
3587 + u16 tR;
3588 + u16 tRCBSY;
3589 + u16 tFEAT;
3590 + u16 tPROG;
3591 + u16 tPCBSY;
3592 + u16 tBERS;
3593 + u16 tDBSY;
3594 +};
3595 +
3596 +/* nand sdr interface timing required */
3597 +struct nand_sdr_timing {
3598 + u16 tREA;
3599 + u16 tREH;
3600 + u16 tCR;
3601 + u16 tRP;
3602 + u16 tWP;
3603 + u16 tWH;
3604 + u16 tWHR;
3605 + u16 tCLS;
3606 + u16 tALS;
3607 + u16 tCLH;
3608 + u16 tALH;
3609 + u16 tWC;
3610 + u16 tRC;
3611 +};
3612 +
3613 +/* nand onfi ddr (nvddr) interface timing required */
3614 +struct nand_onfi_timing {
3615 + u16 tCAD;
3616 + u16 tWPRE;
3617 + u16 tWPST;
3618 + u16 tWRCK;
3619 + u16 tDQSCK;
3620 + u16 tWHR;
3621 +};
3622 +
3623 +/* nand toggle ddr (toggle 1.0) interface timing required */
3624 +struct nand_toggle_timing {
3625 + u16 tCS;
3626 + u16 tCH;
3627 + u16 tCAS;
3628 + u16 tCAH;
3629 + u16 tCALS;
3630 + u16 tCALH;
3631 + u16 tWP;
3632 + u16 tWPRE;
3633 + u16 tWPST;
3634 + u16 tWPSTH;
3635 + u16 tCR;
3636 + u16 tRPRE;
3637 + u16 tRPST;
3638 + u16 tRPSTH;
3639 + u16 tCDQSS;
3640 + u16 tWHR;
3641 +};
3642 +
3643 +/* nand basic device information */
3644 +struct nand_device {
3645 + u8 *name;
3646 + u64 id;
3647 + u8 id_len;
3648 + u8 io_width;
3649 + u8 row_cycle;
3650 + u8 col_cycle;
3651 + u8 target_num;
3652 + u8 lun_num;
3653 + u8 plane_num;
3654 + int block_num;
3655 + int block_size;
3656 + int page_size;
3657 + int spare_size;
3658 + int min_program_pages;
3659 + struct nand_cmds *cmds;
3660 + struct nand_addressing *addressing;
3661 + struct nand_status *status;
3662 + struct nand_endurance *endurance;
3663 + struct nand_array_timing *array_timing;
3664 +};
3665 +
3666 +#define NAND_DEVICE(_name, _id, _id_len, _io_width, _row_cycle, \
3667 + _col_cycle, _target_num, _lun_num, _plane_num, \
3668 + _block_num, _block_size, _page_size, _spare_size, \
3669 + _min_program_pages, _cmds, _addressing, _status, \
3670 + _endurance, _array_timing) \
3671 +{ \
3672 + _name, _id, _id_len, _io_width, _row_cycle, \
3673 + _col_cycle, _target_num, _lun_num, _plane_num, \
3674 + _block_num, _block_size, _page_size, _spare_size, \
3675 + _min_program_pages, _cmds, _addressing, _status, \
3676 + _endurance, _array_timing \
3677 +}
3678 +
3679 +#define MAX_ID_NUM sizeof(u64)
3680 +
3681 +#define NAND_PACK_ID(id0, id1, id2, id3, id4, id5, id6, id7) \
3682 + ( \
3683 + id0 | id1 << 8 | id2 << 16 | id3 << 24 | \
3684 + (u64)id4 << 32 | (u64)id5 << 40 | \
3685 + (u64)id6 << 48 | (u64)id7 << 56 \
3686 + )
3687 +
3688 +#define NAND_UNPACK_ID(id, ids, len) \
3689 + do { \
3690 + int _i; \
3691 + for (_i = 0; _i < len; _i++) \
3692 + ids[_i] = id >> (_i << 3) & 0xff; \
3693 + } while (0)
3694 +
3695 +static inline int nand_block_pages(struct nand_device *device)
3696 +{
3697 + return div_down(device->block_size, device->page_size);
3698 +}
3699 +
3700 +static inline int nand_lun_blocks(struct nand_device *device)
3701 +{
3702 + return device->plane_num * device->block_num;
3703 +}
3704 +
3705 +static inline int nand_target_blocks(struct nand_device *device)
3706 +{
3707 + return device->lun_num * device->plane_num * device->block_num;
3708 +}
3709 +
3710 +static inline int nand_total_blocks(struct nand_device *device)
3711 +{
3712 + return device->target_num * device->lun_num * device->plane_num *
3713 + device->block_num;
3714 +}
3715 +
3716 +struct nand_device *nand_get_device(int index);
3717 +#endif /* __NAND_DEVICE_H__ */
3718 --- /dev/null
3719 +++ b/drivers/mtd/nandx/core/nfi.h
3720 @@ -0,0 +1,51 @@
3721 +/*
3722 + * Copyright (C) 2017 MediaTek Inc.
3723 + * Licensed under either
3724 + * BSD Licence, (see NOTICE for more details)
3725 + * GNU General Public License, version 2.0, (see NOTICE for more details)
3726 + */
3727 +
3728 +#ifndef __NFI_H__
3729 +#define __NFI_H__
3730 +
3731 +struct nfi_format {
3732 + int page_size;
3733 + int spare_size;
3734 + int ecc_req;
3735 +};
3736 +
3737 +struct nfi {
3738 + int sector_size;
3739 + int sector_spare_size;
3740 + int fdm_size; /*for sector*/
3741 + int fdm_ecc_size;
3742 + int ecc_strength;
3743 + int ecc_parity_size; /*for sector*/
3744 +
3745 + int (*select_chip)(struct nfi *nfi, int cs);
3746 + int (*set_format)(struct nfi *nfi, struct nfi_format *format);
3747 + int (*set_timing)(struct nfi *nfi, void *timing, int type);
3748 + int (*nfi_ctrl)(struct nfi *nfi, int cmd, void *args);
3749 +
3750 + int (*reset)(struct nfi *nfi);
3751 + int (*send_cmd)(struct nfi *nfi, short cmd);
3752 + int (*send_addr)(struct nfi *nfi, int col, int row,
3753 + int col_cycle, int row_cycle);
3754 + int (*trigger)(struct nfi *nfi);
3755 +
3756 + int (*write_page)(struct nfi *nfi, u8 *data, u8 *fdm);
3757 + int (*write_bytes)(struct nfi *nfi, u8 *data, int count);
3758 + int (*read_sectors)(struct nfi *nfi, u8 *data, u8 *fdm,
3759 + int sectors);
3760 + int (*read_bytes)(struct nfi *nfi, u8 *data, int count);
3761 +
3762 + int (*wait_ready)(struct nfi *nfi, int type, u32 timeout);
3763 +
3764 + int (*enable_randomizer)(struct nfi *nfi, u32 row, bool encode);
3765 + int (*disable_randomizer)(struct nfi *nfi);
3766 +};
3767 +
3768 +struct nfi *nfi_init(struct nfi_resource *res);
3769 +void nfi_exit(struct nfi *nfi);
3770 +
3771 +#endif /* __NFI_H__ */
3772 --- /dev/null
3773 +++ b/drivers/mtd/nandx/core/nfi/nfi_base.c
3774 @@ -0,0 +1,1357 @@
3775 +/*
3776 + * Copyright (C) 2017 MediaTek Inc.
3777 + * Licensed under either
3778 + * BSD Licence, (see NOTICE for more details)
3779 + * GNU General Public License, version 2.0, (see NOTICE for more details)
3780 + */
3781 +
3782 +/**
3783 + * nfi_base.c - the base logic for nfi to access nand flash
3784 + *
3785 + * slc/mlc/tlc could use the same code to access nand;
3786 + * of course, there is still some work to do.
3787 + * even for spi nand, there should be a chance to integrate the code together
3788 + */
3789 +
3790 +#include "nandx_util.h"
3791 +#include "nandx_core.h"
3792 +#include "../nfi.h"
3793 +#include "../nand_device.h"
3794 +#include "nfi_regs.h"
3795 +#include "nfiecc.h"
3796 +#include "nfi_base.h"
3797 +
3798 +static const int spare_size_mt7622[] = {
3799 + 16, 26, 27, 28
3800 +};
3801 +
3802 +#define RAND_SEED_SHIFT(op) \
3803 + ((op) == RAND_ENCODE ? ENCODE_SEED_SHIFT : DECODE_SEED_SHIFT)
3804 +#define RAND_EN(op) \
3805 + ((op) == RAND_ENCODE ? RAN_ENCODE_EN : RAN_DECODE_EN)
3806 +
3807 +#define SS_SEED_NUM 128
3808 +static u16 ss_randomizer_seed[SS_SEED_NUM] = {
3809 + 0x576A, 0x05E8, 0x629D, 0x45A3, 0x649C, 0x4BF0, 0x2342, 0x272E,
3810 + 0x7358, 0x4FF3, 0x73EC, 0x5F70, 0x7A60, 0x1AD8, 0x3472, 0x3612,
3811 + 0x224F, 0x0454, 0x030E, 0x70A5, 0x7809, 0x2521, 0x484F, 0x5A2D,
3812 + 0x492A, 0x043D, 0x7F61, 0x3969, 0x517A, 0x3B42, 0x769D, 0x0647,
3813 + 0x7E2A, 0x1383, 0x49D9, 0x07B8, 0x2578, 0x4EEC, 0x4423, 0x352F,
3814 + 0x5B22, 0x72B9, 0x367B, 0x24B6, 0x7E8E, 0x2318, 0x6BD0, 0x5519,
3815 + 0x1783, 0x18A7, 0x7B6E, 0x7602, 0x4B7F, 0x3648, 0x2C53, 0x6B99,
3816 + 0x0C23, 0x67CF, 0x7E0E, 0x4D8C, 0x5079, 0x209D, 0x244A, 0x747B,
3817 + 0x350B, 0x0E4D, 0x7004, 0x6AC3, 0x7F3E, 0x21F5, 0x7A15, 0x2379,
3818 + 0x1517, 0x1ABA, 0x4E77, 0x15A1, 0x04FA, 0x2D61, 0x253A, 0x1302,
3819 + 0x1F63, 0x5AB3, 0x049A, 0x5AE8, 0x1CD7, 0x4A00, 0x30C8, 0x3247,
3820 + 0x729C, 0x5034, 0x2B0E, 0x57F2, 0x00E4, 0x575B, 0x6192, 0x38F8,
3821 + 0x2F6A, 0x0C14, 0x45FC, 0x41DF, 0x38DA, 0x7AE1, 0x7322, 0x62DF,
3822 + 0x5E39, 0x0E64, 0x6D85, 0x5951, 0x5937, 0x6281, 0x33A1, 0x6A32,
3823 + 0x3A5A, 0x2BAC, 0x743A, 0x5E74, 0x3B2E, 0x7EC7, 0x4FD2, 0x5D28,
3824 + 0x751F, 0x3EF8, 0x39B1, 0x4E49, 0x746B, 0x6EF6, 0x44BE, 0x6DB7
3825 +};
3826 +
3827 +#if 0
3828 +static void dump_register(void *regs)
3829 +{
3830 + int i;
3831 +
3832 + pr_info("registers:\n");
3833 + for (i = 0; i < 0x600; i += 0x10) {
3834 + pr_info(" address 0x%X : %X %X %X %X\n",
3835 + (u32)((unsigned long)regs + i),
3836 + (u32)readl(regs + i),
3837 + (u32)readl(regs + i + 0x4),
3838 + (u32)readl(regs + i + 0x8),
3839 + (u32)readl(regs + i + 0xC));
3840 + }
3841 +}
3842 +#endif
3843 +
3844 +static int nfi_enable_randomizer(struct nfi *nfi, u32 row, bool encode)
3845 +{
3846 + struct nfi_base *nb = nfi_to_base(nfi);
3847 + enum randomizer_op op = RAND_ENCODE;
3848 + void *regs = nb->res.nfi_regs;
3849 + u32 val;
3850 +
3851 + if (!encode)
3852 + op = RAND_DECODE;
3853 +
3854 + /* randomizer type and reseed type setup */
3855 + val = readl(regs + NFI_CNFG);
3856 + val |= CNFG_RAND_SEL | CNFG_RESEED_SEC_EN;
3857 + writel(val, regs + NFI_CNFG);
3858 +
3859 + /* randomizer seed and type setup */
3860 + val = ss_randomizer_seed[row % SS_SEED_NUM] & RAN_SEED_MASK;
3861 + val <<= RAND_SEED_SHIFT(op);
3862 + val |= RAND_EN(op);
3863 + writel(val, regs + NFI_RANDOM_CNFG);
3864 +
3865 + return 0;
3866 +}
3867 +
3868 +static int nfi_disable_randomizer(struct nfi *nfi)
3869 +{
3870 + struct nfi_base *nb = nfi_to_base(nfi);
3871 +
3872 + writel(0, nb->res.nfi_regs + NFI_RANDOM_CNFG);
3873 +
3874 + return 0;
3875 +}
3876 +
3877 +static int nfi_irq_handler(int irq, void *data)
3878 +{
3879 + struct nfi_base *nb = (struct nfi_base *) data;
3880 + void *regs = nb->res.nfi_regs;
3881 + u16 status, en;
3882 +
3883 + status = readw(regs + NFI_INTR_STA);
3884 + en = readw(regs + NFI_INTR_EN);
3885 +
3886 + if (!(status & en))
3887 + return NAND_IRQ_NONE;
3888 +
3889 + writew(~status & en, regs + NFI_INTR_EN);
3890 +
3891 + nandx_event_complete(nb->done);
3892 +
3893 + return NAND_IRQ_HANDLED;
3894 +}
3895 +
3896 +static int nfi_select_chip(struct nfi *nfi, int cs)
3897 +{
3898 + struct nfi_base *nb = nfi_to_base(nfi);
3899 +
3900 + writel(cs, nb->res.nfi_regs + NFI_CSEL);
3901 +
3902 + return 0;
3903 +}
3904 +
3905 +static inline void set_op_mode(void *regs, u32 mode)
3906 +{
3907 + u32 val = readl(regs + NFI_CNFG);
3908 +
3909 + val &= ~CNFG_OP_MODE_MASK;
3910 + val |= mode;
3911 +
3912 + writel(val, regs + NFI_CNFG);
3913 +}
3914 +
3915 +static int nfi_reset(struct nfi *nfi)
3916 +{
3917 + struct nfi_base *nb = nfi_to_base(nfi);
3918 + void *regs = nb->res.nfi_regs;
3919 + int ret, val;
3920 +
3921 +	/* Reset the NFI to reset all registers and force the NFI
3922 +	 * master to be terminated early
3923 + */
3924 + writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
3925 +
3926 + /* check state of NFI internal FSM and NAND interface FSM */
3927 + ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA, val,
3928 + !(val & MASTER_BUS_BUSY),
3929 + 10, NFI_TIMEOUT);
3930 + if (ret)
3931 + pr_info("nfi reset timeout...\n");
3932 +
3933 + writel(CON_FIFO_FLUSH | CON_NFI_RST, regs + NFI_CON);
3934 + writew(STAR_DE, regs + NFI_STRDATA);
3935 +
3936 + return ret;
3937 +}
3938 +
3939 +static void bad_mark_swap(struct nfi *nfi, u8 *buf, u8 *fdm)
3940 +{
3941 + struct nfi_base *nb = nfi_to_base(nfi);
3942 + u32 start_sector = div_down(nb->col, nfi->sector_size);
3943 + u32 data_mark_pos;
3944 + u8 temp;
3945 +
3946 + /* raw access, no need to do swap. */
3947 + if (!nb->ecc_en)
3948 + return;
3949 +
3950 + if (!buf || !fdm)
3951 + return;
3952 +
3953 + if (nb->bad_mark_ctrl.sector < start_sector ||
3954 + nb->bad_mark_ctrl.sector > start_sector + nb->rw_sectors)
3955 + return;
3956 +
3957 + data_mark_pos = nb->bad_mark_ctrl.position +
3958 + (nb->bad_mark_ctrl.sector - start_sector) *
3959 + nfi->sector_size;
3960 +
3961 + temp = *fdm;
3962 + *fdm = *(buf + data_mark_pos);
3963 + *(buf + data_mark_pos) = temp;
3964 +}
3965 +
3966 +static u8 *fdm_shift(struct nfi *nfi, u8 *fdm, int sector)
3967 +{
3968 + struct nfi_base *nb = nfi_to_base(nfi);
3969 + u8 *pos;
3970 +
3971 + if (!fdm)
3972 + return NULL;
3973 +
3974 + /* map the sector's FDM data to free oob:
3975 + * the beginning of the oob area stores the FDM data of bad mark sectors
3976 + */
3977 + if (sector < nb->bad_mark_ctrl.sector)
3978 + pos = fdm + (sector + 1) * nfi->fdm_size;
3979 + else if (sector == nb->bad_mark_ctrl.sector)
3980 + pos = fdm;
3981 + else
3982 + pos = fdm + sector * nfi->fdm_size;
3983 +
3984 + return pos;
3985 +
3986 +}
3987 +
3988 +static void set_bad_mark_ctrl(struct nfi_base *nb)
3989 +{
3990 + int temp, page_size = nb->format.page_size;
3991 +
3992 + nb->bad_mark_ctrl.bad_mark_swap = bad_mark_swap;
3993 + nb->bad_mark_ctrl.fdm_shift = fdm_shift;
3994 +
3995 + temp = nb->nfi.sector_size + nb->nfi.sector_spare_size;
3996 + nb->bad_mark_ctrl.sector = div_down(page_size, temp);
3997 + nb->bad_mark_ctrl.position = reminder(page_size, temp);
3998 +}
3999 +
4000 +/* NOTE: check whether page_size is valid in the future */
4001 +static int setup_format(struct nfi_base *nb, int spare_idx)
4002 +{
4003 + struct nfi *nfi = &nb->nfi;
4004 + u32 page_size = nb->format.page_size;
4005 + u32 val;
4006 +
4007 + switch (page_size) {
4008 + case 512:
4009 + val = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
4010 + break;
4011 +
4012 + case KB(2):
4013 + if (nfi->sector_size == 512)
4014 + val = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
4015 + else
4016 + val = PAGEFMT_512_2K;
4017 +
4018 + break;
4019 +
4020 + case KB(4):
4021 + if (nfi->sector_size == 512)
4022 + val = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
4023 + else
4024 + val = PAGEFMT_2K_4K;
4025 +
4026 + break;
4027 +
4028 + case KB(8):
4029 + if (nfi->sector_size == 512)
4030 + val = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
4031 + else
4032 + val = PAGEFMT_4K_8K;
4033 +
4034 + break;
4035 +
4036 + case KB(16):
4037 + val = PAGEFMT_8K_16K;
4038 + break;
4039 +
4040 + default:
4041 + pr_info("invalid page len: %d\n", page_size);
4042 + return -EINVAL;
4043 + }
4044 +
4045 + val |= spare_idx << PAGEFMT_SPARE_SHIFT;
4046 + val |= nfi->fdm_size << PAGEFMT_FDM_SHIFT;
4047 + val |= nfi->fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
4048 + writel(val, nb->res.nfi_regs + NFI_PAGEFMT);
4049 +
4050 + if (nb->custom_sector_en) {
4051 + val = nfi->sector_spare_size + nfi->sector_size;
4052 + val |= SECCUS_SIZE_EN;
4053 + writel(val, nb->res.nfi_regs + NFI_SECCUS_SIZE);
4054 + }
4055 +
4056 + return 0;
4057 +}
4058 +
4059 +static int adjust_spare(struct nfi_base *nb, int *spare)
4060 +{
4061 + int multi = nb->nfi.sector_size == 512 ? 1 : 2;
4062 + int i, count = nb->caps->spare_size_num;
4063 +
4064 + if (*spare >= nb->caps->spare_size[count - 1] * multi) {
4065 + *spare = nb->caps->spare_size[count - 1] * multi;
4066 + return count - 1;
4067 + }
4068 +
4069 + if (*spare < nb->caps->spare_size[0] * multi)
4070 + return -EINVAL;
4071 +
4072 + for (i = 1; i < count; i++) {
4073 + if (*spare < nb->caps->spare_size[i] * multi) {
4074 + *spare = nb->caps->spare_size[i - 1] * multi;
4075 + return i - 1;
4076 + }
4077 + }
4078 +
4079 + return -EINVAL;
4080 +}
4081 +
4082 +static int nfi_set_format(struct nfi *nfi, struct nfi_format *format)
4083 +{
4084 + struct nfi_base *nb = nfi_to_base(nfi);
4085 + struct nfiecc *ecc = nb->ecc;
4086 + int ecc_strength = format->ecc_req;
4087 + int min_fdm, min_ecc, max_ecc;
4088 + u32 temp, page_sectors;
4089 + int spare_idx = 0;
4090 +
4091 + if (!nb->buf) {
4092 +#if NANDX_BULK_IO_USE_DRAM
4093 + nb->buf = NANDX_NFI_BUF_ADDR;
4094 +#else
4095 + nb->buf = mem_alloc(1, format->page_size + format->spare_size);
4096 +#endif
4097 + if (!nb->buf)
4098 + return -ENOMEM;
4099 + }
4100 +
4101 + nb->format = *format;
4102 +
4103 +	/* ToBeFixed: for spi nand, the sector size is now 512;
4104 +	 * it should be the same as slc.
4105 + */
4106 + nfi->sector_size = 512;
4107 + /* format->ecc_req is the requirement per 1KB */
4108 + ecc_strength >>= 1;
4109 +
4110 + page_sectors = div_down(format->page_size, nfi->sector_size);
4111 + nfi->sector_spare_size = div_down(format->spare_size, page_sectors);
4112 +
4113 + if (!nb->custom_sector_en) {
4114 + spare_idx = adjust_spare(nb, &nfi->sector_spare_size);
4115 + if (spare_idx < 0)
4116 + return -EINVAL;
4117 + }
4118 +
4119 + /* calculate ecc strength and fdm size */
4120 + temp = (nfi->sector_spare_size - nb->caps->max_fdm_size) * 8;
4121 + min_ecc = div_down(temp, nb->caps->ecc_parity_bits);
4122 + min_ecc = ecc->adjust_strength(ecc, min_ecc);
4123 + if (min_ecc < 0)
4124 + return -EINVAL;
4125 +
4126 + temp = div_up(nb->res.min_oob_req, page_sectors);
4127 + temp = (nfi->sector_spare_size - temp) * 8;
4128 + max_ecc = div_down(temp, nb->caps->ecc_parity_bits);
4129 + max_ecc = ecc->adjust_strength(ecc, max_ecc);
4130 + if (max_ecc < 0)
4131 + return -EINVAL;
4132 +
4133 + temp = div_up(temp * nb->caps->ecc_parity_bits, 8);
4134 + temp = nfi->sector_spare_size - temp;
4135 + min_fdm = min_t(u32, temp, (u32)nb->caps->max_fdm_size);
4136 +
4137 + if (ecc_strength > max_ecc) {
4138 + pr_info("required ecc strength %d, max supported %d\n",
4139 + ecc_strength, max_ecc);
4140 + nfi->ecc_strength = max_ecc;
4141 + nfi->fdm_size = min_fdm;
4142 + } else if (format->ecc_req < min_ecc) {
4143 + nfi->ecc_strength = min_ecc;
4144 + nfi->fdm_size = nb->caps->max_fdm_size;
4145 + } else {
4146 + ecc_strength = ecc->adjust_strength(ecc, ecc_strength);
4147 + if (ecc_strength < 0)
4148 + return -EINVAL;
4149 +
4150 + nfi->ecc_strength = ecc_strength;
4151 + temp = div_up(ecc_strength * nb->caps->ecc_parity_bits, 8);
4152 + nfi->fdm_size = nfi->sector_spare_size - temp;
4153 + }
4154 +
4155 + nb->page_sectors = div_down(format->page_size, nfi->sector_size);
4156 +
4157 +	/* some ICs have a fixed fdm_ecc_size; if not assigned, set to fdm_size */
4158 + nfi->fdm_ecc_size = nb->caps->fdm_ecc_size ? : nfi->fdm_size;
4159 +
4160 + nfi->ecc_parity_size = div_up(nfi->ecc_strength *
4161 + nb->caps->ecc_parity_bits,
4162 + 8);
4163 + set_bad_mark_ctrl(nb);
4164 +
4165 + pr_debug("sector_size: %d\n", nfi->sector_size);
4166 + pr_debug("sector_spare_size: %d\n", nfi->sector_spare_size);
4167 + pr_debug("fdm_size: %d\n", nfi->fdm_size);
4168 + pr_debug("fdm_ecc_size: %d\n", nfi->fdm_ecc_size);
4169 + pr_debug("ecc_strength: %d\n", nfi->ecc_strength);
4170 + pr_debug("ecc_parity_size: %d\n", nfi->ecc_parity_size);
4171 +
4172 + return setup_format(nb, spare_idx);
4173 +}
4174 +
4175 +static int nfi_ctrl(struct nfi *nfi, int cmd, void *args)
4176 +{
4177 + struct nfi_base *nb = nfi_to_base(nfi);
4178 + int ret = 0;
4179 +
4180 + switch (cmd) {
4181 + case NFI_CTRL_DMA:
4182 + nb->dma_en = *(bool *)args;
4183 + break;
4184 +
4185 + case NFI_CTRL_AUTOFORMAT:
4186 + nb->auto_format = *(bool *)args;
4187 + break;
4188 +
4189 + case NFI_CTRL_NFI_IRQ:
4190 + nb->nfi_irq_en = *(bool *)args;
4191 + break;
4192 +
4193 + case NFI_CTRL_PAGE_IRQ:
4194 + nb->page_irq_en = *(bool *)args;
4195 + break;
4196 +
4197 + case NFI_CTRL_BAD_MARK_SWAP:
4198 + nb->bad_mark_swap_en = *(bool *)args;
4199 + break;
4200 +
4201 + case NFI_CTRL_ECC:
4202 + nb->ecc_en = *(bool *)args;
4203 + break;
4204 +
4205 + case NFI_CTRL_ECC_MODE:
4206 + nb->ecc_mode = *(enum nfiecc_mode *)args;
4207 + break;
4208 +
4209 + case NFI_CTRL_ECC_CLOCK:
4210 + /* NOTE: it seems there's nothing that needs to be done;
4211 + * if a new IC needs it, just add that logic
4212 + */
4213 + nb->ecc_clk_en = *(bool *)args;
4214 + break;
4215 +
4216 + case NFI_CTRL_ECC_IRQ:
4217 + nb->ecc_irq_en = *(bool *)args;
4218 + break;
4219 +
4220 + case NFI_CTRL_ECC_DECODE_MODE:
4221 + nb->ecc_deccon = *(enum nfiecc_deccon *)args;
4222 + break;
4223 +
4224 + default:
4225 + pr_info("invalid arguments.\n");
4226 + ret = -EOPNOTSUPP;
4227 + break;
4228 + }
4229 +
4230 + pr_debug("%s: set cmd(%d) to %d\n", __func__, cmd, *(int *)args);
4231 + return ret;
4232 +}
4233 +
4234 +static int nfi_send_cmd(struct nfi *nfi, short cmd)
4235 +{
4236 + struct nfi_base *nb = nfi_to_base(nfi);
4237 + void *regs = nb->res.nfi_regs;
4238 + int ret;
4239 + u32 val;
4240 +
4241 + pr_debug("%s: cmd 0x%x\n", __func__, cmd);
4242 +
4243 + if (cmd < 0)
4244 + return -EINVAL;
4245 +
4246 + set_op_mode(regs, nb->op_mode);
4247 +
4248 + writel(cmd, regs + NFI_CMD);
4249 +
4250 + ret = readl_poll_timeout_atomic(regs + NFI_STA,
4251 + val, !(val & STA_CMD),
4252 + 5, NFI_TIMEOUT);
4253 + if (ret)
4254 + pr_info("send cmd 0x%x timeout\n", cmd);
4255 +
4256 + return ret;
4257 +}
4258 +
4259 +static int nfi_send_addr(struct nfi *nfi, int col, int row,
4260 + int col_cycle, int row_cycle)
4261 +{
4262 + struct nfi_base *nb = nfi_to_base(nfi);
4263 + void *regs = nb->res.nfi_regs;
4264 + int ret;
4265 + u32 val;
4266 +
4267 + pr_debug("%s: col 0x%x, row 0x%x, col_cycle 0x%x, row_cycle 0x%x\n",
4268 + __func__, col, row, col_cycle, row_cycle);
4269 +
4270 + nb->col = col;
4271 + nb->row = row;
4272 +
4273 + writel(col, regs + NFI_COLADDR);
4274 + writel(row, regs + NFI_ROWADDR);
4275 + writel(col_cycle | (row_cycle << ROW_SHIFT), regs + NFI_ADDRNOB);
4276 +
4277 + ret = readl_poll_timeout_atomic(regs + NFI_STA,
4278 + val, !(val & STA_ADDR),
4279 + 5, NFI_TIMEOUT);
4280 + if (ret)
4281 + pr_info("send address timeout\n");
4282 +
4283 + return ret;
4284 +}
4285 +
4286 +static int nfi_trigger(struct nfi *nfi)
4287 +{
4288 + /* Nothing need to do. */
4289 + return 0;
4290 +}
4291 +
4292 +static inline int wait_io_ready(void *regs)
4293 +{
4294 + u32 val;
4295 + int ret;
4296 +
4297 + ret = readl_poll_timeout_atomic(regs + NFI_PIO_DIRDY,
4298 + val, val & PIO_DI_RDY,
4299 + 2, NFI_TIMEOUT);
4300 + if (ret)
4301 + pr_info("wait io ready timeout\n");
4302 +
4303 + return ret;
4304 +}
4305 +
4306 +static int wait_ready_irq(struct nfi_base *nb, u32 timeout)
4307 +{
4308 + void *regs = nb->res.nfi_regs;
4309 + int ret;
4310 + u32 val;
4311 +
4312 + writel(0xf1, regs + NFI_CNRNB);
4313 + nandx_event_init(nb->done);
4314 +
4315 + writel(INTR_BUSY_RETURN_EN, (void *)(regs + NFI_INTR_EN));
4316 +
4317 + /**
4318 + * check if nand is already ready, to
4319 + * avoid an issue caused by a missing irq-event.
4320 + */
4321 + val = readl(regs + NFI_STA);
4322 + if (val & STA_BUSY2READY) {
4323 + readl(regs + NFI_INTR_STA);
4324 + writel(0, (void *)(regs + NFI_INTR_EN));
4325 + return 0;
4326 + }
4327 +
4328 + ret = nandx_event_wait_complete(nb->done, timeout);
4329 +
4330 + writew(0, regs + NFI_CNRNB);
4331 + return ret;
4332 +}
4333 +
4334 +static void wait_ready_twhr2(struct nfi_base *nb, u32 timeout)
4335 +{
4336 + /* NOTE: this for tlc */
4337 +}
4338 +
4339 +static int wait_ready_poll(struct nfi_base *nb, u32 timeout)
4340 +{
4341 + void *regs = nb->res.nfi_regs;
4342 + int ret;
4343 + u32 val;
4344 +
4345 + writel(0x21, regs + NFI_CNRNB);
4346 + ret = readl_poll_timeout_atomic(regs + NFI_STA, val,
4347 + val & STA_BUSY2READY,
4348 + 2, timeout);
4349 + writew(0, regs + NFI_CNRNB);
4350 +
4351 + return ret;
4352 +}
4353 +
4354 +static int nfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
4355 +{
4356 + struct nfi_base *nb = nfi_to_base(nfi);
4357 + int ret;
4358 +
4359 + switch (type) {
4360 + case NAND_WAIT_IRQ:
4361 + if (nb->nfi_irq_en)
4362 + ret = wait_ready_irq(nb, timeout);
4363 + else
4364 + ret = -EINVAL;
4365 +
4366 + break;
4367 +
4368 + case NAND_WAIT_POLLING:
4369 + ret = wait_ready_poll(nb, timeout);
4370 + break;
4371 +
4372 + case NAND_WAIT_TWHR2:
4373 + wait_ready_twhr2(nb, timeout);
4374 + ret = 0;
4375 + break;
4376 +
4377 + default:
4378 + ret = -EINVAL;
4379 + break;
4380 + }
4381 +
4382 + if (ret)
4383 + pr_info("%s: type 0x%x, timeout 0x%x\n",
4384 + __func__, type, timeout);
4385 +
4386 + return ret;
4387 +}
4388 +
4389 +static int enable_ecc_decode(struct nfi_base *nb, int sectors)
4390 +{
4391 + struct nfi *nfi = &nb->nfi;
4392 + struct nfiecc *ecc = nb->ecc;
4393 +
4394 + ecc->config.op = ECC_DECODE;
4395 + ecc->config.mode = nb->ecc_mode;
4396 + ecc->config.deccon = nb->ecc_deccon;
4397 + ecc->config.sectors = sectors;
4398 + ecc->config.len = nfi->sector_size + nfi->fdm_ecc_size;
4399 + ecc->config.strength = nfi->ecc_strength;
4400 +
4401 + return ecc->enable(ecc);
4402 +}
4403 +
4404 +static int enable_ecc_encode(struct nfi_base *nb)
4405 +{
4406 + struct nfiecc *ecc = nb->ecc;
4407 + struct nfi *nfi = &nb->nfi;
4408 +
4409 + ecc->config.op = ECC_ENCODE;
4410 + ecc->config.mode = nb->ecc_mode;
4411 + ecc->config.len = nfi->sector_size + nfi->fdm_ecc_size;
4412 + ecc->config.strength = nfi->ecc_strength;
4413 +
4414 + return ecc->enable(ecc);
4415 +}
4416 +
4417 +static void read_fdm(struct nfi_base *nb, u8 *fdm, int start_sector,
4418 + int sectors)
4419 +{
4420 + void *regs = nb->res.nfi_regs;
4421 + int j, i = start_sector;
4422 + u32 vall, valm;
4423 + u8 *buf = fdm;
4424 +
4425 + for (; i < start_sector + sectors; i++) {
4426 + if (nb->bad_mark_swap_en)
4427 + buf = nb->bad_mark_ctrl.fdm_shift(&nb->nfi, fdm, i);
4428 +
4429 + vall = readl(regs + NFI_FDML(i));
4430 + valm = readl(regs + NFI_FDMM(i));
4431 +
4432 + for (j = 0; j < nb->nfi.fdm_size; j++)
4433 + *buf++ = (j >= 4 ? valm : vall) >> ((j & 3) << 3);
4434 + }
4435 +}
4436 +
4437 +static void write_fdm(struct nfi_base *nb, u8 *fdm)
4438 +{
4439 + struct nfi *nfi = &nb->nfi;
4440 + void *regs = nb->res.nfi_regs;
4441 + u32 vall, valm;
4442 + int i, j;
4443 + u8 *buf = fdm;
4444 +
4445 + for (i = 0; i < nb->page_sectors; i++) {
4446 + if (nb->bad_mark_swap_en)
4447 + buf = nb->bad_mark_ctrl.fdm_shift(nfi, fdm, i);
4448 +
4449 + vall = 0;
4450 + for (j = 0; j < 4; j++)
4451 + vall |= (j < nfi->fdm_size ? *buf++ : 0xff) << (j * 8);
4452 + writel(vall, regs + NFI_FDML(i));
4453 +
4454 + valm = 0;
4455 + for (j = 0; j < 4; j++)
4456 + valm |= (j < nfi->fdm_size ? *buf++ : 0xff) << (j * 8);
4457 + writel(valm, regs + NFI_FDMM(i));
4458 + }
4459 +}
4460 +
4461 +/* NOTE: pio not use auto format */
4462 +static int pio_rx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
4463 + int sectors)
4464 +{
4465 + struct nfiecc_status ecc_status;
4466 + struct nfi *nfi = &nb->nfi;
4467 + void *regs = nb->res.nfi_regs;
4468 + u32 val, bitflips = 0;
4469 + int len, ret, i;
4470 + u8 *buf;
4471 +
4472 + val = readl(regs + NFI_CNFG) | CNFG_BYTE_RW;
4473 + writel(val, regs + NFI_CNFG);
4474 +
4475 + len = nfi->sector_size + nfi->sector_spare_size;
4476 + len *= sectors;
4477 +
4478 + for (i = 0; i < len; i++) {
4479 + ret = wait_io_ready(regs);
4480 + if (ret)
4481 + return ret;
4482 +
4483 + nb->buf[i] = readb(regs + NFI_DATAR);
4484 + }
4485 +
4486 + /* TODO: do error handle for autoformat setting of pio */
4487 + if (nb->ecc_en) {
4488 + for (i = 0; i < sectors; i++) {
4489 + buf = nb->buf + i * (nfi->sector_size +
4490 + nfi->sector_spare_size);
4491 + ret = nb->ecc->correct_data(nb->ecc, &ecc_status,
4492 + buf, i);
4493 + if (data)
4494 + memcpy(data + i * nfi->sector_size,
4495 + buf, nfi->sector_size);
4496 + if (fdm)
4497 + memcpy(fdm + i * nfi->fdm_size,
4498 + buf + nfi->sector_size, nfi->fdm_size);
4499 + if (ret) {
4500 + ret = nb->ecc->decode_status(nb->ecc, i, 1);
4501 + if (ret < 0)
4502 + return ret;
4503 +
4504 + bitflips = max_t(int, (int)bitflips, ret);
4505 + }
4506 + }
4507 +
4508 + return bitflips;
4509 + }
4510 +
4511 + /* raw read: only data is not null, and its length should be $len */
4512 + if (data)
4513 + memcpy(data, nb->buf, len);
4514 +
4515 + return 0;
4516 +}
4517 +
4518 +static int pio_tx_data(struct nfi_base *nb, u8 *data, u8 *fdm,
4519 + int sectors)
4520 +{
4521 + struct nfi *nfi = &nb->nfi;
4522 + void *regs = nb->res.nfi_regs;
4523 + u32 i, val;
4524 + int len, ret;
4525 +
4526 + val = readw(regs + NFI_CNFG) | CNFG_BYTE_RW;
4527 + writew(val, regs + NFI_CNFG);
4528 +
4529 + len = nb->ecc_en ? nfi->sector_size :
4530 + nfi->sector_size + nfi->sector_spare_size;
4531 + len *= sectors;
4532 +
4533 + /* data shouldn't be null,
4534 + * and if ecc is enabled, fdm has been written in the prepare process
4535 + */
4536 + for (i = 0; i < len; i++) {
4537 + ret = wait_io_ready(regs);
4538 + if (ret)
4539 + return ret;
4540 + writeb(data[i], regs + NFI_DATAW);
4541 + }
4542 +
4543 + return 0;
4544 +}
4545 +
4546 +static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
4547 + int sectors)
4548 +{
4549 + u32 empty = readl(nb->res.nfi_regs + NFI_STA) & STA_EMP_PAGE;
4550 +
4551 + if (empty) {
4552 + pr_info("empty page!\n");
4553 + return true;
4554 + }
4555 +
4556 + return false;
4557 +}
4558 +
4559 +static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
4560 + u8 *fdm, bool read)
4561 +{
4562 + void *regs = nb->res.nfi_regs;
4563 + u32 len = nb->nfi.sector_size * sectors;
4564 + bool irq_en = nb->dma_en && nb->nfi_irq_en;
4565 + void *dma_addr;
4566 + u32 val;
4567 + int ret;
4568 +
4569 + nb->rw_sectors = sectors;
4570 +
4571 + if (irq_en) {
4572 + nandx_event_init(nb->done);
4573 + writel(INTR_AHB_DONE_EN, regs + NFI_INTR_EN);
4574 + }
4575 +
4576 + val = readw(regs + NFI_CNFG);
4577 + if (read)
4578 + val |= CNFG_READ_EN;
4579 + else
4580 + val &= ~CNFG_READ_EN;
4581 +
4582 + /* as designed, auto format is enabled when ecc is enabled */
4583 + if (nb->ecc_en) {
4584 + val |= CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
4585 +
4586 + if (read)
4587 + ret = enable_ecc_decode(nb, sectors);
4588 + else
4589 + ret = enable_ecc_encode(nb);
4590 +
4591 + if (ret) {
4592 + pr_info("%s: ecc enable %s fail!\n", __func__,
4593 + read ? "decode" : "encode");
4594 + return ret;
4595 + }
4596 + } else {
4597 + val &= ~(CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN);
4598 + }
4599 +
4600 + if (!read && nb->bad_mark_swap_en)
4601 + nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
4602 +
4603 + if (!nb->ecc_en && read)
4604 + len += sectors * nb->nfi.sector_spare_size;
4605 +
4606 + if (nb->dma_en) {
4607 + val |= CNFG_DMA_BURST_EN | CNFG_AHB;
4608 +
4609 + if (read) {
4610 + dma_addr = (void *)(unsigned long)nandx_dma_map(
4611 + nb->res.dev, nb->buf,
4612 + (u64)len, NDMA_FROM_DEV);
4613 + } else {
4614 + memcpy(nb->buf, data, len);
4615 + dma_addr = (void *)(unsigned long)nandx_dma_map(
4616 + nb->res.dev, nb->buf,
4617 + (u64)len, NDMA_TO_DEV);
4618 + }
4619 +
4620 + writel((unsigned long)dma_addr, (void *)regs + NFI_STRADDR);
4621 +
4622 + nb->access_len = len;
4623 + nb->dma_addr = dma_addr;
4624 + }
4625 +
4626 + if (nb->ecc_en && !read && fdm)
4627 + write_fdm(nb, fdm);
4628 +
4629 + writew(val, regs + NFI_CNFG);
4630 + /* setup R/W sector number */
4631 + writel(sectors << CON_SEC_SHIFT, regs + NFI_CON);
4632 +
4633 + return 0;
4634 +}
4635 +
4636 +static void rw_trigger(struct nfi_base *nb, bool read)
4637 +{
4638 + void *regs = nb->res.nfi_regs;
4639 + u32 val;
4640 +
4641 + val = read ? CON_BRD : CON_BWR;
4642 + val |= readl(regs + NFI_CON);
4643 + writel(val, regs + NFI_CON);
4644 +
4645 + writel(STAR_EN, regs + NFI_STRDATA);
4646 +}
4647 +
4648 +static int rw_wait_done(struct nfi_base *nb, int sectors, bool read)
4649 +{
4650 + void *regs = nb->res.nfi_regs;
4651 + bool irq_en = nb->dma_en && nb->nfi_irq_en;
4652 + int ret;
4653 + u32 val;
4654 +
4655 + if (irq_en) {
4656 + ret = nandx_event_wait_complete(nb->done, NFI_TIMEOUT);
4657 + if (!ret) {
4658 + writew(0, regs + NFI_INTR_EN);
4659 + return ret;
4660 + }
4661 + }
4662 +
4663 + if (read) {
4664 + ret = readl_poll_timeout_atomic(regs + NFI_BYTELEN, val,
4665 + ADDRCNTR_SEC(val) >=
4666 + (u32)sectors,
4667 + 2, NFI_TIMEOUT);
4668 + /* HW issue: if not wait ahb done, need polling bus busy */
4669 + if (!ret && !irq_en)
4670 + ret = readl_poll_timeout_atomic(regs + NFI_MASTER_STA,
4671 + val,
4672 + !(val &
4673 + MASTER_BUS_BUSY),
4674 + 2, NFI_TIMEOUT);
4675 + } else {
4676 + ret = readl_poll_timeout_atomic(regs + NFI_ADDRCNTR, val,
4677 + ADDRCNTR_SEC(val) >=
4678 + (u32)sectors,
4679 + 2, NFI_TIMEOUT);
4680 + }
4681 +
4682 + if (ret) {
4683 + pr_info("do page %s timeout\n", read ? "read" : "write");
4684 + return ret;
4685 + }
4686 +
4687 + if (read && nb->ecc_en) {
4688 + ret = nb->ecc->wait_done(nb->ecc);
4689 + if (ret)
4690 + return ret;
4691 +
4692 + return nb->ecc->decode_status(nb->ecc, 0, sectors);
4693 + }
4694 +
4695 + return 0;
4696 +}
4697 +
4698 +static int rw_data(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
4699 + bool read)
4700 +{
4701 + if (read && nb->dma_en && nb->ecc_en && fdm)
4702 + read_fdm(nb, fdm, 0, sectors);
4703 +
4704 + if (!nb->dma_en) {
4705 + if (read)
4706 + return pio_rx_data(nb, data, fdm, sectors);
4707 +
4708 + return pio_tx_data(nb, data, fdm, sectors);
4709 + }
4710 +
4711 + return 0;
4712 +}
4713 +
4714 +static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
4715 + bool read)
4716 +{
4717 + int data_len = 0;
4718 + bool is_empty;
4719 +
4720 + if (nb->dma_en) {
4721 + if (read) {
4722 + nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
4723 + (u64)nb->access_len, NDMA_FROM_DEV);
4724 +
4725 + if (data) {
4726 + data_len = nb->rw_sectors * nb->nfi.sector_size;
4727 + memcpy(data, nb->buf, data_len);
4728 + }
4729 +
4730 + if (fdm)
4731 + memcpy(fdm, nb->buf + data_len,
4732 + nb->access_len - data_len);
4733 +
4734 + if (nb->read_status == -ENANDREAD) {
4735 + is_empty = nb->is_page_empty(nb, data, fdm,
4736 + nb->rw_sectors);
4737 + if (is_empty)
4738 + nb->read_status = 0;
4739 + }
4740 + } else {
4741 + nandx_dma_unmap(nb->res.dev, nb->buf, nb->dma_addr,
4742 + (u64)nb->access_len, NDMA_TO_DEV);
4743 + }
4744 + }
4745 +
4746 + /* whether it's reading or writing, we always check if we need to swap;
4747 + * for write, we need to restore data
4748 + */
4749 + if (nb->bad_mark_swap_en)
4750 + nb->bad_mark_ctrl.bad_mark_swap(&nb->nfi, data, fdm);
4751 +
4752 + if (nb->ecc_en)
4753 + nb->ecc->disable(nb->ecc);
4754 +
4755 + writel(0, nb->res.nfi_regs + NFI_CNFG);
4756 + writel(0, nb->res.nfi_regs + NFI_CON);
4757 +}
4758 +
4759 +static int nfi_read_sectors(struct nfi *nfi, u8 *data, u8 *fdm,
4760 + int sectors)
4761 +{
4762 + struct nfi_base *nb = nfi_to_base(nfi);
4763 + int bitflips = 0, ret;
4764 +
4765 + pr_debug("%s: read page#%d\n", __func__, nb->row);
4766 + pr_debug("%s: data address 0x%x, fdm address 0x%x, sectors 0x%x\n",
4767 + __func__, (u32)((unsigned long)data),
4768 + (u32)((unsigned long)fdm), sectors);
4769 +
4770 + nb->read_status = 0;
4771 +
4772 + ret = nb->rw_prepare(nb, sectors, data, fdm, true);
4773 + if (ret)
4774 + return ret;
4775 +
4776 + nb->rw_trigger(nb, true);
4777 +
4778 + if (nb->dma_en) {
4779 + ret = nb->rw_wait_done(nb, sectors, true);
4780 + if (ret > 0)
4781 + bitflips = ret;
4782 + else if (ret == -ENANDREAD)
4783 + nb->read_status = -ENANDREAD;
4784 + else if (ret < 0)
4785 + goto complete;
4786 +
4787 + }
4788 +
4789 + ret = nb->rw_data(nb, data, fdm, sectors, true);
4790 + if (ret > 0)
4791 + ret = max_t(int, ret, bitflips);
4792 +
4793 +complete:
4794 + nb->rw_complete(nb, data, fdm, true);
4795 +
4796 + if (nb->read_status == -ENANDREAD)
4797 + return -ENANDREAD;
4798 +
4799 + return ret;
4800 +}
4801 +
4802 +int nfi_write_page(struct nfi *nfi, u8 *data, u8 *fdm)
4803 +{
4804 + struct nfi_base *nb = nfi_to_base(nfi);
4805 + u32 sectors = div_down(nb->format.page_size, nfi->sector_size);
4806 + int ret;
4807 +
4808 + pr_debug("%s: data address 0x%x, fdm address 0x%x\n",
4809 + __func__, (int)((unsigned long)data),
4810 + (int)((unsigned long)fdm));
4811 +
4812 + ret = nb->rw_prepare(nb, sectors, data, fdm, false);
4813 + if (ret)
4814 + return ret;
4815 +
4816 + nb->rw_trigger(nb, false);
4817 +
4818 + ret = nb->rw_data(nb, data, fdm, sectors, false);
4819 + if (ret)
4820 + return ret;
4821 +
4822 + ret = nb->rw_wait_done(nb, sectors, false);
4823 +
4824 + nb->rw_complete(nb, data, fdm, false);
4825 +
4826 + return ret;
4827 +}
4828 +
4829 +static int nfi_rw_bytes(struct nfi *nfi, u8 *data, int count, bool read)
4830 +{
4831 + struct nfi_base *nb = nfi_to_base(nfi);
4832 + void *regs = nb->res.nfi_regs;
4833 + int i, ret;
4834 + u32 val;
4835 +
4836 + for (i = 0; i < count; i++) {
4837 + val = readl(regs + NFI_STA) & NFI_FSM_MASK;
4838 + if (val != NFI_FSM_CUSTDATA) {
4839 + val = readw(regs + NFI_CNFG) | CNFG_BYTE_RW;
4840 + if (read)
4841 + val |= CNFG_READ_EN;
4842 + writew(val, regs + NFI_CNFG);
4843 +
4844 + val = div_up(count, nfi->sector_size);
4845 + val = (val << CON_SEC_SHIFT) | CON_BRD | CON_BWR;
4846 + writel(val, regs + NFI_CON);
4847 +
4848 + writew(STAR_EN, regs + NFI_STRDATA);
4849 + }
4850 +
4851 + ret = wait_io_ready(regs);
4852 + if (ret)
4853 + return ret;
4854 +
4855 + if (read)
4856 + data[i] = readb(regs + NFI_DATAR);
4857 + else
4858 + writeb(data[i], regs + NFI_DATAW);
4859 + }
4860 +
4861 + writel(0, nb->res.nfi_regs + NFI_CNFG);
4862 +
4863 + return 0;
4864 +}
4865 +
4866 +static int nfi_read_bytes(struct nfi *nfi, u8 *data, int count)
4867 +{
4868 + return nfi_rw_bytes(nfi, data, count, true);
4869 +}
4870 +
4871 +static int nfi_write_bytes(struct nfi *nfi, u8 *data, int count)
4872 +{
4873 + return nfi_rw_bytes(nfi, data, count, false);
4874 +}
4875 +
4876 +/* As register map says, only when flash macro is idle,
4877 + * sw reset or nand interface change can be issued
4878 + */
4879 +static inline int wait_flash_macro_idle(void *regs)
4880 +{
4881 + u32 val;
4882 +
4883 + return readl_poll_timeout_atomic(regs + NFI_STA, val,
4884 + val & FLASH_MACRO_IDLE, 2,
4885 + NFI_TIMEOUT);
4886 +}
4887 +
4888 +#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
4889 + ((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \
4890 + (tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt))
4891 +
4892 +static int nfi_set_sdr_timing(struct nfi *nfi, void *timing, u8 type)
4893 +{
4894 + struct nand_sdr_timing *sdr = (struct nand_sdr_timing *) timing;
4895 + struct nfi_base *nb = nfi_to_base(nfi);
4896 + void *regs = nb->res.nfi_regs;
4897 + u32 tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt, tstrobe;
4898 + u32 rate, val;
4899 + int ret;
4900 +
4901 + ret = wait_flash_macro_idle(regs);
4902 + if (ret)
4903 + return ret;
4904 +
4905 + /* turn clock rate into KHZ */
4906 + rate = nb->res.clock_1x / 1000;
4907 +
4908 + tpoecs = max_t(u16, sdr->tALH, sdr->tCLH);
4909 + tpoecs = div_up(tpoecs * rate, 1000000);
4910 + tpoecs &= 0xf;
4911 +
4912 + tprecs = max_t(u16, sdr->tCLS, sdr->tALS);
4913 + tprecs = div_up(tprecs * rate, 1000000);
4914 + tprecs &= 0x3f;
4915 +
4916 + /* tc2r is in unit of 2T */
4917 + tc2r = div_up(sdr->tCR * rate, 1000000);
4918 + tc2r = div_down(tc2r, 2);
4919 + tc2r &= 0x3f;
4920 +
4921 + tw2r = div_up(sdr->tWHR * rate, 1000000);
4922 + tw2r = div_down(tw2r, 2);
4923 + tw2r &= 0xf;
4924 +
4925 + twh = max_t(u16, sdr->tREH, sdr->tWH);
4926 + twh = div_up(twh * rate, 1000000) - 1;
4927 + twh &= 0xf;
4928 +
4929 + twst = div_up(sdr->tWP * rate, 1000000) - 1;
4930 + twst &= 0xf;
4931 +
4932 + trlt = div_up(sdr->tRP * rate, 1000000) - 1;
4933 + trlt &= 0xf;
4934 +
4935 + /* If tREA is bigger than tRP, setup strobe sel here */
4936 + if ((trlt + 1) * 1000000 / rate < sdr->tREA) {
4937 + tstrobe = sdr->tREA - (trlt + 1) * 1000000 / rate;
4938 + tstrobe = div_up(tstrobe * rate, 1000000);
4939 + val = readl(regs + NFI_DEBUG_CON1);
4940 + val &= ~STROBE_MASK;
4941 + val |= tstrobe << STROBE_SHIFT;
4942 + writel(val, regs + NFI_DEBUG_CON1);
4943 + }
4944 +
4945 + /*
4946 + * ACCON: access timing control register
4947 + * -------------------------------------
4948 + * 31:28: tpoecs, minimum required time for CS post pulling down after
4949 + * accessing the device
4950 + * 27:22: tprecs, minimum required time for CS pre pulling down before
4951 + * accessing the device
4952 + * 21:16: tc2r, minimum required time from NCEB low to NREB low
4953 + * 15:12: tw2r, minimum required time from NWEB high to NREB low.
4954 + * 11:08: twh, write enable hold time
4955 + * 07:04: twst, write wait states
4956 + * 03:00: trlt, read wait states
4957 + */
4958 + val = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt);
4959 + pr_info("acctiming: 0x%x\n", val);
4960 + writel(val, regs + NFI_ACCCON);
4961 +
4962 + /* set NAND type */
4963 + writel(NAND_TYPE_ASYNC, regs + NFI_NAND_TYPE_CNFG);
4964 +
4965 + return ret;
4966 +}
4967 +
4968 +static int nfi_set_timing(struct nfi *nfi, void *timing, int type)
4969 +{
4970 + switch (type) {
4971 + case NAND_TIMING_SDR:
4972 + return nfi_set_sdr_timing(nfi, timing, type);
4973 +
4974 + /* NOTE: for mlc/tlc */
4975 + case NAND_TIMING_SYNC_DDR:
4976 + case NAND_TIMING_TOGGLE_DDR:
4977 + case NAND_TIMING_NVDDR2:
4978 + default:
4979 + return -EINVAL;
4980 + }
4981 +
4982 + return 0;
4983 +}
4984 +
4985 +static void set_nfi_funcs(struct nfi *nfi)
4986 +{
4987 + nfi->select_chip = nfi_select_chip;
4988 + nfi->set_format = nfi_set_format;
4989 + nfi->nfi_ctrl = nfi_ctrl;
4990 + nfi->set_timing = nfi_set_timing;
4991 +
4992 + nfi->reset = nfi_reset;
4993 + nfi->send_cmd = nfi_send_cmd;
4994 + nfi->send_addr = nfi_send_addr;
4995 + nfi->trigger = nfi_trigger;
4996 +
4997 + nfi->write_page = nfi_write_page;
4998 + nfi->write_bytes = nfi_write_bytes;
4999 + nfi->read_sectors = nfi_read_sectors;
5000 + nfi->read_bytes = nfi_read_bytes;
5001 +
5002 + nfi->wait_ready = nfi_wait_ready;
5003 +
5004 + nfi->enable_randomizer = nfi_enable_randomizer;
5005 + nfi->disable_randomizer = nfi_disable_randomizer;
5006 +}
5007 +
5008 +static struct nfi_caps nfi_caps_mt7622 = {
5009 + .max_fdm_size = 8,
5010 + .fdm_ecc_size = 1,
5011 + .ecc_parity_bits = 13,
5012 + .spare_size = spare_size_mt7622,
5013 + .spare_size_num = 4,
5014 +};
5015 +
5016 +static struct nfi_caps *nfi_get_match_data(enum mtk_ic_version ic)
5017 +{
5018 + /* NOTE: add other IC's data */
5019 + return &nfi_caps_mt7622;
5020 +}
5021 +
5022 +static void set_nfi_base_params(struct nfi_base *nb)
5023 +{
5024 + nb->ecc_en = false;
5025 + nb->dma_en = false;
5026 + nb->nfi_irq_en = false;
5027 + nb->ecc_irq_en = false;
5028 + nb->page_irq_en = false;
5029 + nb->ecc_clk_en = false;
5030 + nb->randomize_en = false;
5031 + nb->custom_sector_en = false;
5032 + nb->bad_mark_swap_en = false;
5033 +
5034 + nb->op_mode = CNFG_CUSTOM_MODE;
5035 + nb->ecc_deccon = ECC_DEC_CORRECT;
5036 + nb->ecc_mode = ECC_NFI_MODE;
5037 +
5038 + nb->done = nandx_event_create();
5039 + nb->caps = nfi_get_match_data(nb->res.ic_ver);
5040 +
5041 + nb->set_op_mode = set_op_mode;
5042 + nb->is_page_empty = is_page_empty;
5043 +
5044 + nb->rw_prepare = rw_prepare;
5045 + nb->rw_trigger = rw_trigger;
5046 + nb->rw_wait_done = rw_wait_done;
5047 + nb->rw_data = rw_data;
5048 + nb->rw_complete = rw_complete;
5049 +}
5050 +
5051 +struct nfi *__weak nfi_extend_init(struct nfi_base *nb)
5052 +{
5053 + return &nb->nfi;
5054 +}
5055 +
5056 +void __weak nfi_extend_exit(struct nfi_base *nb)
5057 +{
5058 + mem_free(nb);
5059 +}
5060 +
5061 +struct nfi *nfi_init(struct nfi_resource *res)
5062 +{
5063 + struct nfiecc_resource ecc_res;
5064 + struct nfi_base *nb;
5065 + struct nfiecc *ecc;
5066 + struct nfi *nfi;
5067 + int ret;
5068 +
5069 + nb = mem_alloc(1, sizeof(struct nfi_base));
5070 + if (!nb) {
5071 + pr_info("nfi alloc memory fail @%s.\n", __func__);
5072 + return NULL;
5073 + }
5074 +
5075 + nb->res = *res;
5076 +
5077 + ret = nandx_irq_register(res->dev, res->nfi_irq_id, nfi_irq_handler,
5078 + "mtk_nand", nb);
5079 + if (ret) {
5080 + pr_info("nfi irq register failed!\n");
5081 + goto error;
5082 + }
5083 +
5084 + /* fill ecc paras and init ecc */
5085 + ecc_res.ic_ver = nb->res.ic_ver;
5086 + ecc_res.dev = nb->res.dev;
5087 + ecc_res.irq_id = nb->res.ecc_irq_id;
5088 + ecc_res.regs = nb->res.ecc_regs;
5089 + ecc = nfiecc_init(&ecc_res);
5090 + if (!ecc) {
5091 + pr_info("nfiecc init fail.\n");
5092 + return NULL;
5093 + }
5094 +
5095 + nb->ecc = ecc;
5096 +
5097 + set_nfi_base_params(nb);
5098 + set_nfi_funcs(&nb->nfi);
5099 +
5100 + /* Assign a temp sector size for reading ID & para page.
5101 + * We may assign new value later.
5102 + */
5103 + nb->nfi.sector_size = 512;
5104 +
5105 + /* give a default timing; as discussed,
5106 + * this is the only thing we need to do for nfi init.
5107 + * if more is needed, we can add a function
5108 + */
5109 + writel(0x30C77FFF, nb->res.nfi_regs + NFI_ACCCON);
5110 +
5111 + nfi = nfi_extend_init(nb);
5112 + if (nfi)
5113 + return nfi;
5114 +
5115 +error:
5116 + mem_free(nb);
5117 + return NULL;
5118 +}
5119 +
5120 +void nfi_exit(struct nfi *nfi)
5121 +{
5122 + struct nfi_base *nb = nfi_to_base(nfi);
5123 +
5124 + nandx_event_destroy(nb->done);
5125 + nfiecc_exit(nb->ecc);
5126 +#if !NANDX_BULK_IO_USE_DRAM
5127 + mem_free(nb->buf);
5128 +#endif
5129 + nfi_extend_exit(nb);
5130 +}
5131 +
5132 --- /dev/null
5133 +++ b/drivers/mtd/nandx/core/nfi/nfi_base.h
5134 @@ -0,0 +1,95 @@
5135 +/*
5136 + * Copyright (C) 2017 MediaTek Inc.
5137 + * Licensed under either
5138 + * BSD Licence, (see NOTICE for more details)
5139 + * GNU General Public License, version 2.0, (see NOTICE for more details)
5140 + */
5141 +
5142 +#ifndef __NFI_BASE_H__
5143 +#define __NFI_BASE_H__
5144 +
5145 +#define NFI_TIMEOUT 1000000
5146 +
5147 +enum randomizer_op {
5148 + RAND_ENCODE,
5149 + RAND_DECODE
5150 +};
5151 +
5152 +struct bad_mark_ctrl {
5153 + void (*bad_mark_swap)(struct nfi *nfi, u8 *buf, u8 *fdm);
5154 + u8 *(*fdm_shift)(struct nfi *nfi, u8 *fdm, int sector);
5155 + u32 sector;
5156 + u32 position;
5157 +};
5158 +
5159 +struct nfi_caps {
5160 + u8 max_fdm_size;
5161 + u8 fdm_ecc_size;
5162 + u8 ecc_parity_bits;
5163 + const int *spare_size;
5164 + u32 spare_size_num;
5165 +};
5166 +
5167 +struct nfi_base {
5168 + struct nfi nfi;
5169 + struct nfi_resource res;
5170 + struct nfiecc *ecc;
5171 + struct nfi_format format;
5172 + struct nfi_caps *caps;
5173 + struct bad_mark_ctrl bad_mark_ctrl;
5174 +
5175 + /* page_size + spare_size */
5176 + u8 *buf;
5177 +
5178 + /* used for spi nand */
5179 + u8 cmd_mode;
5180 + u32 op_mode;
5181 +
5182 + int page_sectors;
5183 +
5184 + void *done;
5185 +
5186 + /* for read/write */
5187 + int col;
5188 + int row;
5189 + int access_len;
5190 + int rw_sectors;
5191 + void *dma_addr;
5192 + int read_status;
5193 +
5194 + bool dma_en;
5195 + bool nfi_irq_en;
5196 + bool page_irq_en;
5197 + bool auto_format;
5198 + bool ecc_en;
5199 + bool ecc_irq_en;
5200 + bool ecc_clk_en;
5201 + bool randomize_en;
5202 + bool custom_sector_en;
5203 + bool bad_mark_swap_en;
5204 +
5205 + enum nfiecc_deccon ecc_deccon;
5206 + enum nfiecc_mode ecc_mode;
5207 +
5208 + void (*set_op_mode)(void *regs, u32 mode);
5209 + bool (*is_page_empty)(struct nfi_base *nb, u8 *data, u8 *fdm,
5210 + int sectors);
5211 +
5212 + int (*rw_prepare)(struct nfi_base *nb, int sectors, u8 *data, u8 *fdm,
5213 + bool read);
5214 + void (*rw_trigger)(struct nfi_base *nb, bool read);
5215 + int (*rw_wait_done)(struct nfi_base *nb, int sectors, bool read);
5216 + int (*rw_data)(struct nfi_base *nb, u8 *data, u8 *fdm, int sectors,
5217 + bool read);
5218 + void (*rw_complete)(struct nfi_base *nb, u8 *data, u8 *fdm, bool read);
5219 +};
5220 +
5221 +static inline struct nfi_base *nfi_to_base(struct nfi *nfi)
5222 +{
5223 + return container_of(nfi, struct nfi_base, nfi);
5224 +}
5225 +
5226 +struct nfi *nfi_extend_init(struct nfi_base *nb);
5227 +void nfi_extend_exit(struct nfi_base *nb);
5228 +
5229 +#endif /* __NFI_BASE_H__ */
5230 --- /dev/null
5231 +++ b/drivers/mtd/nandx/core/nfi/nfi_regs.h
5232 @@ -0,0 +1,114 @@
5233 +/*
5234 + * Copyright (C) 2017 MediaTek Inc.
5235 + * Licensed under either
5236 + * BSD Licence, (see NOTICE for more details)
5237 + * GNU General Public License, version 2.0, (see NOTICE for more details)
5238 + */
5239 +
5240 +#ifndef __NFI_REGS_H__
5241 +#define __NFI_REGS_H__
5242 +
5243 +#define NFI_CNFG 0x000
5244 +#define CNFG_AHB BIT(0)
5245 +#define CNFG_READ_EN BIT(1)
5246 +#define CNFG_DMA_BURST_EN BIT(2)
5247 +#define CNFG_RESEED_SEC_EN BIT(4)
5248 +#define CNFG_RAND_SEL BIT(5)
5249 +#define CNFG_BYTE_RW BIT(6)
5250 +#define CNFG_HW_ECC_EN BIT(8)
5251 +#define CNFG_AUTO_FMT_EN BIT(9)
5252 +#define CNFG_RAND_MASK GENMASK(5, 4)
5253 +#define CNFG_OP_MODE_MASK GENMASK(14, 12)
5254 +#define CNFG_IDLE_MOD 0
5255 +#define CNFG_READ_MODE (1 << 12)
5256 +#define CNFG_SINGLE_READ_MODE (2 << 12)
5257 +#define CNFG_PROGRAM_MODE (3 << 12)
5258 +#define CNFG_ERASE_MODE (4 << 12)
5259 +#define CNFG_RESET_MODE (5 << 12)
5260 +#define CNFG_CUSTOM_MODE (6 << 12)
5261 +#define NFI_PAGEFMT 0x004
5262 +#define PAGEFMT_SPARE_SHIFT 4
5263 +#define PAGEFMT_FDM_ECC_SHIFT 12
5264 +#define PAGEFMT_FDM_SHIFT 8
5265 +#define PAGEFMT_SEC_SEL_512 BIT(2)
5266 +#define PAGEFMT_512_2K 0
5267 +#define PAGEFMT_2K_4K 1
5268 +#define PAGEFMT_4K_8K 2
5269 +#define PAGEFMT_8K_16K 3
5270 +#define NFI_CON 0x008
5271 +#define CON_FIFO_FLUSH BIT(0)
5272 +#define CON_NFI_RST BIT(1)
5273 +#define CON_BRD BIT(8)
5274 +#define CON_BWR BIT(9)
5275 +#define CON_SEC_SHIFT 12
5276 +#define NFI_ACCCON 0x00c
5277 +#define NFI_INTR_EN 0x010
5278 +#define INTR_BUSY_RETURN_EN BIT(4)
5279 +#define INTR_AHB_DONE_EN BIT(6)
5280 +#define NFI_INTR_STA 0x014
5281 +#define NFI_CMD 0x020
5282 +#define NFI_ADDRNOB 0x030
5283 +#define ROW_SHIFT 4
5284 +#define NFI_COLADDR 0x034
5285 +#define NFI_ROWADDR 0x038
5286 +#define NFI_STRDATA 0x040
5287 +#define STAR_EN 1
5288 +#define STAR_DE 0
5289 +#define NFI_CNRNB 0x044
5290 +#define NFI_DATAW 0x050
5291 +#define NFI_DATAR 0x054
5292 +#define NFI_PIO_DIRDY 0x058
5293 +#define PIO_DI_RDY 1
5294 +#define NFI_STA 0x060
5295 +#define STA_CMD BIT(0)
5296 +#define STA_ADDR BIT(1)
5297 +#define FLASH_MACRO_IDLE BIT(5)
5298 +#define STA_BUSY BIT(8)
5299 +#define STA_BUSY2READY BIT(9)
5300 +#define STA_EMP_PAGE BIT(12)
5301 +#define NFI_FSM_CUSTDATA (0xe << 16)
5302 +#define NFI_FSM_MASK GENMASK(19, 16)
5303 +#define NAND_FSM_MASK GENMASK(29, 23)
5304 +#define NFI_ADDRCNTR 0x070
5305 +#define CNTR_VALID_MASK GENMASK(16, 0)
5306 +#define CNTR_MASK GENMASK(15, 12)
5307 +#define ADDRCNTR_SEC_SHIFT 12
5308 +#define ADDRCNTR_SEC(val) \
5309 + (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
5310 +#define NFI_STRADDR 0x080
5311 +#define NFI_BYTELEN 0x084
5312 +#define NFI_CSEL 0x090
5313 +#define NFI_FDML(x) (0x0a0 + (x) * 8)
5314 +#define NFI_FDMM(x) (0x0a4 + (x) * 8)
5315 +#define NFI_DEBUG_CON1 0x220
5316 +#define STROBE_MASK GENMASK(4, 3)
5317 +#define STROBE_SHIFT 3
5318 +#define ECC_CLK_EN BIT(11)
5319 +#define AUTOC_SRAM_MODE BIT(12)
5320 +#define BYPASS_MASTER_EN BIT(15)
5321 +#define NFI_MASTER_STA 0x224
5322 +#define MASTER_BUS_BUSY 0x3
5323 +#define NFI_SECCUS_SIZE 0x22c
5324 +#define SECCUS_SIZE_EN BIT(17)
5325 +#define NFI_RANDOM_CNFG 0x238
5326 +#define RAN_ENCODE_EN BIT(0)
5327 +#define ENCODE_SEED_SHIFT 1
5328 +#define RAN_DECODE_EN BIT(16)
5329 +#define DECODE_SEED_SHIFT 17
5330 +#define RAN_SEED_MASK 0x7fff
5331 +#define NFI_EMPTY_THRESH 0x23c
5332 +#define NFI_NAND_TYPE_CNFG 0x240
5333 +#define NAND_TYPE_ASYNC 0
5334 +#define NAND_TYPE_TOGGLE 1
5335 +#define NAND_TYPE_SYNC 2
5336 +#define NFI_ACCCON1 0x244
5337 +#define NFI_DELAY_CTRL 0x248
5338 +#define NFI_TLC_RD_WHR2 0x300
5339 +#define TLC_RD_WHR2_EN BIT(12)
5340 +#define TLC_RD_WHR2_MASK GENMASK(11, 0)
5341 +#define SNF_SNF_CNFG 0x55c
5342 +#define SPI_MODE_EN 1
5343 +#define SPI_MODE_DIS 0
5344 +
5345 +#endif /* __NFI_REGS_H__ */
5346 +
5347 --- /dev/null
5348 +++ b/drivers/mtd/nandx/core/nfi/nfi_spi.c
5349 @@ -0,0 +1,689 @@
5350 +/*
5351 + * Copyright (C) 2017 MediaTek Inc.
5352 + * Licensed under either
5353 + * BSD Licence, (see NOTICE for more details)
5354 + * GNU General Public License, version 2.0, (see NOTICE for more details)
5355 + */
5356 +
5357 +#include "nandx_util.h"
5358 +#include "nandx_core.h"
5359 +#include "../nfi.h"
5360 +#include "nfiecc.h"
5361 +#include "nfi_regs.h"
5362 +#include "nfi_base.h"
5363 +#include "nfi_spi_regs.h"
5364 +#include "nfi_spi.h"
5365 +
5366 +#define NFI_CMD_DUMMY_RD 0x00
5367 +#define NFI_CMD_DUMMY_WR 0x80
5368 +
5369 +static struct nfi_spi_delay spi_delay[SPI_NAND_MAX_DELAY] = {
5370 + /*
5371 + * tCLK_SAM_DLY, tCLK_OUT_DLY, tCS_DLY, tWR_EN_DLY,
5372 + * tIO_IN_DLY[4], tIO_OUT_DLY[4], tREAD_LATCH_LATENCY
5373 + */
5374 + {0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
5375 + {21, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
5376 + {63, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 0},
5377 + {0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
5378 + {21, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1},
5379 + {63, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0}, 1}
5380 +};
5381 +
5382 +static inline struct nfi_spi *base_to_snfi(struct nfi_base *nb)
5383 +{
5384 + return container_of(nb, struct nfi_spi, base);
5385 +}
5386 +
5387 +static void snfi_mac_enable(struct nfi_base *nb)
5388 +{
5389 + void *regs = nb->res.nfi_regs;
5390 + u32 val;
5391 +
5392 + val = readl(regs + SNF_MAC_CTL);
5393 + val &= ~MAC_XIO_SEL;
5394 + val |= SF_MAC_EN;
5395 +
5396 + writel(val, regs + SNF_MAC_CTL);
5397 +}
5398 +
5399 +static void snfi_mac_disable(struct nfi_base *nb)
5400 +{
5401 + void *regs = nb->res.nfi_regs;
5402 + u32 val;
5403 +
5404 + val = readl(regs + SNF_MAC_CTL);
5405 + val &= ~(SF_TRIG | SF_MAC_EN);
5406 + writel(val, regs + SNF_MAC_CTL);
5407 +}
5408 +
5409 +static int snfi_mac_trigger(struct nfi_base *nb)
5410 +{
5411 + void *regs = nb->res.nfi_regs;
5412 + int ret;
5413 + u32 val;
5414 +
5415 + val = readl(regs + SNF_MAC_CTL);
5416 + val |= SF_TRIG;
5417 + writel(val, regs + SNF_MAC_CTL);
5418 +
5419 + ret = readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
5420 + val & WIP_READY, 10,
5421 + NFI_TIMEOUT);
5422 + if (ret) {
5423 + pr_info("polling wip ready for read timeout\n");
5424 + return ret;
5425 + }
5426 +
5427 + return readl_poll_timeout_atomic(regs + SNF_MAC_CTL, val,
5428 + !(val & WIP), 10,
5429 + NFI_TIMEOUT);
5430 +}
5431 +
5432 +static int snfi_mac_op(struct nfi_base *nb)
5433 +{
5434 + int ret;
5435 +
5436 + snfi_mac_enable(nb);
5437 + ret = snfi_mac_trigger(nb);
5438 + snfi_mac_disable(nb);
5439 +
5440 + return ret;
5441 +}
5442 +
5443 +static void snfi_write_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
5444 +{
5445 + struct nandx_split32 split = {0};
5446 + u32 reg_offset = round_down(nfi_spi->tx_count, 4);
5447 + void *regs = nfi_spi->base.res.nfi_regs;
5448 + u32 data_offset = 0, i, val;
5449 + u8 *p_val = (u8 *)(&val);
5450 +
5451 + nandx_split(&split, nfi_spi->tx_count, count, val, 4);
5452 +
5453 + if (split.head_len) {
5454 + val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5455 +
5456 + for (i = 0; i < split.head_len; i++)
5457 + p_val[split.head + i] = data[i];
5458 +
5459 + writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
5460 + }
5461 +
5462 + if (split.body_len) {
5463 + reg_offset = split.body;
5464 + data_offset = split.head_len;
5465 +
5466 + for (i = 0; i < split.body_len; i++) {
5467 + p_val[i & 3] = data[data_offset + i];
5468 +
5469 + if ((i & 3) == 3) {
5470 + writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
5471 + reg_offset += 4;
5472 + }
5473 + }
5474 + }
5475 +
5476 + if (split.tail_len) {
5477 + reg_offset = split.tail;
5478 + data_offset += split.body_len;
5479 +
5480 + for (i = 0; i < split.tail_len; i++) {
5481 + p_val[i] = data[data_offset + i];
5482 +
5483 + if (i == split.tail_len - 1)
5484 + writel(val, regs + SPI_GPRAM_ADDR + reg_offset);
5485 + }
5486 + }
5487 +}
5488 +
5489 +static void snfi_read_mac(struct nfi_spi *nfi_spi, u8 *data, int count)
5490 +{
5491 + void *regs = nfi_spi->base.res.nfi_regs;
5492 + u32 reg_offset = round_down(nfi_spi->tx_count, 4);
5493 + struct nandx_split32 split = {0};
5494 + u32 data_offset = 0, i, val;
5495 + u8 *p_val = (u8 *)&val;
5496 +
5497 + nandx_split(&split, nfi_spi->tx_count, count, val, 4);
5498 +
5499 + if (split.head_len) {
5500 + val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5501 +
5502 + for (i = 0; i < split.head_len; i++)
5503 + data[data_offset + i] = p_val[split.head + i];
5504 + }
5505 +
5506 + if (split.body_len) {
5507 + reg_offset = split.body;
5508 + data_offset = split.head_len;
5509 +
5510 + for (i = 0; i < split.body_len; i++) {
5511 + if ((i & 3) == 0) {
5512 + val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5513 + reg_offset += 4;
5514 + }
5515 +
5516 + data[data_offset + i] = p_val[i % 4];
5517 + }
5518 + }
5519 +
5520 + if (split.tail_len) {
5521 + reg_offset = split.tail;
5522 + data_offset += split.body_len;
5523 + val = readl(regs + SPI_GPRAM_ADDR + reg_offset);
5524 +
5525 + for (i = 0; i < split.tail_len; i++)
5526 + data[data_offset + i] = p_val[i];
5527 + }
5528 +}
5529 +
5530 +static int snfi_send_command(struct nfi *nfi, short cmd)
5531 +{
5532 + struct nfi_base *nb = nfi_to_base(nfi);
5533 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5534 +
5535 + if (cmd == -1)
5536 + return 0;
5537 +
5538 + if (nfi_spi->snfi_mode == SNFI_MAC_MODE) {
5539 + snfi_write_mac(nfi_spi, (u8 *)&cmd, 1);
5540 + nfi_spi->tx_count++;
5541 + return 0;
5542 + }
5543 +
5544 + nfi_spi->cmd[nfi_spi->cur_cmd_idx++] = cmd;
5545 + return 0;
5546 +}
5547 +
5548 +static int snfi_send_address(struct nfi *nfi, int col, int row,
5549 + int col_cycle,
5550 + int row_cycle)
5551 +{
5552 + struct nfi_base *nb = nfi_to_base(nfi);
5553 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5554 + u32 addr, cycle, temp;
5555 +
5556 + nb->col = col;
5557 + nb->row = row;
5558 +
5559 + if (nfi_spi->snfi_mode == SNFI_MAC_MODE) {
5560 + addr = row;
5561 + cycle = row_cycle;
5562 +
5563 + if (!row_cycle) {
5564 + addr = col;
5565 + cycle = col_cycle;
5566 + }
5567 +
5568 + temp = nandx_cpu_to_be32(addr) >> ((4 - cycle) << 3);
5569 + snfi_write_mac(nfi_spi, (u8 *)&temp, cycle);
5570 + nfi_spi->tx_count += cycle;
5571 + } else {
5572 + nfi_spi->row_addr[nfi_spi->cur_addr_idx++] = row;
5573 + nfi_spi->col_addr[nfi_spi->cur_addr_idx++] = col;
5574 + }
5575 +
5576 + return 0;
5577 +}
5578 +
5579 +static int snfi_trigger(struct nfi *nfi)
5580 +{
5581 + struct nfi_base *nb = nfi_to_base(nfi);
5582 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5583 + void *regs = nb->res.nfi_regs;
5584 +
5585 + writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
5586 + writel(0, regs + SNF_MAC_INL);
5587 +
5588 + nfi_spi->tx_count = 0;
5589 + nfi_spi->cur_cmd_idx = 0;
5590 + nfi_spi->cur_addr_idx = 0;
5591 +
5592 + return snfi_mac_op(nb);
5593 +}
5594 +
5595 +static int snfi_select_chip(struct nfi *nfi, int cs)
5596 +{
5597 + struct nfi_base *nb = nfi_to_base(nfi);
5598 + void *regs = nb->res.nfi_regs;
5599 + u32 val;
5600 +
5601 + val = readl(regs + SNF_MISC_CTL);
5602 +
5603 + if (cs == 0) {
5604 + val &= ~SF2CS_SEL;
5605 + val &= ~SF2CS_EN;
5606 + } else if (cs == 1) {
5607 + val |= SF2CS_SEL;
5608 + val |= SF2CS_EN;
5609 + } else {
5610 + return -EIO;
5611 + }
5612 +
5613 + writel(val, regs + SNF_MISC_CTL);
5614 +
5615 + return 0;
5616 +}
5617 +
5618 +static int snfi_set_delay(struct nfi_base *nb, u8 delay_mode)
5619 +{
5620 + void *regs = nb->res.nfi_regs;
5621 + struct nfi_spi_delay *delay;
5622 + u32 val;
5623 +
5624 + if (delay_mode < 0 || delay_mode > SPI_NAND_MAX_DELAY)
5625 + return -EINVAL;
5626 +
5627 + delay = &spi_delay[delay_mode];
5628 +
5629 + val = delay->tIO_OUT_DLY[0] | delay->tIO_OUT_DLY[1] << 8 |
5630 + delay->tIO_OUT_DLY[2] << 16 |
5631 + delay->tIO_OUT_DLY[3] << 24;
5632 + writel(val, regs + SNF_DLY_CTL1);
5633 +
5634 + val = delay->tIO_IN_DLY[0] | (delay->tIO_IN_DLY[1] << 8) |
5635 + delay->tIO_IN_DLY[2] << 16 |
5636 + delay->tIO_IN_DLY[3] << 24;
5637 + writel(val, regs + SNF_DLY_CTL2);
5638 +
5639 + val = delay->tCLK_SAM_DLY | delay->tCLK_OUT_DLY << 8 |
5640 + delay->tCS_DLY << 16 |
5641 + delay->tWR_EN_DLY << 24;
5642 + writel(val, regs + SNF_DLY_CTL3);
5643 +
5644 + writel(delay->tCS_DLY, regs + SNF_DLY_CTL4);
5645 +
5646 + val = readl(regs + SNF_MISC_CTL);
5647 + val |= (delay->tREAD_LATCH_LATENCY) <<
5648 + LATCH_LAT_SHIFT;
5649 + writel(val, regs + SNF_MISC_CTL);
5650 +
5651 + return 0;
5652 +}
5653 +
5654 +static int snfi_set_timing(struct nfi *nfi, void *timing, int type)
5655 +{
5656 + /* Nothing needs to be done. */
5657 + return 0;
5658 +}
5659 +
5660 +static int snfi_wait_ready(struct nfi *nfi, int type, u32 timeout)
5661 +{
5662 + /* Nothing needs to be done. */
5663 + return 0;
5664 +}
5665 +
5666 +static int snfi_ctrl(struct nfi *nfi, int cmd, void *args)
5667 +{
5668 + struct nfi_base *nb = nfi_to_base(nfi);
5669 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5670 + int ret = 0;
5671 +
5672 + if (!args)
5673 + return -EINVAL;
5674 +
5675 + switch (cmd) {
5676 + case NFI_CTRL_DMA:
5677 + nb->dma_en = *(bool *)args;
5678 + break;
5679 +
5680 + case NFI_CTRL_NFI_IRQ:
5681 + nb->nfi_irq_en = *(bool *)args;
5682 + break;
5683 +
5684 + case NFI_CTRL_ECC_IRQ:
5685 + nb->ecc_irq_en = *(bool *)args;
5686 + break;
5687 +
5688 + case NFI_CTRL_PAGE_IRQ:
5689 + nb->page_irq_en = *(bool *)args;
5690 + break;
5691 +
5692 + case NFI_CTRL_ECC:
5693 + nb->ecc_en = *(bool *)args;
5694 + break;
5695 +
5696 + case NFI_CTRL_BAD_MARK_SWAP:
5697 + nb->bad_mark_swap_en = *(bool *)args;
5698 + break;
5699 +
5700 + case NFI_CTRL_ECC_CLOCK:
5701 + nb->ecc_clk_en = *(bool *)args;
5702 + break;
5703 +
5704 + case SNFI_CTRL_OP_MODE:
5705 + nfi_spi->snfi_mode = *(u8 *)args;
5706 + break;
5707 +
5708 + case SNFI_CTRL_RX_MODE:
5709 + nfi_spi->read_cache_mode = *(u8 *)args;
5710 + break;
5711 +
5712 + case SNFI_CTRL_TX_MODE:
5713 + nfi_spi->write_cache_mode = *(u8 *)args;
5714 + break;
5715 +
5716 + case SNFI_CTRL_DELAY_MODE:
5717 + ret = snfi_set_delay(nb, *(u8 *)args);
5718 + break;
5719 +
5720 + default:
5721 + pr_info("operation not support.\n");
5722 + ret = -EOPNOTSUPP;
5723 + break;
5724 + }
5725 +
5726 + return ret;
5727 +}
5728 +
5729 +static int snfi_read_bytes(struct nfi *nfi, u8 *data, int count)
5730 +{
5731 + struct nfi_base *nb = nfi_to_base(nfi);
5732 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5733 + void *regs = nb->res.nfi_regs;
5734 + int ret;
5735 +
5736 + writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
5737 + writel(count, regs + SNF_MAC_INL);
5738 +
5739 + ret = snfi_mac_op(nb);
5740 + if (ret)
5741 + return ret;
5742 +
5743 + snfi_read_mac(nfi_spi, data, count);
5744 +
5745 + nfi_spi->tx_count = 0;
5746 +
5747 + return 0;
5748 +}
5749 +
5750 +static int snfi_write_bytes(struct nfi *nfi, u8 *data, int count)
5751 +{
5752 + struct nfi_base *nb = nfi_to_base(nfi);
5753 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5754 + void *regs = nb->res.nfi_regs;
5755 +
5756 + snfi_write_mac(nfi_spi, data, count);
5757 + nfi_spi->tx_count += count;
5758 +
5759 + writel(0, regs + SNF_MAC_INL);
5760 + writel(nfi_spi->tx_count, regs + SNF_MAC_OUTL);
5761 +
5762 + nfi_spi->tx_count = 0;
5763 +
5764 + return snfi_mac_op(nb);
5765 +}
5766 +
5767 +static int snfi_reset(struct nfi *nfi)
5768 +{
5769 + struct nfi_base *nb = nfi_to_base(nfi);
5770 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5771 + void *regs = nb->res.nfi_regs;
5772 + u32 val;
5773 + int ret;
5774 +
5775 + ret = nfi_spi->parent->nfi.reset(nfi);
5776 + if (ret)
5777 + return ret;
5778 +
5779 + val = readl(regs + SNF_MISC_CTL);
5780 + val |= SW_RST;
5781 + writel(val, regs + SNF_MISC_CTL);
5782 +
5783 + ret = readx_poll_timeout_atomic(readw, regs + SNF_STA_CTL1, val,
5784 + !(val & SPI_STATE), 50,
5785 + NFI_TIMEOUT);
5786 + if (ret) {
5787 + pr_info("spi state active in reset [0x%x] = 0x%x\n",
5788 + SNF_STA_CTL1, val);
5789 + return ret;
5790 + }
5791 +
5792 + val = readl(regs + SNF_MISC_CTL);
5793 + val &= ~SW_RST;
5794 + writel(val, regs + SNF_MISC_CTL);
5795 +
5796 + return 0;
5797 +}
5798 +
5799 +static int snfi_config_for_write(struct nfi_base *nb, int count)
5800 +{
5801 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5802 + void *regs = nb->res.nfi_regs;
5803 + u32 val;
5804 +
5805 + nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
5806 +
5807 + val = readl(regs + SNF_MISC_CTL);
5808 +
5809 + if (nfi_spi->write_cache_mode == SNFI_TX_114)
5810 + val |= PG_LOAD_X4_EN;
5811 +
5812 + if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
5813 + val |= PG_LOAD_CUSTOM_EN;
5814 +
5815 + writel(val, regs + SNF_MISC_CTL);
5816 +
5817 + val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
5818 + writel(val << PG_LOAD_SHIFT, regs + SNF_MISC_CTL2);
5819 +
5820 + val = readl(regs + SNF_PG_CTL1);
5821 +
5822 + if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
5823 + val |= nfi_spi->cmd[0] << PG_LOAD_CMD_SHIFT;
5824 + else {
5825 + val |= nfi_spi->cmd[0] | nfi_spi->cmd[1] << PG_LOAD_CMD_SHIFT |
5826 + nfi_spi->cmd[2] << PG_EXE_CMD_SHIFT;
5827 +
5828 + writel(nfi_spi->row_addr[1], regs + SNF_PG_CTL3);
5829 + writel(nfi_spi->cmd[3] << GF_CMD_SHIFT | nfi_spi->col_addr[2] <<
5830 + GF_ADDR_SHIFT, regs + SNF_GF_CTL1);
5831 + }
5832 +
5833 + writel(val, regs + SNF_PG_CTL1);
5834 + writel(nfi_spi->col_addr[1], regs + SNF_PG_CTL2);
5835 +
5836 + writel(NFI_CMD_DUMMY_WR, regs + NFI_CMD);
5837 +
5838 + return 0;
5839 +}
5840 +
5841 +static int snfi_config_for_read(struct nfi_base *nb, int count)
5842 +{
5843 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5844 + void *regs = nb->res.nfi_regs;
5845 + u32 val;
5846 + int ret = 0;
5847 +
5848 + nb->set_op_mode(regs, CNFG_CUSTOM_MODE);
5849 +
5850 + val = readl(regs + SNF_MISC_CTL);
5851 + val &= ~DARA_READ_MODE_MASK;
5852 +
5853 + switch (nfi_spi->read_cache_mode) {
5854 +
5855 + case SNFI_RX_111:
5856 + break;
5857 +
5858 + case SNFI_RX_112:
5859 + val |= X2_DATA_MODE << READ_MODE_SHIFT;
5860 + break;
5861 +
5862 + case SNFI_RX_114:
5863 + val |= X4_DATA_MODE << READ_MODE_SHIFT;
5864 + break;
5865 +
5866 + case SNFI_RX_122:
5867 + val |= DUAL_IO_MODE << READ_MODE_SHIFT;
5868 + break;
5869 +
5870 + case SNFI_RX_144:
5871 + val |= QUAD_IO_MODE << READ_MODE_SHIFT;
5872 + break;
5873 +
5874 + default:
5875 + pr_info("Not support this read operarion: %d!\n",
5876 + nfi_spi->read_cache_mode);
5877 + ret = -EINVAL;
5878 + break;
5879 + }
5880 +
5881 + if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE)
5882 + val |= DATARD_CUSTOM_EN;
5883 +
5884 + writel(val, regs + SNF_MISC_CTL);
5885 +
5886 + val = count * (nb->nfi.sector_size + nb->nfi.sector_spare_size);
5887 + writel(val, regs + SNF_MISC_CTL2);
5888 +
5889 + val = readl(regs + SNF_RD_CTL2);
5890 +
5891 + if (nfi_spi->snfi_mode == SNFI_CUSTOM_MODE) {
5892 + val |= nfi_spi->cmd[0];
5893 + writel(nfi_spi->col_addr[1], regs + SNF_RD_CTL3);
5894 + } else {
5895 + val |= nfi_spi->cmd[2];
5896 + writel(nfi_spi->cmd[0] << PAGE_READ_CMD_SHIFT |
5897 + nfi_spi->row_addr[0], regs + SNF_RD_CTL1);
5898 + writel(nfi_spi->cmd[1] << GF_CMD_SHIFT |
5899 + nfi_spi->col_addr[1] << GF_ADDR_SHIFT,
5900 + regs + SNF_GF_CTL1);
5901 + writel(nfi_spi->col_addr[2], regs + SNF_RD_CTL3);
5902 + }
5903 +
5904 + writel(val, regs + SNF_RD_CTL2);
5905 +
5906 + writel(NFI_CMD_DUMMY_RD, regs + NFI_CMD);
5907 +
5908 + return ret;
5909 +}
5910 +
5911 +static bool is_page_empty(struct nfi_base *nb, u8 *data, u8 *fdm,
5912 + int sectors)
5913 +{
5914 + u32 *data32 = (u32 *)data;
5915 + u32 *fdm32 = (u32 *)fdm;
5916 + u32 i, count = 0;
5917 +
5918 + for (i = 0; i < nb->format.page_size >> 2; i++) {
5919 + if (data32[i] != 0xffff) {
5920 + count += zero_popcount(data32[i]);
5921 + if (count > 10) {
5922 + pr_info("%s %d %d count:%d\n",
5923 + __func__, __LINE__, i, count);
5924 + return false;
5925 + }
5926 + }
5927 + }
5928 +
5929 + if (fdm) {
5930 + for (i = 0; i < (nb->nfi.fdm_size * sectors >> 2); i++)
5931 + if (fdm32[i] != 0xffff) {
5932 + count += zero_popcount(fdm32[i]);
5933 + if (count > 10) {
5934 + pr_info("%s %d %d count:%d\n",
5935 + __func__, __LINE__, i, count);
5936 + return false;
5937 + }
5938 + }
5939 + }
5940 +
5941 + return true;
5942 +}
5943 +
5944 +static int rw_prepare(struct nfi_base *nb, int sectors, u8 *data,
5945 + u8 *fdm,
5946 + bool read)
5947 +{
5948 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5949 + int ret;
5950 +
5951 + ret = nfi_spi->parent->rw_prepare(nb, sectors, data, fdm, read);
5952 + if (ret)
5953 + return ret;
5954 +
5955 + if (read)
5956 + ret = snfi_config_for_read(nb, sectors);
5957 + else
5958 + ret = snfi_config_for_write(nb, sectors);
5959 +
5960 + return ret;
5961 +}
5962 +
5963 +static void rw_complete(struct nfi_base *nb, u8 *data, u8 *fdm,
5964 + bool read)
5965 +{
5966 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
5967 + void *regs = nb->res.nfi_regs;
5968 + u32 val;
5969 +
5970 + nfi_spi->parent->rw_complete(nb, data, fdm, read);
5971 +
5972 + val = readl(regs + SNF_MISC_CTL);
5973 +
5974 + if (read)
5975 + val &= ~DATARD_CUSTOM_EN;
5976 + else
5977 + val &= ~PG_LOAD_CUSTOM_EN;
5978 +
5979 + writel(val, regs + SNF_MISC_CTL);
5980 +
5981 + nfi_spi->tx_count = 0;
5982 + nfi_spi->cur_cmd_idx = 0;
5983 + nfi_spi->cur_addr_idx = 0;
5984 +}
5985 +
5986 +static void set_nfi_base_funcs(struct nfi_base *nb)
5987 +{
5988 + nb->nfi.reset = snfi_reset;
5989 + nb->nfi.set_timing = snfi_set_timing;
5990 + nb->nfi.wait_ready = snfi_wait_ready;
5991 +
5992 + nb->nfi.send_cmd = snfi_send_command;
5993 + nb->nfi.send_addr = snfi_send_address;
5994 + nb->nfi.trigger = snfi_trigger;
5995 + nb->nfi.nfi_ctrl = snfi_ctrl;
5996 + nb->nfi.select_chip = snfi_select_chip;
5997 +
5998 + nb->nfi.read_bytes = snfi_read_bytes;
5999 + nb->nfi.write_bytes = snfi_write_bytes;
6000 +
6001 + nb->rw_prepare = rw_prepare;
6002 + nb->rw_complete = rw_complete;
6003 + nb->is_page_empty = is_page_empty;
6004 +
6005 +}
6006 +
6007 +struct nfi *nfi_extend_init(struct nfi_base *nb)
6008 +{
6009 + struct nfi_spi *nfi_spi;
6010 +
6011 + nfi_spi = mem_alloc(1, sizeof(struct nfi_spi));
6012 + if (!nfi_spi) {
6013 + pr_info("snfi alloc memory fail @%s.\n", __func__);
6014 + return NULL;
6015 + }
6016 +
6017 + memcpy(&nfi_spi->base, nb, sizeof(struct nfi_base));
6018 + nfi_spi->parent = nb;
6019 +
6020 + nfi_spi->read_cache_mode = SNFI_RX_114;
6021 + nfi_spi->write_cache_mode = SNFI_TX_114;
6022 +
6023 + set_nfi_base_funcs(&nfi_spi->base);
6024 +
6025 + /* Change nfi to spi mode */
6026 + writel(SPI_MODE, nb->res.nfi_regs + SNF_SNF_CNFG);
6027 +
6028 + return &(nfi_spi->base.nfi);
6029 +}
6030 +
6031 +void nfi_extend_exit(struct nfi_base *nb)
6032 +{
6033 + struct nfi_spi *nfi_spi = base_to_snfi(nb);
6034 +
6035 + mem_free(nfi_spi->parent);
6036 + mem_free(nfi_spi);
6037 +}
6038 +
6039 --- /dev/null
6040 +++ b/drivers/mtd/nandx/core/nfi/nfi_spi.h
6041 @@ -0,0 +1,44 @@
6042 +/*
6043 + * Copyright (C) 2017 MediaTek Inc.
6044 + * Licensed under either
6045 + * BSD Licence, (see NOTICE for more details)
6046 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6047 + */
6048 +
6049 +#ifndef __NFI_SPI_H__
6050 +#define __NFI_SPI_H__
6051 +
6052 +#define SPI_NAND_MAX_DELAY 6
6053 +#define SPI_NAND_MAX_OP 4
6054 +
6055 +/* TODO: add comments */
6056 +struct nfi_spi_delay {
6057 + u8 tCLK_SAM_DLY;
6058 + u8 tCLK_OUT_DLY;
6059 + u8 tCS_DLY;
6060 + u8 tWR_EN_DLY;
6061 + u8 tIO_IN_DLY[4];
6062 + u8 tIO_OUT_DLY[4];
6063 + u8 tREAD_LATCH_LATENCY;
6064 +};
6065 +
6066 +/* SPI Nand structure */
6067 +struct nfi_spi {
6068 + struct nfi_base base;
6069 + struct nfi_base *parent;
6070 +
6071 + u8 snfi_mode;
6072 + u8 tx_count;
6073 +
6074 + u8 cmd[SPI_NAND_MAX_OP];
6075 + u8 cur_cmd_idx;
6076 +
6077 + u32 row_addr[SPI_NAND_MAX_OP];
6078 + u32 col_addr[SPI_NAND_MAX_OP];
6079 + u8 cur_addr_idx;
6080 +
6081 + u8 read_cache_mode;
6082 + u8 write_cache_mode;
6083 +};
6084 +
6085 +#endif /* __NFI_SPI_H__ */
6086 --- /dev/null
6087 +++ b/drivers/mtd/nandx/core/nfi/nfi_spi_regs.h
6088 @@ -0,0 +1,64 @@
6089 +/*
6090 + * Copyright (C) 2017 MediaTek Inc.
6091 + * Licensed under either
6092 + * BSD Licence, (see NOTICE for more details)
6093 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6094 + */
6095 +
6096 +#ifndef __NFI_SPI_REGS_H__
6097 +#define __NFI_SPI_REGS_H__
6098 +
6099 +#define SNF_MAC_CTL 0x500
6100 +#define WIP BIT(0)
6101 +#define WIP_READY BIT(1)
6102 +#define SF_TRIG BIT(2)
6103 +#define SF_MAC_EN BIT(3)
6104 +#define MAC_XIO_SEL BIT(4)
6105 +#define SNF_MAC_OUTL 0x504
6106 +#define SNF_MAC_INL 0x508
6107 +#define SNF_RD_CTL1 0x50c
6108 +#define PAGE_READ_CMD_SHIFT 24
6109 +#define SNF_RD_CTL2 0x510
6110 +#define SNF_RD_CTL3 0x514
6111 +#define SNF_GF_CTL1 0x518
6112 +#define GF_ADDR_SHIFT 16
6113 +#define GF_CMD_SHIFT 24
6114 +#define SNF_GF_CTL3 0x520
6115 +#define SNF_PG_CTL1 0x524
6116 +#define PG_EXE_CMD_SHIFT 16
6117 +#define PG_LOAD_CMD_SHIFT 8
6118 +#define SNF_PG_CTL2 0x528
6119 +#define SNF_PG_CTL3 0x52c
6120 +#define SNF_ER_CTL 0x530
6121 +#define SNF_ER_CTL2 0x534
6122 +#define SNF_MISC_CTL 0x538
6123 +#define SW_RST BIT(28)
6124 +#define PG_LOAD_X4_EN BIT(20)
6125 +#define X2_DATA_MODE 1
6126 +#define X4_DATA_MODE 2
6127 +#define DUAL_IO_MODE 5
6128 +#define QUAD_IO_MODE 6
6129 +#define READ_MODE_SHIFT 16
6130 +#define LATCH_LAT_SHIFT 8
6131 +#define LATCH_LAT_MASK GENMASK(9, 8)
6132 +#define DARA_READ_MODE_MASK GENMASK(18, 16)
6133 +#define SF2CS_SEL BIT(13)
6134 +#define SF2CS_EN BIT(12)
6135 +#define PG_LOAD_CUSTOM_EN BIT(7)
6136 +#define DATARD_CUSTOM_EN BIT(6)
6137 +#define SNF_MISC_CTL2 0x53c
6138 +#define PG_LOAD_SHIFT 16
6139 +#define SNF_DLY_CTL1 0x540
6140 +#define SNF_DLY_CTL2 0x544
6141 +#define SNF_DLY_CTL3 0x548
6142 +#define SNF_DLY_CTL4 0x54c
6143 +#define SNF_STA_CTL1 0x550
6144 +#define SPI_STATE GENMASK(3, 0)
6145 +#define SNF_STA_CTL2 0x554
6146 +#define SNF_STA_CTL3 0x558
6147 +#define SNF_SNF_CNFG 0x55c
6148 +#define SPI_MODE BIT(0)
6149 +#define SNF_DEBUG_SEL 0x560
6150 +#define SPI_GPRAM_ADDR 0x800
6151 +
6152 +#endif /* __NFI_SPI_REGS_H__ */
6153 --- /dev/null
6154 +++ b/drivers/mtd/nandx/core/nfi/nfiecc.c
6155 @@ -0,0 +1,510 @@
6156 +/*
6157 + * Copyright (C) 2017 MediaTek Inc.
6158 + * Licensed under either
6159 + * BSD Licence, (see NOTICE for more details)
6160 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6161 + */
6162 +
6163 +#include "nandx_util.h"
6164 +#include "nandx_core.h"
6165 +#include "nfiecc_regs.h"
6166 +#include "nfiecc.h"
6167 +
6168 +#define NFIECC_IDLE_REG(op) \
6169 + ((op) == ECC_ENCODE ? NFIECC_ENCIDLE : NFIECC_DECIDLE)
6170 +#define IDLE_MASK 1
6171 +#define NFIECC_CTL_REG(op) \
6172 + ((op) == ECC_ENCODE ? NFIECC_ENCCON : NFIECC_DECCON)
6173 +#define NFIECC_IRQ_REG(op) \
6174 + ((op) == ECC_ENCODE ? NFIECC_ENCIRQEN : NFIECC_DECIRQEN)
6175 +#define NFIECC_ADDR(op) \
6176 + ((op) == ECC_ENCODE ? NFIECC_ENCDIADDR : NFIECC_DECDIADDR)
6177 +
6178 +#define ECC_TIMEOUT 500000
6179 +
6180 +/* ecc strength that each IP supports */
6181 +static const int ecc_strength_mt7622[] = {
6182 + 4, 6, 8, 10, 12, 14, 16
6183 +};
6184 +
6185 +static int nfiecc_irq_handler(void *data)
6186 +{
6187 + struct nfiecc *ecc = data;
6188 + void *regs = ecc->res.regs;
6189 + u32 status;
6190 +
6191 + status = readl(regs + NFIECC_DECIRQSTA) & DEC_IRQSTA_GEN;
6192 + if (status) {
6193 + status = readl(regs + NFIECC_DECDONE);
6194 + if (!(status & ecc->config.sectors))
6195 + return NAND_IRQ_NONE;
6196 +
6197 + /*
6198 + * Clear decode IRQ status once again to ensure that
6199 + * there will be no extra IRQ.
6200 + */
6201 + readl(regs + NFIECC_DECIRQSTA);
6202 + ecc->config.sectors = 0;
6203 + nandx_event_complete(ecc->done);
6204 + } else {
6205 + status = readl(regs + NFIECC_ENCIRQSTA) & ENC_IRQSTA_GEN;
6206 + if (!status)
6207 + return NAND_IRQ_NONE;
6208 +
6209 + nandx_event_complete(ecc->done);
6210 + }
6211 +
6212 + return NAND_IRQ_HANDLED;
6213 +}
6214 +
6215 +static inline int nfiecc_wait_idle(struct nfiecc *ecc)
6216 +{
6217 + int op = ecc->config.op;
6218 + int ret, val;
6219 +
6220 + ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_IDLE_REG(op),
6221 + val, val & IDLE_MASK,
6222 + 10, ECC_TIMEOUT);
6223 + if (ret)
6224 + pr_info("%s not idle\n",
6225 + op == ECC_ENCODE ? "encoder" : "decoder");
6226 +
6227 + return ret;
6228 +}
6229 +
6230 +static int nfiecc_wait_encode_done(struct nfiecc *ecc)
6231 +{
6232 + int ret, val;
6233 +
6234 + if (ecc->ecc_irq_en) {
6235 + /* poll one time to avoid missing irq event */
6236 + ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
6237 + val, val & ENC_FSM_IDLE, 1, 1);
6238 + if (!ret)
6239 + return 0;
6240 +
6241 + /* IRQ done; if not, we can keep polling the status for a while */
6242 + ret = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
6243 + if (ret)
6244 + return 0;
6245 + }
6246 +
6247 + ret = readl_poll_timeout_atomic(ecc->res.regs + NFIECC_ENCSTA,
6248 + val, val & ENC_FSM_IDLE,
6249 + 10, ECC_TIMEOUT);
6250 + if (ret)
6251 + pr_info("encode timeout\n");
6252 +
6253 + return ret;
6254 +
6255 +}
6256 +
6257 +static int nfiecc_wait_decode_done(struct nfiecc *ecc)
6258 +{
6259 + u32 secbit = BIT(ecc->config.sectors - 1);
6260 + void *regs = ecc->res.regs;
6261 + int ret, val;
6262 +
6263 + if (ecc->ecc_irq_en) {
6264 + ret = readl_poll_timeout_atomic(regs + NFIECC_DECDONE,
6265 + val, val & secbit, 1, 1);
6266 + if (!ret)
6267 + return 0;
6268 +
6269 + ret = nandx_event_wait_complete(ecc->done, ECC_TIMEOUT);
6270 + if (ret)
6271 + return 0;
6272 + }
6273 +
6274 + ret = readl_poll_timeout_atomic(regs + NFIECC_DECDONE,
6275 + val, val & secbit,
6276 + 10, ECC_TIMEOUT);
6277 + if (ret) {
6278 + pr_info("decode timeout\n");
6279 + return ret;
6280 + }
6281 +
5282 + /* Decode-done does not mean that all of the ECC work is done;
5283 + * we need to check that syn, bma, chien and autoc are all idle.
5284 + * Only check this when ECC_DECCNFG[13:12] is 3,
5285 + * which means auto-correct.
5286 + */
6287 + ret = readl_poll_timeout_atomic(regs + NFIECC_DECFSM,
6288 + val, (val & FSM_MASK) == FSM_IDLE,
6289 + 10, ECC_TIMEOUT);
6290 + if (ret)
6291 + pr_info("decode fsm(0x%x) is not idle\n",
6292 + readl(regs + NFIECC_DECFSM));
6293 +
6294 + return ret;
6295 +}
6296 +
6297 +static int nfiecc_wait_done(struct nfiecc *ecc)
6298 +{
6299 + if (ecc->config.op == ECC_ENCODE)
6300 + return nfiecc_wait_encode_done(ecc);
6301 +
6302 + return nfiecc_wait_decode_done(ecc);
6303 +}
6304 +
6305 +static void nfiecc_encode_config(struct nfiecc *ecc, u32 ecc_idx)
6306 +{
6307 + struct nfiecc_config *config = &ecc->config;
6308 + u32 val;
6309 +
6310 + val = ecc_idx | (config->mode << ecc->caps->ecc_mode_shift);
6311 +
6312 + if (config->mode == ECC_DMA_MODE)
6313 + val |= ENC_BURST_EN;
6314 +
6315 + val |= (config->len << 3) << ENCCNFG_MS_SHIFT;
6316 + writel(val, ecc->res.regs + NFIECC_ENCCNFG);
6317 +}
6318 +
6319 +static void nfiecc_decode_config(struct nfiecc *ecc, u32 ecc_idx)
6320 +{
6321 + struct nfiecc_config *config = &ecc->config;
6322 + u32 dec_sz = (config->len << 3) +
6323 + config->strength * ecc->caps->parity_bits;
6324 + u32 val;
6325 +
6326 + val = ecc_idx | (config->mode << ecc->caps->ecc_mode_shift);
6327 +
6328 + if (config->mode == ECC_DMA_MODE)
6329 + val |= DEC_BURST_EN;
6330 +
6331 + val |= (dec_sz << DECCNFG_MS_SHIFT) |
6332 + (config->deccon << DEC_CON_SHIFT);
6333 + val |= DEC_EMPTY_EN;
6334 + writel(val, ecc->res.regs + NFIECC_DECCNFG);
6335 +}
6336 +
6337 +static void nfiecc_config(struct nfiecc *ecc)
6338 +{
6339 + u32 idx;
6340 +
6341 + for (idx = 0; idx < ecc->caps->ecc_strength_num; idx++) {
6342 + if (ecc->config.strength == ecc->caps->ecc_strength[idx])
6343 + break;
6344 + }
6345 +
6346 + if (ecc->config.op == ECC_ENCODE)
6347 + nfiecc_encode_config(ecc, idx);
6348 + else
6349 + nfiecc_decode_config(ecc, idx);
6350 +}
6351 +
6352 +static int nfiecc_enable(struct nfiecc *ecc)
6353 +{
6354 + enum nfiecc_operation op = ecc->config.op;
6355 + void *regs = ecc->res.regs;
6356 +
6357 + nfiecc_config(ecc);
6358 +
6359 + writel(ECC_OP_EN, regs + NFIECC_CTL_REG(op));
6360 +
6361 + if (ecc->ecc_irq_en) {
6362 + writel(ECC_IRQEN, regs + NFIECC_IRQ_REG(op));
6363 +
6364 + if (ecc->page_irq_en)
6365 + writel(ECC_IRQEN | ECC_PG_IRQ_SEL,
6366 + regs + NFIECC_IRQ_REG(op));
6367 +
6368 + nandx_event_init(ecc->done);
6369 + }
6370 +
6371 + return 0;
6372 +}
6373 +
6374 +static int nfiecc_disable(struct nfiecc *ecc)
6375 +{
6376 + enum nfiecc_operation op = ecc->config.op;
6377 + void *regs = ecc->res.regs;
6378 +
6379 + nfiecc_wait_idle(ecc);
6380 +
6381 + writel(0, regs + NFIECC_IRQ_REG(op));
6382 + writel(~ECC_OP_EN, regs + NFIECC_CTL_REG(op));
6383 +
6384 + return 0;
6385 +}
6386 +
6387 +static int nfiecc_correct_data(struct nfiecc *ecc,
6388 + struct nfiecc_status *status,
6389 + u8 *data, u32 sector)
6390 +{
6391 + u32 err, offset, i;
6392 + u32 loc, byteloc, bitloc;
6393 +
6394 + status->corrected = 0;
6395 + status->failed = 0;
6396 +
6397 + offset = (sector >> 2);
6398 + err = readl(ecc->res.regs + NFIECC_DECENUM(offset));
6399 + err >>= (sector % 4) * 8;
6400 + err &= ecc->caps->err_mask;
6401 +
6402 + if (err == ecc->caps->err_mask) {
6403 + status->failed++;
6404 + return -ENANDREAD;
6405 + }
6406 +
6407 + status->corrected += err;
6408 + status->bitflips = max_t(u32, status->bitflips, err);
6409 +
6410 + for (i = 0; i < err; i++) {
6411 + loc = readl(ecc->res.regs + NFIECC_DECEL(i >> 1));
6412 + loc >>= ((i & 0x1) << 4);
6413 + byteloc = loc >> 3;
6414 + bitloc = loc & 0x7;
6415 + data[byteloc] ^= (1 << bitloc);
6416 + }
6417 +
6418 + return 0;
6419 +}
6420 +
6421 +static int nfiecc_fill_data(struct nfiecc *ecc, u8 *data)
6422 +{
6423 + struct nfiecc_config *config = &ecc->config;
6424 + void *regs = ecc->res.regs;
6425 + int size, ret, i;
6426 + u32 val;
6427 +
6428 + if (config->mode == ECC_DMA_MODE) {
6429 + if ((unsigned long)config->dma_addr & 0x3)
6430 + pr_info("encode address is not 4B aligned: 0x%x\n",
6431 + (u32)(unsigned long)config->dma_addr);
6432 +
6433 + writel((unsigned long)config->dma_addr,
6434 + regs + NFIECC_ADDR(config->op));
6435 + } else if (config->mode == ECC_PIO_MODE) {
6436 + if (config->op == ECC_ENCODE) {
6437 + size = (config->len + 3) >> 2;
6438 + } else {
6439 + size = config->strength * ecc->caps->parity_bits;
6440 + size = (size + 7) >> 3;
6441 + size += config->len;
6442 + size >>= 2;
6443 + }
6444 +
6445 + for (i = 0; i < size; i++) {
6446 + ret = readl_poll_timeout_atomic(regs + NFIECC_PIO_DIRDY,
6447 + val, val & PIO_DI_RDY,
6448 + 10, ECC_TIMEOUT);
6449 + if (ret)
6450 + return ret;
6451 +
6452 + writel(*((u32 *)data + i), regs + NFIECC_PIO_DI);
6453 + }
6454 + }
6455 +
6456 + return 0;
6457 +}
6458 +
6459 +static int nfiecc_encode(struct nfiecc *ecc, u8 *data)
6460 +{
6461 + struct nfiecc_config *config = &ecc->config;
6462 + u32 len, i, val = 0;
6463 + u8 *p;
6464 + int ret;
6465 +
6466 + /* In NFI mode, nothing needs to be done */
6467 + if (config->mode == ECC_NFI_MODE)
6468 + return 0;
6469 +
6470 + ret = nfiecc_fill_data(ecc, data);
6471 + if (ret)
6472 + return ret;
6473 +
6474 + ret = nfiecc_wait_encode_done(ecc);
6475 + if (ret)
6476 + return ret;
6477 +
6478 + ret = nfiecc_wait_idle(ecc);
6479 + if (ret)
6480 + return ret;
6481 +
6482 + /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
6483 + len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
6484 + p = data + config->len;
6485 +
6486 + /* Write the parity bytes generated by the ECC back to the OOB region */
6487 + for (i = 0; i < len; i++) {
6488 + if ((i % 4) == 0)
6489 + val = readl(ecc->res.regs + NFIECC_ENCPAR(i / 4));
6490 +
6491 + p[i] = (val >> ((i % 4) * 8)) & 0xff;
6492 + }
6493 +
6494 + return 0;
6495 +}
6496 +
6497 +static int nfiecc_decode(struct nfiecc *ecc, u8 *data)
6498 +{
6499 + int ret;
6500 +
6501 + /* Under NFI mode, nothing need to do */
6502 + if (ecc->config.mode == ECC_NFI_MODE)
6503 + return 0;
6504 +
6505 + ret = nfiecc_fill_data(ecc, data);
6506 + if (ret)
6507 + return ret;
6508 +
6509 + return nfiecc_wait_decode_done(ecc);
6510 +}
6511 +
6512 +static int nfiecc_decode_status(struct nfiecc *ecc, u32 start_sector,
6513 + u32 sectors)
6514 +{
6515 + void *regs = ecc->res.regs;
6516 + u32 i, val = 0, err;
6517 + u32 bitflips = 0;
6518 +
6519 + for (i = start_sector; i < start_sector + sectors; i++) {
6520 + if ((i % 4) == 0)
6521 + val = readl(regs + NFIECC_DECENUM(i / 4));
6522 +
6523 + err = val >> ((i % 4) * 5);
6524 + err &= ecc->caps->err_mask;
6525 +
6526 + if (err == ecc->caps->err_mask)
6527 + pr_err("sector %d is uncorrectable\n", i);
6528 +
6529 + bitflips = max_t(u32, bitflips, err);
6530 + }
6531 +
6532 + if (bitflips == ecc->caps->err_mask)
6533 + return -ENANDREAD;
6534 +
6535 + if (bitflips)
6536 + pr_info("bitflips %d is corrected\n", bitflips);
6537 +
6538 + return bitflips;
6539 +}
6540 +
6541 +static int nfiecc_adjust_strength(struct nfiecc *ecc, int strength)
6542 +{
6543 + struct nfiecc_caps *caps = ecc->caps;
6544 + int i, count = caps->ecc_strength_num;
6545 +
6546 + if (strength >= caps->ecc_strength[count - 1])
6547 + return caps->ecc_strength[count - 1];
6548 +
6549 + if (strength < caps->ecc_strength[0])
6550 + return -EINVAL;
6551 +
6552 + for (i = 1; i < count; i++) {
6553 + if (strength < caps->ecc_strength[i])
6554 + return caps->ecc_strength[i - 1];
6555 + }
6556 +
6557 + return -EINVAL;
6558 +}
6559 +
6560 +static int nfiecc_ctrl(struct nfiecc *ecc, int cmd, void *args)
6561 +{
6562 + int ret = 0;
6563 +
6564 + switch (cmd) {
6565 + case NFI_CTRL_ECC_IRQ:
6566 + ecc->ecc_irq_en = *(bool *)args;
6567 + break;
6568 +
6569 + case NFI_CTRL_ECC_PAGE_IRQ:
6570 + ecc->page_irq_en = *(bool *)args;
6571 + break;
6572 +
6573 + default:
6574 + pr_info("invalid arguments.\n");
6575 + ret = -EINVAL;
6576 + break;
6577 + }
6578 +
6579 + return ret;
6580 +}
6581 +
6582 +static int nfiecc_hw_init(struct nfiecc *ecc)
6583 +{
6584 + int ret;
6585 +
6586 + ret = nfiecc_wait_idle(ecc);
6587 + if (ret)
6588 + return ret;
6589 +
6590 + writel(~ECC_OP_EN, ecc->res.regs + NFIECC_ENCCON);
6591 +
6592 + ret = nfiecc_wait_idle(ecc);
6593 + if (ret)
6594 + return ret;
6595 +
6596 + writel(~ECC_OP_EN, ecc->res.regs + NFIECC_DECCON);
6597 +
6598 + return 0;
6599 +}
6600 +
6601 +static struct nfiecc_caps nfiecc_caps_mt7622 = {
6602 + .err_mask = 0x1f,
6603 + .ecc_mode_shift = 4,
6604 + .parity_bits = 13,
6605 + .ecc_strength = ecc_strength_mt7622,
6606 + .ecc_strength_num = 7,
6607 +};
6608 +
6609 +static struct nfiecc_caps *nfiecc_get_match_data(enum mtk_ic_version ic)
6610 +{
6611 + /* NOTE: add other IC's data */
6612 + return &nfiecc_caps_mt7622;
6613 +}
6614 +
6615 +struct nfiecc *nfiecc_init(struct nfiecc_resource *res)
6616 +{
6617 + struct nfiecc *ecc;
6618 + int ret;
6619 +
6620 + ecc = mem_alloc(1, sizeof(struct nfiecc));
6621 + if (!ecc)
6622 + return NULL;
6623 +
6624 + ecc->res = *res;
6625 +
6626 + ret = nandx_irq_register(res->dev, res->irq_id, nfiecc_irq_handler,
6627 + "mtk-ecc", ecc);
6628 + if (ret) {
6629 + pr_info("ecc irq register failed!\n");
6630 + goto error;
6631 + }
6632 +
6633 + ecc->ecc_irq_en = false;
6634 + ecc->page_irq_en = false;
6635 + ecc->done = nandx_event_create();
6636 + ecc->caps = nfiecc_get_match_data(res->ic_ver);
6637 +
6638 + ecc->adjust_strength = nfiecc_adjust_strength;
6639 + ecc->enable = nfiecc_enable;
6640 + ecc->disable = nfiecc_disable;
6641 + ecc->decode = nfiecc_decode;
6642 + ecc->encode = nfiecc_encode;
6643 + ecc->wait_done = nfiecc_wait_done;
6644 + ecc->decode_status = nfiecc_decode_status;
6645 + ecc->correct_data = nfiecc_correct_data;
6646 + ecc->nfiecc_ctrl = nfiecc_ctrl;
6647 +
6648 + ret = nfiecc_hw_init(ecc);
6649 + if (ret)
6650 + return NULL;
6651 +
6652 + return ecc;
6653 +
6654 +error:
6655 + mem_free(ecc);
6656 +
6657 + return NULL;
6658 +}
6659 +
6660 +void nfiecc_exit(struct nfiecc *ecc)
6661 +{
6662 + nandx_event_destroy(ecc->done);
6663 + mem_free(ecc);
6664 +}
6665 +
6666 --- /dev/null
6667 +++ b/drivers/mtd/nandx/core/nfi/nfiecc.h
6668 @@ -0,0 +1,90 @@
6669 +/*
6670 + * Copyright (C) 2017 MediaTek Inc.
6671 + * Licensed under either
6672 + * BSD Licence, (see NOTICE for more details)
6673 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6674 + */
6675 +
6676 +#ifndef __NFIECC_H__
6677 +#define __NFIECC_H__
6678 +
6679 +enum nfiecc_mode {
6680 + ECC_DMA_MODE,
6681 + ECC_NFI_MODE,
6682 + ECC_PIO_MODE
6683 +};
6684 +
6685 +enum nfiecc_operation {
6686 + ECC_ENCODE,
6687 + ECC_DECODE
6688 +};
6689 +
6690 +enum nfiecc_deccon {
6691 + ECC_DEC_FER = 1,
6692 + ECC_DEC_LOCATE = 2,
6693 + ECC_DEC_CORRECT = 3
6694 +};
6695 +
6696 +struct nfiecc_resource {
6697 + int ic_ver;
6698 + void *dev;
6699 + void *regs;
6700 + int irq_id;
6701 +
6702 +};
6703 +
6704 +struct nfiecc_status {
6705 + u32 corrected;
6706 + u32 failed;
6707 + u32 bitflips;
6708 +};
6709 +
6710 +struct nfiecc_caps {
6711 + u32 err_mask;
6712 + u32 ecc_mode_shift;
6713 + u32 parity_bits;
6714 + const int *ecc_strength;
6715 + u32 ecc_strength_num;
6716 +};
6717 +
6718 +struct nfiecc_config {
6719 + enum nfiecc_operation op;
6720 + enum nfiecc_mode mode;
6721 + enum nfiecc_deccon deccon;
6722 +
6723 + void *dma_addr; /* DMA use only */
6724 + u32 strength;
6725 + u32 sectors;
6726 + u32 len;
6727 +};
6728 +
6729 +struct nfiecc {
6730 + struct nfiecc_resource res;
6731 + struct nfiecc_config config;
6732 + struct nfiecc_caps *caps;
6733 +
6734 + bool ecc_irq_en;
6735 + bool page_irq_en;
6736 +
6737 + void *done;
6738 +
6739 + int (*adjust_strength)(struct nfiecc *ecc, int strength);
6740 + int (*enable)(struct nfiecc *ecc);
6741 + int (*disable)(struct nfiecc *ecc);
6742 +
6743 + int (*decode)(struct nfiecc *ecc, u8 *data);
6744 + int (*encode)(struct nfiecc *ecc, u8 *data);
6745 +
6746 + int (*decode_status)(struct nfiecc *ecc, u32 start_sector, u32 sectors);
6747 + int (*correct_data)(struct nfiecc *ecc,
6748 + struct nfiecc_status *status,
6749 + u8 *data, u32 sector);
6750 + int (*wait_done)(struct nfiecc *ecc);
6751 +
6752 + int (*nfiecc_ctrl)(struct nfiecc *ecc, int cmd, void *args);
6753 +};
6754 +
6755 +struct nfiecc *nfiecc_init(struct nfiecc_resource *res);
6756 +void nfiecc_exit(struct nfiecc *ecc);
6757 +
6758 +#endif /* __NFIECC_H__ */
6759 --- /dev/null
6760 +++ b/drivers/mtd/nandx/core/nfi/nfiecc_regs.h
6761 @@ -0,0 +1,51 @@
6762 +/*
6763 + * Copyright (C) 2017 MediaTek Inc.
6764 + * Licensed under either
6765 + * BSD Licence, (see NOTICE for more details)
6766 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6767 + */
6768 +
6769 +#ifndef __NFIECC_REGS_H__
6770 +#define __NFIECC_REGS_H__
6771 +
6772 +#define NFIECC_ENCCON 0x000
6773 +/* NFIECC_DECCON has same bit define */
6774 +#define ECC_OP_EN BIT(0)
6775 +#define NFIECC_ENCCNFG 0x004
6776 +#define ENCCNFG_MS_SHIFT 16
6777 +#define ENC_BURST_EN BIT(8)
6778 +#define NFIECC_ENCDIADDR 0x008
6779 +#define NFIECC_ENCIDLE 0x00c
6780 +#define NFIECC_ENCSTA 0x02c
6781 +#define ENC_FSM_IDLE 1
6782 +#define NFIECC_ENCIRQEN 0x030
6783 +/* NFIECC_DECIRQEN has same bit define */
6784 +#define ECC_IRQEN BIT(0)
6785 +#define ECC_PG_IRQ_SEL BIT(1)
6786 +#define NFIECC_ENCIRQSTA 0x034
6787 +#define ENC_IRQSTA_GEN BIT(0)
6788 +#define NFIECC_PIO_DIRDY 0x080
6789 +#define PIO_DI_RDY BIT(0)
6790 +#define NFIECC_PIO_DI 0x084
6791 +#define NFIECC_DECCON 0x100
6792 +#define NFIECC_DECCNFG 0x104
6793 +#define DEC_BURST_EN BIT(8)
6794 +#define DEC_EMPTY_EN BIT(31)
6795 +#define DEC_CON_SHIFT 12
6796 +#define DECCNFG_MS_SHIFT 16
6797 +#define NFIECC_DECDIADDR 0x108
6798 +#define NFIECC_DECIDLE 0x10c
6799 +#define NFIECC_DECENUM(x) (0x114 + (x) * 4)
6800 +#define NFIECC_DECDONE 0x11c
6801 +#define NFIECC_DECIRQEN 0x140
6802 +#define NFIECC_DECIRQSTA 0x144
6803 +#define DEC_IRQSTA_GEN BIT(0)
6804 +#define NFIECC_DECFSM 0x14c
6805 +#define FSM_MASK 0x7f0f0f0f
6806 +#define FSM_IDLE 0x01010101
6807 +#define NFIECC_BYPASS 0x20c
6808 +#define NFIECC_BYPASS_EN BIT(0)
6809 +#define NFIECC_ENCPAR(x) (0x010 + (x) * 4)
6810 +#define NFIECC_DECEL(x) (0x120 + (x) * 4)
6811 +
6812 +#endif /* __NFIECC_REGS_H__ */
6813 --- /dev/null
6814 +++ b/drivers/mtd/nandx/driver/Nandx.mk
6815 @@ -0,0 +1,18 @@
6816 +#
6817 +# Copyright (C) 2017 MediaTek Inc.
6818 +# Licensed under either
6819 +# BSD Licence, (see NOTICE for more details)
6820 +# GNU General Public License, version 2.0, (see NOTICE for more details)
6821 +#
6822 +
6823 +nandx-$(NANDX_SIMULATOR_SUPPORT) += simulator/driver.c
6824 +
6825 +nandx-$(NANDX_CTP_SUPPORT) += ctp/ts_nand.c
6826 +nandx-$(NANDX_CTP_SUPPORT) += ctp/nand_test.c
6827 +nandx-header-$(NANDX_CTP_SUPPORT) += ctp/nand_test.h
6828 +
6829 +nandx-$(NANDX_BBT_SUPPORT) += bbt/bbt.c
6830 +nandx-$(NANDX_BROM_SUPPORT) += brom/driver.c
6831 +nandx-$(NANDX_KERNEL_SUPPORT) += kernel/driver.c
6832 +nandx-$(NANDX_LK_SUPPORT) += lk/driver.c
6833 +nandx-$(NANDX_UBOOT_SUPPORT) += uboot/driver.c
6834 --- /dev/null
6835 +++ b/drivers/mtd/nandx/driver/bbt/bbt.c
6836 @@ -0,0 +1,408 @@
6837 +/*
6838 + * Copyright (C) 2017 MediaTek Inc.
6839 + * Licensed under either
6840 + * BSD Licence, (see NOTICE for more details)
6841 + * GNU General Public License, version 2.0, (see NOTICE for more details)
6842 + */
6843 +
6844 +#include "nandx_util.h"
6845 +#include "nandx_core.h"
6846 +#include "bbt.h"
6847 +
6848 +/* Not support: multi-chip */
6849 +static u8 main_bbt_pattern[] = {'B', 'b', 't', '0' };
6850 +static u8 mirror_bbt_pattern[] = {'1', 't', 'b', 'B' };
6851 +
6852 +static struct bbt_manager g_bbt_manager = {
6853 + { {{main_bbt_pattern, 4}, 0, BBT_INVALID_ADDR},
6854 + {{mirror_bbt_pattern, 4}, 0, BBT_INVALID_ADDR}
6855 + },
6856 + NAND_BBT_SCAN_MAXBLOCKS, NULL
6857 +};
6858 +
6859 +static inline void set_bbt_mark(u8 *bbt, int block, u8 mark)
6860 +{
6861 + int index, offset;
6862 +
6863 + index = GET_ENTRY(block);
6864 + offset = GET_POSITION(block);
6865 +
6866 + bbt[index] &= ~(BBT_ENTRY_MASK << offset);
6867 + bbt[index] |= (mark & BBT_ENTRY_MASK) << offset;
6868 + pr_info("%s %d block:%d, bbt[%d]:0x%x, offset:%d, mark:%d\n",
6869 + __func__, __LINE__, block, index, bbt[index], offset, mark);
6870 +}
6871 +
6872 +static inline u8 get_bbt_mark(u8 *bbt, int block)
6873 +{
6874 + int offset = GET_POSITION(block);
6875 + int index = GET_ENTRY(block);
6876 + u8 value = bbt[index];
6877 +
6878 + return (value >> offset) & BBT_ENTRY_MASK;
6879 +}
6880 +
6881 +static void mark_nand_bad(struct nandx_info *nand, int block)
6882 +{
6883 + u8 *buf;
6884 +
6885 + buf = mem_alloc(1, nand->page_size + nand->oob_size);
6886 + if (!buf) {
6887 + pr_info("%s, %d, memory alloc fail, pagesize:%d, oobsize:%d\n",
6888 + __func__, __LINE__, nand->page_size, nand->oob_size);
6889 + return;
6890 + }
6891 + memset(buf, 0, nand->page_size + nand->oob_size);
6892 + nandx_erase(block * nand->block_size, nand->block_size);
6893 + nandx_write(buf, buf + nand->page_size, block * nand->block_size,
6894 + nand->page_size);
6895 + mem_free(buf);
6896 +}
6897 +
6898 +static inline bool is_bbt_data(u8 *buf, struct bbt_pattern *pattern)
6899 +{
6900 + int i;
6901 +
6902 + for (i = 0; i < pattern->len; i++) {
6903 + if (buf[i] != pattern->data[i])
6904 + return false;
6905 + }
6906 +
6907 + return true;
6908 +}
6909 +
6910 +static u64 get_bbt_address(struct nandx_info *nand, u8 *bbt,
6911 + u64 mirror_addr,
6912 + int max_blocks)
6913 +{
6914 + u64 addr, end_addr;
6915 + u8 mark;
6916 +
6917 + addr = nand->total_size;
6918 + end_addr = nand->total_size - nand->block_size * max_blocks;
6919 +
6920 + while (addr > end_addr) {
6921 + addr -= nand->block_size;
6922 + mark = get_bbt_mark(bbt, div_down(addr, nand->block_size));
6923 +
6924 + if (mark == BBT_BLOCK_WORN || mark == BBT_BLOCK_FACTORY_BAD)
6925 + continue;
6926 + if (addr != mirror_addr)
6927 + return addr;
6928 + }
6929 +
6930 + return BBT_INVALID_ADDR;
6931 +}
6932 +
6933 +static int read_bbt(struct bbt_desc *desc, u8 *bbt, u32 len)
6934 +{
6935 + int ret;
6936 +
6937 + ret = nandx_read(bbt, NULL, desc->bbt_addr + desc->pattern.len + 1,
6938 + len);
6939 + if (ret < 0)
6940 + pr_info("nand_bbt: error reading BBT page, ret:-%x\n", ret);
6941 +
6942 + return ret;
6943 +}
6944 +
6945 +static void create_bbt(struct nandx_info *nand, u8 *bbt)
6946 +{
6947 + u32 offset = 0, block = 0;
6948 +
6949 + do {
6950 + if (nandx_is_bad_block(offset)) {
6951 + pr_info("Create bbt at bad block:%d\n", block);
6952 + set_bbt_mark(bbt, block, BBT_BLOCK_FACTORY_BAD);
6953 + }
6954 + block++;
6955 + offset += nand->block_size;
6956 + } while (offset < nand->total_size);
6957 +}
6958 +
6959 +static int search_bbt(struct nandx_info *nand, struct bbt_desc *desc,
6960 + int max_blocks)
6961 +{
6962 + u64 addr, end_addr;
6963 + u8 *buf;
6964 + int ret;
6965 +
6966 + buf = mem_alloc(1, nand->page_size);
6967 + if (!buf) {
6968 + pr_info("%s, %d, mem alloc fail!!! len:%d\n",
6969 + __func__, __LINE__, nand->page_size);
6970 + return -ENOMEM;
6971 + }
6972 +
6973 + addr = nand->total_size;
6974 + end_addr = nand->total_size - max_blocks * nand->block_size;
6975 + while (addr > end_addr) {
6976 + addr -= nand->block_size;
6977 +
6978 + nandx_read(buf, NULL, addr, nand->page_size);
6979 +
6980 + if (is_bbt_data(buf, &desc->pattern)) {
6981 + desc->bbt_addr = addr;
6982 + desc->version = buf[desc->pattern.len];
6983 + pr_info("BBT is found at addr 0x%llx, version %d\n",
6984 + desc->bbt_addr, desc->version);
6985 + ret = 0;
6986 + break;
6987 + }
6988 + ret = -EFAULT;
6989 + }
6990 +
6991 + mem_free(buf);
6992 + return ret;
6993 +}
6994 +
6995 +static int save_bbt(struct nandx_info *nand, struct bbt_desc *desc,
6996 + u8 *bbt)
6997 +{
6998 + u32 page_size_mask, total_block;
6999 + int write_len;
7000 + u8 *buf;
7001 + int ret;
7002 +
7003 + ret = nandx_erase(desc->bbt_addr, nand->block_size);
7004 + if (ret) {
7005 + pr_info("erase addr 0x%llx fail !!!, ret %d\n",
7006 + desc->bbt_addr, ret);
7007 + return ret;
7008 + }
7009 +
7010 + total_block = div_down(nand->total_size, nand->block_size);
7011 + write_len = GET_BBT_LENGTH(total_block) + desc->pattern.len + 1;
7012 + page_size_mask = nand->page_size - 1;
7013 + write_len = (write_len + page_size_mask) & (~page_size_mask);
7014 +
7015 + buf = (u8 *)mem_alloc(1, write_len);
7016 + if (!buf) {
7017 + pr_info("%s, %d, mem alloc fail!!! len:%d\n",
7018 + __func__, __LINE__, write_len);
7019 + return -ENOMEM;
7020 + }
7021 + memset(buf, 0xFF, write_len);
7022 +
7023 + memcpy(buf, desc->pattern.data, desc->pattern.len);
7024 + buf[desc->pattern.len] = desc->version;
7025 +
7026 + memcpy(buf + desc->pattern.len + 1, bbt, GET_BBT_LENGTH(total_block));
7027 +
7028 + ret = nandx_write(buf, NULL, desc->bbt_addr, write_len);
7029 +
7030 + if (ret)
7031 + pr_info("nandx_write fail(%d), offset:0x%llx, len(%d)\n",
7032 + ret, desc->bbt_addr, write_len);
7033 + mem_free(buf);
7034 +
7035 + return ret;
7036 +}
7037 +
7038 +static int write_bbt(struct nandx_info *nand, struct bbt_desc *main,
7039 + struct bbt_desc *mirror, u8 *bbt, int max_blocks)
7040 +{
7041 + int block;
7042 + int ret;
7043 +
7044 + do {
7045 + if (main->bbt_addr == BBT_INVALID_ADDR) {
7046 + main->bbt_addr = get_bbt_address(nand, bbt,
7047 + mirror->bbt_addr, max_blocks);
7048 + if (main->bbt_addr == BBT_INVALID_ADDR)
7049 + return -ENOSPC;
7050 + }
7051 +
7052 + ret = save_bbt(nand, main, bbt);
7053 + if (!ret)
7054 + break;
7055 +
7056 + block = div_down(main->bbt_addr, nand->block_size);
7057 + set_bbt_mark(bbt, block, BBT_BLOCK_WORN);
7058 + main->version++;
7059 + mark_nand_bad(nand, block);
7060 + main->bbt_addr = BBT_INVALID_ADDR;
7061 + } while (1);
7062 +
7063 + return 0;
7064 +}
7065 +
7066 +static void mark_bbt_region(struct nandx_info *nand, u8 *bbt, int bbt_blocks)
7067 +{
7068 + int total_block;
7069 + int block;
7070 + u8 mark;
7071 +
7072 + total_block = div_down(nand->total_size, nand->block_size);
7073 + block = total_block - bbt_blocks;
7074 +
7075 + while (bbt_blocks) {
7076 + mark = get_bbt_mark(bbt, block);
7077 + if (mark == BBT_BLOCK_GOOD)
7078 + set_bbt_mark(bbt, block, BBT_BLOCK_RESERVED);
7079 + block++;
7080 + bbt_blocks--;
7081 + }
7082 +}
7083 +
7084 +static void unmark_bbt_region(struct nandx_info *nand, u8 *bbt, int bbt_blocks)
7085 +{
7086 + int total_block;
7087 + int block;
7088 + u8 mark;
7089 +
7090 + total_block = div_down(nand->total_size, nand->block_size);
7091 + block = total_block - bbt_blocks;
7092 +
7093 + while (bbt_blocks) {
7094 + mark = get_bbt_mark(bbt, block);
7095 + if (mark == BBT_BLOCK_RESERVED)
7096 + set_bbt_mark(bbt, block, BBT_BLOCK_GOOD);
7097 + block++;
7098 + bbt_blocks--;
7099 + }
7100 +}
7101 +
7102 +static int update_bbt(struct nandx_info *nand, struct bbt_desc *desc,
7103 + u8 *bbt,
7104 + int max_blocks)
7105 +{
7106 + int ret = 0, i;
7107 +
7108 + /* The reserved info is not stored in NAND*/
7109 + unmark_bbt_region(nand, bbt, max_blocks);
7110 +
7111 + desc[0].version++;
7112 + for (i = 0; i < 2; i++) {
7113 + if (i > 0)
7114 + desc[i].version = desc[i - 1].version;
7115 +
7116 + ret = write_bbt(nand, &desc[i], &desc[1 - i], bbt, max_blocks);
7117 + if (ret)
7118 + break;
7119 + }
7120 + mark_bbt_region(nand, bbt, max_blocks);
7121 +
7122 + return ret;
7123 +}
7124 +
7125 +int scan_bbt(struct nandx_info *nand)
7126 +{
7127 + struct bbt_manager *manager = &g_bbt_manager;
7128 + struct bbt_desc *pdesc;
7129 + int total_block, len, i;
7130 + int valid_desc = 0;
7131 + int ret = 0;
7132 + u8 *bbt;
7133 +
7134 + total_block = div_down(nand->total_size, nand->block_size);
7135 + len = GET_BBT_LENGTH(total_block);
7136 +
7137 + if (!manager->bbt) {
7138 + manager->bbt = (u8 *)mem_alloc(1, len);
7139 + if (!manager->bbt) {
7140 + pr_info("%s, %d, mem alloc fail!!! len:%d\n",
7141 + __func__, __LINE__, len);
7142 + return -ENOMEM;
7143 + }
7144 + }
7145 + bbt = manager->bbt;
7146 + memset(bbt, 0xFF, len);
7147 +
7148 + /* scan bbt */
7149 + for (i = 0; i < 2; i++) {
7150 + pdesc = &manager->desc[i];
7151 + pdesc->bbt_addr = BBT_INVALID_ADDR;
7152 + pdesc->version = 0;
7153 + ret = search_bbt(nand, pdesc, manager->max_blocks);
7154 + if (!ret && (pdesc->bbt_addr != BBT_INVALID_ADDR))
7155 + valid_desc += 1 << i;
7156 + }
7157 +
7158 + pdesc = &manager->desc[0];
7159 + if ((valid_desc == 0x3) && (pdesc[0].version != pdesc[1].version))
7160 + valid_desc = (pdesc[0].version > pdesc[1].version) ? 1 : 2;
7161 +
7162 + /* read bbt */
7163 + for (i = 0; i < 2; i++) {
7164 + if (!(valid_desc & (1 << i)))
7165 + continue;
7166 + ret = read_bbt(&pdesc[i], bbt, len);
7167 + if (ret) {
7168 + pdesc[i].bbt_addr = BBT_INVALID_ADDR;
7169 + pdesc[i].version = 0;
7170 + valid_desc &= ~(1 << i);
7171 + }
7172 + /* If two BBT version is same, only need to read the first bbt*/
7173 + if ((valid_desc == 0x3) &&
7174 + (pdesc[0].version == pdesc[1].version))
7175 + break;
7176 + }
7177 +
7178 + if (!valid_desc) {
7179 + create_bbt(nand, bbt);
7180 + pdesc[0].version = 1;
7181 + pdesc[1].version = 1;
7182 + }
7183 +
7184 + pdesc[0].version = max_t(u8, pdesc[0].version, pdesc[1].version);
7185 + pdesc[1].version = pdesc[0].version;
7186 +
7187 + for (i = 0; i < 2; i++) {
7188 + if (valid_desc & (1 << i))
7189 + continue;
7190 +
7191 + ret = write_bbt(nand, &pdesc[i], &pdesc[1 - i], bbt,
7192 + manager->max_blocks);
7193 + if (ret) {
7194 + pr_info("write bbt(%d) fail, ret:%d\n", i, ret);
7195 + manager->bbt = NULL;
7196 + return ret;
7197 + }
7198 + }
7199 +
7200 + /* Prevent the bbt regions from erasing / writing */
7201 + mark_bbt_region(nand, manager->bbt, manager->max_blocks);
7202 +
7203 + for (i = 0; i < total_block; i++) {
7204 + if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_WORN)
7205 + pr_info("Checked WORN bad blk: %d\n", i);
7206 + else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_FACTORY_BAD)
7207 + pr_info("Checked Factory bad blk: %d\n", i);
7208 + else if (get_bbt_mark(manager->bbt, i) == BBT_BLOCK_RESERVED)
7209 + pr_info("Checked Reserved blk: %d\n", i);
7210 + else if (get_bbt_mark(manager->bbt, i) != BBT_BLOCK_GOOD)
7211 + pr_info("Checked unknown blk: %d\n", i);
7212 + }
7213 +
7214 + return 0;
7215 +}
7216 +
7217 +int bbt_mark_bad(struct nandx_info *nand, off_t offset)
7218 +{
7219 + struct bbt_manager *manager = &g_bbt_manager;
7220 + int block = div_down(offset, nand->block_size);
7221 + int ret = 0;
7222 +
7223 + mark_nand_bad(nand, block);
7224 +
7225 +#if 0
7226 + set_bbt_mark(manager->bbt, block, BBT_BLOCK_WORN);
7227 +
7228 + /* Update flash-based bad block table */
7229 + ret = update_bbt(nand, manager->desc, manager->bbt,
7230 + manager->max_blocks);
7231 +#endif
7232 + pr_info("block %d, update result %d.\n", block, ret);
7233 +
7234 + return ret;
7235 +}
7236 +
7237 +int bbt_is_bad(struct nandx_info *nand, off_t offset)
7238 +{
7239 + int block;
7240 +
7241 + block = div_down(offset, nand->block_size);
7242 +
7243 + return get_bbt_mark(g_bbt_manager.bbt, block) != BBT_BLOCK_GOOD;
7244 +}
7245 --- /dev/null
7246 +++ b/drivers/mtd/nandx/driver/uboot/driver.c
7247 @@ -0,0 +1,574 @@
7248 +/*
7249 + * Copyright (C) 2017 MediaTek Inc.
7250 + * Licensed under either
7251 + * BSD Licence, (see NOTICE for more details)
7252 + * GNU General Public License, version 2.0, (see NOTICE for more details)
7253 + */
7254 +
7255 +#include <common.h>
7256 +#include <linux/io.h>
7257 +#include <dm.h>
7258 +#include <clk.h>
7259 +#include <nand.h>
7260 +#include <linux/iopoll.h>
7261 +#include <linux/delay.h>
7262 +#include <linux/mtd/nand.h>
7263 +#include <linux/mtd/mtd.h>
7264 +#include <linux/mtd/partitions.h>
7265 +#include "nandx_core.h"
7266 +#include "nandx_util.h"
7267 +#include "bbt.h"
7268 +
7269 +typedef int (*func_nandx_operation)(u8 *, u8 *, u64, size_t);
7270 +
7271 +struct nandx_clk {
7272 + struct clk *nfi_clk;
7273 + struct clk *ecc_clk;
7274 + struct clk *snfi_clk;
7275 + struct clk *snfi_clk_sel;
7276 + struct clk *snfi_parent_50m;
7277 +};
7278 +
7279 +struct nandx_nfc {
7280 + struct nandx_info info;
7281 + struct nandx_clk clk;
7282 + struct nfi_resource *res;
7283 +
7284 + struct nand_chip *nand;
7285 + spinlock_t lock;
7286 +};
7287 +
7288 +/* Default flash layout for MTK nand controller
7289 + * 64Bytes oob format.
7290 + */
7291 +static struct nand_ecclayout eccoob = {
7292 + .eccbytes = 42,
7293 + .eccpos = {
7294 + 17, 18, 19, 20, 21, 22, 23, 24, 25,
7295 + 26, 27, 28, 29, 30, 31, 32, 33, 34,
7296 + 35, 36, 37, 38, 39, 40, 41
7297 + },
7298 + .oobavail = 16,
7299 + .oobfree = {
7300 + {
7301 + .offset = 0,
7302 + .length = 16,
7303 + },
7304 + }
7305 +};
7306 +
7307 +static struct nandx_nfc *mtd_to_nfc(struct mtd_info *mtd)
7308 +{
7309 + struct nand_chip *nand = mtd_to_nand(mtd);
7310 +
7311 + return (struct nandx_nfc *)nand_get_controller_data(nand);
7312 +}
7313 +
7314 +static int nandx_enable_clk(struct nandx_clk *clk)
7315 +{
7316 + int ret;
7317 +
7318 + ret = clk_enable(clk->nfi_clk);
7319 + if (ret) {
7320 + pr_info("failed to enable nfi clk\n");
7321 + return ret;
7322 + }
7323 +
7324 + ret = clk_enable(clk->ecc_clk);
7325 + if (ret) {
7326 + pr_info("failed to enable ecc clk\n");
7327 + goto disable_nfi_clk;
7328 + }
7329 +
7330 + ret = clk_enable(clk->snfi_clk);
7331 + if (ret) {
7332 + pr_info("failed to enable snfi clk\n");
7333 + goto disable_ecc_clk;
7334 + }
7335 +
7336 + ret = clk_enable(clk->snfi_clk_sel);
7337 + if (ret) {
7338 + pr_info("failed to enable snfi clk sel\n");
7339 + goto disable_snfi_clk;
7340 + }
7341 +
7342 + ret = clk_set_parent(clk->snfi_clk_sel, clk->snfi_parent_50m);
7343 + if (ret) {
7344 + pr_info("failed to set snfi parent 50MHz\n");
7345 + goto disable_snfi_clk;
7346 + }
7347 +
7348 + return 0;
7349 +
7350 +disable_snfi_clk:
7351 + clk_disable(clk->snfi_clk);
7352 +disable_ecc_clk:
7353 + clk_disable(clk->ecc_clk);
7354 +disable_nfi_clk:
7355 + clk_disable(clk->nfi_clk);
7356 +
7357 + return ret;
7358 +}
7359 +
7360 +static void nandx_disable_clk(struct nandx_clk *clk)
7361 +{
7362 + clk_disable(clk->ecc_clk);
7363 + clk_disable(clk->nfi_clk);
7364 + clk_disable(clk->snfi_clk);
7365 +}
7366 +
7367 +static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
7368 + struct mtd_oob_region *oob_region)
7369 +{
7370 + struct nandx_nfc *nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7371 + u32 eccsteps;
7372 +
7373 + eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
7374 +
7375 + if (section >= eccsteps)
7376 + return -EINVAL;
7377 +
7378 + oob_region->length = nfc->info.fdm_reg_size - nfc->info.fdm_ecc_size;
7379 + oob_region->offset = section * nfc->info.fdm_reg_size
7380 + + nfc->info.fdm_ecc_size;
7381 +
7382 + return 0;
7383 +}
7384 +
7385 +static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
7386 + struct mtd_oob_region *oob_region)
7387 +{
7388 + struct nandx_nfc *nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7389 + u32 eccsteps;
7390 +
7391 + if (section)
7392 + return -EINVAL;
7393 +
7394 + eccsteps = div_down(mtd->writesize, mtd->ecc_step_size);
7395 + oob_region->offset = nfc->info.fdm_reg_size * eccsteps;
7396 + oob_region->length = mtd->oobsize - oob_region->offset;
7397 +
7398 + return 0;
7399 +}
7400 +
7401 +static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
7402 + .rfree = mtk_nfc_ooblayout_free,
7403 + .ecc = mtk_nfc_ooblayout_ecc,
7404 +};
7405 +
7406 +struct nfc_compatible {
7407 + enum mtk_ic_version ic_ver;
7408 +
7409 + u32 clock_1x;
7410 + u32 *clock_2x;
7411 + int clock_2x_num;
7412 +
7413 + int min_oob_req;
7414 +};
7415 +
7416 +static const struct nfc_compatible nfc_compats_mt7622 = {
7417 + .ic_ver = NANDX_MT7622,
7418 + .clock_1x = 26000000,
7419 + .clock_2x = NULL,
7420 + .clock_2x_num = 8,
7421 + .min_oob_req = 1,
7422 +};
7423 +
7424 +static const struct udevice_id ic_of_match[] = {
7425 + {.compatible = "mediatek,mt7622-nfc", .data = &nfc_compats_mt7622},
7426 + {}
7427 +};
7428 +
7429 +static int nand_operation(struct mtd_info *mtd, loff_t addr, size_t len,
7430 + size_t *retlen, uint8_t *data, uint8_t *oob, bool read)
7431 +{
7432 + struct nandx_split64 split = {0};
7433 + func_nandx_operation operation;
7434 + u64 block_oobs, val, align;
7435 + uint8_t *databuf, *oobbuf;
7436 + struct nandx_nfc *nfc;
7437 + bool readoob;
7438 + int ret = 0;
7439 +
7440 + nfc = mtd_to_nfc(mtd);
7441 + spin_lock(&nfc->lock);
7442 +
7443 + databuf = data;
7444 + oobbuf = oob;
7445 +
7446 + readoob = data ? false : true;
7447 + block_oobs = div_up(mtd->erasesize, mtd->writesize) * mtd->oobavail;
7448 + align = readoob ? block_oobs : mtd->erasesize;
7449 +
7450 + operation = read ? nandx_read : nandx_write;
7451 +
7452 + nandx_split(&split, addr, len, val, align);
7453 +
7454 + if (split.head_len) {
7455 + ret = operation((u8 *) databuf, oobbuf, addr, split.head_len);
7456 +
7457 + if (databuf)
7458 + databuf += split.head_len;
7459 +
7460 + if (oobbuf)
7461 + oobbuf += split.head_len;
7462 +
7463 + addr += split.head_len;
7464 + *retlen += split.head_len;
7465 + }
7466 +
7467 + if (split.body_len) {
7468 + while (div_up(split.body_len, align)) {
7469 + ret = operation((u8 *) databuf, oobbuf, addr, align);
7470 +
7471 + if (databuf) {
7472 + databuf += mtd->erasesize;
7473 + split.body_len -= mtd->erasesize;
7474 + *retlen += mtd->erasesize;
7475 + }
7476 +
7477 + if (oobbuf) {
7478 + oobbuf += block_oobs;
7479 + split.body_len -= block_oobs;
7480 + *retlen += block_oobs;
7481 + }
7482 +
7483 + addr += mtd->erasesize;
7484 + }
7485 +
7486 + }
7487 +
7488 + if (split.tail_len) {
7489 + ret = operation((u8 *) databuf, oobbuf, addr, split.tail_len);
7490 + *retlen += split.tail_len;
7491 + }
7492 +
7493 + spin_unlock(&nfc->lock);
7494 +
7495 + return ret;
7496 +}
7497 +
7498 +static int mtk_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
7499 + size_t *retlen, u_char *buf)
7500 +{
7501 + return nand_operation(mtd, from, len, retlen, buf, NULL, true);
7502 +}
7503 +
7504 +static int mtk_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
7505 + size_t *retlen, const u_char *buf)
7506 +{
7507 + return nand_operation(mtd, to, len, retlen, (uint8_t *)buf,
7508 + NULL, false);
7509 +}
7510 +
7511 +int mtk_nand_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
7512 +{
7513 + size_t retlen;
7514 +
7515 + return nand_operation(mtd, from, ops->ooblen, &retlen, NULL,
7516 + ops->oobbuf, true);
7517 +}
7518 +
7519 +int mtk_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
7520 +{
7521 + size_t retlen;
7522 +
7523 + return nand_operation(mtd, to, ops->ooblen, &retlen, NULL,
7524 + ops->oobbuf, false);
7525 +}
7526 +
7527 +static int mtk_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
7528 +{
7529 + struct nandx_nfc *nfc;
7530 + u64 erase_len, erase_addr;
7531 + u32 block_size;
7532 + int ret = 0;
7533 +
7534 + nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7535 + block_size = nfc->info.block_size;
7536 + erase_len = instr->len;
7537 + erase_addr = instr->addr;
7538 + spin_lock(&nfc->lock);
7539 + instr->state = MTD_ERASING;
7540 +
7541 + while (erase_len) {
7542 + if (mtk_nand_is_bad(mtd, erase_addr)) {
7543 + pr_info("block(0x%llx) is bad, not erase\n",
7544 + erase_addr);
7545 + instr->state = MTD_ERASE_FAILED;
7546 + goto erase_exit;
7547 + } else {
7548 + ret = nandx_erase(erase_addr, block_size);
7549 + if (ret < 0) {
7550 + instr->state = MTD_ERASE_FAILED;
7551 + goto erase_exit;
7552 + pr_info("erase fail at blk %llu, ret:%d\n",
7553 + erase_addr, ret);
7554 + }
7555 + }
7556 + erase_addr += block_size;
7557 + erase_len -= block_size;
7558 + }
7559 +
7560 + instr->state = MTD_ERASE_DONE;
7561 +
7562 +erase_exit:
7563 + ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
7564 +
7565 + spin_unlock(&nfc->lock);
7566 + /* Do mtd call back function */
7567 + if (!ret)
7568 + mtd_erase_callback(instr);
7569 +
7570 + return ret;
7571 +}
7572 +
7573 +int mtk_nand_is_bad(struct mtd_info *mtd, loff_t ofs)
7574 +{
7575 + struct nandx_nfc *nfc;
7576 + int ret;
7577 +
7578 + nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7579 + spin_lock(&nfc->lock);
7580 +
7581 + /*ret = bbt_is_bad(&nfc->info, ofs);*/
7582 + ret = nandx_is_bad_block(ofs);
7583 + spin_unlock(&nfc->lock);
7584 +
7585 + if (ret) {
7586 + pr_info("nand block 0x%llx is bad, ret %d!\n", (u64)ofs, ret);
7587 + return 1;
7588 + } else {
7589 + return 0;
7590 + }
7591 +}
7592 +
7593 +int mtk_nand_mark_bad(struct mtd_info *mtd, loff_t ofs)
7594 +{
7595 + struct nandx_nfc *nfc;
7596 + int ret;
7597 +
7598 + nfc = (struct nandx_nfc *)mtd_to_nfc(mtd);
7599 + spin_lock(&nfc->lock);
7600 + pr_info("%s, %d\n", __func__, __LINE__);
7601 + ret = bbt_mark_bad(&nfc->info, ofs);
7602 +
7603 + spin_unlock(&nfc->lock);
7604 +
7605 + return ret;
7606 +}
7607 +
7608 +void mtk_nand_sync(struct mtd_info *mtd)
7609 +{
7610 + nandx_sync();
7611 +}
7612 +
7613 +static struct mtd_info *mtd_info_create(struct udevice *pdev,
7614 + struct nandx_nfc *nfc, struct nand_chip *nand)
7615 +{
7616 + struct mtd_info *mtd = nand_to_mtd(nand);
7617 + int ret;
7618 +
7619 + nand_set_controller_data(nand, nfc);
7620 +
7621 + nand->flash_node = dev_of_offset(pdev);
7622 + nand->ecc.layout = &eccoob;
7623 +
7624 + ret = nandx_ioctl(CORE_CTRL_NAND_INFO, &nfc->info);
7625 + if (ret) {
7626 + pr_info("fail to get nand info (%d)!\n", ret);
7627 + mem_free(mtd);
7628 + return NULL;
7629 + }
7630 +
7631 + mtd->owner = THIS_MODULE;
7632 +
7633 + mtd->name = "MTK-SNand";
7634 + mtd->writesize = nfc->info.page_size;
7635 + mtd->erasesize = nfc->info.block_size;
7636 + mtd->oobsize = nfc->info.oob_size;
7637 + mtd->size = nfc->info.total_size;
7638 + mtd->type = MTD_NANDFLASH;
7639 + mtd->flags = MTD_CAP_NANDFLASH;
7640 + mtd->_erase = mtk_nand_erase;
7641 + mtd->_read = mtk_nand_read;
7642 + mtd->_write = mtk_nand_write;
7643 + mtd->_read_oob = mtk_nand_read_oob;
7644 + mtd->_write_oob = mtk_nand_write_oob;
7645 + mtd->_sync = mtk_nand_sync;
7646 + mtd->_lock = NULL;
7647 + mtd->_unlock = NULL;
7648 + mtd->_block_isbad = mtk_nand_is_bad;
7649 + mtd->_block_markbad = mtk_nand_mark_bad;
7650 + mtd->writebufsize = mtd->writesize;
7651 +
7652 + mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
7653 +
7654 + mtd->ecc_strength = nfc->info.ecc_strength;
7655 + mtd->ecc_step_size = nfc->info.sector_size;
7656 +
7657 + if (!mtd->bitflip_threshold)
7658 + mtd->bitflip_threshold = mtd->ecc_strength;
7659 +
7660 + return mtd;
7661 +}
7662 +
7663 +int board_nand_init(struct nand_chip *nand)
7664 +{
7665 + struct udevice *dev;
7666 + struct mtd_info *mtd;
7667 + struct nandx_nfc *nfc;
7668 + int arg = 1;
7669 + int ret;
7670 +
7671 + ret = uclass_get_device_by_driver(UCLASS_MTD,
7672 + DM_GET_DRIVER(mtk_snand_drv),
7673 + &dev);
7674 + if (ret) {
7675 + pr_err("Failed to get mtk_nand_drv. (error %d)\n", ret);
7676 + return ret;
7677 + }
7678 +
7679 + nfc = dev_get_priv(dev);
7680 +
7681 + ret = nandx_enable_clk(&nfc->clk);
7682 + if (ret) {
7683 + pr_err("failed to enable nfi clk (error %d)\n", ret);
7684 + return ret;
7685 + }
7686 +
7687 + ret = nandx_init(nfc->res);
7688 + if (ret) {
7689 + pr_err("nandx init error (%d)!\n", ret);
7690 + goto disable_clk;
7691 + }
7692 +
7693 + arg = 1;
7694 + nandx_ioctl(NFI_CTRL_DMA, &arg);
7695 + nandx_ioctl(NFI_CTRL_ECC, &arg);
7696 +
7697 +#ifdef NANDX_UNIT_TEST
7698 + nandx_unit_test(0x780000, 0x800);
7699 +#endif
7700 +
7701 + mtd = mtd_info_create(dev, nfc, nand);
7702 + if (!mtd) {
7703 + ret = -ENOMEM;
7704 + goto disable_clk;
7705 + }
7706 +
7707 + spin_lock_init(&nfc->lock);
7708 +#if 0
7709 + ret = scan_bbt(&nfc->info);
7710 + if (ret) {
7711 + pr_info("bbt init error (%d)!\n", ret);
7712 + goto disable_clk;
7713 + }
7714 +#endif
7715 + return ret;
7716 +
7717 +disable_clk:
7718 + nandx_disable_clk(&nfc->clk);
7719 +
7720 + return ret;
7721 +}
7722 +
7723 +static int mtk_snand_ofdata_to_platdata(struct udevice *dev)
7724 +{
7725 + struct nandx_nfc *nfc = dev_get_priv(dev);
7726 + struct nfc_compatible *compat;
7727 + struct nfi_resource *res;
7728 +
7729 + int ret = 0;
7730 +
7731 + res = mem_alloc(1, sizeof(struct nfi_resource));
7732 + if (!res)
7733 + return -ENOMEM;
7734 +
7735 + nfc->res = res;
7736 +
7737 + res->nfi_regs = (void *)dev_read_addr_index(dev, 0);
7738 + res->ecc_regs = (void *)dev_read_addr_index(dev, 1);
7739 + pr_debug("mtk snand nfi_regs:0x%x ecc_regs:0x%x\n",
7740 + res->nfi_regs, res->ecc_regs);
7741 +
7742 + compat = (struct nfc_compatible *)dev_get_driver_data(dev);
7743 +
7744 + res->ic_ver = (enum mtk_ic_version)(compat->ic_ver);
7745 + res->clock_1x = compat->clock_1x;
7746 + res->clock_2x = compat->clock_2x;
7747 + res->clock_2x_num = compat->clock_2x_num;
7748 +
7749 + memset(&nfc->clk, 0, sizeof(struct nandx_clk));
7750 + nfc->clk.nfi_clk =
7751 + kmalloc(sizeof(*nfc->clk.nfi_clk), GFP_KERNEL);
7752 + nfc->clk.ecc_clk =
7753 + kmalloc(sizeof(*nfc->clk.ecc_clk), GFP_KERNEL);
7754 + nfc->clk.snfi_clk=
7755 + kmalloc(sizeof(*nfc->clk.snfi_clk), GFP_KERNEL);
7756 + nfc->clk.snfi_clk_sel =
7757 + kmalloc(sizeof(*nfc->clk.snfi_clk_sel), GFP_KERNEL);
7758 + nfc->clk.snfi_parent_50m =
7759 + kmalloc(sizeof(*nfc->clk.snfi_parent_50m), GFP_KERNEL);
7760 +
7761 + if (!nfc->clk.nfi_clk || !nfc->clk.ecc_clk || !nfc->clk.snfi_clk ||
7762 + !nfc->clk.snfi_clk_sel || !nfc->clk.snfi_parent_50m) {
7763 + ret = -ENOMEM;
7764 + goto err;
7765 + }
7766 +
7767 + ret = clk_get_by_name(dev, "nfi_clk", nfc->clk.nfi_clk);
7768 + if (IS_ERR(nfc->clk.nfi_clk)) {
7769 + ret = PTR_ERR(nfc->clk.nfi_clk);
7770 + goto err;
7771 + }
7772 +
7773 + ret = clk_get_by_name(dev, "ecc_clk", nfc->clk.ecc_clk);
7774 + if (IS_ERR(nfc->clk.ecc_clk)) {
7775 + ret = PTR_ERR(nfc->clk.ecc_clk);
7776 + goto err;
7777 + }
7778 +
7779 + ret = clk_get_by_name(dev, "snfi_clk", nfc->clk.snfi_clk);
7780 + if (IS_ERR(nfc->clk.snfi_clk)) {
7781 + ret = PTR_ERR(nfc->clk.snfi_clk);
7782 + goto err;
7783 + }
7784 +
7785 + ret = clk_get_by_name(dev, "spinfi_sel", nfc->clk.snfi_clk_sel);
7786 + if (IS_ERR(nfc->clk.snfi_clk_sel)) {
7787 + ret = PTR_ERR(nfc->clk.snfi_clk_sel);
7788 + goto err;
7789 + }
7790 +
7791 + ret = clk_get_by_name(dev, "spinfi_parent_50m", nfc->clk.snfi_parent_50m);
7792 + if (IS_ERR(nfc->clk.snfi_parent_50m))
7793 + pr_info("spinfi parent 50MHz is not configed\n");
7794 +
7795 + return 0;
7796 +err:
7797 + if (nfc->clk.nfi_clk)
7798 + kfree(nfc->clk.nfi_clk);
7799 + if (nfc->clk.snfi_clk)
7800 + kfree(nfc->clk.snfi_clk);
7801 + if (nfc->clk.ecc_clk)
7802 + kfree(nfc->clk.ecc_clk);
7803 + if (nfc->clk.snfi_clk_sel)
7804 + kfree(nfc->clk.snfi_clk_sel);
7805 + if (nfc->clk.snfi_parent_50m)
7806 + kfree(nfc->clk.snfi_parent_50m);
7807 +
7808 + return ret;
7809 +}
7810 +
7811 +U_BOOT_DRIVER(mtk_snand_drv) = {
7812 + .name = "mtk_snand",
7813 + .id = UCLASS_MTD,
7814 + .of_match = ic_of_match,
7815 + .ofdata_to_platdata = mtk_snand_ofdata_to_platdata,
7816 + .priv_auto_alloc_size = sizeof(struct nandx_nfc),
7817 +};
7818 +
7819 +MODULE_LICENSE("GPL v2");
7820 +MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
7821 +MODULE_AUTHOR("MediaTek");
7822 --- /dev/null
7823 +++ b/drivers/mtd/nandx/include/Nandx.mk
7824 @@ -0,0 +1,16 @@
7825 +#
7826 +# Copyright (C) 2017 MediaTek Inc.
7827 +# Licensed under either
7828 +# BSD Licence, (see NOTICE for more details)
7829 +# GNU General Public License, version 2.0, (see NOTICE for more details)
7830 +#
7831 +
7832 +nandx-header-y += internal/nandx_core.h
7833 +nandx-header-y += internal/nandx_errno.h
7834 +nandx-header-y += internal/nandx_util.h
7835 +nandx-header-$(NANDX_BBT_SUPPORT) += internal/bbt.h
7836 +nandx-header-$(NANDX_SIMULATOR_SUPPORT) += simulator/nandx_os.h
7837 +nandx-header-$(NANDX_CTP_SUPPORT) += ctp/nandx_os.h
7838 +nandx-header-$(NANDX_LK_SUPPORT) += lk/nandx_os.h
7839 +nandx-header-$(NANDX_KERNEL_SUPPORT) += kernel/nandx_os.h
7840 +nandx-header-$(NANDX_UBOOT_SUPPORT) += uboot/nandx_os.h
7841 --- /dev/null
7842 +++ b/drivers/mtd/nandx/include/internal/bbt.h
7843 @@ -0,0 +1,62 @@
7844 +/*
7845 + * Copyright (C) 2017 MediaTek Inc.
7846 + * Licensed under either
7847 + * BSD Licence, (see NOTICE for more details)
7848 + * GNU General Public License, version 2.0, (see NOTICE for more details)
7849 + */
7850 +
7851 +#ifndef __BBT_H__
7852 +#define __BBT_H__
7853 +
7854 +#define BBT_BLOCK_GOOD 0x03
7855 +#define BBT_BLOCK_WORN 0x02
7856 +#define BBT_BLOCK_RESERVED 0x01
7857 +#define BBT_BLOCK_FACTORY_BAD 0x00
7858 +
7859 +#define BBT_INVALID_ADDR 0
7860 +/* The maximum number of blocks to scan for a bbt */
7861 +#define NAND_BBT_SCAN_MAXBLOCKS 4
7862 +#define NAND_BBT_USE_FLASH 0x00020000
7863 +#define NAND_BBT_NO_OOB 0x00040000
7864 +
7865 +/* Search good / bad pattern on the first and the second page */
7866 +#define NAND_BBT_SCAN2NDPAGE 0x00008000
7867 +/* Search good / bad pattern on the last page of the eraseblock */
7868 +#define NAND_BBT_SCANLASTPAGE 0x00010000
7869 +
7870 +#define NAND_DRAM_BUF_DATABUF_ADDR (NAND_BUF_ADDR)
7871 +
7872 +struct bbt_pattern {
7873 + u8 *data;
7874 + int len;
7875 +};
7876 +
7877 +struct bbt_desc {
7878 + struct bbt_pattern pattern;
7879 + u8 version;
7880 + u64 bbt_addr;/*0: invalid value; otherwise, valid value*/
7881 +};
7882 +
7883 +struct bbt_manager {
7884 + /* main bbt descriptor and mirror descriptor */
7885 + struct bbt_desc desc[2];/* 0: main bbt; 1: mirror bbt */
7886 + int max_blocks;
7887 + u8 *bbt;
7888 +};
7889 +
7890 +#define BBT_ENTRY_MASK 0x03
7891 +#define BBT_ENTRY_SHIFT 2
7892 +
7893 +#define GET_BBT_LENGTH(blocks) (blocks >> 2)
7894 +#define GET_ENTRY(block) ((block) >> BBT_ENTRY_SHIFT)
7895 +#define GET_POSITION(block) (((block) & BBT_ENTRY_MASK) * 2)
7896 +#define GET_MARK_VALUE(block, mark) \
7897 + (((mark) & BBT_ENTRY_MASK) << GET_POSITION(block))
7898 +
7899 +int scan_bbt(struct nandx_info *nand);
7900 +
7901 +int bbt_mark_bad(struct nandx_info *nand, off_t offset);
7902 +
7903 +int bbt_is_bad(struct nandx_info *nand, off_t offset);
7904 +
7905 +#endif /*__BBT_H__*/
7906 --- /dev/null
7907 +++ b/drivers/mtd/nandx/include/internal/nandx_core.h
7908 @@ -0,0 +1,250 @@
7909 +/*
7910 + * Copyright (C) 2017 MediaTek Inc.
7911 + * Licensed under either
7912 + * BSD Licence, (see NOTICE for more details)
7913 + * GNU General Public License, version 2.0, (see NOTICE for more details)
7914 + */
7915 +
7916 +#ifndef __NANDX_CORE_H__
7917 +#define __NANDX_CORE_H__
7918 +
7919 +/**
7920 + * mtk_ic_version - indicates the specific IC; the IP needs this to load some info
7921 + */
7922 +enum mtk_ic_version {
7923 + NANDX_MT7622,
7924 +};
7925 +
7926 +/**
7927 + * nandx_ioctl_cmd - operations supported by nandx
7928 + *
7929 + * @NFI_CTRL_DMA dma enable or not
7930 + * @NFI_CTRL_NFI_MODE customer/read/program/erase...
7931 + * @NFI_CTRL_ECC ecc enable or not
7932 + * @NFI_CTRL_ECC_MODE nfi/dma/pio
7933 + * @CHIP_CTRL_DRIVE_STRENGTH enum chip_ctrl_drive_strength
7934 + */
7935 +enum nandx_ctrl_cmd {
7936 + CORE_CTRL_NAND_INFO,
7937 +
7938 + NFI_CTRL_DMA,
7939 + NFI_CTRL_NFI_MODE,
7940 + NFI_CTRL_AUTOFORMAT,
7941 + NFI_CTRL_NFI_IRQ,
7942 + NFI_CTRL_PAGE_IRQ,
7943 + NFI_CTRL_RANDOMIZE,
7944 + NFI_CTRL_BAD_MARK_SWAP,
7945 +
7946 + NFI_CTRL_ECC,
7947 + NFI_CTRL_ECC_MODE,
7948 + NFI_CTRL_ECC_CLOCK,
7949 + NFI_CTRL_ECC_IRQ,
7950 + NFI_CTRL_ECC_PAGE_IRQ,
7951 + NFI_CTRL_ECC_DECODE_MODE,
7952 +
7953 + SNFI_CTRL_OP_MODE,
7954 + SNFI_CTRL_RX_MODE,
7955 + SNFI_CTRL_TX_MODE,
7956 + SNFI_CTRL_DELAY_MODE,
7957 +
7958 + CHIP_CTRL_OPS_CACHE,
7959 + CHIP_CTRL_OPS_MULTI,
7960 + CHIP_CTRL_PSLC_MODE,
7961 + CHIP_CTRL_DRIVE_STRENGTH,
7962 + CHIP_CTRL_DDR_MODE,
7963 + CHIP_CTRL_ONDIE_ECC,
7964 + CHIP_CTRL_TIMING_MODE
7965 +};
7966 +
7967 +enum snfi_ctrl_op_mode {
7968 + SNFI_CUSTOM_MODE,
7969 + SNFI_AUTO_MODE,
7970 + SNFI_MAC_MODE
7971 +};
7972 +
7973 +enum snfi_ctrl_rx_mode {
7974 + SNFI_RX_111,
7975 + SNFI_RX_112,
7976 + SNFI_RX_114,
7977 + SNFI_RX_122,
7978 + SNFI_RX_144
7979 +};
7980 +
7981 +enum snfi_ctrl_tx_mode {
7982 + SNFI_TX_111,
7983 + SNFI_TX_114,
7984 +};
7985 +
7986 +enum chip_ctrl_drive_strength {
7987 + CHIP_DRIVE_NORMAL,
7988 + CHIP_DRIVE_HIGH,
7989 + CHIP_DRIVE_MIDDLE,
7990 + CHIP_DRIVE_LOW
7991 +};
7992 +
7993 +enum chip_ctrl_timing_mode {
7994 + CHIP_TIMING_MODE0,
7995 + CHIP_TIMING_MODE1,
7996 + CHIP_TIMING_MODE2,
7997 + CHIP_TIMING_MODE3,
7998 + CHIP_TIMING_MODE4,
7999 + CHIP_TIMING_MODE5,
8000 +};
8001 +
8002 +/**
8003 + * nandx_info - basic information
8004 + */
8005 +struct nandx_info {
8006 + u32 max_io_count;
8007 + u32 min_write_pages;
8008 + u32 plane_num;
8009 + u32 oob_size;
8010 + u32 page_parity_size;
8011 + u32 page_size;
8012 + u32 block_size;
8013 + u64 total_size;
8014 + u32 fdm_reg_size;
8015 + u32 fdm_ecc_size;
8016 + u32 ecc_strength;
8017 + u32 sector_size;
8018 +};
8019 +
8020 +/**
8021 + * nfi_resource - the resource needed by nfi & ecc to do initialization
8022 + */
8023 +struct nfi_resource {
8024 + int ic_ver;
8025 + void *dev;
8026 +
8027 + void *ecc_regs;
8028 + int ecc_irq_id;
8029 +
8030 + void *nfi_regs;
8031 + int nfi_irq_id;
8032 +
8033 + u32 clock_1x;
8034 + u32 *clock_2x;
8035 + int clock_2x_num;
8036 +
8037 + int min_oob_req;
8038 +};
8039 +
8040 +/**
8041 + * nandx_init - init all related modules below
8042 + *
8043 + * @res: basic resource of the project
8044 + *
8045 + * return 0 if init success, otherwise return negative error code
8046 + */
8047 +int nandx_init(struct nfi_resource *res);
8048 +
8049 +/**
8050 + * nandx_exit - release resource those that obtained in init flow
8051 + */
8052 +void nandx_exit(void);
8053 +
8054 +/**
8055 + * nandx_read - read data from nand; this function can read data and related
8056 + * oob from a specific address
8057 + * if doing multi_ops, set one operation at a time, and call nandx_sync last
8058 + * in multi mode, page partial read is not supported
8059 + * oob does not support partial read
8060 + *
8061 + * @data: buf to receive data from nand
8062 + * @oob: buf to receive oob data from nand which related to data page
8063 + * length of @oob should be oob size aligned; oob does not support partial read
8064 + * @offset: offset address on the whole flash
8065 + * @len: the length of @data that need to read
8066 + *
8067 + * if read success return 0, otherwise return negative error code
8068 + */
8069 +int nandx_read(u8 *data, u8 *oob, u64 offset, size_t len);
8070 +
8071 +/**
8072 + * nandx_write - write data to nand
8073 + * this function can write data and related oob to a specific address
8074 + * if doing multi_ops, set one operation at a time, and call nandx_sync last
8075 + *
8076 + * @data: source data to be written to nand,
8077 + * for multi operation, the length of @data should be page size aligned
8078 + * @oob: source oob which related to data page to be written to nand,
8079 + * length of @oob should be oob size aligned
8080 + * @offset: offset address on the whole flash, the value should be start address
8081 + * of a page
8082 + * @len: the length of @data that need to write,
8083 + * for multi operation, the len should be page size aligned
8084 + *
8085 + * if write success return 0, otherwise return negative error code
8086 + * if the return value is > 0, it indicates how many pages still need to be written,
8087 + * and that data has not been written to nand
8088 + * please call nandx_sync once pages are aligned to $nandx_info.min_write_pages
8089 + */
8090 +int nandx_write(u8 *data, u8 *oob, u64 offset, size_t len);
8091 +
8092 +/**
8093 + * nandx_erase - erase an area of nand
8094 + * if doing multi_ops, set one operation at a time, and call nandx_sync last
8095 + *
8096 + * @offset: offset address on the flash
8097 + * @len: erase length which should be block size aligned
8098 + *
8099 + * if erase success return 0, otherwise return negative error code
8100 + */
8101 +int nandx_erase(u64 offset, size_t len);
8102 +
8103 +/**
8104 + * nandx_sync - sync all operations to nand
8105 + * when doing multi_ops, this function will be called as the last operation
8106 + * when writing data, if the number of pages is not aligned
8107 + * to $nandx_info.min_write_pages, this interface can be called to do a
8108 + * forced write; 0xff will be padded to the blank pages.
8109 + */
8110 +int nandx_sync(void);
8111 +
8112 +/**
8113 + * nandx_is_bad_block - check if the block is bad
8114 + * only checks the flag marked by the flash vendor
8115 + *
8116 + * @offset: offset address on the whole flash
8117 + *
8118 + * return true if the block is bad, otherwise return false
8119 + */
8120 +bool nandx_is_bad_block(u64 offset);
8121 +
8122 +/**
8123 + * nandx_ioctl - set/get property of nand chip
8124 + *
8125 + * @cmd: parameter that defined in enum nandx_ioctl_cmd
8126 + * @arg: operate parameter
8127 + *
8128 + * return 0 if operate success, otherwise return negative error code
8129 + */
8130 +int nandx_ioctl(int cmd, void *arg);
8131 +
8132 +/**
8133 + * nandx_suspend - suspend nand, and store some data
8134 + *
8135 + * return 0 if suspend success, otherwise return negative error code
8136 + */
8137 +int nandx_suspend(void);
8138 +
8139 +/**
8140 + * nandx_resume - resume nand, and replay some data
8141 + *
8142 + * return 0 if resume success, otherwise return negative error code
8143 + */
8144 +int nandx_resume(void);
8145 +
8146 +#ifdef NANDX_UNIT_TEST
8147 +/**
8148 + * nandx_unit_test - unit test
8149 + *
8150 + * @offset: offset address on the whole flash
8151 + * @len: should be not larger than a block size, we only test a block per time
8152 + *
8153 + * return 0 if test success, otherwise return negative error code
8154 + */
8155 +int nandx_unit_test(u64 offset, size_t len);
8156 +#endif
8157 +
8158 +#endif /* __NANDX_CORE_H__ */
8159 --- /dev/null
8160 +++ b/drivers/mtd/nandx/include/internal/nandx_errno.h
8161 @@ -0,0 +1,40 @@
8162 +/*
8163 + * Copyright (C) 2017 MediaTek Inc.
8164 + * Licensed under either
8165 + * BSD Licence, (see NOTICE for more details)
8166 + * GNU General Public License, version 2.0, (see NOTICE for more details)
8167 + */
8168 +
8169 +#ifndef __NANDX_ERRNO_H__
8170 +#define __NANDX_ERRNO_H__
8171 +
8172 +#ifndef EIO
8173 +#define EIO 5 /* I/O error */
8174 +#define ENOMEM 12 /* Out of memory */
8175 +#define EFAULT 14 /* Bad address */
8176 +#define EBUSY 16 /* Device or resource busy */
8177 +#define ENODEV 19 /* No such device */
8178 +#define EINVAL 22 /* Invalid argument */
8179 +#define ENOSPC 28 /* No space left on device */
8180 +/* Operation not supported on transport endpoint */
8181 +#define EOPNOTSUPP 95
8182 +#define ETIMEDOUT 110 /* Connection timed out */
8183 +#endif
8184 +
8185 +#define ENANDFLIPS 1024 /* Too many bitflips, uncorrected */
8186 +#define ENANDREAD 1025 /* Read fail, can't correct */
8187 +#define ENANDWRITE 1026 /* Write fail */
8188 +#define ENANDERASE 1027 /* Erase fail */
8189 +#define ENANDBAD 1028 /* Bad block */
8190 +#define ENANDWP 1029
8191 +
8192 +#define IS_NAND_ERR(err) ((err) >= -ENANDBAD && (err) <= -ENANDFLIPS)
8193 +
8194 +#ifndef MAX_ERRNO
8195 +#define MAX_ERRNO 4096
8196 +#define ERR_PTR(errno) ((void *)((long)errno))
8197 +#define PTR_ERR(ptr) ((long)(ptr))
8198 +#define IS_ERR(ptr) ((unsigned long)(ptr) > (unsigned long)-MAX_ERRNO)
8199 +#endif
8200 +
8201 +#endif /* __NANDX_ERRNO_H__ */
8202 --- /dev/null
8203 +++ b/drivers/mtd/nandx/include/internal/nandx_util.h
8204 @@ -0,0 +1,221 @@
8205 +/*
8206 + * Copyright (C) 2017 MediaTek Inc.
8207 + * Licensed under either
8208 + * BSD Licence, (see NOTICE for more details)
8209 + * GNU General Public License, version 2.0, (see NOTICE for more details)
8210 + */
8211 +
8212 +#ifndef __NANDX_UTIL_H__
8213 +#define __NANDX_UTIL_H__
8214 +
8215 +typedef unsigned char u8;
8216 +typedef unsigned short u16;
8217 +typedef unsigned int u32;
8218 +typedef unsigned long long u64;
8219 +
8220 +enum nand_irq_return {
8221 + NAND_IRQ_NONE,
8222 + NAND_IRQ_HANDLED,
8223 +};
8224 +
8225 +enum nand_dma_operation {
8226 + NDMA_FROM_DEV,
8227 + NDMA_TO_DEV,
8228 +};
8229 +
8230 +
8231 +/*
8232 + * Compatible function
8233 + * used for preloader/lk/kernel environment
8234 + */
8235 +#include "nandx_os.h"
8236 +#include "nandx_errno.h"
8237 +
8238 +#ifndef BIT
8239 +#define BIT(a) (1 << (a))
8240 +#endif
8241 +
8242 +#ifndef min_t
8243 +#define min_t(type, x, y) ({ \
8244 + type __min1 = (x); \
8245 + type __min2 = (y); \
8246 + __min1 < __min2 ? __min1 : __min2; })
8247 +
8248 +#define max_t(type, x, y) ({ \
8249 + type __max1 = (x); \
8250 + type __max2 = (y); \
8251 + __max1 > __max2 ? __max1 : __max2; })
8252 +#endif
8253 +
8254 +#ifndef GENMASK
8255 +#define GENMASK(h, l) \
8256 + (((~0UL) << (l)) & (~0UL >> ((sizeof(unsigned long) * 8) - 1 - (h))))
8257 +#endif
8258 +
8259 +#ifndef __weak
8260 +#define __weak __attribute__((__weak__))
8261 +#endif
8262 +
8263 +#ifndef __packed
8264 +#define __packed __attribute__((__packed__))
8265 +#endif
8266 +
8267 +#ifndef KB
8268 +#define KB(x) ((x) << 10)
8269 +#define MB(x) (KB(x) << 10)
8270 +#define GB(x) (MB(x) << 10)
8271 +#endif
8272 +
8273 +#ifndef offsetof
8274 +#define offsetof(type, member) ((size_t)&((type *)0)->member)
8275 +#endif
8276 +
8277 +#ifndef NULL
8278 +#define NULL (void *)0
8279 +#endif
8280 +static inline u32 nandx_popcount(u32 x)
8281 +{
8282 + x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
8283 + x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
8284 + x = (x & 0x0F0F0F0F) + ((x >> 4) & 0x0F0F0F0F);
8285 + x = (x & 0x00FF00FF) + ((x >> 8) & 0x00FF00FF);
8286 + x = (x & 0x0000FFFF) + ((x >> 16) & 0x0000FFFF);
8287 +
8288 + return x;
8289 +}
8290 +
8291 +#ifndef zero_popcount
8292 +#define zero_popcount(x) (32 - nandx_popcount(x))
8293 +#endif
8294 +
8295 +#ifndef do_div
8296 +#define do_div(n, base) \
8297 + ({ \
8298 + u32 __base = (base); \
8299 + u32 __rem; \
8300 + __rem = ((u64)(n)) % __base; \
8301 + (n) = ((u64)(n)) / __base; \
8302 + __rem; \
8303 + })
8304 +#endif
8305 +
8306 +#define div_up(x, y) \
8307 + ({ \
8308 + u64 __temp = ((x) + (y) - 1); \
8309 + do_div(__temp, (y)); \
8310 + __temp; \
8311 + })
8312 +
8313 +#define div_down(x, y) \
8314 + ({ \
8315 + u64 __temp = (x); \
8316 + do_div(__temp, (y)); \
8317 + __temp; \
8318 + })
8319 +
8320 +#define div_round_up(x, y) (div_up(x, y) * (y))
8321 +#define div_round_down(x, y) (div_down(x, y) * (y))
8322 +
8323 +#define reminder(x, y) \
8324 + ({ \
8325 + u64 __temp = (x); \
8326 + do_div(__temp, (y)); \
8327 + })
8328 +
8329 +#ifndef round_up
8330 +#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)
8331 +#define round_down(x, y) ((x) & ~((y) - 1))
8332 +#endif
8333 +
8334 +#ifndef readx_poll_timeout_atomic
8335 +#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
8336 + ({ \
8337 + u64 end = get_current_time_us() + timeout_us; \
8338 + for (;;) { \
8339 + u64 now = get_current_time_us(); \
8340 + (val) = op(addr); \
8341 + if (cond) \
8342 + break; \
8343 + if (now > end) { \
8344 + (val) = op(addr); \
8345 + break; \
8346 + } \
8347 + } \
8348 + (cond) ? 0 : -ETIMEDOUT; \
8349 + })
8350 +
8351 +#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
8352 + readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
8353 +#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
8354 + readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
8355 +#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
8356 + readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
8357 +#endif
8358 +
8359 +struct nandx_split64 {
8360 + u64 head;
8361 + size_t head_len;
8362 + u64 body;
8363 + size_t body_len;
8364 + u64 tail;
8365 + size_t tail_len;
8366 +};
8367 +
8368 +struct nandx_split32 {
8369 + u32 head;
8370 + u32 head_len;
8371 + u32 body;
8372 + u32 body_len;
8373 + u32 tail;
8374 + u32 tail_len;
8375 +};
8376 +
8377 +#define nandx_split(split, offset, len, val, align) \
8378 + do { \
8379 + (split)->head = (offset); \
8380 + (val) = div_round_down((offset), (align)); \
8381 + (val) = (align) - ((offset) - (val)); \
8382 + if ((val) == (align)) \
8383 + (split)->head_len = 0; \
8384 + else if ((val) > (len)) \
8385 + (split)->head_len = len; \
8386 + else \
8387 + (split)->head_len = val; \
8388 + (split)->body = (offset) + (split)->head_len; \
8389 + (split)->body_len = div_round_down((len) - \
8390 + (split)->head_len,\
8391 + (align)); \
8392 + (split)->tail = (split)->body + (split)->body_len; \
8393 + (split)->tail_len = (len) - (split)->head_len - \
8394 + (split)->body_len; \
8395 + } while (0)
8396 +
8397 +#ifndef container_of
8398 +#define container_of(ptr, type, member) \
8399 + ({const __typeof__(((type *)0)->member) * __mptr = (ptr); \
8400 + (type *)((char *)__mptr - offsetof(type, member)); })
8401 +#endif
8402 +
8403 +static inline u32 nandx_cpu_to_be32(u32 val)
8404 +{
8405 + u32 temp = 1;
8406 + u8 *p_temp = (u8 *)&temp;
8407 +
8408 + if (*p_temp)
8409 + return ((val & 0xff) << 24) | ((val & 0xff00) << 8) |
8410 + ((val >> 8) & 0xff00) | ((val >> 24) & 0xff);
8411 +
8412 + return val;
8413 +}
8414 +
8415 +static inline void nandx_set_bits32(unsigned long addr, u32 mask,
8416 + u32 val)
8417 +{
8418 + u32 temp = readl((void *)addr);
8419 +
8420 + temp &= ~(mask);
8421 + temp |= val;
8422 + writel(temp, (void *)addr);
8423 +}
8424 +
8425 +#endif /* __NANDX_UTIL_H__ */
8426 --- /dev/null
8427 +++ b/drivers/mtd/nandx/include/uboot/nandx_os.h
8428 @@ -0,0 +1,78 @@
8429 +/*
8430 + * Copyright (C) 2017 MediaTek Inc.
8431 + * Licensed under either
8432 + * BSD Licence, (see NOTICE for more details)
8433 + * GNU General Public License, version 2.0, (see NOTICE for more details)
8434 + */
8435 +
8436 +#ifndef __NANDX_OS_H__
8437 +#define __NANDX_OS_H__
8438 +
8439 +#include <common.h>
8440 +#include <dm.h>
8441 +#include <clk.h>
8442 +#include <asm/dma-mapping.h>
8443 +#include <linux/io.h>
8444 +#include <linux/err.h>
8445 +#include <linux/errno.h>
8446 +#include <linux/bitops.h>
8447 +#include <linux/kernel.h>
8448 +#include <linux/compiler-gcc.h>
8449 +
8450 +#define NANDX_BULK_IO_USE_DRAM 0
8451 +
8452 +#define nandx_event_create() NULL
8453 +#define nandx_event_destroy(event)
8454 +#define nandx_event_complete(event)
8455 +#define nandx_event_init(event)
8456 +#define nandx_event_wait_complete(event, timeout) true
8457 +
8458 +#define nandx_irq_register(dev, irq, irq_handler, name, data) NULL
8459 +
8460 +static inline void *mem_alloc(u32 count, u32 size)
8461 +{
8462 + return kmalloc(count * size, GFP_KERNEL | __GFP_ZERO);
8463 +}
8464 +
8465 +static inline void mem_free(void *mem)
8466 +{
8467 + kfree(mem);
8468 +}
8469 +
8470 +static inline u64 get_current_time_us(void)
8471 +{
8472 + return timer_get_us();
8473 +}
8474 +
8475 +static inline u32 nandx_dma_map(void *dev, void *buf, u64 len,
8476 + enum nand_dma_operation op)
8477 +{
8478 + unsigned long addr = (unsigned long)buf;
8479 + u64 size;
8480 +
8481 + size = ALIGN(len, ARCH_DMA_MINALIGN);
8482 +
8483 + if (op == NDMA_FROM_DEV)
8484 + invalidate_dcache_range(addr, addr + size);
8485 + else
8486 + flush_dcache_range(addr, addr + size);
8487 +
8488 + return addr;
8489 +}
8490 +
8491 +static inline void nandx_dma_unmap(void *dev, void *buf, void *addr,
8492 + u64 len, enum nand_dma_operation op)
8493 +{
8494 + u64 size;
8495 +
8496 + size = ALIGN(len, ARCH_DMA_MINALIGN);
8497 +
8498 + if (op != NDMA_FROM_DEV)
8499 + invalidate_dcache_range((unsigned long)addr, addr + size);
8500 + else
8501 + flush_dcache_range((unsigned long)addr, addr + size);
8502 +
8503 + return addr;
8504 +}
8505 +
8506 +#endif /* __NANDX_OS_H__ */
8507 --- a/include/configs/mt7622.h
8508 +++ b/include/configs/mt7622.h
8509 @@ -11,6 +11,31 @@
8510
8511 #include <linux/sizes.h>
8512
8513 +/* SPI Nand */
8514 +#if defined(CONFIG_MTD_RAW_NAND)
8515 +#define CONFIG_SYS_MAX_NAND_DEVICE 1
8516 +#define CONFIG_SYS_NAND_BASE 0x1100d000
8517 +
8518 +#define ENV_BOOT_READ_IMAGE \
8519 + "boot_rd_img=" \
8520 + "nand read 0x4007ff28 0x380000 0x1400000" \
8521 + ";iminfo 0x4007ff28 \0"
8522 +
8523 +#define ENV_BOOT_WRITE_IMAGE \
8524 + "boot_wr_img=" \
8525 + "nand write 0x4007ff28 0x380000 0x1400000" \
8526 + ";iminfo 0x4007ff28 \0"
8527 +
8528 +#define ENV_BOOT_CMD \
8529 + "mtk_boot=run boot_rd_img;bootm;\0"
8530 +
8531 +#define CONFIG_EXTRA_ENV_SETTINGS \
8532 + ENV_BOOT_READ_IMAGE \
8533 + ENV_BOOT_CMD \
8534 + "bootcmd=run mtk_boot;\0"
8535 +
8536 +#endif
8537 +
8538 #define CONFIG_SYS_MAXARGS 8
8539 #define CONFIG_SYS_BOOTM_LEN SZ_64M
8540 #define CONFIG_SYS_CBSIZE SZ_1K