uboot-mediatek: update to U-Boot 2023.01
[openwrt/staging/noltari.git] / package / boot / uboot-mediatek / patches / 100-06-mtd-add-core-facility-code-of-NMBM.patch
1 From 690479081fb6a0c0f77f10fb457ad69e71390f15 Mon Sep 17 00:00:00 2001
2 From: Weijie Gao <weijie.gao@mediatek.com>
3 Date: Mon, 25 Jul 2022 10:26:35 +0800
4 Subject: [PATCH 40/71] mtd: add core facility code of NMBM
5
6 This patch adds a NAND bad block management named NMBM (NAND mapping block
7 management) which supports using a mapping table to deal with bad blocks
8 before factory shipping and during use.
9
10 Signed-off-by: Weijie Gao <weijie.gao@mediatek.com>
11 ---
12 drivers/mtd/Kconfig | 2 +
13 drivers/mtd/Makefile | 1 +
14 drivers/mtd/nmbm/Kconfig | 29 +
15 drivers/mtd/nmbm/Makefile | 5 +
16 drivers/mtd/nmbm/nmbm-core.c | 2936 +++++++++++++++++++++++++++++++
17 drivers/mtd/nmbm/nmbm-debug.h | 37 +
18 drivers/mtd/nmbm/nmbm-debug.inl | 39 +
19 drivers/mtd/nmbm/nmbm-private.h | 137 ++
20 include/nmbm/nmbm-os.h | 66 +
21 include/nmbm/nmbm.h | 102 ++
22 10 files changed, 3354 insertions(+)
23 create mode 100644 drivers/mtd/nmbm/Kconfig
24 create mode 100644 drivers/mtd/nmbm/Makefile
25 create mode 100644 drivers/mtd/nmbm/nmbm-core.c
26 create mode 100644 drivers/mtd/nmbm/nmbm-debug.h
27 create mode 100644 drivers/mtd/nmbm/nmbm-debug.inl
28 create mode 100644 drivers/mtd/nmbm/nmbm-private.h
29 create mode 100644 include/nmbm/nmbm-os.h
30 create mode 100644 include/nmbm/nmbm.h
31
32 --- a/drivers/mtd/Kconfig
33 +++ b/drivers/mtd/Kconfig
34 @@ -226,4 +226,6 @@ source "drivers/mtd/spi/Kconfig"
35
36 source "drivers/mtd/ubi/Kconfig"
37
38 +source "drivers/mtd/nmbm/Kconfig"
39 +
40 endmenu
41 --- a/drivers/mtd/Makefile
42 +++ b/drivers/mtd/Makefile
43 @@ -40,3 +40,4 @@ obj-$(CONFIG_SPL_UBI) += ubispl/
44 endif
45
46 obj-$(CONFIG_MTK_SPI_NAND) += mtk-snand/
47 +obj-$(CONFIG_NMBM) += nmbm/
48 --- /dev/null
49 +++ b/drivers/mtd/nmbm/Kconfig
50 @@ -0,0 +1,29 @@
51 +
52 +config NMBM
53 + bool "Enable NAND mapping block management"
54 + default n
55 +
56 +choice
57 + prompt "Default log level"
58 + depends on NMBM
59 + default NMBM_LOG_LEVEL_INFO
60 +
61 +config NMBM_LOG_LEVEL_DEBUG
62 + bool "0 - Debug"
63 +
64 +config NMBM_LOG_LEVEL_INFO
65 + bool "1 - Info"
66 +
67 +config NMBM_LOG_LEVEL_WARN
68 + bool "2 - Warn"
69 +
70 +config NMBM_LOG_LEVEL_ERR
71 + bool "3 - Error"
72 +
73 +config NMBM_LOG_LEVEL_EMERG
74 + bool "4 - Emergency"
75 +
76 +config NMBM_LOG_LEVEL_NONE
77 + bool "5 - None"
78 +
79 +endchoice
80 --- /dev/null
81 +++ b/drivers/mtd/nmbm/Makefile
82 @@ -0,0 +1,5 @@
83 +# SPDX-License-Identifier: GPL-2.0
84 +#
85 +# (C) Copyright 2020 MediaTek Inc. All rights reserved.
86 +
87 +obj-$(CONFIG_NMBM) += nmbm-core.o
88 --- /dev/null
89 +++ b/drivers/mtd/nmbm/nmbm-core.c
90 @@ -0,0 +1,2936 @@
91 +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
92 +/*
93 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
94 + *
95 + * Author: Weijie Gao <weijie.gao@mediatek.com>
96 + */
97 +
98 +#include "nmbm-private.h"
99 +
100 +#include "nmbm-debug.h"
101 +
102 +#define NMBM_VER_MAJOR 1
103 +#define NMBM_VER_MINOR 0
104 +#define NMBM_VER NMBM_VERSION_MAKE(NMBM_VER_MAJOR, \
105 + NMBM_VER_MINOR)
106 +
107 +#define NMBM_ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))
108 +
109 +/*****************************************************************************/
110 +/* Logging related functions */
111 +/*****************************************************************************/
112 +
113 +/*
114 + * nmbm_log_lower - Print log using OS specific routine
115 + * @nld: NMBM lower device structure
116 + * @level: log level
117 + * @fmt: format string
118 + */
119 +static void nmbm_log_lower(struct nmbm_lower_device *nld,
120 + enum nmbm_log_category level, const char *fmt, ...)
121 +{
122 + va_list ap;
123 +
124 + if (!nld->logprint)
125 + return;
126 +
127 + va_start(ap, fmt);
128 + nld->logprint(nld->arg, level, fmt, ap);
129 + va_end(ap);
130 +}
131 +
132 +/*
133 + * nmbm_log - Print log using OS specific routine
134 + * @ni: NMBM instance structure
135 + * @level: log level
136 + * @fmt: format string
137 + */
138 +static void nmbm_log(struct nmbm_instance *ni, enum nmbm_log_category level,
139 + const char *fmt, ...)
140 +{
141 + va_list ap;
142 +
143 + if (!ni)
144 + return;
145 +
146 + if (!ni->lower.logprint || level < ni->log_display_level)
147 + return;
148 +
149 + va_start(ap, fmt);
150 + ni->lower.logprint(ni->lower.arg, level, fmt, ap);
151 + va_end(ap);
152 +}
153 +
154 +/*
155 + * nmbm_set_log_level - Set log display level
156 + * @ni: NMBM instance structure
157 + * @level: log display level
158 + */
159 +enum nmbm_log_category nmbm_set_log_level(struct nmbm_instance *ni,
160 + enum nmbm_log_category level)
161 +{
162 + enum nmbm_log_category old;
163 +
164 + if (!ni)
165 + return __NMBM_LOG_MAX;
166 +
167 + old = ni->log_display_level;
168 + ni->log_display_level = level;
169 + return old;
170 +}
171 +
172 +/*
173 + * nlog_table_creation - Print log of table creation event
174 + * @ni: NMBM instance structure
175 + * @main_table: whether the table is main info table
176 + * @start_ba: start block address of the table
177 + * @end_ba: block address after the end of the table
178 + */
179 +static void nlog_table_creation(struct nmbm_instance *ni, bool main_table,
180 + uint32_t start_ba, uint32_t end_ba)
181 +{
182 + if (start_ba == end_ba - 1)
183 + nlog_info(ni, "%s info table has been written to block %u\n",
184 + main_table ? "Main" : "Backup", start_ba);
185 + else
186 + nlog_info(ni, "%s info table has been written to block %u-%u\n",
187 + main_table ? "Main" : "Backup", start_ba, end_ba - 1);
188 +
189 + nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
190 +}
191 +
192 +/*
193 + * nlog_table_update - Print log of table update event
194 + * @ni: NMBM instance structure
195 + * @main_table: whether the table is main info table
196 + * @start_ba: start block address of the table
197 + * @end_ba: block address after the end of the table
198 + */
199 +static void nlog_table_update(struct nmbm_instance *ni, bool main_table,
200 + uint32_t start_ba, uint32_t end_ba)
201 +{
202 + if (start_ba == end_ba - 1)
203 + nlog_debug(ni, "%s info table has been updated in block %u\n",
204 + main_table ? "Main" : "Backup", start_ba);
205 + else
206 + nlog_debug(ni, "%s info table has been updated in block %u-%u\n",
207 + main_table ? "Main" : "Backup", start_ba, end_ba - 1);
208 +
209 + nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
210 +}
211 +
212 +/*
213 + * nlog_table_found - Print log of table found event
214 + * @ni: NMBM instance structure
215 + * @first_table: whether the table is first found info table
216 + * @write_count: write count of the info table
217 + * @start_ba: start block address of the table
218 + * @end_ba: block address after the end of the table
219 + */
220 +static void nlog_table_found(struct nmbm_instance *ni, bool first_table,
221 + uint32_t write_count, uint32_t start_ba,
222 + uint32_t end_ba)
223 +{
224 + if (start_ba == end_ba - 1)
225 + nlog_info(ni, "%s info table with writecount %u found in block %u\n",
226 + first_table ? "First" : "Second", write_count,
227 + start_ba);
228 + else
229 + nlog_info(ni, "%s info table with writecount %u found in block %u-%u\n",
230 + first_table ? "First" : "Second", write_count,
231 + start_ba, end_ba - 1);
232 +
233 + nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
234 +}
235 +
236 +/*****************************************************************************/
237 +/* Address conversion functions */
238 +/*****************************************************************************/
239 +
240 +/*
241 + * addr2ba - Convert a linear address to block address
242 + * @ni: NMBM instance structure
243 + * @addr: Linear address
244 + */
245 +static uint32_t addr2ba(struct nmbm_instance *ni, uint64_t addr)
246 +{
247 + return addr >> ni->erasesize_shift;
248 +}
249 +
250 +/*
251 + * ba2addr - Convert a block address to linear address
252 + * @ni: NMBM instance structure
253 + * @ba: Block address
254 + */
255 +static uint64_t ba2addr(struct nmbm_instance *ni, uint32_t ba)
256 +{
257 + return (uint64_t)ba << ni->erasesize_shift;
258 +}
259 +/*
260 + * size2blk - Get minimum required blocks for storing specific size of data
261 + * @ni: NMBM instance structure
262 + * @size: size for storing
263 + */
264 +static uint32_t size2blk(struct nmbm_instance *ni, uint64_t size)
265 +{
266 + return (size + ni->lower.erasesize - 1) >> ni->erasesize_shift;
267 +}
268 +
269 +/*****************************************************************************/
270 +/* High level NAND chip APIs */
271 +/*****************************************************************************/
272 +
273 +/*
274 + * nmbm_reset_chip - Reset NAND device
275 + * @nld: Lower NAND chip structure
276 + */
277 +static void nmbm_reset_chip(struct nmbm_instance *ni)
278 +{
279 + if (ni->lower.reset_chip)
280 + ni->lower.reset_chip(ni->lower.arg);
281 +}
282 +
283 +/*
284 + * nmbm_read_phys_page - Read page with retry
285 + * @ni: NMBM instance structure
286 + * @addr: linear address where the data will be read from
287 + * @data: the main data to be read
288 + * @oob: the oob data to be read
289 + * @mode: mode for processing oob data
290 + *
291 + * Read a page for at most NMBM_TRY_COUNT times.
292 + *
293 + * Return 0 for success, positive value for corrected bitflip count,
294 + * -EBADMSG for ecc error, other negative values for other errors
295 + */
296 +static int nmbm_read_phys_page(struct nmbm_instance *ni, uint64_t addr,
297 + void *data, void *oob, enum nmbm_oob_mode mode)
298 +{
299 + int tries, ret;
300 +
301 + for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
302 + ret = ni->lower.read_page(ni->lower.arg, addr, data, oob, mode);
303 + if (ret >= 0)
304 + return ret;
305 +
306 + nmbm_reset_chip(ni);
307 + }
308 +
309 + if (ret != -EBADMSG)
310 + nlog_err(ni, "Page read failed at address 0x%08llx\n", addr);
311 +
312 + return ret;
313 +}
314 +
315 +/*
316 + * nmbm_write_phys_page - Write page with retry
317 + * @ni: NMBM instance structure
318 + * @addr: linear address where the data will be written to
319 + * @data: the main data to be written
320 + * @oob: the oob data to be written
321 + * @mode: mode for processing oob data
322 + *
323 + * Write a page for at most NMBM_TRY_COUNT times.
324 + */
325 +static bool nmbm_write_phys_page(struct nmbm_instance *ni, uint64_t addr,
326 + const void *data, const void *oob,
327 + enum nmbm_oob_mode mode)
328 +{
329 + int tries, ret;
330 +
331 + if (ni->lower.flags & NMBM_F_READ_ONLY) {
332 +		nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", __func__);
333 + return false;
334 + }
335 +
336 + for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
337 + ret = ni->lower.write_page(ni->lower.arg, addr, data, oob, mode);
338 + if (!ret)
339 + return true;
340 +
341 + nmbm_reset_chip(ni);
342 + }
343 +
344 + nlog_err(ni, "Page write failed at address 0x%08llx\n", addr);
345 +
346 + return false;
347 +}
348 +
349 +/*
350 + * nmbm_erase_phys_block - Erase a block with retry
351 + * @ni: NMBM instance structure
352 + * @addr: Linear address
353 + *
354 + * Erase a block for at most NMBM_TRY_COUNT times.
355 + */
356 +static bool nmbm_erase_phys_block(struct nmbm_instance *ni, uint64_t addr)
357 +{
358 + int tries, ret;
359 +
360 + if (ni->lower.flags & NMBM_F_READ_ONLY) {
361 +		nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", __func__);
362 + return false;
363 + }
364 +
365 + for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
366 + ret = ni->lower.erase_block(ni->lower.arg, addr);
367 + if (!ret)
368 + return true;
369 +
370 + nmbm_reset_chip(ni);
371 + }
372 +
373 + nlog_err(ni, "Block erasure failed at address 0x%08llx\n", addr);
374 +
375 + return false;
376 +}
377 +
378 +/*
379 + * nmbm_check_bad_phys_block - Check whether a block is marked bad in OOB
380 + * @ni: NMBM instance structure
381 + * @ba: block address
382 + */
383 +static bool nmbm_check_bad_phys_block(struct nmbm_instance *ni, uint32_t ba)
384 +{
385 + uint64_t addr = ba2addr(ni, ba);
386 + int ret;
387 +
388 + if (ni->lower.is_bad_block)
389 + return ni->lower.is_bad_block(ni->lower.arg, addr);
390 +
391 + /* Treat ECC error as read success */
392 + ret = nmbm_read_phys_page(ni, addr, NULL,
393 + ni->page_cache + ni->lower.writesize,
394 + NMBM_MODE_RAW);
395 + if (ret < 0 && ret != -EBADMSG)
396 + return true;
397 +
398 + return ni->page_cache[ni->lower.writesize] != 0xff;
399 +}
400 +
401 +/*
402 + * nmbm_mark_phys_bad_block - Mark a block bad
403 + * @ni: NMBM instance structure
404 + * @addr: Linear address
405 + */
406 +static int nmbm_mark_phys_bad_block(struct nmbm_instance *ni, uint32_t ba)
407 +{
408 + uint64_t addr = ba2addr(ni, ba);
409 + enum nmbm_log_category level;
410 + uint32_t off;
411 +
412 + if (ni->lower.flags & NMBM_F_READ_ONLY) {
413 +		nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", __func__);
414 +		return -1;
415 + }
416 +
417 + nlog_info(ni, "Block %u [0x%08llx] will be marked bad\n", ba, addr);
418 +
419 + if (ni->lower.mark_bad_block)
420 + return ni->lower.mark_bad_block(ni->lower.arg, addr);
421 +
422 + /* Whole page set to 0x00 */
423 + memset(ni->page_cache, 0, ni->rawpage_size);
424 +
425 + /* Write to all pages within this block, disable all errors */
426 + level = nmbm_set_log_level(ni, __NMBM_LOG_MAX);
427 +
428 + for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
429 + nmbm_write_phys_page(ni, addr + off, ni->page_cache,
430 + ni->page_cache + ni->lower.writesize,
431 + NMBM_MODE_RAW);
432 + }
433 +
434 + nmbm_set_log_level(ni, level);
435 +
436 + return 0;
437 +}
438 +
439 +/*****************************************************************************/
440 +/* NMBM related functions */
441 +/*****************************************************************************/
442 +
443 +/*
444 + * nmbm_check_header - Check whether a NMBM structure is valid
445 + * @data: pointer to a NMBM structure with a NMBM header at beginning
446 + * @size: Size of the buffer pointed by @header
447 + *
448 + * The size of the NMBM structure may be larger than NMBM header,
449 + * e.g. block mapping table and block state table.
450 + */
451 +static bool nmbm_check_header(const void *data, uint32_t size)
452 +{
453 + const struct nmbm_header *header = data;
454 + struct nmbm_header nhdr;
455 + uint32_t new_checksum;
456 +
457 + /*
458 + * Make sure expected structure size is equal or smaller than
459 + * buffer size.
460 + */
461 + if (header->size > size)
462 + return false;
463 +
464 + memcpy(&nhdr, data, sizeof(nhdr));
465 +
466 + nhdr.checksum = 0;
467 + new_checksum = nmbm_crc32(0, &nhdr, sizeof(nhdr));
468 + if (header->size > sizeof(nhdr))
469 + new_checksum = nmbm_crc32(new_checksum,
470 + (const uint8_t *)data + sizeof(nhdr),
471 + header->size - sizeof(nhdr));
472 +
473 + if (header->checksum != new_checksum)
474 + return false;
475 +
476 + return true;
477 +}
478 +
479 +/*
480 + * nmbm_update_checksum - Update checksum of a NMBM structure
481 + * @header: pointer to a NMBM structure with a NMBM header at beginning
482 + *
483 + * The size of the NMBM structure must be specified by @header->size
484 + */
485 +static void nmbm_update_checksum(struct nmbm_header *header)
486 +{
487 + header->checksum = 0;
488 + header->checksum = nmbm_crc32(0, header, header->size);
489 +}
490 +
491 +/*
492 + * nmbm_get_spare_block_count - Calculate number of blocks should be reserved
493 + * @block_count: number of blocks of data
494 + *
495 + * Calculate number of blocks should be reserved for data
496 + */
497 +static uint32_t nmbm_get_spare_block_count(uint32_t block_count)
498 +{
499 + uint32_t val;
500 +
501 + val = (block_count + NMBM_SPARE_BLOCK_DIV / 2) / NMBM_SPARE_BLOCK_DIV;
502 + val *= NMBM_SPARE_BLOCK_MULTI;
503 +
504 + if (val < NMBM_SPARE_BLOCK_MIN)
505 + val = NMBM_SPARE_BLOCK_MIN;
506 +
507 + return val;
508 +}
509 +
510 +/*
511 + * nmbm_get_block_state_raw - Get state of a block from raw block state table
512 + * @block_state: pointer to raw block state table (bitmap)
513 + * @ba: block address
514 + */
515 +static uint32_t nmbm_get_block_state_raw(nmbm_bitmap_t *block_state,
516 + uint32_t ba)
517 +{
518 + uint32_t unit, shift;
519 +
520 + unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
521 + shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
522 +
523 + return (block_state[unit] >> shift) & BLOCK_ST_MASK;
524 +}
525 +
526 +/*
527 + * nmbm_get_block_state - Get state of a block from block state table
528 + * @ni: NMBM instance structure
529 + * @ba: block address
530 + */
531 +static uint32_t nmbm_get_block_state(struct nmbm_instance *ni, uint32_t ba)
532 +{
533 + return nmbm_get_block_state_raw(ni->block_state, ba);
534 +}
535 +
536 +/*
537 + * nmbm_set_block_state - Set state of a block to block state table
538 + * @ni: NMBM instance structure
539 + * @ba: block address
540 + * @state: block state
541 + *
542 + * Set state of a block. If the block state changed, ni->block_state_changed
543 + * will be increased.
544 + */
545 +static bool nmbm_set_block_state(struct nmbm_instance *ni, uint32_t ba,
546 + uint32_t state)
547 +{
548 + uint32_t unit, shift, orig;
549 + nmbm_bitmap_t uv;
550 +
551 + unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
552 + shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
553 +
554 + orig = (ni->block_state[unit] >> shift) & BLOCK_ST_MASK;
555 + state &= BLOCK_ST_MASK;
556 +
557 + uv = ni->block_state[unit] & (~(BLOCK_ST_MASK << shift));
558 + uv |= state << shift;
559 + ni->block_state[unit] = uv;
560 +
561 + if (state == BLOCK_ST_BAD)
562 + nmbm_mark_block_color_bad(ni, ba);
563 +
564 + if (orig != state) {
565 + ni->block_state_changed++;
566 + return true;
567 + }
568 +
569 + return false;
570 +}
571 +
572 +/*
573 + * nmbm_block_walk_asc - Skip specified number of good blocks, ascending addr.
574 + * @ni: NMBM instance structure
575 + * @ba: start physical block address
576 + * @nba: return physical block address after walk
577 + * @count: number of good blocks to be skipped
578 + * @limit: highest block address allowed for walking
579 + *
580 + * Start from @ba, skipping any bad blocks, counting @count good blocks, and
581 + * return the next good block address.
582 + *
583 + * If not enough good blocks are counted before @limit is reached, false will be returned.
584 + *
585 + * If @count == 0, nearest good block address will be returned.
586 + * @limit is not counted in walking.
587 + */
588 +static bool nmbm_block_walk_asc(struct nmbm_instance *ni, uint32_t ba,
589 + uint32_t *nba, uint32_t count,
590 + uint32_t limit)
591 +{
592 + int32_t nblock = count;
593 +
594 + if (limit >= ni->block_count)
595 + limit = ni->block_count - 1;
596 +
597 + while (ba < limit) {
598 + if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
599 + nblock--;
600 +
601 + if (nblock < 0) {
602 + *nba = ba;
603 + return true;
604 + }
605 +
606 + ba++;
607 + }
608 +
609 + return false;
610 +}
611 +
612 +/*
613 + * nmbm_block_walk_desc - Skip specified number of good blocks, descending addr
614 + * @ni: NMBM instance structure
615 + * @ba: start physical block address
616 + * @nba: return physical block address after walk
617 + * @count: number of good blocks to be skipped
618 + * @limit: lowest block address allowed for walking
619 + *
620 + * Start from @ba, skipping any bad blocks, counting @count good blocks, and
621 + * return the next good block address.
622 + *
623 + * If not enough good blocks are counted before @limit is reached, false will be returned.
624 + *
625 + * If @count == 0, nearest good block address will be returned.
626 + * @limit is not counted in walking.
627 + */
628 +static bool nmbm_block_walk_desc(struct nmbm_instance *ni, uint32_t ba,
629 + uint32_t *nba, uint32_t count, uint32_t limit)
630 +{
631 + int32_t nblock = count;
632 +
633 + if (limit >= ni->block_count)
634 + limit = ni->block_count - 1;
635 +
636 + while (ba > limit) {
637 + if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
638 + nblock--;
639 +
640 + if (nblock < 0) {
641 + *nba = ba;
642 + return true;
643 + }
644 +
645 + ba--;
646 + }
647 +
648 + return false;
649 +}
650 +
651 +/*
652 + * nmbm_block_walk - Skip specified number of good blocks from curr. block addr
653 + * @ni: NMBM instance structure
654 + * @ascending: whether to walk ascending
655 + * @ba: start physical block address
656 + * @nba: return physical block address after walk
657 + * @count: number of good blocks to be skipped
658 + * @limit: highest/lowest block address allowed for walking
659 + *
660 + * Start from @ba, skipping any bad blocks, counting @count good blocks, and
661 + * return the next good block address.
662 + *
663 + * If not enough good blocks are counted before @limit is reached, false will be returned.
664 + *
665 + * If @count == 0, nearest good block address will be returned.
666 + * @limit can be set to negative if no limit required.
667 + * @limit is not counted in walking.
668 + */
669 +static bool nmbm_block_walk(struct nmbm_instance *ni, bool ascending,
670 + uint32_t ba, uint32_t *nba, int32_t count,
671 + int32_t limit)
672 +{
673 + if (ascending)
674 + return nmbm_block_walk_asc(ni, ba, nba, count, limit);
675 +
676 + return nmbm_block_walk_desc(ni, ba, nba, count, limit);
677 +}
678 +
679 +/*
680 + * nmbm_scan_badblocks - Scan and record all bad blocks
681 + * @ni: NMBM instance structure
682 + *
683 + * Scan the entire lower NAND chip and record all bad blocks in to block state
684 + * table.
685 + */
686 +static void nmbm_scan_badblocks(struct nmbm_instance *ni)
687 +{
688 + uint32_t ba;
689 +
690 + for (ba = 0; ba < ni->block_count; ba++) {
691 + if (nmbm_check_bad_phys_block(ni, ba)) {
692 + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
693 + nlog_info(ni, "Bad block %u [0x%08llx]\n", ba,
694 + ba2addr(ni, ba));
695 + }
696 + }
697 +}
698 +
699 +/*
700 + * nmbm_build_mapping_table - Build initial block mapping table
701 + * @ni: NMBM instance structure
702 + *
703 + * The initial mapping table will be compatible with the strategy of
704 + * factory production.
705 + */
706 +static void nmbm_build_mapping_table(struct nmbm_instance *ni)
707 +{
708 + uint32_t pb, lb;
709 +
710 + for (pb = 0, lb = 0; pb < ni->mgmt_start_ba; pb++) {
711 + if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
712 + continue;
713 +
714 + /* Always map to the next good block */
715 + ni->block_mapping[lb++] = pb;
716 + }
717 +
718 + ni->data_block_count = lb;
719 +
720 + /* Unusable/Management blocks */
721 + for (pb = lb; pb < ni->block_count; pb++)
722 + ni->block_mapping[pb] = -1;
723 +}
724 +
725 +/*
726 + * nmbm_erase_block_and_check - Erase a block and check its usability
727 + * @ni: NMBM instance structure
728 + * @ba: block address to be erased
729 + *
730 + * Erase a block and check its usability
731 + *
732 + * Return true if the block is usable, false if erasure failure or the block
733 + * has too many bitflips.
734 + */
735 +static bool nmbm_erase_block_and_check(struct nmbm_instance *ni, uint32_t ba)
736 +{
737 + uint64_t addr, off;
738 + bool success;
739 + int ret;
740 +
741 + success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
742 + if (!success)
743 + return false;
744 +
745 + if (!(ni->lower.flags & NMBM_F_EMPTY_PAGE_ECC_OK))
746 + return true;
747 +
748 + /* Check every page to make sure there aren't too many bitflips */
749 +
750 + addr = ba2addr(ni, ba);
751 +
752 + for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
753 + schedule();
754 +
755 + ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
756 + NMBM_MODE_PLACE_OOB);
757 + if (ret == -EBADMSG) {
758 + /*
759 + * NMBM_F_EMPTY_PAGE_ECC_OK means the empty page is
760 + * still protected by ECC. So reading pages with ECC
761 + * enabled and -EBADMSG means there are too many
762 + * bitflips that can't be recovered, and the block
763 + * containing the page should be marked bad.
764 + */
765 + nlog_err(ni,
766 + "Too many bitflips in empty page at 0x%llx\n",
767 + addr + off);
768 + return false;
769 + }
770 + }
771 +
772 + return true;
773 +}
774 +
775 +/*
776 + * nmbm_erase_range - Erase a range of blocks
777 + * @ni: NMBM instance structure
778 + * @ba: block address where the erasure will start
779 + * @limit: top block address allowed for erasure
780 + *
781 + * Erase blocks within the specific range. Newly-found bad blocks will be
782 + * marked.
783 + *
784 + * @limit is not counted into the allowed erasure address.
785 + */
786 +static void nmbm_erase_range(struct nmbm_instance *ni, uint32_t ba,
787 + uint32_t limit)
788 +{
789 + bool success;
790 +
791 + while (ba < limit) {
792 + schedule();
793 +
794 + if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
795 + goto next_block;
796 +
797 + /* Insurance to detect unexpected bad block marked by user */
798 + if (nmbm_check_bad_phys_block(ni, ba)) {
799 + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
800 + goto next_block;
801 + }
802 +
803 + success = nmbm_erase_block_and_check(ni, ba);
804 + if (success)
805 + goto next_block;
806 +
807 + nmbm_mark_phys_bad_block(ni, ba);
808 + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
809 +
810 + next_block:
811 + ba++;
812 + }
813 +}
814 +
815 +/*
816 + * nmbm_write_repeated_data - Write critical data to a block with retry
817 + * @ni: NMBM instance structure
818 + * @ba: block address where the data will be written to
819 + * @data: the data to be written
820 + * @size: size of the data
821 + *
822 + * Write data to every page of the block. Success only if all pages within
823 + * this block have been successfully written.
824 + *
825 + * Make sure data size is not bigger than one page.
826 + *
827 + * This function will write and verify every page for at most
828 + * NMBM_TRY_COUNT times.
829 + */
830 +static bool nmbm_write_repeated_data(struct nmbm_instance *ni, uint32_t ba,
831 + const void *data, uint32_t size)
832 +{
833 + uint64_t addr, off;
834 + bool success;
835 + int ret;
836 +
837 + if (size > ni->lower.writesize)
838 + return false;
839 +
840 + addr = ba2addr(ni, ba);
841 +
842 + for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
843 + schedule();
844 +
845 + /* Prepare page data. fill 0xff to unused region */
846 + memcpy(ni->page_cache, data, size);
847 + memset(ni->page_cache + size, 0xff, ni->rawpage_size - size);
848 +
849 + success = nmbm_write_phys_page(ni, addr + off, ni->page_cache,
850 + NULL, NMBM_MODE_PLACE_OOB);
851 + if (!success)
852 + return false;
853 +
854 + /* Verify the data just written. ECC error indicates failure */
855 + ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
856 + NMBM_MODE_PLACE_OOB);
857 + if (ret < 0)
858 + return false;
859 +
860 + if (memcmp(ni->page_cache, data, size))
861 + return false;
862 + }
863 +
864 + return true;
865 +}
866 +
867 +/*
868 + * nmbm_write_signature - Write signature to NAND chip
869 + * @ni: NMBM instance structure
870 + * @limit: top block address allowed for writing
871 + * @signature: the signature to be written
872 + * @signature_ba: the actual block address where signature is written to
873 + *
874 + * Write signature within a specific range, from chip bottom to limit.
875 + * At most one block will be written.
876 + *
877 + * @limit is not counted into the allowed write address.
878 + */
879 +static bool nmbm_write_signature(struct nmbm_instance *ni, uint32_t limit,
880 + const struct nmbm_signature *signature,
881 + uint32_t *signature_ba)
882 +{
883 + uint32_t ba = ni->block_count - 1;
884 + bool success;
885 +
886 + while (ba > limit) {
887 + schedule();
888 +
889 + if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
890 + goto next_block;
891 +
892 + /* Insurance to detect unexpected bad block marked by user */
893 + if (nmbm_check_bad_phys_block(ni, ba)) {
894 + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
895 + goto next_block;
896 + }
897 +
898 + success = nmbm_erase_block_and_check(ni, ba);
899 + if (!success)
900 + goto skip_bad_block;
901 +
902 + success = nmbm_write_repeated_data(ni, ba, signature,
903 + sizeof(*signature));
904 + if (success) {
905 + *signature_ba = ba;
906 + return true;
907 + }
908 +
909 + skip_bad_block:
910 + nmbm_mark_phys_bad_block(ni, ba);
911 + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
912 +
913 + next_block:
914 + ba--;
915 + };
916 +
917 + return false;
918 +}
919 +
920 +/*
921 + * nmbn_read_data - Read data
922 + * @ni: NMBM instance structure
923 + * @addr: linear address where the data will be read from
924 + * @data: the data to be read
925 + * @size: the size of data
926 + *
927 + * Read data range.
928 + * Every page will be tried for at most NMBM_TRY_COUNT times.
929 + *
930 + * Return 0 for success, positive value for corrected bitflip count,
931 + * -EBADMSG for ecc error, other negative values for other errors
932 + */
933 +static int nmbn_read_data(struct nmbm_instance *ni, uint64_t addr, void *data,
934 + uint32_t size)
935 +{
936 + uint64_t off = addr;
937 + uint8_t *ptr = data;
938 + uint32_t sizeremain = size, chunksize, leading;
939 + int ret;
940 +
941 + while (sizeremain) {
942 + schedule();
943 +
944 + leading = off & ni->writesize_mask;
945 + chunksize = ni->lower.writesize - leading;
946 + if (chunksize > sizeremain)
947 + chunksize = sizeremain;
948 +
949 + if (chunksize == ni->lower.writesize) {
950 + ret = nmbm_read_phys_page(ni, off - leading, ptr, NULL,
951 + NMBM_MODE_PLACE_OOB);
952 + if (ret < 0)
953 + return ret;
954 + } else {
955 + ret = nmbm_read_phys_page(ni, off - leading,
956 + ni->page_cache, NULL,
957 + NMBM_MODE_PLACE_OOB);
958 + if (ret < 0)
959 + return ret;
960 +
961 + memcpy(ptr, ni->page_cache + leading, chunksize);
962 + }
963 +
964 + off += chunksize;
965 + ptr += chunksize;
966 + sizeremain -= chunksize;
967 + }
968 +
969 + return 0;
970 +}
971 +
972 +/*
973 + * nmbn_write_verify_data - Write data with validation
974 + * @ni: NMBM instance structure
975 + * @addr: linear address where the data will be written to
976 + * @data: the data to be written
977 + * @size: the size of data
978 + *
979 + * Write data and verify.
980 + * Every page will be tried for at most NMBM_TRY_COUNT times.
981 + */
982 +static bool nmbn_write_verify_data(struct nmbm_instance *ni, uint64_t addr,
983 + const void *data, uint32_t size)
984 +{
985 + uint64_t off = addr;
986 + const uint8_t *ptr = data;
987 + uint32_t sizeremain = size, chunksize, leading;
988 + bool success;
989 + int ret;
990 +
991 + while (sizeremain) {
992 + schedule();
993 +
994 + leading = off & ni->writesize_mask;
995 + chunksize = ni->lower.writesize - leading;
996 + if (chunksize > sizeremain)
997 + chunksize = sizeremain;
998 +
999 + /* Prepare page data. fill 0xff to unused region */
1000 + memset(ni->page_cache, 0xff, ni->rawpage_size);
1001 + memcpy(ni->page_cache + leading, ptr, chunksize);
1002 +
1003 + success = nmbm_write_phys_page(ni, off - leading,
1004 + ni->page_cache, NULL,
1005 + NMBM_MODE_PLACE_OOB);
1006 + if (!success)
1007 + return false;
1008 +
1009 + /* Verify the data just written. ECC error indicates failure */
1010 + ret = nmbm_read_phys_page(ni, off - leading, ni->page_cache,
1011 + NULL, NMBM_MODE_PLACE_OOB);
1012 + if (ret < 0)
1013 + return false;
1014 +
1015 + if (memcmp(ni->page_cache + leading, ptr, chunksize))
1016 + return false;
1017 +
1018 + off += chunksize;
1019 + ptr += chunksize;
1020 + sizeremain -= chunksize;
1021 + }
1022 +
1023 + return true;
1024 +}
1025 +
1026 +/*
1027 + * nmbm_write_mgmt_range - Write management data into NAND within a range
1028 + * @ni: NMBM instance structure
1029 + * @addr: preferred start block address for writing
1030 + * @limit: highest block address allowed for writing
1031 + * @data: the data to be written
1032 + * @size: the size of data
1033 + * @actual_start_ba: actual start block address of data
1034 + * @actual_end_ba: block address after the end of data
1035 + *
1036 + * @limit is not counted into the allowed write address.
1037 + */
1038 +static bool nmbm_write_mgmt_range(struct nmbm_instance *ni, uint32_t ba,
1039 + uint32_t limit, const void *data,
1040 + uint32_t size, uint32_t *actual_start_ba,
1041 + uint32_t *actual_end_ba)
1042 +{
1043 + const uint8_t *ptr = data;
1044 + uint32_t sizeremain = size, chunksize;
1045 + bool success;
1046 +
1047 + while (sizeremain && ba < limit) {
1048 + schedule();
1049 +
1050 + chunksize = sizeremain;
1051 + if (chunksize > ni->lower.erasesize)
1052 + chunksize = ni->lower.erasesize;
1053 +
1054 + if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
1055 + goto next_block;
1056 +
1057 + /* Insurance to detect unexpected bad block marked by user */
1058 + if (nmbm_check_bad_phys_block(ni, ba)) {
1059 + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1060 + goto next_block;
1061 + }
1062 +
1063 + success = nmbm_erase_block_and_check(ni, ba);
1064 + if (!success)
1065 + goto skip_bad_block;
1066 +
1067 + success = nmbn_write_verify_data(ni, ba2addr(ni, ba), ptr,
1068 + chunksize);
1069 + if (!success)
1070 + goto skip_bad_block;
1071 +
1072 + if (sizeremain == size)
1073 + *actual_start_ba = ba;
1074 +
1075 + ptr += chunksize;
1076 + sizeremain -= chunksize;
1077 +
1078 + goto next_block;
1079 +
1080 + skip_bad_block:
1081 + nmbm_mark_phys_bad_block(ni, ba);
1082 + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1083 +
1084 + next_block:
1085 + ba++;
1086 + }
1087 +
1088 + if (sizeremain)
1089 + return false;
1090 +
1091 + *actual_end_ba = ba;
1092 +
1093 + return true;
1094 +}
1095 +
1096 +/*
1097 + * nmbm_generate_info_table_cache - Generate info table cache data
1098 + * @ni: NMBM instance structure
1099 + *
1100 + * Generate info table cache data to be written into flash.
1101 + */
1102 +static bool nmbm_generate_info_table_cache(struct nmbm_instance *ni)
1103 +{
1104 + bool changed = false;
1105 +
1106 + memset(ni->info_table_cache, 0xff, ni->info_table_size);
1107 +
1108 + memcpy(ni->info_table_cache + ni->info_table.state_table_off,
1109 + ni->block_state, ni->state_table_size);
1110 +
1111 + memcpy(ni->info_table_cache + ni->info_table.mapping_table_off,
1112 + ni->block_mapping, ni->mapping_table_size);
1113 +
1114 + ni->info_table.header.magic = NMBM_MAGIC_INFO_TABLE;
1115 + ni->info_table.header.version = NMBM_VER;
1116 + ni->info_table.header.size = ni->info_table_size;
1117 +
1118 + if (ni->block_state_changed || ni->block_mapping_changed) {
1119 + ni->info_table.write_count++;
1120 + changed = true;
1121 + }
1122 +
1123 + memcpy(ni->info_table_cache, &ni->info_table, sizeof(ni->info_table));
1124 +
1125 + nmbm_update_checksum((struct nmbm_header *)ni->info_table_cache);
1126 +
1127 + return changed;
1128 +}
1129 +
1130 +/*
1131 + * nmbm_write_info_table - Write info table into NAND within a range
1132 + * @ni: NMBM instance structure
1133 + * @ba: preferred start block address for writing
1134 + * @limit: highest block address allowed for writing
1135 + * @actual_start_ba: actual start block address of info table
1136 + * @actual_end_ba: block address after the end of info table
1137 + *
1138 + * @limit is not counted into the allowed write address.
1139 + */
1140 +static bool nmbm_write_info_table(struct nmbm_instance *ni, uint32_t ba,
1141 + uint32_t limit, uint32_t *actual_start_ba,
1142 + uint32_t *actual_end_ba)
1143 +{
1144 + return nmbm_write_mgmt_range(ni, ba, limit, ni->info_table_cache,
1145 + ni->info_table_size, actual_start_ba,
1146 + actual_end_ba);
1147 +}
1148 +
1149 +/*
1150 + * nmbm_mark_tables_clean - Mark info table `clean'
1151 + * @ni: NMBM instance structure
1152 + */
1153 +static void nmbm_mark_tables_clean(struct nmbm_instance *ni)
1154 +{
1155 + ni->block_state_changed = 0;
1156 + ni->block_mapping_changed = 0;
1157 +}
1158 +
1159 +/*
1160 + * nmbm_try_reserve_blocks - Reserve blocks, compromising if necessary
1161 + * @ni: NMBM instance structure
1162 + * @ba: start physical block address
1163 + * @nba: return physical block address after reservation
1164 + * @count: number of good blocks to be skipped
1165 + * @min_count: minimum number of good blocks to be skipped
1166 + * @limit: highest/lowest block address allowed for walking
1167 + *
1168 + * Reserve specific blocks. If failed, try to reserve as many as possible.
1169 + */
1170 +static bool nmbm_try_reserve_blocks(struct nmbm_instance *ni, uint32_t ba,
1171 + uint32_t *nba, uint32_t count,
1172 + int32_t min_count, int32_t limit)
1173 +{
1174 + int32_t nblocks = count;
1175 + bool success;
1176 +
1177 + while (nblocks >= min_count) {
1178 + success = nmbm_block_walk(ni, true, ba, nba, nblocks, limit);
1179 + if (success)
1180 + return true;
1181 +
1182 + nblocks--;
1183 + }
1184 +
1185 + return false;
1186 +}
1187 +
1188 +/*
1189 + * nmbm_rebuild_info_table - Build main & backup info table from scratch
1190 + * @ni: NMBM instance structure
1191 + * The backup table is skipped if there is no room left for it.
1192 + */
1193 +static bool nmbm_rebuild_info_table(struct nmbm_instance *ni)
1194 +{
1195 + uint32_t table_start_ba, table_end_ba, next_start_ba;
1196 + uint32_t main_table_end_ba;
1197 + bool success;
1198 +
1199 + /* Set initial value */
1200 + ni->main_table_ba = 0;
1201 + ni->backup_table_ba = 0;
1202 + ni->mapping_blocks_ba = ni->mapping_blocks_top_ba;
1203 +
1204 + /* Write main table */
1205 + success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1206 + ni->mapping_blocks_top_ba,
1207 + &table_start_ba, &table_end_ba);
1208 + if (!success) {
1209 + /* Failed to write main table, data will be lost */
1210 + nlog_emerg(ni, "Unable to write at least one info table!\n");
1211 + nlog_emerg(ni, "Please save your data before power off!\n");
1212 + ni->protected = 1;
1213 + return false;
1214 + }
1215 +
1216 + /* Main info table is successfully written, record its offset */
1217 + ni->main_table_ba = table_start_ba;
1218 + main_table_end_ba = table_end_ba;
1219 +
1220 + /* Adjust mapping_blocks_ba */
1221 + ni->mapping_blocks_ba = table_end_ba;
1222 +
1223 + nmbm_mark_tables_clean(ni);
1224 +
1225 + nlog_table_creation(ni, true, table_start_ba, table_end_ba);
1226 +
1227 + /* Reserve spare blocks for main info table. */
1228 + success = nmbm_try_reserve_blocks(ni, table_end_ba,
1229 + &next_start_ba,
1230 + ni->info_table_spare_blocks, 0,
1231 + ni->mapping_blocks_top_ba -
1232 + size2blk(ni, ni->info_table_size));
1233 + if (!success) {
1234 + /* There is no spare block. */
1235 + nlog_debug(ni, "No room for backup info table\n");
1236 + return true;
1237 + }
1238 +
1239 + /* Write backup info table. */
1240 + success = nmbm_write_info_table(ni, next_start_ba,
1241 + ni->mapping_blocks_top_ba,
1242 + &table_start_ba, &table_end_ba);
1243 + if (!success) {
1244 +		/* There are not enough blocks for the backup table. */
1245 + nlog_debug(ni, "No room for backup info table\n");
1246 + return true;
1247 + }
1248 +
1249 + /* Backup table is successfully written, record its offset */
1250 + ni->backup_table_ba = table_start_ba;
1251 +
1252 +	/* Adjust mapping_blocks_ba */
1253 + ni->mapping_blocks_ba = table_end_ba;
1254 +
1255 + /* Erase spare blocks of main table to clean possible interference data */
1256 + nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
1257 +
1258 + nlog_table_creation(ni, false, table_start_ba, table_end_ba);
1259 +
1260 + return true;
1261 +}
1262 +
1263 +/*
1264 + * nmbm_rescue_single_info_table - Rescue when there is only one info table
1265 + * @ni: NMBM instance structure
1266 + *
1267 + * This function is called when there is only one info table exists.
1268 + * This function may fail if we can't write new info table
1269 + */
1270 +static bool nmbm_rescue_single_info_table(struct nmbm_instance *ni)
1271 +{
1272 + uint32_t table_start_ba, table_end_ba, write_ba;
1273 + bool success;
1274 +
1275 + /* Try to write new info table in front of existing table */
1276 + success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1277 + ni->main_table_ba,
1278 + &table_start_ba,
1279 + &table_end_ba);
1280 + if (success) {
1281 + /*
1282 + * New table becomes the main table, existing table becomes
1283 + * the backup table.
1284 + */
1285 + ni->backup_table_ba = ni->main_table_ba;
1286 + ni->main_table_ba = table_start_ba;
1287 +
1288 + nmbm_mark_tables_clean(ni);
1289 +
1290 + /* Erase spare blocks of main table to clean possible interference data */
1291 + nmbm_erase_range(ni, table_end_ba, ni->backup_table_ba);
1292 +
1293 + nlog_table_creation(ni, true, table_start_ba, table_end_ba);
1294 +
1295 + return true;
1296 + }
1297 +
1298 + /* Try to reserve spare blocks for existing table */
1299 + success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
1300 + ni->info_table_spare_blocks, 0,
1301 + ni->mapping_blocks_top_ba -
1302 + size2blk(ni, ni->info_table_size));
1303 + if (!success) {
1304 + nlog_warn(ni, "Failed to rescue single info table\n");
1305 + return false;
1306 + }
1307 +
1308 + /* Try to write new info table next to the existing table */
1309 + while (write_ba >= ni->mapping_blocks_ba) {
1310 + schedule();
1311 +
1312 + success = nmbm_write_info_table(ni, write_ba,
1313 + ni->mapping_blocks_top_ba,
1314 + &table_start_ba,
1315 + &table_end_ba);
1316 + if (success)
1317 + break;
1318 +
1319 + write_ba--;
1320 + }
1321 +
1322 + if (success) {
1323 + /* Erase spare blocks of main table to clean possible interference data */
1324 + nmbm_erase_range(ni, ni->mapping_blocks_ba, table_start_ba);
1325 +
1326 + /* New table becomes the backup table */
1327 + ni->backup_table_ba = table_start_ba;
1328 + ni->mapping_blocks_ba = table_end_ba;
1329 +
1330 + nmbm_mark_tables_clean(ni);
1331 +
1332 + nlog_table_creation(ni, false, table_start_ba, table_end_ba);
1333 +
1334 + return true;
1335 + }
1336 +
1337 + nlog_warn(ni, "Failed to rescue single info table\n");
1338 + return false;
1339 +}
1340 +
1341 +/*
1342 + * nmbm_update_single_info_table - Update one specific info table
1343 + * @ni: NMBM instance structure
1344 + */
1345 +static bool nmbm_update_single_info_table(struct nmbm_instance *ni,
1346 + bool update_main_table)
1347 +{
1348 + uint32_t write_start_ba, write_limit, table_start_ba, table_end_ba;
1349 + bool success;
1350 +
1351 + /* Determine the write range */
1352 + if (update_main_table) {
1353 + write_start_ba = ni->main_table_ba;
1354 + write_limit = ni->backup_table_ba;
1355 + } else {
1356 + write_start_ba = ni->backup_table_ba;
1357 + write_limit = ni->mapping_blocks_top_ba;
1358 + }
1359 +
1360 + nmbm_mark_block_color_mgmt(ni, write_start_ba, write_limit - 1);
1361 +
1362 + success = nmbm_write_info_table(ni, write_start_ba, write_limit,
1363 + &table_start_ba, &table_end_ba);
1364 + if (success) {
1365 + if (update_main_table) {
1366 + ni->main_table_ba = table_start_ba;
1367 + } else {
1368 + ni->backup_table_ba = table_start_ba;
1369 + ni->mapping_blocks_ba = table_end_ba;
1370 + }
1371 +
1372 + nmbm_mark_tables_clean(ni);
1373 +
1374 + nlog_table_update(ni, update_main_table, table_start_ba,
1375 + table_end_ba);
1376 +
1377 + return true;
1378 + }
1379 +
1380 + if (update_main_table) {
1381 + /*
1382 + * If failed to update main table, make backup table the new
1383 + * main table, and call nmbm_rescue_single_info_table()
1384 + */
1385 + nlog_warn(ni, "Unable to update %s info table\n",
1386 + update_main_table ? "Main" : "Backup");
1387 +
1388 + ni->main_table_ba = ni->backup_table_ba;
1389 + ni->backup_table_ba = 0;
1390 + return nmbm_rescue_single_info_table(ni);
1391 + }
1392 +
1393 + /* Only one table left */
1394 + ni->mapping_blocks_ba = ni->backup_table_ba;
1395 + ni->backup_table_ba = 0;
1396 +
1397 + return false;
1398 +}
1399 +
1400 +/*
1401 + * nmbm_rescue_main_info_table - Rescue when failed to write main info table
1402 + * @ni: NMBM instance structure
1403 + *
1404 + * This function is called when main info table failed to be written, and
1405 + * backup info table exists.
1406 + */
1407 +static bool nmbm_rescue_main_info_table(struct nmbm_instance *ni)
1408 +{
1409 + uint32_t tmp_table_start_ba, tmp_table_end_ba, main_table_start_ba;
1410 + uint32_t main_table_end_ba, write_ba;
1411 + uint32_t info_table_erasesize = size2blk(ni, ni->info_table_size);
1412 + bool success;
1413 +
1414 + /* Try to reserve spare blocks for existing backup info table */
1415 + success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
1416 + ni->info_table_spare_blocks, 0,
1417 + ni->mapping_blocks_top_ba -
1418 + info_table_erasesize);
1419 + if (!success) {
1420 + /* There is no spare block. Backup info table becomes the main table. */
1421 + nlog_err(ni, "No room for temporary info table\n");
1422 + ni->main_table_ba = ni->backup_table_ba;
1423 + ni->backup_table_ba = 0;
1424 + return true;
1425 + }
1426 +
1427 + /* Try to write temporary info table into spare unmapped blocks */
1428 + while (write_ba >= ni->mapping_blocks_ba) {
1429 + schedule();
1430 +
1431 + success = nmbm_write_info_table(ni, write_ba,
1432 + ni->mapping_blocks_top_ba,
1433 + &tmp_table_start_ba,
1434 + &tmp_table_end_ba);
1435 + if (success)
1436 + break;
1437 +
1438 + write_ba--;
1439 + }
1440 +
1441 + if (!success) {
1442 + /* Backup info table becomes the main table */
1443 + nlog_err(ni, "Failed to update main info table\n");
1444 + ni->main_table_ba = ni->backup_table_ba;
1445 + ni->backup_table_ba = 0;
1446 + return true;
1447 + }
1448 +
1449 +	/* Adjust mapping_blocks_ba */
1450 + ni->mapping_blocks_ba = tmp_table_end_ba;
1451 +
1452 + nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
1453 + tmp_table_end_ba - 1);
1454 +
1455 + /*
1456 + * Now write main info table at the beginning of management area.
1457 + * This operation will generally destroy the original backup info
1458 + * table.
1459 + */
1460 + success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1461 + tmp_table_start_ba,
1462 + &main_table_start_ba,
1463 + &main_table_end_ba);
1464 + if (!success) {
1465 + /* Temporary info table becomes the main table */
1466 + ni->main_table_ba = tmp_table_start_ba;
1467 + ni->backup_table_ba = 0;
1468 +
1469 + nmbm_mark_tables_clean(ni);
1470 +
1471 + nlog_err(ni, "Failed to update main info table\n");
1472 + nmbm_mark_block_color_info_table(ni, tmp_table_start_ba,
1473 + tmp_table_end_ba - 1);
1474 +
1475 + return true;
1476 + }
1477 +
1478 + /* Main info table has been successfully written, record its offset */
1479 + ni->main_table_ba = main_table_start_ba;
1480 +
1481 + nmbm_mark_tables_clean(ni);
1482 +
1483 + nlog_table_creation(ni, true, main_table_start_ba, main_table_end_ba);
1484 +
1485 + /*
1486 + * Temporary info table becomes the new backup info table if it's
1487 + * not overwritten.
1488 + */
1489 + if (main_table_end_ba <= tmp_table_start_ba) {
1490 + ni->backup_table_ba = tmp_table_start_ba;
1491 +
1492 + nlog_table_creation(ni, false, tmp_table_start_ba,
1493 + tmp_table_end_ba);
1494 +
1495 + return true;
1496 + }
1497 +
1498 +	/* Adjust mapping_blocks_ba */
1499 + ni->mapping_blocks_ba = main_table_end_ba;
1500 +
1501 + /* Try to reserve spare blocks for new main info table */
1502 + success = nmbm_try_reserve_blocks(ni, main_table_end_ba, &write_ba,
1503 + ni->info_table_spare_blocks, 0,
1504 + ni->mapping_blocks_top_ba -
1505 + info_table_erasesize);
1506 + if (!success) {
1507 + /* There is no spare block. Only main table exists. */
1508 + nlog_err(ni, "No room for backup info table\n");
1509 + ni->backup_table_ba = 0;
1510 + return true;
1511 + }
1512 +
1513 + /* Write new backup info table. */
1514 + while (write_ba >= main_table_end_ba) {
1515 + schedule();
1516 +
1517 + success = nmbm_write_info_table(ni, write_ba,
1518 + ni->mapping_blocks_top_ba,
1519 + &tmp_table_start_ba,
1520 + &tmp_table_end_ba);
1521 + if (success)
1522 + break;
1523 +
1524 + write_ba--;
1525 + }
1526 +
1527 + if (!success) {
1528 + nlog_err(ni, "No room for backup info table\n");
1529 + ni->backup_table_ba = 0;
1530 + return true;
1531 + }
1532 +
1533 + /* Backup info table has been successfully written, record its offset */
1534 + ni->backup_table_ba = tmp_table_start_ba;
1535 +
1536 +	/* Adjust mapping_blocks_ba */
1537 + ni->mapping_blocks_ba = tmp_table_end_ba;
1538 +
1539 + /* Erase spare blocks of main table to clean possible interference data */
1540 + nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
1541 +
1542 + nlog_table_creation(ni, false, tmp_table_start_ba, tmp_table_end_ba);
1543 +
1544 + return true;
1545 +}
1546 +
1547 +/*
1548 + * nmbm_update_info_table_once - Update info table once
1549 + * @ni: NMBM instance structure
1550 + * @force: force update
1551 + *
1552 + * Update both main and backup info table. Return true if at least one info
1553 + * table has been successfully written.
1554 + * This function only tries to update the info table once, regardless of the result.
1555 + */
1556 +static bool nmbm_update_info_table_once(struct nmbm_instance *ni, bool force)
1557 +{
1558 + uint32_t table_start_ba, table_end_ba;
1559 + uint32_t main_table_limit;
1560 + bool success;
1561 +
1562 + /* Do nothing if there is no change */
1563 + if (!nmbm_generate_info_table_cache(ni) && !force)
1564 + return true;
1565 +
1566 + /* Check whether both two tables exist */
1567 + if (!ni->backup_table_ba) {
1568 + main_table_limit = ni->mapping_blocks_top_ba;
1569 + goto write_main_table;
1570 + }
1571 +
1572 + nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
1573 + ni->mapping_blocks_ba - 1);
1574 +
1575 + /*
1576 + * Write backup info table in its current range.
1577 +	 * Note that limit is set to mapping_blocks_top_ba to provide as many
1578 + * spare blocks as possible for the backup table. If at last
1579 +	 * unmapped blocks are used by backup table, mapping_blocks_ba will
1580 + * be adjusted.
1581 + */
1582 + success = nmbm_write_info_table(ni, ni->backup_table_ba,
1583 + ni->mapping_blocks_top_ba,
1584 + &table_start_ba, &table_end_ba);
1585 + if (!success) {
1586 + /*
1587 + * There is nothing to do if failed to write backup table.
1588 + * Write the main table now.
1589 + */
1590 + nlog_err(ni, "No room for backup table\n");
1591 + ni->mapping_blocks_ba = ni->backup_table_ba;
1592 + ni->backup_table_ba = 0;
1593 + main_table_limit = ni->mapping_blocks_top_ba;
1594 + goto write_main_table;
1595 + }
1596 +
1597 + /* Backup table is successfully written, record its offset */
1598 + ni->backup_table_ba = table_start_ba;
1599 +
1600 +	/* Adjust mapping_blocks_ba */
1601 + ni->mapping_blocks_ba = table_end_ba;
1602 +
1603 + nmbm_mark_tables_clean(ni);
1604 +
1605 + /* The normal limit of main table */
1606 + main_table_limit = ni->backup_table_ba;
1607 +
1608 + nlog_table_update(ni, false, table_start_ba, table_end_ba);
1609 +
1610 +write_main_table:
1611 + if (!ni->main_table_ba)
1612 + goto rebuild_tables;
1613 +
1614 + if (!ni->backup_table_ba)
1615 + nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
1616 + ni->mapping_blocks_ba - 1);
1617 + else
1618 + nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
1619 + ni->backup_table_ba - 1);
1620 +
1621 + /* Write main info table in its current range */
1622 + success = nmbm_write_info_table(ni, ni->main_table_ba,
1623 + main_table_limit, &table_start_ba,
1624 + &table_end_ba);
1625 + if (!success) {
1626 + /* If failed to write main table, go rescue procedure */
1627 + if (!ni->backup_table_ba)
1628 + goto rebuild_tables;
1629 +
1630 + return nmbm_rescue_main_info_table(ni);
1631 + }
1632 +
1633 + /* Main info table is successfully written, record its offset */
1634 + ni->main_table_ba = table_start_ba;
1635 +
1636 +	/* Adjust mapping_blocks_ba */
1637 + if (!ni->backup_table_ba)
1638 + ni->mapping_blocks_ba = table_end_ba;
1639 +
1640 + nmbm_mark_tables_clean(ni);
1641 +
1642 + nlog_table_update(ni, true, table_start_ba, table_end_ba);
1643 +
1644 + return true;
1645 +
1646 +rebuild_tables:
1647 + return nmbm_rebuild_info_table(ni);
1648 +}
1649 +
1650 +/*
1651 + * nmbm_update_info_table - Update info table
1652 + * @ni: NMBM instance structure
1653 + *
1654 + * Update both main and backup info table. Return true if at least one table
1655 + * has been successfully written.
1656 + * This function will try to update info table repeatedly until no new bad
1657 + * block found during updating.
1658 + */
1659 +static bool nmbm_update_info_table(struct nmbm_instance *ni)
1660 +{
1661 + bool success;
1662 +
1663 + if (ni->protected)
1664 + return true;
1665 +
1666 + while (ni->block_state_changed || ni->block_mapping_changed) {
1667 + success = nmbm_update_info_table_once(ni, false);
1668 + if (!success) {
1669 + nlog_err(ni, "Failed to update info table\n");
1670 + return false;
1671 + }
1672 + }
1673 +
1674 + return true;
1675 +}
1676 +
1677 +/*
1678 + * nmbm_map_block - Map a bad block to an unused spare block
1679 + * @ni: NMBM instance structure
1680 + * @lb: logic block addr to map
1681 + */
1682 +static bool nmbm_map_block(struct nmbm_instance *ni, uint32_t lb)
1683 +{
1684 + uint32_t pb;
1685 + bool success;
1686 +
1687 + if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
1688 + nlog_warn(ni, "No spare unmapped blocks.\n");
1689 + return false;
1690 + }
1691 +
1692 + success = nmbm_block_walk(ni, false, ni->mapping_blocks_top_ba, &pb, 0,
1693 + ni->mapping_blocks_ba);
1694 + if (!success) {
1695 + nlog_warn(ni, "No spare unmapped blocks.\n");
1696 + nmbm_update_info_table(ni);
1697 + ni->mapping_blocks_top_ba = ni->mapping_blocks_ba;
1698 + return false;
1699 + }
1700 +
1701 + ni->block_mapping[lb] = pb;
1702 + ni->mapping_blocks_top_ba--;
1703 + ni->block_mapping_changed++;
1704 +
1705 + nlog_info(ni, "Logic block %u mapped to physical blcok %u\n", lb, pb);
1706 + nmbm_mark_block_color_mapped(ni, pb);
1707 +
1708 + return true;
1709 +}
1710 +
1711 +/*
1712 + * nmbm_create_info_table - Create info table(s)
1713 + * @ni: NMBM instance structure
1714 + *
1715 + * This function assumes that the chip has no existing info table(s)
1716 + */
1717 +static bool nmbm_create_info_table(struct nmbm_instance *ni)
1718 +{
1719 + uint32_t lb;
1720 + bool success;
1721 +
1722 +	/* Set initial mapping_blocks_top_ba */
1723 + success = nmbm_block_walk(ni, false, ni->signature_ba,
1724 + &ni->mapping_blocks_top_ba, 1,
1725 + ni->mgmt_start_ba);
1726 + if (!success) {
1727 + nlog_err(ni, "No room for spare blocks\n");
1728 + return false;
1729 + }
1730 +
1731 + /* Generate info table cache */
1732 + nmbm_generate_info_table_cache(ni);
1733 +
1734 + /* Write info table */
1735 + success = nmbm_rebuild_info_table(ni);
1736 + if (!success) {
1737 + nlog_err(ni, "Failed to build info tables\n");
1738 + return false;
1739 + }
1740 +
1741 + /* Remap bad block(s) at end of data area */
1742 + for (lb = ni->data_block_count; lb < ni->mgmt_start_ba; lb++) {
1743 + success = nmbm_map_block(ni, lb);
1744 + if (!success)
1745 + break;
1746 +
1747 + ni->data_block_count++;
1748 + }
1749 +
1750 + /* If state table and/or mapping table changed, update info table. */
1751 + success = nmbm_update_info_table(ni);
1752 + if (!success)
1753 + return false;
1754 +
1755 + return true;
1756 +}
1757 +
1758 +/*
1759 + * nmbm_create_new - Create NMBM on a new chip
1760 + * @ni: NMBM instance structure
1761 + */
1762 +static bool nmbm_create_new(struct nmbm_instance *ni)
1763 +{
1764 + bool success;
1765 +
1766 + /* Determine the boundary of management blocks */
1767 + ni->mgmt_start_ba = ni->block_count * (NMBM_MGMT_DIV - ni->lower.max_ratio) / NMBM_MGMT_DIV;
1768 +
1769 + if (ni->lower.max_reserved_blocks && ni->block_count - ni->mgmt_start_ba > ni->lower.max_reserved_blocks)
1770 + ni->mgmt_start_ba = ni->block_count - ni->lower.max_reserved_blocks;
1771 +
1772 + nlog_info(ni, "NMBM management region starts at block %u [0x%08llx]\n",
1773 + ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
1774 + nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba, ni->block_count - 1);
1775 +
1776 + /* Fill block state table & mapping table */
1777 + nmbm_scan_badblocks(ni);
1778 + nmbm_build_mapping_table(ni);
1779 +
1780 + /* Write signature */
1781 + ni->signature.header.magic = NMBM_MAGIC_SIGNATURE;
1782 + ni->signature.header.version = NMBM_VER;
1783 + ni->signature.header.size = sizeof(ni->signature);
1784 + ni->signature.nand_size = ni->lower.size;
1785 + ni->signature.block_size = ni->lower.erasesize;
1786 + ni->signature.page_size = ni->lower.writesize;
1787 + ni->signature.spare_size = ni->lower.oobsize;
1788 + ni->signature.mgmt_start_pb = ni->mgmt_start_ba;
1789 + ni->signature.max_try_count = NMBM_TRY_COUNT;
1790 + nmbm_update_checksum(&ni->signature.header);
1791 +
1792 + if (ni->lower.flags & NMBM_F_READ_ONLY) {
1793 + nlog_info(ni, "NMBM has been initialized in read-only mode\n");
1794 + return true;
1795 + }
1796 +
1797 + success = nmbm_write_signature(ni, ni->mgmt_start_ba,
1798 + &ni->signature, &ni->signature_ba);
1799 + if (!success) {
1800 + nlog_err(ni, "Failed to write signature to a proper offset\n");
1801 + return false;
1802 + }
1803 +
1804 + nlog_info(ni, "Signature has been written to block %u [0x%08llx]\n",
1805 + ni->signature_ba, ba2addr(ni, ni->signature_ba));
1806 + nmbm_mark_block_color_signature(ni, ni->signature_ba);
1807 +
1808 + /* Write info table(s) */
1809 + success = nmbm_create_info_table(ni);
1810 + if (success) {
1811 + nlog_info(ni, "NMBM has been successfully created\n");
1812 + return true;
1813 + }
1814 +
1815 + return false;
1816 +}
1817 +
1818 +/*
1819 + * nmbm_check_info_table_header - Check if an info table header is valid
1820 + * @ni: NMBM instance structure
1821 + * @data: pointer to the info table header
1822 + */
1823 +static bool nmbm_check_info_table_header(struct nmbm_instance *ni, void *data)
1824 +{
1825 + struct nmbm_info_table_header *ifthdr = data;
1826 +
1827 + if (ifthdr->header.magic != NMBM_MAGIC_INFO_TABLE)
1828 + return false;
1829 +
1830 + if (ifthdr->header.size != ni->info_table_size)
1831 + return false;
1832 +
1833 + if (ifthdr->mapping_table_off - ifthdr->state_table_off < ni->state_table_size)
1834 + return false;
1835 +
1836 + if (ni->info_table_size - ifthdr->mapping_table_off < ni->mapping_table_size)
1837 + return false;
1838 +
1839 + return true;
1840 +}
1841 +
1842 +/*
1843 + * nmbm_check_info_table - Check if a whole info table is valid
1844 + * @ni: NMBM instance structure
1845 + * @start_ba: start block address of this table
1846 + * @end_ba: end block address of this table
1847 + * @data: pointer to the info table header
1848 + * @mapping_blocks_top_ba: return the block address of top remapped block
1849 + */
1850 +static bool nmbm_check_info_table(struct nmbm_instance *ni, uint32_t start_ba,
1851 + uint32_t end_ba, void *data,
1852 + uint32_t *mapping_blocks_top_ba)
1853 +{
1854 + struct nmbm_info_table_header *ifthdr = data;
1855 + int32_t *block_mapping = (int32_t *)((uintptr_t)data + ifthdr->mapping_table_off);
1856 + nmbm_bitmap_t *block_state = (nmbm_bitmap_t *)((uintptr_t)data + ifthdr->state_table_off);
1857 + uint32_t minimum_mapping_pb = ni->signature_ba;
1858 + uint32_t ba;
1859 +
1860 + for (ba = 0; ba < ni->data_block_count; ba++) {
1861 + if ((block_mapping[ba] >= ni->data_block_count && block_mapping[ba] < end_ba) ||
1862 + block_mapping[ba] == ni->signature_ba)
1863 + return false;
1864 +
1865 + if (block_mapping[ba] >= end_ba && block_mapping[ba] < minimum_mapping_pb)
1866 + minimum_mapping_pb = block_mapping[ba];
1867 + }
1868 +
1869 + for (ba = start_ba; ba < end_ba; ba++) {
1870 + if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
1871 + continue;
1872 +
1873 + if (nmbm_get_block_state_raw(block_state, ba) != BLOCK_ST_GOOD)
1874 + return false;
1875 + }
1876 +
1877 + *mapping_blocks_top_ba = minimum_mapping_pb - 1;
1878 +
1879 + return true;
1880 +}
1881 +
1882 +/*
1883 + * nmbm_try_load_info_table - Try to load info table from an address
1884 + * @ni: NMBM instance structure
1885 + * @ba: start block address of the info table
1886 + * @eba: return the block address after end of the table
1887 + * @write_count: return the write count of this table
1888 + * @mapping_blocks_top_ba: return the block address of top remapped block
1889 + * @table_loaded: used to record whether ni->info_table has valid data
1890 + */
1891 +static bool nmbm_try_load_info_table(struct nmbm_instance *ni, uint32_t ba,
1892 + uint32_t *eba, uint32_t *write_count,
1893 + uint32_t *mapping_blocks_top_ba,
1894 + bool table_loaded)
1895 +{
1896 + struct nmbm_info_table_header *ifthdr = (void *)ni->info_table_cache;
1897 + uint8_t *off = ni->info_table_cache;
1898 + uint32_t limit = ba + size2blk(ni, ni->info_table_size);
1899 + uint32_t start_ba = 0, chunksize, sizeremain = ni->info_table_size;
1900 + bool success, checkhdr = true;
1901 + int ret;
1902 +
1903 + while (sizeremain && ba < limit) {
1904 + schedule();
1905 +
1906 + if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
1907 + goto next_block;
1908 +
1909 + if (nmbm_check_bad_phys_block(ni, ba)) {
1910 + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1911 + goto next_block;
1912 + }
1913 +
1914 + chunksize = sizeremain;
1915 + if (chunksize > ni->lower.erasesize)
1916 + chunksize = ni->lower.erasesize;
1917 +
1918 + /* Assume block with ECC error has no info table data */
1919 + ret = nmbn_read_data(ni, ba2addr(ni, ba), off, chunksize);
1920 + if (ret < 0)
1921 + goto skip_bad_block;
1922 + else if (ret > 0)
1923 + return false;
1924 +
1925 + if (checkhdr) {
1926 + success = nmbm_check_info_table_header(ni, off);
1927 + if (!success)
1928 + return false;
1929 +
1930 + start_ba = ba;
1931 + checkhdr = false;
1932 + }
1933 +
1934 + off += chunksize;
1935 + sizeremain -= chunksize;
1936 +
1937 + goto next_block;
1938 +
1939 + skip_bad_block:
1940 + /* Only mark bad in memory */
1941 + nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1942 +
1943 + next_block:
1944 + ba++;
1945 + }
1946 +
1947 + if (sizeremain)
1948 + return false;
1949 +
1950 + success = nmbm_check_header(ni->info_table_cache, ni->info_table_size);
1951 + if (!success)
1952 + return false;
1953 +
1954 + *eba = ba;
1955 + *write_count = ifthdr->write_count;
1956 +
1957 + success = nmbm_check_info_table(ni, start_ba, ba, ni->info_table_cache,
1958 + mapping_blocks_top_ba);
1959 + if (!success)
1960 + return false;
1961 +
1962 + if (!table_loaded || ifthdr->write_count > ni->info_table.write_count) {
1963 + memcpy(&ni->info_table, ifthdr, sizeof(ni->info_table));
1964 + memcpy(ni->block_state,
1965 + (uint8_t *)ifthdr + ifthdr->state_table_off,
1966 + ni->state_table_size);
1967 + memcpy(ni->block_mapping,
1968 + (uint8_t *)ifthdr + ifthdr->mapping_table_off,
1969 + ni->mapping_table_size);
1970 + ni->info_table.write_count = ifthdr->write_count;
1971 + }
1972 +
1973 + return true;
1974 +}
1975 +
1976 +/*
1977 + * nmbm_search_info_table - Search info table from specific address
1978 + * @ni: NMBM instance structure
1979 + * @ba: start block address to search
1980 + * @limit: highest block address allowed for searching
1981 + * @table_start_ba: return the start block address of this table
1982 + * @table_end_ba: return the block address after end of this table
1983 + * @write_count: return the write count of this table
1984 + * @mapping_blocks_top_ba: return the block address of top remapped block
1985 + * @table_loaded: used to record whether ni->info_table has valid data
1986 + */
1987 +static bool nmbm_search_info_table(struct nmbm_instance *ni, uint32_t ba,
1988 + uint32_t limit, uint32_t *table_start_ba,
1989 + uint32_t *table_end_ba,
1990 + uint32_t *write_count,
1991 + uint32_t *mapping_blocks_top_ba,
1992 + bool table_loaded)
1993 +{
1994 + bool success;
1995 +
1996 + while (ba < limit - size2blk(ni, ni->info_table_size)) {
1997 + schedule();
1998 +
1999 + success = nmbm_try_load_info_table(ni, ba, table_end_ba,
2000 + write_count,
2001 + mapping_blocks_top_ba,
2002 + table_loaded);
2003 + if (success) {
2004 + *table_start_ba = ba;
2005 + return true;
2006 + }
2007 +
2008 + ba++;
2009 + }
2010 +
2011 + return false;
2012 +}
2013 +
2014 +/*
2015 + * nmbm_load_info_table - Load info table(s) from a chip
2016 + * @ni: NMBM instance structure
2017 + * @ba: start block address to search info table
2018 + * @limit: highest block address allowed for searching
2019 + */
2020 +static bool nmbm_load_info_table(struct nmbm_instance *ni, uint32_t ba,
2021 + uint32_t limit)
2022 +{
2023 + uint32_t main_table_end_ba, backup_table_end_ba, table_end_ba;
2024 + uint32_t main_mapping_blocks_top_ba, backup_mapping_blocks_top_ba;
2025 + uint32_t main_table_write_count, backup_table_write_count;
2026 + uint32_t i;
2027 + bool success;
2028 +
2029 + /* Set initial value */
2030 + ni->main_table_ba = 0;
2031 + ni->backup_table_ba = 0;
2032 + ni->info_table.write_count = 0;
2033 + ni->mapping_blocks_top_ba = ni->signature_ba - 1;
2034 + ni->data_block_count = ni->signature.mgmt_start_pb;
2035 +
2036 + /* Find first info table */
2037 + success = nmbm_search_info_table(ni, ba, limit, &ni->main_table_ba,
2038 + &main_table_end_ba, &main_table_write_count,
2039 + &main_mapping_blocks_top_ba, false);
2040 + if (!success) {
2041 + nlog_warn(ni, "No valid info table found\n");
2042 + return false;
2043 + }
2044 +
2045 + table_end_ba = main_table_end_ba;
2046 +
2047 + nlog_table_found(ni, true, main_table_write_count, ni->main_table_ba,
2048 + main_table_end_ba);
2049 +
2050 + /* Find second info table */
2051 + success = nmbm_search_info_table(ni, main_table_end_ba, limit,
2052 + &ni->backup_table_ba, &backup_table_end_ba,
2053 + &backup_table_write_count, &backup_mapping_blocks_top_ba, true);
2054 + if (!success) {
2055 + nlog_warn(ni, "Second info table not found\n");
2056 + } else {
2057 + table_end_ba = backup_table_end_ba;
2058 +
2059 + nlog_table_found(ni, false, backup_table_write_count,
2060 + ni->backup_table_ba, backup_table_end_ba);
2061 + }
2062 +
2063 + /* Pick mapping_blocks_top_ba */
2064 + if (!ni->backup_table_ba) {
2065 +		ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
2066 + } else {
2067 + if (main_table_write_count >= backup_table_write_count)
2068 + ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
2069 + else
2070 + ni->mapping_blocks_top_ba = backup_mapping_blocks_top_ba;
2071 + }
2072 +
2073 + /* Set final mapping_blocks_ba */
2074 + ni->mapping_blocks_ba = table_end_ba;
2075 +
2076 + /* Set final data_block_count */
2077 + for (i = ni->signature.mgmt_start_pb; i > 0; i--) {
2078 + if (ni->block_mapping[i - 1] >= 0) {
2079 + ni->data_block_count = i;
2080 + break;
2081 + }
2082 + }
2083 +
2084 + /* Debug purpose: mark mapped blocks and bad blocks */
2085 + for (i = 0; i < ni->data_block_count; i++) {
2086 + if (ni->block_mapping[i] > ni->mapping_blocks_top_ba)
2087 + nmbm_mark_block_color_mapped(ni, ni->block_mapping[i]);
2088 + }
2089 +
2090 + for (i = 0; i < ni->block_count; i++) {
2091 + if (nmbm_get_block_state(ni, i) == BLOCK_ST_BAD)
2092 + nmbm_mark_block_color_bad(ni, i);
2093 + }
2094 +
2095 + /* Regenerate the info table cache from the final selected info table */
2096 + nmbm_generate_info_table_cache(ni);
2097 +
2098 + if (ni->lower.flags & NMBM_F_READ_ONLY)
2099 + return true;
2100 +
2101 + /*
2102 + * If only one table exists, try to write another table.
2103 + * If two tables have different write count, try to update info table
2104 + */
2105 + if (!ni->backup_table_ba) {
2106 + success = nmbm_rescue_single_info_table(ni);
2107 + } else if (main_table_write_count != backup_table_write_count) {
2108 + /* Mark state & mapping tables changed */
2109 + ni->block_state_changed = 1;
2110 + ni->block_mapping_changed = 1;
2111 +
2112 + success = nmbm_update_single_info_table(ni,
2113 + main_table_write_count < backup_table_write_count);
2114 + } else {
2115 + success = true;
2116 + }
2117 +
2118 + /*
2119 + * If there is no spare unmapped blocks, or still only one table
2120 + * exists, set the chip to read-only
2121 + */
2122 + if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
2123 + nlog_warn(ni, "No spare unmapped blocks. Device is now read-only\n");
2124 + ni->protected = 1;
2125 + } else if (!success) {
2126 + nlog_warn(ni, "Only one info table found. Device is now read-only\n");
2127 + ni->protected = 1;
2128 + }
2129 +
2130 + return true;
2131 +}
2132 +
2133 +/*
2134 + * nmbm_load_existing - Load NMBM from a chip with existing data, or create one
2135 + * @ni: NMBM instance structure
2136 + */
2137 +static bool nmbm_load_existing(struct nmbm_instance *ni)
2138 +{
2139 + bool success;
2140 +
2141 + /* Calculate the boundary of management blocks */
2142 + ni->mgmt_start_ba = ni->signature.mgmt_start_pb;
2143 +
2144 + nlog_debug(ni, "NMBM management region starts at block %u [0x%08llx]\n",
2145 + ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
2146 + nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
2147 + ni->signature_ba - 1);
2148 +
2149 + /* Look for info table(s) */
2150 + success = nmbm_load_info_table(ni, ni->mgmt_start_ba,
2151 + ni->signature_ba);
2152 + if (success) {
2153 + nlog_info(ni, "NMBM has been successfully attached %s\n",
2154 + (ni->lower.flags & NMBM_F_READ_ONLY) ? "in read-only mode" : "");
2155 + return true;
2156 + }
2157 +
2158 + if (!(ni->lower.flags & NMBM_F_CREATE))
2159 + return false;
2160 +
2161 + /* Fill block state table & mapping table */
2162 + nmbm_scan_badblocks(ni);
2163 + nmbm_build_mapping_table(ni);
2164 +
2165 + if (ni->lower.flags & NMBM_F_READ_ONLY) {
2166 + nlog_info(ni, "NMBM has been initialized in read-only mode\n");
2167 + return true;
2168 + }
2169 +
2170 + /* Write info table(s) */
2171 + success = nmbm_create_info_table(ni);
2172 + if (success) {
2173 + nlog_info(ni, "NMBM has been successfully created\n");
2174 + return true;
2175 + }
2176 +
2177 + return false;
2178 +}
2179 +
2180 +/*
2181 + * nmbm_find_signature - Find signature in the lower NAND chip
2182 + * @ni: NMBM instance structure
2183 + * @signature: used for storing the signature data found
2184 + * @signature_ba: return the actual block address of signature block
2185 + *
2186 + * Find a valid signature from a specific range in the lower NAND chip,
2187 + * from bottom (highest address) to top (lowest address)
2188 + *
2189 + * Return true if found.
2190 + */
2191 +static bool nmbm_find_signature(struct nmbm_instance *ni,
2192 + struct nmbm_signature *signature,
2193 + uint32_t *signature_ba)
2194 +{
2195 + struct nmbm_signature sig;
2196 + uint64_t off, addr;
2197 + uint32_t block_count, ba, limit;
2198 + bool success;
2199 + int ret;
2200 +
2201 + /* Calculate top and bottom block address */
2202 + block_count = ni->lower.size >> ni->erasesize_shift;
2203 + ba = block_count;
2204 + limit = (block_count / NMBM_MGMT_DIV) * (NMBM_MGMT_DIV - ni->lower.max_ratio);
2205 + if (ni->lower.max_reserved_blocks && block_count - limit > ni->lower.max_reserved_blocks)
2206 + limit = block_count - ni->lower.max_reserved_blocks;
2207 +
2208 + while (ba >= limit) {
2209 + schedule();
2210 +
2211 + ba--;
2212 + addr = ba2addr(ni, ba);
2213 +
2214 + if (nmbm_check_bad_phys_block(ni, ba))
2215 + continue;
2216 +
2217 + /* Check every page.
2218 +	 * As long as at least one page contains valid signature,
2219 + * the block is treated as a valid signature block.
2220 + */
2221 + for (off = 0; off < ni->lower.erasesize;
2222 + off += ni->lower.writesize) {
2223 + schedule();
2224 +
2225 + ret = nmbn_read_data(ni, addr + off, &sig,
2226 + sizeof(sig));
2227 + if (ret)
2228 + continue;
2229 +
2230 + /* Check for header size and checksum */
2231 + success = nmbm_check_header(&sig, sizeof(sig));
2232 + if (!success)
2233 + continue;
2234 +
2235 + /* Check for header magic */
2236 + if (sig.header.magic == NMBM_MAGIC_SIGNATURE) {
2237 + /* Found it */
2238 + memcpy(signature, &sig, sizeof(sig));
2239 + *signature_ba = ba;
2240 + return true;
2241 + }
2242 + }
2243 +	}
2244 +
2245 + return false;
2246 +}
2247 +
2248 +/*
2249 + * is_power_of_2_u64 - Check whether a 64-bit integer is power of 2
2250 + * @n: number to check
2251 + */
2252 +static bool is_power_of_2_u64(uint64_t n)
2253 +{
2254 + return (n != 0 && ((n & (n - 1)) == 0));
2255 +}
2256 +
2257 +/*
2258 + * nmbm_check_lower_members - Validate the members of lower NAND device
2259 + * @nld: Lower NAND chip structure
2260 + */
2261 +static bool nmbm_check_lower_members(struct nmbm_lower_device *nld)
2262 +{
2263 +
2264 + if (!nld->size || !is_power_of_2_u64(nld->size)) {
2265 + nmbm_log_lower(nld, NMBM_LOG_ERR,
2266 + "Chip size %llu is not valid\n", nld->size);
2267 + return false;
2268 + }
2269 +
2270 + if (!nld->erasesize || !is_power_of_2(nld->erasesize)) {
2271 + nmbm_log_lower(nld, NMBM_LOG_ERR,
2272 + "Block size %u is not valid\n", nld->erasesize);
2273 + return false;
2274 + }
2275 +
2276 + if (!nld->writesize || !is_power_of_2(nld->writesize)) {
2277 + nmbm_log_lower(nld, NMBM_LOG_ERR,
2278 + "Page size %u is not valid\n", nld->writesize);
2279 + return false;
2280 + }
2281 +
2282 + if (!nld->oobsize || !is_power_of_2(nld->oobsize)) {
2283 + nmbm_log_lower(nld, NMBM_LOG_ERR,
2284 + "Page spare size %u is not valid\n", nld->oobsize);
2285 + return false;
2286 + }
2287 +
2288 + if (!nld->read_page) {
2289 + nmbm_log_lower(nld, NMBM_LOG_ERR, "read_page() is required\n");
2290 + return false;
2291 + }
2292 +
2293 + if (!(nld->flags & NMBM_F_READ_ONLY) && (!nld->write_page || !nld->erase_block)) {
2294 + nmbm_log_lower(nld, NMBM_LOG_ERR,
2295 + "write_page() and erase_block() are required\n");
2296 + return false;
2297 + }
2298 +
2299 + /* Data sanity check */
2300 + if (!nld->max_ratio)
2301 + nld->max_ratio = 1;
2302 +
2303 + if (nld->max_ratio >= NMBM_MGMT_DIV - 1) {
2304 + nmbm_log_lower(nld, NMBM_LOG_ERR,
2305 + "max ratio %u is invalid\n", nld->max_ratio);
2306 + return false;
2307 + }
2308 +
2309 + if (nld->max_reserved_blocks && nld->max_reserved_blocks < NMBM_MGMT_BLOCKS_MIN) {
2310 + nmbm_log_lower(nld, NMBM_LOG_ERR,
2311 + "max reserved blocks %u is too small\n", nld->max_reserved_blocks);
2312 + return false;
2313 + }
2314 +
2315 + return true;
2316 +}
2317 +
2318 +/*
2319 + * nmbm_calc_structure_size - Calculate the instance structure size
2320 + * @nld: NMBM lower device structure
2321 + */
2322 +size_t nmbm_calc_structure_size(struct nmbm_lower_device *nld)
2323 +{
2324 + uint32_t state_table_size, mapping_table_size, info_table_size;
2325 + uint32_t block_count;
2326 +
2327 + block_count = nmbm_lldiv(nld->size, nld->erasesize);
2328 +
2329 + /* Calculate info table size */
2330 + state_table_size = ((block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2331 + NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2332 + mapping_table_size = block_count * sizeof(int32_t);
2333 +
2334 + info_table_size = NMBM_ALIGN(sizeof(struct nmbm_info_table_header),
2335 + nld->writesize);
2336 + info_table_size += NMBM_ALIGN(state_table_size, nld->writesize);
2337 + info_table_size += NMBM_ALIGN(mapping_table_size, nld->writesize);
2338 +
2339 + return info_table_size + state_table_size + mapping_table_size +
2340 + nld->writesize + nld->oobsize + sizeof(struct nmbm_instance);
2341 +}
2342 +
2343 +/*
2344 + * nmbm_init_structure - Initialize members of instance structure
2345 + * @ni: NMBM instance structure
2346 + */
2347 +static void nmbm_init_structure(struct nmbm_instance *ni)
2348 +{
2349 + uint32_t pages_per_block, blocks_per_chip;
2350 + uintptr_t ptr;
2351 +
2352 + pages_per_block = ni->lower.erasesize / ni->lower.writesize;
2353 + blocks_per_chip = nmbm_lldiv(ni->lower.size, ni->lower.erasesize);
2354 +
2355 + ni->rawpage_size = ni->lower.writesize + ni->lower.oobsize;
2356 + ni->rawblock_size = pages_per_block * ni->rawpage_size;
2357 + ni->rawchip_size = blocks_per_chip * ni->rawblock_size;
2358 +
2359 + ni->writesize_mask = ni->lower.writesize - 1;
2360 + ni->erasesize_mask = ni->lower.erasesize - 1;
2361 +
2362 + ni->writesize_shift = ffs(ni->lower.writesize) - 1;
2363 + ni->erasesize_shift = ffs(ni->lower.erasesize) - 1;
2364 +
2365 + /* Calculate number of block this chip */
2366 + ni->block_count = ni->lower.size >> ni->erasesize_shift;
2367 +
2368 + /* Calculate info table size */
2369 + ni->state_table_size = ((ni->block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2370 + NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2371 + ni->mapping_table_size = ni->block_count * sizeof(*ni->block_mapping);
2372 +
2373 + ni->info_table_size = NMBM_ALIGN(sizeof(ni->info_table),
2374 + ni->lower.writesize);
2375 + ni->info_table.state_table_off = ni->info_table_size;
2376 +
2377 + ni->info_table_size += NMBM_ALIGN(ni->state_table_size,
2378 + ni->lower.writesize);
2379 + ni->info_table.mapping_table_off = ni->info_table_size;
2380 +
2381 + ni->info_table_size += NMBM_ALIGN(ni->mapping_table_size,
2382 + ni->lower.writesize);
2383 +
2384 + ni->info_table_spare_blocks = nmbm_get_spare_block_count(
2385 + size2blk(ni, ni->info_table_size));
2386 +
2387 + /* Assign memory to members */
2388 + ptr = (uintptr_t)ni + sizeof(*ni);
2389 +
2390 + ni->info_table_cache = (void *)ptr;
2391 + ptr += ni->info_table_size;
2392 +
2393 + ni->block_state = (void *)ptr;
2394 + ptr += ni->state_table_size;
2395 +
2396 + ni->block_mapping = (void *)ptr;
2397 + ptr += ni->mapping_table_size;
2398 +
2399 + ni->page_cache = (uint8_t *)ptr;
2400 +
2401 + /* Initialize block state table */
2402 + ni->block_state_changed = 0;
2403 + memset(ni->block_state, 0xff, ni->state_table_size);
2404 +
2405 + /* Initialize block mapping table */
2406 + ni->block_mapping_changed = 0;
2407 +}
2408 +
2409 +/*
2410 + * nmbm_attach - Attach to a lower device
2411 + * @nld: NMBM lower device structure
2412 + * @ni: NMBM instance structure
2413 + */
2414 +int nmbm_attach(struct nmbm_lower_device *nld, struct nmbm_instance *ni)
2415 +{
2416 + bool success;
2417 +
2418 + if (!nld || !ni)
2419 + return -EINVAL;
2420 +
2421 + /* Set default log level */
2422 + ni->log_display_level = NMBM_DEFAULT_LOG_LEVEL;
2423 +
2424 + /* Check lower members */
2425 + success = nmbm_check_lower_members(nld);
2426 + if (!success)
2427 + return -EINVAL;
2428 +
2429 + /* Initialize NMBM instance */
2430 + memcpy(&ni->lower, nld, sizeof(struct nmbm_lower_device));
2431 + nmbm_init_structure(ni);
2432 +
2433 + success = nmbm_find_signature(ni, &ni->signature, &ni->signature_ba);
2434 + if (!success) {
2435 + if (!(nld->flags & NMBM_F_CREATE)) {
2436 + nlog_err(ni, "Signature not found\n");
2437 + return -ENODEV;
2438 + }
2439 +
2440 + success = nmbm_create_new(ni);
2441 + if (!success)
2442 + return -ENODEV;
2443 +
2444 + return 0;
2445 + }
2446 +
2447 + nlog_info(ni, "Signature found at block %u [0x%08llx]\n",
2448 + ni->signature_ba, ba2addr(ni, ni->signature_ba));
2449 + nmbm_mark_block_color_signature(ni, ni->signature_ba);
2450 +
2451 + if (ni->signature.header.version != NMBM_VER) {
2452 + nlog_err(ni, "NMBM version %u.%u is not supported\n",
2453 + NMBM_VERSION_MAJOR_GET(ni->signature.header.version),
2454 + NMBM_VERSION_MINOR_GET(ni->signature.header.version));
2455 + return -EINVAL;
2456 + }
2457 +
2458 + if (ni->signature.nand_size != nld->size ||
2459 + ni->signature.block_size != nld->erasesize ||
2460 + ni->signature.page_size != nld->writesize ||
2461 + ni->signature.spare_size != nld->oobsize) {
2462 + nlog_err(ni, "NMBM configuration mismatch\n");
2463 + return -EINVAL;
2464 + }
2465 +
2466 + success = nmbm_load_existing(ni);
2467 + if (!success)
2468 + return -ENODEV;
2469 +
2470 + return 0;
2471 +}
2472 +
2473 +/*
2474 + * nmbm_detach - Detach from a lower device, and save all tables
2475 + * @ni: NMBM instance structure
2476 + */
2477 +int nmbm_detach(struct nmbm_instance *ni)
2478 +{
2479 + if (!ni)
2480 + return -EINVAL;
2481 +
2482 + if (!(ni->lower.flags & NMBM_F_READ_ONLY))
2483 + nmbm_update_info_table(ni);
2484 +
2485 + nmbm_mark_block_color_normal(ni, 0, ni->block_count - 1);
2486 +
2487 + return 0;
2488 +}
2489 +
2490 +/*
2491 + * nmbm_erase_logic_block - Erase a logic block
2492 + * @ni: NMBM instance structure
2493 + * @block_addr: logic block address
2494 + *
2495 + * Logic block will be mapped to physical block before erasing.
2496 + * Bad block found during erasing will be remapped to a good block if there is
2497 + * still at least one good spare block available.
2498 + */
2499 +static int nmbm_erase_logic_block(struct nmbm_instance *ni, uint32_t block_addr)
2500 +{
2501 + uint32_t pb;
2502 + bool success;
2503 +
2504 +retry:
2505 + /* Map logic block to physical block */
2506 + pb = ni->block_mapping[block_addr];
2507 +
2508 + /* Whether the logic block is good (has valid mapping) */
2509 + if ((int32_t)pb < 0) {
2510 + nlog_debug(ni, "Logic block %u is a bad block\n", block_addr);
2511 + return -EIO;
2512 + }
2513 +
2514 + /* Remap logic block if current physical block is a bad block */
2515 + if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD ||
2516 + nmbm_get_block_state(ni, pb) == BLOCK_ST_NEED_REMAP)
2517 + goto remap_logic_block;
2518 +
2519 + /* Insurance to detect unexpected bad block marked by user */
2520 + if (nmbm_check_bad_phys_block(ni, pb)) {
2521 + nlog_warn(ni, "Found unexpected bad block possibly marked by user\n");
2522 + nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2523 + goto remap_logic_block;
2524 + }
2525 +
2526 + success = nmbm_erase_block_and_check(ni, pb);
2527 + if (success)
2528 + return 0;
2529 +
2530 + /* Mark bad block */
2531 + nmbm_mark_phys_bad_block(ni, pb);
2532 + nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2533 +
2534 +remap_logic_block:
2535 + /* Try to assign a new block */
2536 + success = nmbm_map_block(ni, block_addr);
2537 + if (!success) {
2538 + /* Mark logic block unusable, and update info table */
2539 + ni->block_mapping[block_addr] = -1;
2540 + if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
2541 + nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2542 + nmbm_update_info_table(ni);
2543 + return -EIO;
2544 + }
2545 +
2546 + /* Update info table before erasing */
2547 + if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
2548 + nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2549 + nmbm_update_info_table(ni);
2550 +
2551 + goto retry;
2552 +}
2553 +
2554 +/*
2555 + * nmbm_erase_block_range - Erase logic blocks
2556 + * @ni: NMBM instance structure
2557 + * @addr: logic linear address
2558 + * @size: erase range
2559 + * @failed_addr: return failed block address if error occurs
2560 + */
2561 +int nmbm_erase_block_range(struct nmbm_instance *ni, uint64_t addr,
2562 + uint64_t size, uint64_t *failed_addr)
2563 +{
2564 + uint32_t start_ba, end_ba;
2565 + int ret;
2566 +
2567 + if (!ni)
2568 + return -EINVAL;
2569 +
2570 + /* Sanity check */
2571 + if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
2572 + nlog_debug(ni, "Device is forced read-only\n");
2573 + return -EROFS;
2574 + }
2575 +
2576 + if (addr >= ba2addr(ni, ni->data_block_count)) {
2577 + nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2578 + return -EINVAL;
2579 + }
2580 +
2581 + if (addr + size > ba2addr(ni, ni->data_block_count)) {
2582 +		nlog_err(ni, "Erase range 0x%llx is too large\n", size);
2583 + return -EINVAL;
2584 + }
2585 +
2586 + if (!size) {
2587 + nlog_warn(ni, "No blocks to be erased\n");
2588 + return 0;
2589 + }
2590 +
2591 + start_ba = addr2ba(ni, addr);
2592 + end_ba = addr2ba(ni, addr + size - 1);
2593 +
2594 + while (start_ba <= end_ba) {
2595 + schedule();
2596 +
2597 + ret = nmbm_erase_logic_block(ni, start_ba);
2598 + if (ret) {
2599 + if (failed_addr)
2600 + *failed_addr = ba2addr(ni, start_ba);
2601 + return ret;
2602 + }
2603 +
2604 + start_ba++;
2605 + }
2606 +
2607 + return 0;
2608 +}
2609 +
2610 +/*
2611 + * nmbm_read_logic_page - Read page based on logic address
2612 + * @ni: NMBM instance structure
2613 + * @addr: logic linear address
2614 + * @data: buffer to store main data. optional.
2615 + * @oob: buffer to store oob data. optional.
2616 + * @mode: read mode
2617 + *
2618 + * Return 0 for success, positive value for corrected bitflip count,
2619 + * -EBADMSG for ecc error, other negative values for other errors
2620 + */
2621 +static int nmbm_read_logic_page(struct nmbm_instance *ni, uint64_t addr,
2622 + void *data, void *oob, enum nmbm_oob_mode mode)
2623 +{
2624 + uint32_t lb, pb, offset;
2625 + uint64_t paddr;
2626 +
2627 + /* Extract block address and in-block offset */
2628 + lb = addr2ba(ni, addr);
2629 + offset = addr & ni->erasesize_mask;
2630 +
2631 + /* Map logic block to physical block */
2632 + pb = ni->block_mapping[lb];
2633 +
2634 + /* Whether the logic block is good (has valid mapping) */
2635 + if ((int32_t)pb < 0) {
2636 + nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2637 + return -EIO;
2638 + }
2639 +
2640 + /* Fail if physical block is marked bad */
2641 + if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2642 + return -EIO;
2643 +
2644 + /* Assemble new address */
2645 + paddr = ba2addr(ni, pb) + offset;
2646 +
2647 + return nmbm_read_phys_page(ni, paddr, data, oob, mode);
2648 +}
2649 +
2650 +/*
2651 + * nmbm_read_single_page - Read one page based on logic address
2652 + * @ni: NMBM instance structure
2653 + * @addr: logic linear address
2654 + * @data: buffer to store main data. optional.
2655 + * @oob: buffer to store oob data. optional.
2656 + * @mode: read mode
2657 + *
2658 + * Return 0 for success, positive value for corrected bitflip count,
2659 + * -EBADMSG for ecc error, other negative values for other errors
2660 + */
2661 +int nmbm_read_single_page(struct nmbm_instance *ni, uint64_t addr, void *data,
2662 + void *oob, enum nmbm_oob_mode mode)
2663 +{
2664 + if (!ni)
2665 + return -EINVAL;
2666 +
2667 + /* Sanity check */
2668 + if (ni->protected) {
2669 + nlog_debug(ni, "Device is forced read-only\n");
2670 + return -EROFS;
2671 + }
2672 +
2673 + if (addr >= ba2addr(ni, ni->data_block_count)) {
2674 + nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2675 + return -EINVAL;
2676 + }
2677 +
2678 + return nmbm_read_logic_page(ni, addr, data, oob, mode);
2679 +}
2680 +
2681 +/*
2682 + * nmbm_read_range - Read data without oob
2683 + * @ni: NMBM instance structure
2684 + * @addr: logic linear address
2685 + * @size: data size to read
2686 + * @data: buffer to store main data to be read
2687 + * @mode: read mode
2688 + * @retlen: return actual data size read
2689 + *
2690 + * Return 0 for success, positive value for corrected bitflip count,
2691 + * -EBADMSG for ecc error, other negative values for other errors
2692 + */
2693 +int nmbm_read_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2694 + void *data, enum nmbm_oob_mode mode, size_t *retlen)
2695 +{
2696 + uint64_t off = addr;
2697 + uint8_t *ptr = data;
2698 + size_t sizeremain = size, chunksize, leading;
2699 + bool has_ecc_err = false;
2700 + int ret, max_bitflips = 0;
2701 +
2702 + if (!ni)
2703 + return -EINVAL;
2704 +
2705 + /* Sanity check */
2706 + if (ni->protected) {
2707 + nlog_debug(ni, "Device is forced read-only\n");
2708 + return -EROFS;
2709 + }
2710 +
2711 + if (addr >= ba2addr(ni, ni->data_block_count)) {
2712 + nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2713 + return -EINVAL;
2714 + }
2715 +
2716 + if (addr + size > ba2addr(ni, ni->data_block_count)) {
2717 + nlog_err(ni, "Read range 0x%llx is too large\n", size);
2718 + return -EINVAL;
2719 + }
2720 +
2721 + if (!size) {
2722 + nlog_warn(ni, "No data to be read\n");
2723 + return 0;
2724 + }
2725 +
2726 + while (sizeremain) {
2727 + schedule();
2728 +
2729 + leading = off & ni->writesize_mask;
2730 + chunksize = ni->lower.writesize - leading;
2731 + if (chunksize > sizeremain)
2732 + chunksize = sizeremain;
2733 +
2734 + if (chunksize == ni->lower.writesize) {
2735 + ret = nmbm_read_logic_page(ni, off - leading, ptr,
2736 + NULL, mode);
2737 + if (ret < 0 && ret != -EBADMSG)
2738 + break;
2739 + } else {
2740 + ret = nmbm_read_logic_page(ni, off - leading,
2741 + ni->page_cache, NULL,
2742 + mode);
2743 + if (ret < 0 && ret != -EBADMSG)
2744 + break;
2745 +
2746 + memcpy(ptr, ni->page_cache + leading, chunksize);
2747 + }
2748 +
2749 + if (ret == -EBADMSG)
2750 + has_ecc_err = true;
2751 +
2752 + if (ret > max_bitflips)
2753 + max_bitflips = ret;
2754 +
2755 + off += chunksize;
2756 + ptr += chunksize;
2757 + sizeremain -= chunksize;
2758 + }
2759 +
2760 + if (retlen)
2761 + *retlen = size - sizeremain;
2762 +
2763 + if (ret < 0 && ret != -EBADMSG)
2764 + return ret;
2765 +
2766 + if (has_ecc_err)
2767 + return -EBADMSG;
2768 +
2769 + return max_bitflips;
2770 +}
2771 +
2772 +/*
2773 + * nmbm_write_logic_page - Write page based on logic address
2774 + * @ni: NMBM instance structure
2775 + * @addr: logic linear address
2776 + * @data: buffer contains main data. optional.
2777 + * @oob: buffer contains oob data. optional.
2778 + * @mode: write mode
2779 + */
2780 +static int nmbm_write_logic_page(struct nmbm_instance *ni, uint64_t addr,
2781 + const void *data, const void *oob,
2782 + enum nmbm_oob_mode mode)
2783 +{
2784 + uint32_t lb, pb, offset;
2785 + uint64_t paddr;
2786 + bool success;
2787 +
2788 + /* Extract block address and in-block offset */
2789 + lb = addr2ba(ni, addr);
2790 + offset = addr & ni->erasesize_mask;
2791 +
2792 + /* Map logic block to physical block */
2793 + pb = ni->block_mapping[lb];
2794 +
2795 + /* Whether the logic block is good (has valid mapping) */
2796 + if ((int32_t)pb < 0) {
2797 + nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2798 + return -EIO;
2799 + }
2800 +
2801 + /* Fail if physical block is marked bad */
2802 + if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2803 + return -EIO;
2804 +
2805 + /* Assemble new address */
2806 + paddr = ba2addr(ni, pb) + offset;
2807 +
2808 + success = nmbm_write_phys_page(ni, paddr, data, oob, mode);
2809 + if (success)
2810 + return 0;
2811 +
2812 + /*
2813 + * Do not remap bad block here. Just mark this block in state table.
2814 + * Remap this block on erasing.
2815 + */
2816 + nmbm_set_block_state(ni, pb, BLOCK_ST_NEED_REMAP);
2817 + nmbm_update_info_table(ni);
2818 +
2819 + return -EIO;
2820 +}
2821 +
2822 +/*
2823 + * nmbm_write_single_page - Write one page based on logic address
2824 + * @ni: NMBM instance structure
2825 + * @addr: logic linear address
2826 + * @data: buffer contains main data. optional.
2827 + * @oob: buffer contains oob data. optional.
2828 + * @mode: write mode
2829 + */
2830 +int nmbm_write_single_page(struct nmbm_instance *ni, uint64_t addr,
2831 + const void *data, const void *oob,
2832 + enum nmbm_oob_mode mode)
2833 +{
2834 + if (!ni)
2835 + return -EINVAL;
2836 +
2837 + /* Sanity check */
2838 + if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
2839 + nlog_debug(ni, "Device is forced read-only\n");
2840 + return -EROFS;
2841 + }
2842 +
2843 + if (addr >= ba2addr(ni, ni->data_block_count)) {
2844 + nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2845 + return -EINVAL;
2846 + }
2847 +
2848 + return nmbm_write_logic_page(ni, addr, data, oob, mode);
2849 +}
2850 +
2851 +/*
2852 + * nmbm_write_range - Write data without oob
2853 + * @ni: NMBM instance structure
2854 + * @addr: logic linear address
2855 + * @size: data size to write
2856 + * @data: buffer contains data to be written
2857 + * @mode: write mode
2858 + * @retlen: return actual data size written
2859 + */
2860 +int nmbm_write_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2861 + const void *data, enum nmbm_oob_mode mode,
2862 + size_t *retlen)
2863 +{
2864 + uint64_t off = addr;
2865 + const uint8_t *ptr = data;
2866 + size_t sizeremain = size, chunksize, leading;
2867 + int ret;
2868 +
2869 + if (!ni)
2870 + return -EINVAL;
2871 +
2872 + /* Sanity check */
2873 + if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
2874 + nlog_debug(ni, "Device is forced read-only\n");
2875 + return -EROFS;
2876 + }
2877 +
2878 + if (addr >= ba2addr(ni, ni->data_block_count)) {
2879 + nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2880 + return -EINVAL;
2881 + }
2882 +
2883 + if (addr + size > ba2addr(ni, ni->data_block_count)) {
2884 + nlog_err(ni, "Write size 0x%zx is too large\n", size);
2885 + return -EINVAL;
2886 + }
2887 +
2888 + if (!size) {
2889 + nlog_warn(ni, "No data to be written\n");
2890 + return 0;
2891 + }
2892 +
2893 + while (sizeremain) {
2894 + schedule();
2895 +
2896 + leading = off & ni->writesize_mask;
2897 + chunksize = ni->lower.writesize - leading;
2898 + if (chunksize > sizeremain)
2899 + chunksize = sizeremain;
2900 +
2901 + if (chunksize == ni->lower.writesize) {
2902 + ret = nmbm_write_logic_page(ni, off - leading, ptr,
2903 + NULL, mode);
2904 + if (ret)
2905 + break;
2906 + } else {
2907 + memset(ni->page_cache, 0xff, leading);
2908 + memcpy(ni->page_cache + leading, ptr, chunksize);
2909 +
2910 + ret = nmbm_write_logic_page(ni, off - leading,
2911 + ni->page_cache, NULL,
2912 + mode);
2913 + if (ret)
2914 + break;
2915 + }
2916 +
2917 + off += chunksize;
2918 + ptr += chunksize;
2919 + sizeremain -= chunksize;
2920 + }
2921 +
2922 + if (retlen)
2923 + *retlen = size - sizeremain;
2924 +
2925 + return ret;
2926 +}
2927 +
2928 +/*
2929 + * nmbm_check_bad_block - Check whether a logic block is usable
2930 + * @ni: NMBM instance structure
2931 + * @addr: logic linear address
2932 + */
2933 +int nmbm_check_bad_block(struct nmbm_instance *ni, uint64_t addr)
2934 +{
2935 + uint32_t lb, pb;
2936 +
2937 + if (!ni)
2938 + return -EINVAL;
2939 +
2940 + if (addr >= ba2addr(ni, ni->data_block_count)) {
2941 + nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2942 + return -EINVAL;
2943 + }
2944 +
2945 + lb = addr2ba(ni, addr);
2946 +
2947 + /* Map logic block to physical block */
2948 + pb = ni->block_mapping[lb];
2949 +
2950 + if ((int32_t)pb < 0)
2951 + return 1;
2952 +
2953 + if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2954 + return 1;
2955 +
2956 + return 0;
2957 +}
2958 +
2959 +/*
2960 + * nmbm_mark_bad_block - Mark a logic block unusable
2961 + * @ni: NMBM instance structure
2962 + * @addr: logic linear address
2963 + */
2964 +int nmbm_mark_bad_block(struct nmbm_instance *ni, uint64_t addr)
2965 +{
2966 + uint32_t lb, pb;
2967 +
2968 + if (!ni)
2969 + return -EINVAL;
2970 +
2971 + /* Sanity check */
2972 + if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
2973 + nlog_debug(ni, "Device is forced read-only\n");
2974 + return -EROFS;
2975 + }
2976 +
2977 + if (addr >= ba2addr(ni, ni->data_block_count)) {
2978 + nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2979 + return -EINVAL;
2980 + }
2981 +
2982 + lb = addr2ba(ni, addr);
2983 +
2984 + /* Map logic block to physical block */
2985 + pb = ni->block_mapping[lb];
2986 +
2987 + if ((int32_t)pb < 0)
2988 + return 0;
2989 +
2990 + ni->block_mapping[lb] = -1;
2991 + nmbm_mark_phys_bad_block(ni, pb);
2992 + nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2993 + nmbm_update_info_table(ni);
2994 +
2995 + return 0;
2996 +}
2997 +
2998 +/*
2999 + * nmbm_get_avail_size - Get available user data size
3000 + * @ni: NMBM instance structure
3001 + */
3002 +uint64_t nmbm_get_avail_size(struct nmbm_instance *ni)
3003 +{
3004 + if (!ni)
3005 + return 0;
3006 +
3007 + return (uint64_t)ni->data_block_count << ni->erasesize_shift;
3008 +}
3009 +
3010 +/*
3011 + * nmbm_get_lower_device - Get lower device structure
3012 + * @ni: NMBM instance structure
3013 + * @nld: pointer to hold the data of lower device structure
3014 + */
3015 +int nmbm_get_lower_device(struct nmbm_instance *ni, struct nmbm_lower_device *nld)
3016 +{
3017 + if (!ni)
3018 + return -EINVAL;
3019 +
3020 + if (nld)
3021 + memcpy(nld, &ni->lower, sizeof(*nld));
3022 +
3023 + return 0;
3024 +}
3025 +
3026 +#include "nmbm-debug.inl"
3027 --- /dev/null
3028 +++ b/drivers/mtd/nmbm/nmbm-debug.h
3029 @@ -0,0 +1,37 @@
3030 +/* SPDX-License-Identifier: GPL-2.0 */
3031 +/*
3032 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
3033 + *
3034 + * Debug addons for NAND Mapped-block Management (NMBM)
3035 + *
3036 + * Author: Weijie Gao <weijie.gao@mediatek.com>
3037 + */
3038 +
3039 +#ifndef _NMBM_DEBUG_H_
3040 +#define _NMBM_DEBUG_H_
3041 +
3042 +#include "nmbm-private.h"
3043 +
3044 +#define nmbm_mark_block_color_normal(ni, start_ba, end_ba)
3045 +#define nmbm_mark_block_color_bad(ni, ba)
3046 +#define nmbm_mark_block_color_mgmt(ni, start_ba, end_ba)
3047 +#define nmbm_mark_block_color_signature(ni, ba)
3048 +#define nmbm_mark_block_color_info_table(ni, start_ba, end_ba)
3049 +#define nmbm_mark_block_color_mapped(ni, ba)
3050 +
3051 +uint32_t nmbm_debug_get_block_state(struct nmbm_instance *ni, uint32_t ba);
3052 +char nmbm_debug_get_phys_block_type(struct nmbm_instance *ni, uint32_t ba);
3053 +
3054 +enum nmmb_block_type {
3055 + NMBM_BLOCK_GOOD_DATA,
3056 + NMBM_BLOCK_GOOD_MGMT,
3057 + NMBM_BLOCK_BAD,
3058 + NMBM_BLOCK_MAIN_INFO_TABLE,
3059 + NMBM_BLOCK_BACKUP_INFO_TABLE,
3060 + NMBM_BLOCK_REMAPPED,
3061 + NMBM_BLOCK_SIGNATURE,
3062 +
3063 + __NMBM_BLOCK_TYPE_MAX
3064 +};
3065 +
3066 +#endif /* _NMBM_DEBUG_H_ */
3067 --- /dev/null
3068 +++ b/drivers/mtd/nmbm/nmbm-debug.inl
3069 @@ -0,0 +1,39 @@
3070 +
3071 +uint32_t nmbm_debug_get_block_state(struct nmbm_instance *ni, uint32_t ba)
3072 +{
3073 + return nmbm_get_block_state(ni, ba);
3074 +}
3075 +
3076 +char nmbm_debug_get_phys_block_type(struct nmbm_instance *ni, uint32_t ba)
3077 +{
3078 + uint32_t eba, limit;
3079 + bool success;
3080 +
3081 + if (nmbm_get_block_state(ni, ba) == BLOCK_ST_BAD)
3082 + return NMBM_BLOCK_BAD;
3083 +
3084 + if (ba < ni->data_block_count)
3085 + return NMBM_BLOCK_GOOD_DATA;
3086 +
3087 + if (ba == ni->signature_ba)
3088 + return NMBM_BLOCK_SIGNATURE;
3089 +
3090 + if (ni->main_table_ba) {
3091 + limit = ni->backup_table_ba ? ni->backup_table_ba :
3092 + ni->mapping_blocks_ba;
3093 +
3094 + success = nmbm_block_walk_asc(ni, ni->main_table_ba, &eba,
3095 + size2blk(ni, ni->info_table_size), limit);
3096 +
3097 + if (success && ba >= ni->main_table_ba && ba < eba)
3098 + return NMBM_BLOCK_MAIN_INFO_TABLE;
3099 + }
3100 +
3101 + if (ba >= ni->backup_table_ba && ba < ni->mapping_blocks_ba)
3102 + return NMBM_BLOCK_BACKUP_INFO_TABLE;
3103 +
3104 + if (ba > ni->mapping_blocks_top_ba && ba < ni->signature_ba)
3105 + return NMBM_BLOCK_REMAPPED;
3106 +
3107 + return NMBM_BLOCK_GOOD_MGMT;
3108 +}
3109 --- /dev/null
3110 +++ b/drivers/mtd/nmbm/nmbm-private.h
3111 @@ -0,0 +1,137 @@
3112 +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
3113 +/*
3114 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
3115 + *
3116 + * Definitions for NAND Mapped-block Management (NMBM)
3117 + *
3118 + * Author: Weijie Gao <weijie.gao@mediatek.com>
3119 + */
3120 +
3121 +#ifndef _NMBM_PRIVATE_H_
3122 +#define _NMBM_PRIVATE_H_
3123 +
3124 +#include <nmbm/nmbm.h>
3125 +
3126 +#define NMBM_MAGIC_SIGNATURE 0x304d4d4e /* NMM0 */
3127 +#define NMBM_MAGIC_INFO_TABLE 0x314d4d4e /* NMM1 */
3128 +
3129 +#define NMBM_VERSION_MAJOR_S 0
3130 +#define NMBM_VERSION_MAJOR_M 0xffff
3131 +#define NMBM_VERSION_MINOR_S 16
3132 +#define NMBM_VERSION_MINOR_M 0xffff
3133 +#define NMBM_VERSION_MAKE(major, minor) (((major) & NMBM_VERSION_MAJOR_M) | \
3134 + (((minor) & NMBM_VERSION_MINOR_M) << \
3135 + NMBM_VERSION_MINOR_S))
3136 +#define NMBM_VERSION_MAJOR_GET(ver) (((ver) >> NMBM_VERSION_MAJOR_S) & \
3137 + NMBM_VERSION_MAJOR_M)
3138 +#define NMBM_VERSION_MINOR_GET(ver) (((ver) >> NMBM_VERSION_MINOR_S) & \
3139 + NMBM_VERSION_MINOR_M)
3140 +
3141 +typedef uint32_t nmbm_bitmap_t;
3142 +#define NMBM_BITMAP_UNIT_SIZE (sizeof(nmbm_bitmap_t))
3143 +#define NMBM_BITMAP_BITS_PER_BLOCK 2
3144 +#define NMBM_BITMAP_BITS_PER_UNIT (8 * sizeof(nmbm_bitmap_t))
3145 +#define NMBM_BITMAP_BLOCKS_PER_UNIT (NMBM_BITMAP_BITS_PER_UNIT / \
3146 + NMBM_BITMAP_BITS_PER_BLOCK)
3147 +
3148 +#define NMBM_SPARE_BLOCK_MULTI 1
3149 +#define NMBM_SPARE_BLOCK_DIV 2
3150 +#define NMBM_SPARE_BLOCK_MIN 2
3151 +
3152 +#define NMBM_MGMT_DIV 16
3153 +#define NMBM_MGMT_BLOCKS_MIN 32
3154 +
3155 +#define NMBM_TRY_COUNT 3
3156 +
3157 +#define BLOCK_ST_BAD 0
3158 +#define BLOCK_ST_NEED_REMAP 2
3159 +#define BLOCK_ST_GOOD 3
3160 +#define BLOCK_ST_MASK 3
3161 +
3162 +struct nmbm_header {
3163 + uint32_t magic;
3164 + uint32_t version;
3165 + uint32_t size;
3166 + uint32_t checksum;
3167 +};
3168 +
3169 +struct nmbm_signature {
3170 + struct nmbm_header header;
3171 + uint64_t nand_size;
3172 + uint32_t block_size;
3173 + uint32_t page_size;
3174 + uint32_t spare_size;
3175 + uint32_t mgmt_start_pb;
3176 + uint8_t max_try_count;
3177 + uint8_t padding[3];
3178 +};
3179 +
3180 +struct nmbm_info_table_header {
3181 + struct nmbm_header header;
3182 + uint32_t write_count;
3183 + uint32_t state_table_off;
3184 + uint32_t mapping_table_off;
3185 + uint32_t padding;
3186 +};
3187 +
3188 +struct nmbm_instance {
3189 + struct nmbm_lower_device lower;
3190 +
3191 + uint32_t rawpage_size;
3192 + uint32_t rawblock_size;
3193 + uint32_t rawchip_size;
3194 +
3195 + uint32_t writesize_mask;
3196 + uint32_t erasesize_mask;
3197 + uint16_t writesize_shift;
3198 + uint16_t erasesize_shift;
3199 +
3200 + struct nmbm_signature signature;
3201 +
3202 + uint8_t *info_table_cache;
3203 + uint32_t info_table_size;
3204 + uint32_t info_table_spare_blocks;
3205 + struct nmbm_info_table_header info_table;
3206 +
3207 + nmbm_bitmap_t *block_state;
3208 + uint32_t block_state_changed;
3209 + uint32_t state_table_size;
3210 +
3211 + int32_t *block_mapping;
3212 + uint32_t block_mapping_changed;
3213 + uint32_t mapping_table_size;
3214 +
3215 + uint8_t *page_cache;
3216 +
3217 + int protected;
3218 +
3219 + uint32_t block_count;
3220 + uint32_t data_block_count;
3221 +
3222 + uint32_t mgmt_start_ba;
3223 + uint32_t main_table_ba;
3224 + uint32_t backup_table_ba;
3225 + uint32_t mapping_blocks_ba;
3226 + uint32_t mapping_blocks_top_ba;
3227 + uint32_t signature_ba;
3228 +
3229 + enum nmbm_log_category log_display_level;
3230 +};
3231 +
3232 +/* Log utilities */
3233 +#define nlog_debug(ni, fmt, ...) \
3234 + nmbm_log(ni, NMBM_LOG_DEBUG, fmt, ##__VA_ARGS__)
3235 +
3236 +#define nlog_info(ni, fmt, ...) \
3237 + nmbm_log(ni, NMBM_LOG_INFO, fmt, ##__VA_ARGS__)
3238 +
3239 +#define nlog_warn(ni, fmt, ...) \
3240 + nmbm_log(ni, NMBM_LOG_WARN, fmt, ##__VA_ARGS__)
3241 +
3242 +#define nlog_err(ni, fmt, ...) \
3243 + nmbm_log(ni, NMBM_LOG_ERR, fmt, ##__VA_ARGS__)
3244 +
3245 +#define nlog_emerg(ni, fmt, ...) \
3246 + nmbm_log(ni, NMBM_LOG_EMERG, fmt, ##__VA_ARGS__)
3247 +
3248 +#endif /* _NMBM_PRIVATE_H_ */
3249 --- /dev/null
3250 +++ b/include/nmbm/nmbm-os.h
3251 @@ -0,0 +1,66 @@
3252 +/* SPDX-License-Identifier: GPL-2.0 */
3253 +/*
3254 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
3255 + *
3256 + * OS-dependent definitions for NAND Mapped-block Management (NMBM)
3257 + *
3258 + * Author: Weijie Gao <weijie.gao@mediatek.com>
3259 + */
3260 +
3261 +#ifndef _NMBM_OS_H_
3262 +#define _NMBM_OS_H_
3263 +
3264 +#include <div64.h>
3265 +#include <stdbool.h>
3266 +#include <watchdog.h>
3267 +#include <u-boot/crc.h>
3268 +#include <linux/errno.h>
3269 +#include <linux/log2.h>
3270 +#include <linux/types.h>
3271 +
3272 +static inline uint32_t nmbm_crc32(uint32_t crcval, const void *buf, size_t size)
3273 +{
3274 + uint chksz;
3275 + const unsigned char *p = buf;
3276 +
3277 + while (size) {
3278 + if (size > UINT_MAX)
3279 + chksz = UINT_MAX;
3280 + else
3281 + chksz = (uint)size;
3282 +
3283 + crcval = crc32_no_comp(crcval, p, chksz);
3284 + size -= chksz;
3285 + p += chksz;
3286 + }
3287 +
3288 + return crcval;
3289 +}
3290 +
3291 +static inline uint32_t nmbm_lldiv(uint64_t dividend, uint32_t divisor)
3292 +{
3293 +#if BITS_PER_LONG == 64
3294 + return dividend / divisor;
3295 +#else
3296 + __div64_32(&dividend, divisor);
3297 + return dividend;
3298 +#endif
3299 +}
3300 +
3301 +#ifdef CONFIG_NMBM_LOG_LEVEL_DEBUG
3302 +#define NMBM_DEFAULT_LOG_LEVEL 0
3303 +#elif defined(CONFIG_NMBM_LOG_LEVEL_INFO)
3304 +#define NMBM_DEFAULT_LOG_LEVEL	1
3305 +#elif defined(CONFIG_NMBM_LOG_LEVEL_WARN)
3306 +#define NMBM_DEFAULT_LOG_LEVEL	2
3307 +#elif defined(CONFIG_NMBM_LOG_LEVEL_ERR)
3308 +#define NMBM_DEFAULT_LOG_LEVEL	3
3309 +#elif defined(CONFIG_NMBM_LOG_LEVEL_EMERG)
3310 +#define NMBM_DEFAULT_LOG_LEVEL	4
3311 +#elif defined(CONFIG_NMBM_LOG_LEVEL_NONE)
3312 +#define NMBM_DEFAULT_LOG_LEVEL 5
3313 +#else
3314 +#define NMBM_DEFAULT_LOG_LEVEL 1
3315 +#endif
3316 +
3317 +#endif /* _NMBM_OS_H_ */
3318 --- /dev/null
3319 +++ b/include/nmbm/nmbm.h
3320 @@ -0,0 +1,102 @@
3321 +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
3322 +/*
3323 + * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
3324 + *
3325 + * Definitions for NAND Mapped-block Management (NMBM)
3326 + *
3327 + * Author: Weijie Gao <weijie.gao@mediatek.com>
3328 + */
3329 +
3330 +#ifndef _NMBM_H_
3331 +#define _NMBM_H_
3332 +
3333 +#include <nmbm/nmbm-os.h>
3334 +
3335 +enum nmbm_log_category {
3336 + NMBM_LOG_DEBUG,
3337 + NMBM_LOG_INFO,
3338 + NMBM_LOG_WARN,
3339 + NMBM_LOG_ERR,
3340 + NMBM_LOG_EMERG,
3341 +
3342 + __NMBM_LOG_MAX
3343 +};
3344 +
3345 +enum nmbm_oob_mode {
3346 + NMBM_MODE_PLACE_OOB,
3347 + NMBM_MODE_AUTO_OOB,
3348 + NMBM_MODE_RAW,
3349 +
3350 + __NMBM_MODE_MAX
3351 +};
3352 +
3353 +struct nmbm_lower_device {
3354 + uint32_t max_ratio;
3355 + uint32_t max_reserved_blocks;
3356 + int flags;
3357 +
3358 + uint64_t size;
3359 + uint32_t erasesize;
3360 + uint32_t writesize;
3361 + uint32_t oobsize;
3362 + uint32_t oobavail;
3363 +
3364 + void *arg;
3365 + int (*reset_chip)(void *arg);
3366 +
3367 + /*
3368 + * read_page:
3369 + * return 0 if succeeds
3370 + * return positive number for ecc error
3371 + * return negative number for other errors
3372 + */
3373 + int (*read_page)(void *arg, uint64_t addr, void *buf, void *oob, enum nmbm_oob_mode mode);
3374 + int (*write_page)(void *arg, uint64_t addr, const void *buf, const void *oob, enum nmbm_oob_mode mode);
3375 + int (*erase_block)(void *arg, uint64_t addr);
3376 +
3377 + int (*is_bad_block)(void *arg, uint64_t addr);
3378 + int (*mark_bad_block)(void *arg, uint64_t addr);
3379 +
3380 + /* OS-dependent logging function */
3381 + void (*logprint)(void *arg, enum nmbm_log_category level, const char *fmt, va_list ap);
3382 +};
3383 +
3384 +struct nmbm_instance;
3385 +
3386 +/* Create NMBM if management area not found, or not complete */
3387 +#define NMBM_F_CREATE 0x01
3388 +
3389 +/* Empty page is also protected by ECC, and bitflip(s) can be corrected */
3390 +#define NMBM_F_EMPTY_PAGE_ECC_OK 0x02
3391 +
3392 +/* Do not write anything back to flash */
3393 +#define NMBM_F_READ_ONLY 0x04
3394 +
3395 +size_t nmbm_calc_structure_size(struct nmbm_lower_device *nld);
3396 +int nmbm_attach(struct nmbm_lower_device *nld, struct nmbm_instance *ni);
3397 +int nmbm_detach(struct nmbm_instance *ni);
3398 +
3399 +enum nmbm_log_category nmbm_set_log_level(struct nmbm_instance *ni,
3400 + enum nmbm_log_category level);
3401 +
3402 +int nmbm_erase_block_range(struct nmbm_instance *ni, uint64_t addr,
3403 + uint64_t size, uint64_t *failed_addr);
3404 +int nmbm_read_single_page(struct nmbm_instance *ni, uint64_t addr, void *data,
3405 + void *oob, enum nmbm_oob_mode mode);
3406 +int nmbm_read_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
3407 + void *data, enum nmbm_oob_mode mode, size_t *retlen);
3408 +int nmbm_write_single_page(struct nmbm_instance *ni, uint64_t addr,
3409 + const void *data, const void *oob,
3410 + enum nmbm_oob_mode mode);
3411 +int nmbm_write_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
3412 + const void *data, enum nmbm_oob_mode mode,
3413 + size_t *retlen);
3414 +
3415 +int nmbm_check_bad_block(struct nmbm_instance *ni, uint64_t addr);
3416 +int nmbm_mark_bad_block(struct nmbm_instance *ni, uint64_t addr);
3417 +
3418 +uint64_t nmbm_get_avail_size(struct nmbm_instance *ni);
3419 +
3420 +int nmbm_get_lower_device(struct nmbm_instance *ni, struct nmbm_lower_device *nld);
3421 +
3422 +#endif /* _NMBM_H_ */