openwrt/openwrt.git: target/linux/generic/files/drivers/mtd/nand/mtk_bmt.c (commit 7c06ee01f03afb75439a8330003053d6dc51131a)
1 /*
2 * Copyright (c) 2017 MediaTek Inc.
3 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
4 * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16 #include <linux/slab.h>
17 #include <linux/gfp.h>
18 #include <linux/kernel.h>
19 #include <linux/of.h>
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/partitions.h>
22 #include <linux/mtd/mtk_bmt.h>
23 #include <linux/module.h>
24 #include <linux/debugfs.h>
25 #include <linux/bits.h>
26
27 #define MAIN_SIGNATURE_OFFSET 0
28 #define OOB_SIGNATURE_OFFSET 1
29 #define BBPOOL_RATIO 2
30
31 #define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
32
33 /* Maximum 8k blocks */
34 #define BB_TABLE_MAX bmtd.table_size
35 #define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
36 #define BMT_TBL_DEF_VAL 0x0
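/*
 * Worked example, assuming the default "mediatek,bmt-table-size" of 0x2000
 * (see mtk_bmt_init_v2() below): BB_TABLE_MAX = 8192 entries, so
 * BMT_TABLE_MAX = 8192 * 2 / 100 = 163 entries for the BMT pool table.
 */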
37
38 struct mtk_bmt_ops {
39 char *sig;
40 unsigned int sig_len;
41 int (*init)(struct device_node *np);
42 bool (*remap_block)(u16 block, u16 mapped_block, int copy_len);
43 void (*unmap_block)(u16 block);
44 u16 (*get_mapping_block)(int block);
45 int (*debug)(void *data, u64 val);
46 };
47
48 struct bbbt {
49 char signature[3];
50 /* This version field distinguishes the legacy algorithm from the new one */
51 #define BBMT_VERSION 2
52 unsigned char version;
53 /* Below 2 tables will be written in SLC */
54 u16 bb_tbl[];
55 };
56
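/*
 * Layout of the table buffer, as implied by bmt_tbl() below: bb_tbl[] holds
 * table_size u16 entries and is immediately followed by an array of
 * struct bbmt entries describing the BMT pool:
 *
 *   | signature | version | bb_tbl[0..table_size-1] | bmt_tbl[0..] |
 */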
57 struct bbmt {
58 u16 block;
59 #define NO_MAPPED 0
60 #define NORMAL_MAPPED 1
61 #define BMT_MAPPED 2
62 u16 mapped;
63 };
64
65 static struct bmt_desc {
66 struct mtd_info *mtd;
67
68 int (*_read_oob) (struct mtd_info *mtd, loff_t from,
69 struct mtd_oob_ops *ops);
70 int (*_write_oob) (struct mtd_info *mtd, loff_t to,
71 struct mtd_oob_ops *ops);
72 int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
73 int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
74 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
75
76 const struct mtk_bmt_ops *ops;
77
78 struct bbbt *bbt;
79
80 struct dentry *debugfs_dir;
81
82 u32 table_size;
83 u32 pg_size;
84 u32 blk_size;
85 u16 pg_shift;
86 u16 blk_shift;
87 /* Start of the BMT pool, as a logical block address */
88 u16 pool_lba;
89 /* Start of the BMT pool, as a physical block address */
90 u16 pool_pba;
91 /* Maximum number of bad blocks guaranteed by the vendor */
92 u16 bb_max;
93 /* Total number of blocks of the NAND chip */
94 u16 total_blks;
95 /* The BMT itself is stored in block bmt_tbl[bmt_blk_idx].block */
96 u16 bmt_blk_idx;
97 /* How many pages are needed to store 'struct bbbt' */
98 u32 bmt_pgs;
99
100 const __be32 *remap_range;
101 int remap_range_len;
102
103 /* to compensate for driver level remapping */
104 u8 oob_offset;
105 } bmtd = {0};
106
107 static unsigned char *nand_bbt_buf;
108 static unsigned char *nand_data_buf;
109
110 /* -------- Unit conversions -------- */
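/*
 * blk_pg() converts an erase-block index into the index of its first page.
 * For example, assuming 128 KiB blocks (blk_shift = 17) and 2 KiB pages
 * (pg_shift = 11), blk_pg(block) == block * 64.
 */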
111 static inline u32 blk_pg(u16 block)
112 {
113 return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
114 }
115
116 /* -------- Nand operations wrapper -------- */
117 static inline int
118 bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
119 unsigned char *fdm, int fdm_len)
120 {
121 struct mtd_oob_ops ops = {
122 .mode = MTD_OPS_PLACE_OOB,
123 .ooboffs = bmtd.oob_offset,
124 .oobbuf = fdm,
125 .ooblen = fdm_len,
126 .datbuf = dat,
127 .len = dat_len,
128 };
129
130 return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
131 }
132
133 static inline int bbt_nand_erase(u16 block)
134 {
135 struct mtd_info *mtd = bmtd.mtd;
136 struct erase_info instr = {
137 .addr = (loff_t)block << bmtd.blk_shift,
138 .len = bmtd.blk_size,
139 };
140
141 return bmtd._erase(mtd, &instr);
142 }
143
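/*
 * Copy page data and OOB from src_blk to dest_blk, stopping at max_offset.
 * Used when a block is remapped so that data already written to it survives
 * the move (e.g. after a write failure part-way through the block).
 */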
144 static inline int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
145 {
146 int pages = bmtd.blk_size >> bmtd.pg_shift;
147 loff_t src = (loff_t)src_blk << bmtd.blk_shift;
148 loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
149 loff_t offset = 0;
150 uint8_t oob[64];
151 int i, ret;
152
153 for (i = 0; i < pages; i++) {
154 struct mtd_oob_ops rd_ops = {
155 .mode = MTD_OPS_PLACE_OOB,
156 .oobbuf = oob,
157 .ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
158 .datbuf = nand_data_buf,
159 .len = bmtd.pg_size,
160 };
161 struct mtd_oob_ops wr_ops = {
162 .mode = MTD_OPS_PLACE_OOB,
163 .oobbuf = oob,
164 .datbuf = nand_data_buf,
165 .len = bmtd.pg_size,
166 };
167
168 if (offset >= max_offset)
169 break;
170
171 ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
172 if (ret < 0 && !mtd_is_bitflip(ret))
173 return ret;
174
175 if (!rd_ops.retlen)
176 break;
177
178 wr_ops.ooblen = rd_ops.oobretlen;
179 ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
180 if (ret < 0)
181 return ret;
182
183 offset += rd_ops.retlen;
184 }
185
186 return 0;
187 }
188
189 /* -------- Bad Blocks Management -------- */
190 static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
191 {
192 return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
193 }
194
195 static int
196 read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
197 {
198 u32 len = bmtd.bmt_pgs << bmtd.pg_shift;
199
200 return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
201 }
202
203 static int write_bmt(u16 block, unsigned char *dat)
204 {
205 struct mtd_oob_ops ops = {
206 .mode = MTD_OPS_PLACE_OOB,
207 .ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
208 .oobbuf = bmtd.ops->sig,
209 .ooblen = bmtd.ops->sig_len,
210 .datbuf = dat,
211 .len = bmtd.bmt_pgs << bmtd.pg_shift,
212 };
213 loff_t addr = (loff_t)block << bmtd.blk_shift;
214
215 return bmtd._write_oob(bmtd.mtd, addr, &ops);
216 }
217
218 static u16 find_valid_block(u16 block)
219 {
220 u8 fdm[4];
221 int ret;
222 int loop = 0;
223
224 retry:
225 if (block >= bmtd.total_blks)
226 return 0;
227
228 ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
229 fdm, sizeof(fdm));
230 /* Read the 1st byte of the FDM to judge whether this is a bad
231 * block or not
232 */
233 if (ret || fdm[0] != 0xff) {
234 pr_info("nand: found bad block 0x%x\n", block);
235 if (loop >= bmtd.bb_max) {
236 pr_info("nand: FATAL ERR: too many bad blocks!!\n");
237 return 0;
238 }
239
240 loop++;
241 block++;
242 goto retry;
243 }
244
245 return block;
246 }
247
248 /* Find out all bad blocks, and fill in the mapping table */
249 static int scan_bad_blocks(struct bbbt *bbt)
250 {
251 int i;
252 u16 block = 0;
253
254 /* On the first-time download, block 0 MUST NOT be a bad block;
255 * this is guaranteed by the vendor
256 */
257 bbt->bb_tbl[0] = 0;
258
259 /*
260 * Construct the mapping table of Normal data area(non-PMT/BMTPOOL)
261 * G - Good block; B - Bad block
262 * ---------------------------
263 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
264 * ---------------------------
265 * What bb_tbl[i] looks like:
266 * physical block(i):
267 * 0 1 2 3 4 5 6 7 8 9 a b c
268 * mapped block(bb_tbl[i]):
269 * 0 1 3 6 7 8 9 b ......
270 * ATTENTION:
271 * If a new bad block occurs at block n, search bmt_tbl to find
272 * an available block x, and set bb_tbl[n] = x;
273 */
274 for (i = 1; i < bmtd.pool_lba; i++) {
275 bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
276 BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
277 if (bbt->bb_tbl[i] == 0)
278 return -1;
279 }
280
281 /* Physical Block start Address of BMT pool */
282 bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
283 if (bmtd.pool_pba >= bmtd.total_blks - 2) {
284 pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
285 return -1;
286 }
287
288 BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
289 i = 0;
290 block = bmtd.pool_pba;
291 /*
292 * The bmt table is used for runtime bad block mapping
293 * G - Good block; B - Bad block
294 * ---------------------------
295 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
296 * ---------------------------
297 * block: 0 1 2 3 4 5 6 7 8 9 a b c
298 * What bmt_tbl[i] looks like in initial state:
299 * i:
300 * 0 1 2 3 4 5 6 7
301 * bmt_tbl[i].block:
302 * 0 1 3 6 7 8 9 b
303 * bmt_tbl[i].mapped:
304 * N N N N N N N B
305 * N - Not mapped(Available)
306 * M - Mapped
307 * B - BMT
308 * ATTENTION:
309 * The BMT always resides in the last valid block of the pool
310 */
311 while ((block = find_valid_block(block)) != 0) {
312 bmt_tbl(bbt)[i].block = block;
313 bmt_tbl(bbt)[i].mapped = NO_MAPPED;
314 BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
315 block++;
316 i++;
317 }
318
319 /* i - How many available blocks in pool, which is the length of bmt_tbl[]
320 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
321 */
322 if (i < 1) {
323 pr_info("nand: FATAL ERR: no space to store BMT!!\n");
324 return -1;
325 }
326
327 bmtd.bmt_blk_idx = i - 1;
328 bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
329
330 pr_info("[BBT] %d available blocks in BMT pool\n", i);
331
332 return 0;
333 }
334
335 static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
336 {
337 struct bbbt *bbt = (struct bbbt *)buf;
338 u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET;
339
340
341 if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
342 memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
343 if (bbt->version == BBMT_VERSION)
344 return true;
345 }
346 BBT_LOG("[BBT] BMT version mismatch, please upgrade the preloader and U-Boot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
347 sig[0], sig[1], sig[2],
348 fdm[1], fdm[2], fdm[3]);
349 return false;
350 }
351
352 static u16 get_bmt_index(struct bbmt *bmt)
353 {
354 int i = 0;
355
356 while (bmt[i].block != BMT_TBL_DEF_VAL) {
357 if (bmt[i].mapped == BMT_MAPPED)
358 return i;
359 i++;
360 }
361 return 0;
362 }
363
364 static struct bbbt *scan_bmt(u16 block)
365 {
366 u8 fdm[4];
367
368 if (block < bmtd.pool_lba)
369 return NULL;
370
371 if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
372 return scan_bmt(block - 1);
373
374 if (is_valid_bmt(nand_bbt_buf, fdm)) {
375 bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
376 if (bmtd.bmt_blk_idx == 0) {
377 pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
378 return NULL;
379 }
380 pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
381 return (struct bbbt *)nand_bbt_buf;
382 } else
383 return scan_bmt(block - 1);
384 }
385
386 /* Write the Burner Bad Block Table to Nand Flash
387 * n - write BMT to bmt_tbl[n]
388 */
389 static u16 upload_bmt(struct bbbt *bbt, int n)
390 {
391 u16 block;
392
393 retry:
394 if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
395 pr_info("nand: FATAL ERR: no space to store BMT!\n");
396 return (u16)-1;
397 }
398
399 block = bmt_tbl(bbt)[n].block;
400 BBT_LOG("n = 0x%x, block = 0x%x", n, block);
401 if (bbt_nand_erase(block)) {
402 bmt_tbl(bbt)[n].block = 0;
403 /* erase failed, try the previous block: bmt_tbl[n - 1].block */
404 n--;
405 goto retry;
406 }
407
408 /* The main signature offset is fixed at 0,
409 * the OOB signature offset is fixed at 1
410 */
411 memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
412 bbt->version = BBMT_VERSION;
413
414 if (write_bmt(block, (unsigned char *)bbt)) {
415 bmt_tbl(bbt)[n].block = 0;
416
417 /* write failed, try the previous block in bmt_tbl[n - 1] */
418 n--;
419 goto retry;
420 }
421
422 /* Return the current index(n) of BMT pool (bmt_tbl[n]) */
423 return n;
424 }
425
426 static u16 find_valid_block_in_pool(struct bbbt *bbt)
427 {
428 int i;
429
430 if (bmtd.bmt_blk_idx == 0)
431 goto error;
432
433 for (i = 0; i < bmtd.bmt_blk_idx; i++) {
434 if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
435 bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
436 return bmt_tbl(bbt)[i].block;
437 }
438 }
439
440 error:
441 pr_info("nand: FATAL ERR: BMT pool has run out!\n");
442 return 0;
443 }
444
445 /* We hit a bad block: mark it as bad and map it to a valid block in the pool;
446 * if it was a write failure, copy the existing data over to the mapped block
447 */
448 static bool remap_block_v2(u16 block, u16 mapped_block, int copy_len)
449 {
450 u16 mapped_blk;
451 struct bbbt *bbt;
452
453 bbt = bmtd.bbt;
454 mapped_blk = find_valid_block_in_pool(bbt);
455 if (mapped_blk == 0)
456 return false;
457
458 /* Map new bad block to available block in pool */
459 bbt->bb_tbl[block] = mapped_blk;
460
461 /* Erase new block */
462 bbt_nand_erase(mapped_blk);
463 if (copy_len > 0)
464 bbt_nand_copy(mapped_blk, block, copy_len);
465
466 bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
467
468 return true;
469 }
470
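/*
 * Check whether a block is covered by the optional
 * "mediatek,bmt-remap-range" property, which lists pairs of <start end>
 * byte offsets (big-endian u32). Without the property every block is
 * eligible for remapping. A hypothetical example restricting remapping to
 * the first 0x6c0000 bytes of the flash:
 *
 *   mediatek,bmt-remap-range = <0x0 0x6c0000>;
 */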
471 static bool
472 mapping_block_in_range(int block, int *start, int *end)
473 {
474 const __be32 *cur = bmtd.remap_range;
475 u32 addr = block << bmtd.blk_shift;
476 int i;
477
478 if (!cur || !bmtd.remap_range_len) {
479 *start = 0;
480 *end = bmtd.total_blks;
481 return true;
482 }
483
484 for (i = 0; i < bmtd.remap_range_len; i++, cur += 2) {
485 if (addr < be32_to_cpu(cur[0]) || addr >= be32_to_cpu(cur[1]))
486 continue;
487
488 *start = be32_to_cpu(cur[0]);
489 *end = be32_to_cpu(cur[1]);
490 return true;
491 }
492
493 return false;
494 }
495
496 static u16
497 get_mapping_block_index_v2(int block)
498 {
499 int start, end;
500
501 if (block >= bmtd.pool_lba)
502 return block;
503
504 if (!mapping_block_in_range(block, &start, &end))
505 return block;
506
507 return bmtd.bbt->bb_tbl[block];
508 }
509
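/*
 * mtk_bmt_read()/mtk_bmt_write() replace the mtd _read_oob/_write_oob hooks:
 * each request is split at erase-block boundaries, every logical block is
 * redirected through bmtd.ops->get_mapping_block(), and on failure (or, for
 * reads, when the bitflip threshold is hit inside a remappable range) the
 * block is remapped and the operation retried up to 10 times.
 */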
510 static int
511 mtk_bmt_read(struct mtd_info *mtd, loff_t from,
512 struct mtd_oob_ops *ops)
513 {
514 struct mtd_oob_ops cur_ops = *ops;
515 int retry_count = 0;
516 loff_t cur_from;
517 int ret = 0;
518 int max_bitflips = 0;
519 int start, end;
520
521 ops->retlen = 0;
522 ops->oobretlen = 0;
523
524 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
525 int cur_ret;
526
527 u32 offset = from & (bmtd.blk_size - 1);
528 u32 block = from >> bmtd.blk_shift;
529 u32 cur_block;
530
531 cur_block = bmtd.ops->get_mapping_block(block);
532 cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
533
534 cur_ops.oobretlen = 0;
535 cur_ops.retlen = 0;
536 cur_ops.len = min_t(u32, mtd->erasesize - offset,
537 ops->len - ops->retlen);
538 cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
539 if (cur_ret < 0)
540 ret = cur_ret;
541 else
542 max_bitflips = max_t(int, max_bitflips, cur_ret);
543 if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
544 bmtd.ops->remap_block(block, cur_block, mtd->erasesize);
545 if (retry_count++ < 10)
546 continue;
547
548 goto out;
549 }
550
551 if (cur_ret >= mtd->bitflip_threshold &&
552 mapping_block_in_range(block, &start, &end))
553 bmtd.ops->remap_block(block, cur_block, mtd->erasesize);
554
555 ops->retlen += cur_ops.retlen;
556 ops->oobretlen += cur_ops.oobretlen;
557
558 cur_ops.ooboffs = 0;
559 cur_ops.datbuf += cur_ops.retlen;
560 cur_ops.oobbuf += cur_ops.oobretlen;
561 cur_ops.ooblen -= cur_ops.oobretlen;
562
563 if (!cur_ops.len)
564 cur_ops.len = mtd->erasesize - offset;
565
566 from += cur_ops.len;
567 retry_count = 0;
568 }
569
570 out:
571 if (ret < 0)
572 return ret;
573
574 return max_bitflips;
575 }
576
577 static int
578 mtk_bmt_write(struct mtd_info *mtd, loff_t to,
579 struct mtd_oob_ops *ops)
580 {
581 struct mtd_oob_ops cur_ops = *ops;
582 int retry_count = 0;
583 loff_t cur_to;
584 int ret;
585
586 ops->retlen = 0;
587 ops->oobretlen = 0;
588
589 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
590 u32 offset = to & (bmtd.blk_size - 1);
591 u32 block = to >> bmtd.blk_shift;
592 u32 cur_block;
593
594 cur_block = bmtd.ops->get_mapping_block(block);
595 cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
596
597 cur_ops.oobretlen = 0;
598 cur_ops.retlen = 0;
599 cur_ops.len = min_t(u32, bmtd.blk_size - offset,
600 ops->len - ops->retlen);
601 ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
602 if (ret < 0) {
603 bmtd.ops->remap_block(block, cur_block, offset);
604 if (retry_count++ < 10)
605 continue;
606
607 return ret;
608 }
609
610 ops->retlen += cur_ops.retlen;
611 ops->oobretlen += cur_ops.oobretlen;
612
613 cur_ops.ooboffs = 0;
614 cur_ops.datbuf += cur_ops.retlen;
615 cur_ops.oobbuf += cur_ops.oobretlen;
616 cur_ops.ooblen -= cur_ops.oobretlen;
617
618 if (!cur_ops.len)
619 cur_ops.len = mtd->erasesize - offset;
620
621 to += cur_ops.len;
622 retry_count = 0;
623 }
624
625 return 0;
626 }
627
628 static int
629 mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
630 {
631 struct erase_info mapped_instr = {
632 .len = bmtd.blk_size,
633 };
634 int retry_count = 0;
635 u64 start_addr, end_addr;
636 int ret = 0;
637 u16 orig_block, block;
638
639 start_addr = instr->addr & (~mtd->erasesize_mask);
640 end_addr = instr->addr + instr->len;
641
642 while (start_addr < end_addr) {
643 orig_block = start_addr >> bmtd.blk_shift;
644 block = bmtd.ops->get_mapping_block(orig_block);
645 mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
646 ret = bmtd._erase(mtd, &mapped_instr);
647 if (ret) {
648 bmtd.ops->remap_block(orig_block, block, 0);
649 if (retry_count++ < 10)
650 continue;
651 instr->fail_addr = start_addr;
652 break;
653 }
654 start_addr += mtd->erasesize;
655 retry_count = 0;
656 }
657
658 return ret;
659 }

660 static int
661 mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
662 {
663 int retry_count = 0;
664 u16 orig_block = ofs >> bmtd.blk_shift;
665 u16 block;
666 int ret;
667
668 retry:
669 block = bmtd.ops->get_mapping_block(orig_block);
670 ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
671 if (ret) {
672 bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);
673 if (retry_count++ < 10)
674 goto retry;
675 }
676 return ret;
677 }
678
679 static int
680 mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
681 {
682 u16 orig_block = ofs >> bmtd.blk_shift;
683 u16 block = bmtd.ops->get_mapping_block(orig_block);
684
685 bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);
686
687 return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
688 }
689
690 static void
691 mtk_bmt_replace_ops(struct mtd_info *mtd)
692 {
693 bmtd._read_oob = mtd->_read_oob;
694 bmtd._write_oob = mtd->_write_oob;
695 bmtd._erase = mtd->_erase;
696 bmtd._block_isbad = mtd->_block_isbad;
697 bmtd._block_markbad = mtd->_block_markbad;
698
699 mtd->_read_oob = mtk_bmt_read;
700 mtd->_write_oob = mtk_bmt_write;
701 mtd->_erase = mtk_bmt_mtd_erase;
702 mtd->_block_isbad = mtk_bmt_block_isbad;
703 mtd->_block_markbad = mtk_bmt_block_markbad;
704 }
705
706 static void
707 unmap_block_v2(u16 block)
708 {
709 bmtd.bbt->bb_tbl[block] = block;
710 bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
711 }
712
713 static int mtk_bmt_debug_mark_good(void *data, u64 val)
714 {
715 bmtd.ops->unmap_block(val >> bmtd.blk_shift);
716
717 return 0;
718 }
719
720 static int mtk_bmt_debug_mark_bad(void *data, u64 val)
721 {
722 u32 block = val >> bmtd.blk_shift;
723 u16 cur_block = bmtd.ops->get_mapping_block(block);
724
725 bmtd.ops->remap_block(block, cur_block, bmtd.blk_size);
726
727 return 0;
728 }
729
730 static unsigned long *
731 mtk_bmt_get_mapping_mask(void)
732 {
733 struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
734 int main_blocks = bmtd.mtd->size >> bmtd.blk_shift;
735 unsigned long *used;
736 int i, k;
737
738 used = kcalloc(BIT_WORD(bmtd.bmt_blk_idx) + 1, sizeof(unsigned long), GFP_KERNEL);
739 if (!used)
740 return NULL;
741
742 for (i = 1; i < main_blocks; i++) {
743 if (bmtd.bbt->bb_tbl[i] == i)
744 continue;
745
746 for (k = 0; k < bmtd.bmt_blk_idx; k++) {
747 if (bmtd.bbt->bb_tbl[i] != bbmt[k].block)
748 continue;
749
750 set_bit(k, used);
751 break;
752 }
753 }
754
755 return used;
756 }
757
758 static int mtk_bmt_debug_v2(void *data, u64 val)
759 {
760 struct bbmt *bbmt = bmt_tbl(bmtd.bbt);
761 struct mtd_info *mtd = bmtd.mtd;
762 unsigned long *used;
763 int main_blocks = mtd->size >> bmtd.blk_shift;
764 int n_remap = 0;
765 int i;
766
767 used = mtk_bmt_get_mapping_mask();
768 if (!used)
769 return -ENOMEM;
770
771 switch (val) {
772 case 0:
773 for (i = 1; i < main_blocks; i++) {
774 if (bmtd.bbt->bb_tbl[i] == i)
775 continue;
776
777 printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]);
778 n_remap++;
779 }
780 for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
781 char c;
782
783 switch (bbmt[i].mapped) {
784 case NO_MAPPED:
785 continue;
786 case NORMAL_MAPPED:
787 c = 'm';
788 if (test_bit(i, used))
789 c = 'M';
790 break;
791 case BMT_MAPPED:
792 c = 'B';
793 break;
794 default:
795 c = 'X';
796 break;
797 }
798 printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block);
799 }
800 break;
801 case 100:
802 for (i = 0; i <= bmtd.bmt_blk_idx; i++) {
803 if (bbmt[i].mapped != NORMAL_MAPPED)
804 continue;
805
806 if (test_bit(i, used))
807 continue;
808
809 n_remap++;
810 bbmt[i].mapped = NO_MAPPED;
811 printk("free block [%d:%x]\n", i, bbmt[i].block);
812 }
813 if (n_remap)
814 bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
815 break;
816 }
817
818 kfree(used);
819
820 return 0;
821 }
822
823 static int mtk_bmt_debug(void *data, u64 val)
824 {
825 return bmtd.ops->debug(data, val);
826 }
827
828
829 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
830 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
831 DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");
832
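/*
 * Debugfs interface, found under /sys/kernel/debug/mtk-bmt/ when debugfs is
 * mounted in the usual place: "mark_bad" and "mark_good" take a flash byte
 * offset (the block containing it is remapped or unmapped), while "debug" is
 * backed by the per-mode debug op (for v2, writing 0 dumps the current
 * mapping and writing 100 frees pool entries that are no longer referenced).
 */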
833 static void
834 mtk_bmt_add_debugfs(void)
835 {
836 struct dentry *dir;
837
838 dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
839 if (!dir)
840 return;
841
842 debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
843 debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
844 debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
845 }
846
847 void mtk_bmt_detach(struct mtd_info *mtd)
848 {
849 if (bmtd.mtd != mtd)
850 return;
851
852 if (bmtd.debugfs_dir)
853 debugfs_remove_recursive(bmtd.debugfs_dir);
854 bmtd.debugfs_dir = NULL;
855
856 kfree(nand_bbt_buf);
857 kfree(nand_data_buf);
858
859 mtd->_read_oob = bmtd._read_oob;
860 mtd->_write_oob = bmtd._write_oob;
861 mtd->_erase = bmtd._erase;
862 mtd->_block_isbad = bmtd._block_isbad;
863 mtd->_block_markbad = bmtd._block_markbad;
864 mtd->size = bmtd.total_blks << bmtd.blk_shift;
865
866 memset(&bmtd, 0, sizeof(bmtd));
867 }
868
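/*
 * v2 initialisation. Optional device-tree properties, with the defaults
 * applied below: "mediatek,bmt-pool-size" (80), "mediatek,bmt-oob-offset" (0)
 * and "mediatek,bmt-table-size" (0x2000). A hypothetical node could contain:
 *
 *   mediatek,bmt-v2;
 *   mediatek,bmt-pool-size = <80>;
 *   mediatek,bmt-table-size = <0x2000>;
 */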
869 static int mtk_bmt_init_v2(struct device_node *np)
870 {
871 u32 bmt_pool_size, bmt_table_size;
872 u32 bufsz, block;
873 u16 pmt_block;
874
875 if (of_property_read_u32(np, "mediatek,bmt-pool-size",
876 &bmt_pool_size) != 0)
877 bmt_pool_size = 80;
878
879 if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
880 &bmtd.oob_offset) != 0)
881 bmtd.oob_offset = 0;
882
883 if (of_property_read_u32(np, "mediatek,bmt-table-size",
884 &bmt_table_size) != 0)
885 bmt_table_size = 0x2000U;
886
887 bmtd.table_size = bmt_table_size;
888
889 pmt_block = bmtd.total_blks - bmt_pool_size - 2;
890
891 bmtd.mtd->size = pmt_block << bmtd.blk_shift;
892
893 /*
894 * ---------------------------------------
895 * | PMT(2blks) | BMT POOL(totalblks * 2%) |
896 * ---------------------------------------
897 * ^ ^
898 * | |
899 * pmt_block pmt_block + 2blocks(pool_lba)
900 *
901 * ATTENTION!!!
902 * Blocks before the boundary are mapped through bb_tbl,
903 * blocks from the boundary onwards are managed through bmt_tbl
904 */
905
906 bmtd.pool_lba = (u16)(pmt_block + 2);
907 bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
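/*
 * Worked example, for a hypothetical 1024-block chip with the default pool
 * size of 80: pmt_block = 1024 - 80 - 2 = 942, pool_lba = 944,
 * bb_max = 1024 * 2 / 100 = 20.
 */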
908
909 bufsz = round_up(sizeof(struct bbbt) +
910 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
911 bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
912
913 nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
914 if (!nand_bbt_buf)
915 return -ENOMEM;
916
917 memset(nand_bbt_buf, 0xff, bufsz);
918
919 /* Scanning starts from the first page of the last block
920 * of the whole flash
921 */
922 bmtd.bbt = scan_bmt(bmtd.total_blks - 1);
923 if (!bmtd.bbt) {
924 /* BMT not found */
925 if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
926 pr_info("nand: FATAL: too many blocks, cannot support!\n");
927 return -1;
928 }
929
930 bmtd.bbt = (struct bbbt *)nand_bbt_buf;
931 memset(bmt_tbl(bmtd.bbt), BMT_TBL_DEF_VAL,
932 bmtd.table_size * sizeof(struct bbmt));
933
934 if (scan_bad_blocks(bmtd.bbt))
935 return -1;
936
937 /* BMT always in the last valid block in pool */
938 bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
939 block = bmt_tbl(bmtd.bbt)[bmtd.bmt_blk_idx].block;
940 pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
941
942 if (bmtd.bmt_blk_idx == 0)
943 pr_info("nand: Warning: no available block in BMT pool!\n");
944 else if (bmtd.bmt_blk_idx == (u16)-1)
945 return -1;
946 }
947
948 return 0;
949 }
950
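/*
 * In bbt mode nand_bbt_buf holds two status bits per block (four blocks per
 * byte); as interpreted here, a non-zero field marks the block bad. E.g.
 * block 10 is tracked in bits 4-5 of nand_bbt_buf[2].
 */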
951 static bool
952 bbt_block_is_bad(u16 block)
953 {
954 u8 cur = nand_bbt_buf[block / 4];
955
956 return cur & (3 << ((block % 4) * 2));
957 }
958
959 static void
960 bbt_set_block_state(u16 block, bool bad)
961 {
962 u8 mask = (3 << ((block % 4) * 2));
963
964 if (bad)
965 nand_bbt_buf[block / 4] |= mask;
966 else
967 nand_bbt_buf[block / 4] &= ~mask;
968
969 bbt_nand_erase(bmtd.bmt_blk_idx);
970 write_bmt(bmtd.bmt_blk_idx, nand_bbt_buf);
971 }
972
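/*
 * Map a logical block to a physical one by skipping bad blocks inside the
 * remap range. E.g. with a range covering blocks 0-7 where blocks 2 and 5
 * are bad, logical blocks 0,1,2,3,4,5 map to physical 0,1,3,4,6,7.
 */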
973 static u16
974 get_mapping_block_index_bbt(int block)
975 {
976 int start, end, ofs;
977 int bad_blocks = 0;
978 int i;
979
980 if (!mapping_block_in_range(block, &start, &end))
981 return block;
982
983 start >>= bmtd.blk_shift;
984 end >>= bmtd.blk_shift;
985 /* skip bad blocks within the mapping range */
986 ofs = block - start;
987 for (i = start; i < end; i++) {
988 if (bbt_block_is_bad(i))
989 bad_blocks++;
990 else if (ofs)
991 ofs--;
992 else
993 break;
994 }
995
996 if (i < end)
997 return i;
998
999 /* when overflowing, remap remaining blocks to bad ones */
1000 for (i = end - 1; bad_blocks > 0; i--) {
1001 if (!bbt_block_is_bad(i))
1002 continue;
1003
1004 bad_blocks--;
1005 if (bad_blocks <= ofs)
1006 return i;
1007 }
1008
1009 return block;
1010 }
1011
1012 static bool remap_block_bbt(u16 block, u16 mapped_blk, int copy_len)
1013 {
1014 int start, end;
1015 u16 new_blk;
1016
1017 if (!mapping_block_in_range(block, &start, &end))
1018 return false;
1019
1020 bbt_set_block_state(mapped_blk, true);
1021
1022 new_blk = get_mapping_block_index_bbt(block);
1023 bbt_nand_erase(new_blk);
1024 if (copy_len > 0)
1025 bbt_nand_copy(new_blk, mapped_blk, copy_len);
1026
1027 return false;
1028 }
1029
1030 static void
1031 unmap_block_bbt(u16 block)
1032 {
1033 bbt_set_block_state(block, false);
1034 }
1035
1036 static int
1037 mtk_bmt_read_bbt(void)
1038 {
1039 u8 oob_buf[8];
1040 int i;
1041
1042 for (i = bmtd.total_blks - 1; i >= bmtd.total_blks - 5; i--) {
1043 u32 page = i << (bmtd.blk_shift - bmtd.pg_shift);
1044
1045 if (bbt_nand_read(page, nand_bbt_buf, bmtd.pg_size,
1046 oob_buf, sizeof(oob_buf))) {
1047 pr_info("read_bbt: could not read block %d\n", i);
1048 continue;
1049 }
1050
1051 if (oob_buf[0] != 0xff) {
1052 pr_info("read_bbt: bad block at %d\n", i);
1053 continue;
1054 }
1055
1056 if (memcmp(&oob_buf[1], "mtknand", 7) != 0) {
1057 pr_info("read_bbt: signature mismatch in block %d\n", i);
1058 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, oob_buf, 8, 1);
1059 continue;
1060 }
1061
1062 pr_info("read_bbt: found bbt at block %d\n", i);
1063 bmtd.bmt_blk_idx = i;
1064 return 0;
1065 }
1066
1067 return -EIO;
1068 }
1069
1070
1071 static int
1072 mtk_bmt_init_bbt(struct device_node *np)
1073 {
1074 int buf_size = round_up(bmtd.total_blks >> 2, bmtd.blk_size);
1075 int ret;
1076
1077 nand_bbt_buf = kmalloc(buf_size, GFP_KERNEL);
1078 if (!nand_bbt_buf)
1079 return -ENOMEM;
1080
1081 memset(nand_bbt_buf, 0xff, buf_size);
1082 bmtd.mtd->size -= 4 * bmtd.mtd->erasesize;
1083
1084 ret = mtk_bmt_read_bbt();
1085 if (ret)
1086 return ret;
1087
1088 bmtd.bmt_pgs = buf_size / bmtd.pg_size;
1089
1090 return 0;
1091 }
1092
1093 static int mtk_bmt_debug_bbt(void *data, u64 val)
1094 {
1095 char buf[5];
1096 int i, k;
1097
1098 switch (val) {
1099 case 0:
1100 for (i = 0; i < bmtd.total_blks; i += 4) {
1101 u8 cur = nand_bbt_buf[i / 4];
1102
1103 for (k = 0; k < 4; k++, cur >>= 2)
1104 buf[k] = (cur & 3) ? 'B' : '.';
1105
1106 buf[4] = 0;
1107 printk("[%06x] %s\n", i * bmtd.blk_size, buf);
1108 }
1109 break;
1110 case 100:
1111 #if 0
1112 for (i = bmtd.bmt_blk_idx; i < bmtd.total_blks - 1; i++)
1113 bbt_nand_erase(bmtd.bmt_blk_idx);
1114 #endif
1115
1116 bmtd.bmt_blk_idx = bmtd.total_blks - 1;
1117 bbt_nand_erase(bmtd.bmt_blk_idx);
1118 write_bmt(bmtd.bmt_blk_idx, nand_bbt_buf);
1119 break;
1120 default:
1121 break;
1122 }
1123 return 0;
1124 }
1125
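/*
 * Entry point for the flash driver: mtk_bmt_attach(mtd) hooks the mtd ops
 * only when the device-tree node carries "mediatek,bmt-v2" or "mediatek,bbt"
 * and is a no-op otherwise; mtk_bmt_detach() restores the original ops and
 * the original mtd size.
 */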
1126 int mtk_bmt_attach(struct mtd_info *mtd)
1127 {
1128 static const struct mtk_bmt_ops v2_ops = {
1129 .sig = "bmt",
1130 .sig_len = 3,
1131 .init = mtk_bmt_init_v2,
1132 .remap_block = remap_block_v2,
1133 .unmap_block = unmap_block_v2,
1134 .get_mapping_block = get_mapping_block_index_v2,
1135 .debug = mtk_bmt_debug_v2,
1136 };
1137 static const struct mtk_bmt_ops bbt_ops = {
1138 .sig = "mtknand",
1139 .sig_len = 7,
1140 .init = mtk_bmt_init_bbt,
1141 .remap_block = remap_block_bbt,
1142 .unmap_block = unmap_block_bbt,
1143 .get_mapping_block = get_mapping_block_index_bbt,
1144 .debug = mtk_bmt_debug_bbt,
1145 };
1146 struct device_node *np;
1147 int ret = 0;
1148
1149 if (bmtd.mtd)
1150 return -ENOSPC;
1151
1152 np = mtd_get_of_node(mtd);
1153 if (!np)
1154 return 0;
1155
1156 if (of_property_read_bool(np, "mediatek,bmt-v2"))
1157 bmtd.ops = &v2_ops;
1158 else if (of_property_read_bool(np, "mediatek,bbt"))
1159 bmtd.ops = &bbt_ops;
1160 else
1161 return 0;
1162
1163 bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
1164 &bmtd.remap_range_len);
1165 bmtd.remap_range_len /= 8;
1166
1167 bmtd.mtd = mtd;
1168 mtk_bmt_replace_ops(mtd);
1169
1170 bmtd.blk_size = mtd->erasesize;
1171 bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
1172 bmtd.pg_size = mtd->writesize;
1173 bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
1174 bmtd.total_blks = mtd->size >> bmtd.blk_shift;
1175
1176 nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
1177 if (!nand_data_buf) {
1178 pr_info("nand: FATAL ERR: allocate buffer failed!\n");
1179 ret = -ENOMEM;
1180 goto error;
1181 }
1182
1183 memset(nand_data_buf, 0xff, bmtd.pg_size);
1184
1185 ret = bmtd.ops->init(np);
1186 if (ret)
1187 goto error;
1188
1189 mtk_bmt_add_debugfs();
1190 return 0;
1191
1192 error:
1193 mtk_bmt_detach(mtd);
1194 return ret;
1195 }
1196
1197
1198 MODULE_LICENSE("GPL");
1199 MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
1200 MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
1201