commit 2db31fe066bbeba9f4e6569dd2db3fcba78214ca
openwrt/staging/mkresin.git: target/linux/generic/files/drivers/mtd/nand/mtk_bmt.c
1 /*
2 * Copyright (c) 2017 MediaTek Inc.
3 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
4 * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16 #include <linux/slab.h>
17 #include <linux/gfp.h>
18 #include <linux/kernel.h>
19 #include <linux/of.h>
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/partitions.h>
22 #include <linux/mtd/mtk_bmt.h>
23 #include <linux/module.h>
24 #include <linux/debugfs.h>
25 #include <linux/bits.h>
26
27 #define MAIN_SIGNATURE_OFFSET 0
28 #define OOB_SIGNATURE_OFFSET 1
29 #define BBPOOL_RATIO 2
30
31 #define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
32
33 /* Maximum 8k blocks */
34 #define BB_TABLE_MAX bmtd.table_size
35 #define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
36 #define BMT_TBL_DEF_VAL 0x0
37
38 /*
39 * Burner Bad Block Table
40 * --------- Only SLC NAND chips are supported! ----------
41 */
42
43 struct bbbt {
44 char signature[3];
45 /* This version is used to distinguish between the legacy and the new algorithm */
46 #define BBMT_VERSION 2
47 unsigned char version;
48 /* The two tables below are written to SLC */
49 u16 bb_tbl[];
50 };
51
52 struct bbmt {
53 u16 block;
54 #define NO_MAPPED 0
55 #define NORMAL_MAPPED 1
56 #define BMT_MAPPED 2
57 u16 mapped;
58 };
59
60 static struct bmt_desc {
61 struct mtd_info *mtd;
62
63 int (*_read_oob) (struct mtd_info *mtd, loff_t from,
64 struct mtd_oob_ops *ops);
65 int (*_write_oob) (struct mtd_info *mtd, loff_t to,
66 struct mtd_oob_ops *ops);
67 int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
68 int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
69 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
70
71 struct bbbt *bbt;
72
73 struct dentry *debugfs_dir;
74
75 u32 table_size;
76 u32 pg_size;
77 u32 blk_size;
78 u16 pg_shift;
79 u16 blk_shift;
80 /* bbt logical address */
81 u16 pool_lba;
82 /* bbt physical address */
83 u16 pool_pba;
84 /* Maximum count of bad blocks guaranteed by the vendor */
85 u16 bb_max;
86 /* Total blocks of the Nand Chip */
87 u16 total_blks;
88 /* Index n of the entry whose block holds the BMT: bmt_tbl[n].block */
89 u16 bmt_blk_idx;
90 /* How many pages are needed to store 'struct bbbt' */
91 u32 bmt_pgs;
92
93 /* to compensate for driver level remapping */
94 u8 oob_offset;
95 } bmtd = {0};
96
97 static unsigned char *nand_bbt_buf;
98 static unsigned char *nand_data_buf;
99
100 /* -------- Unit conversions -------- */
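/* Convert an erase-block number into the page number of its first page */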
101 static inline u32 blk_pg(u16 block)
102 {
103 return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
104 }
105
106 /* -------- Nand operations wrapper -------- */
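/* Read dat_len bytes of page data plus fdm_len bytes of OOB (FDM) from the given page */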
107 static inline int
108 bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
109 unsigned char *fdm, int fdm_len)
110 {
111 struct mtd_oob_ops ops = {
112 .mode = MTD_OPS_PLACE_OOB,
113 .ooboffs = bmtd.oob_offset,
114 .oobbuf = fdm,
115 .ooblen = fdm_len,
116 .datbuf = dat,
117 .len = dat_len,
118 };
119
120 return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
121 }
122
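/* Erase one physical block, addressed by block number, through the saved lower-level erase op */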
123 static inline int bbt_nand_erase(u16 block)
124 {
125 struct mtd_info *mtd = bmtd.mtd;
126 struct erase_info instr = {
127 .addr = (loff_t)block << bmtd.blk_shift,
128 .len = bmtd.blk_size,
129 };
130
131 return bmtd._erase(mtd, &instr);
132 }
133
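/* Copy a block page by page (data plus OOB) from src_blk to dest_blk, stopping at max_offset */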
134 static inline int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
135 {
136 int pages = bmtd.blk_size >> bmtd.pg_shift;
137 loff_t src = (loff_t)src_blk << bmtd.blk_shift;
138 loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
139 loff_t offset = 0;
140 uint8_t oob[64];
141 int i, ret;
142
143 for (i = 0; i < pages; i++) {
144 struct mtd_oob_ops rd_ops = {
145 .mode = MTD_OPS_PLACE_OOB,
146 .oobbuf = oob,
147 .ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
148 .datbuf = nand_data_buf,
149 .len = bmtd.pg_size,
150 };
151 struct mtd_oob_ops wr_ops = {
152 .mode = MTD_OPS_PLACE_OOB,
153 .oobbuf = oob,
154 .datbuf = nand_data_buf,
155 .len = bmtd.pg_size,
156 };
157
158 if (offset >= max_offset)
159 break;
160
161 ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
162 if (ret < 0 && !mtd_is_bitflip(ret))
163 return ret;
164
165 if (!rd_ops.retlen)
166 break;
167
168 wr_ops.ooblen = rd_ops.oobretlen;
169 ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
170 if (ret < 0)
171 return ret;
172 
173 offset += rd_ops.retlen;
174 }
175
176 return 0;
177 }
178
179 /* -------- Bad Blocks Management -------- */
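/* The bmt_tbl[] entries follow bb_tbl[table_size] in the same 'struct bbbt' buffer */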
180 static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
181 {
182 return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
183 }
184
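/* Read the on-flash BMT (bmt_pgs pages of data plus FDM) starting at the given block */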
185 static int
186 read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
187 {
188 u32 len = bmtd.bmt_pgs << bmtd.pg_shift;
189
190 return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
191 }
192
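/* Write the BMT image to a block and place the "bmt" signature at the OOB signature offset */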
193 static int write_bmt(u16 block, unsigned char *dat)
194 {
195 struct mtd_oob_ops ops = {
196 .mode = MTD_OPS_PLACE_OOB,
197 .ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
198 .oobbuf = "bmt",
199 .ooblen = 3,
200 .datbuf = dat,
201 .len = bmtd.bmt_pgs << bmtd.pg_shift,
202 };
203 loff_t addr = (loff_t)block << bmtd.blk_shift;
204
205 return bmtd._write_oob(bmtd.mtd, addr, &ops);
206 }
207
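/* Starting at 'block', return the first block whose FDM byte marks it as good; 0 if none can be found */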
208 static u16 find_valid_block(u16 block)
209 {
210 u8 fdm[4];
211 int ret;
212 int loop = 0;
213
214 retry:
215 if (block >= bmtd.total_blks)
216 return 0;
217
218 ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
219 fdm, sizeof(fdm));
220 /* Read the 1st byte of FDM to judge whether it's a bad
221 * block or not
222 */
223 if (ret || fdm[0] != 0xff) {
224 pr_info("nand: found bad block 0x%x\n", block);
225 if (loop >= bmtd.bb_max) {
226 pr_info("nand: FATAL ERR: too many bad blocks!!\n");
227 return 0;
228 }
229
230 loop++;
231 block++;
232 goto retry;
233 }
234
235 return block;
236 }
237
238 /* Find out all bad blocks, and fill in the mapping table */
239 static int scan_bad_blocks(struct bbbt *bbt)
240 {
241 int i;
242 u16 block = 0;
243
244 /* On first-time download, block 0 MUST NOT be a bad block;
245 * this is guaranteed by the vendor
246 */
247 bbt->bb_tbl[0] = 0;
248
249 /*
250 * Construct the mapping table of Normal data area(non-PMT/BMTPOOL)
251 * G - Good block; B - Bad block
252 * ---------------------------
253 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
254 * ---------------------------
255 * What bb_tbl[i] looks like:
256 * physical block(i):
257 * 0 1 2 3 4 5 6 7 8 9 a b c
258 * mapped block(bb_tbl[i]):
259 * 0 1 3 6 7 8 9 b ......
260 * ATTENTION:
261 * If a new bad block(n) occurs, search bmt_tbl to find
262 * an available block(x), and fill in bb_tbl[n] = x;
263 */
264 for (i = 1; i < bmtd.pool_lba; i++) {
265 bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
266 BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
267 if (bbt->bb_tbl[i] == 0)
268 return -1;
269 }
270
271 /* Physical Block start Address of BMT pool */
272 bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
273 if (bmtd.pool_pba >= bmtd.total_blks - 2) {
274 pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
275 return -1;
276 }
277
278 BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
279 i = 0;
280 block = bmtd.pool_pba;
281 /*
282 * The bmt table is used for runtime bad block mapping
283 * G - Good block; B - Bad block
284 * ---------------------------
285 * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
286 * ---------------------------
287 * block: 0 1 2 3 4 5 6 7 8 9 a b c
288 * What bmt_tbl[i] looks like in initial state:
289 * i:
290 * 0 1 2 3 4 5 6 7
291 * bmt_tbl[i].block:
292 * 0 1 3 6 7 8 9 b
293 * bmt_tbl[i].mapped:
294 * N N N N N N N B
295 * N - Not mapped(Available)
296 * M - Mapped
297 * B - BMT
298 * ATTENTION:
299 * The BMT always lives in the last valid block of the pool
300 */
301 while ((block = find_valid_block(block)) != 0) {
302 bmt_tbl(bbt)[i].block = block;
303 bmt_tbl(bbt)[i].mapped = NO_MAPPED;
304 BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
305 block++;
306 i++;
307 }
308
309 /* i - How many available blocks in pool, which is the length of bmt_tbl[]
310 * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
311 */
312 if (i < 1) {
313 pr_info("nand: FATAL ERR: no space to store BMT!!\n");
314 return -1;
315 }
316 
317 bmtd.bmt_blk_idx = i - 1;
318 bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
319
320 pr_info("[BBT] %d available blocks in BMT pool\n", i);
321
322 return 0;
323 }
324
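/* Validate a BMT candidate: check the "BMT"/"bmt" signatures and the version byte */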
325 static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
326 {
327 struct bbbt *bbt = (struct bbbt *)buf;
328 u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET;
329
330
331 if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
332 memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
333 if (bbt->version == BBMT_VERSION)
334 return true;
335 }
336 BBT_LOG("[BBT] BMT version mismatch, please upgrade preloader and uboot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
337 sig[0], sig[1], sig[2],
338 fdm[1], fdm[2], fdm[3]);
339 return false;
340 }
341
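/* Return the index of the bmt_tbl[] entry flagged BMT_MAPPED, i.e. the block holding the BMT itself */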
342 static u16 get_bmt_index(struct bbmt *bmt)
343 {
344 int i = 0;
345
346 while (bmt[i].block != BMT_TBL_DEF_VAL) {
347 if (bmt[i].mapped == BMT_MAPPED)
348 return i;
349 i++;
350 }
351 return 0;
352 }
353
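/* Scan backwards from 'block' down to pool_lba, looking for a valid on-flash BMT */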
354 static struct bbbt *scan_bmt(u16 block)
355 {
356 u8 fdm[4];
357
358 if (block < bmtd.pool_lba)
359 return NULL;
360
361 if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
362 return scan_bmt(block - 1);
363
364 if (is_valid_bmt(nand_bbt_buf, fdm)) {
365 bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf));
366 if (bmtd.bmt_blk_idx == 0) {
367 pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
368 return NULL;
369 }
370 pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
371 return (struct bbbt *)nand_bbt_buf;
372 } else
373 return scan_bmt(block - 1);
374 }
375
376 /* Write the Burner Bad Block Table to NAND flash
377 * n - write the BMT to block bmt_tbl[n].block
378 */
379 static u16 upload_bmt(struct bbbt *bbt, int n)
380 {
381 u16 block;
382
383 retry:
384 if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
385 pr_info("nand: FATAL ERR: no space to store BMT!\n");
386 return (u16)-1;
387 }
388
389 block = bmt_tbl(bbt)[n].block;
390 BBT_LOG("n = 0x%x, block = 0x%x", n, block);
391 if (bbt_nand_erase(block)) {
392 bmt_tbl(bbt)[n].block = 0;
393 /* erase failed, try the previous block: bmt_tbl[n - 1].block */
394 n--;
395 goto retry;
396 }
397
398 /* The main signature offset is fixed at 0,
399 * the OOB signature offset is fixed at 1
400 */
401 memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
402 bbt->version = BBMT_VERSION;
403
404 if (write_bmt(block, (unsigned char *)bbt)) {
405 bmt_tbl(bbt)[n].block = 0;
406
407 /* write failed, try the previous block in bmt_tbl[n - 1] */
408 n--;
409 goto retry;
410 }
411
412 /* Return the current index(n) of BMT pool (bmt_tbl[n]) */
413 return n;
414 }
415
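/* Take the next unmapped block from the BMT pool and mark it NORMAL_MAPPED */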
416 static u16 find_valid_block_in_pool(struct bbbt *bbt)
417 {
418 int i;
419
420 if (bmtd.bmt_blk_idx == 0)
421 goto error;
422
423 for (i = 0; i < bmtd.bmt_blk_idx; i++) {
424 if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) {
425 bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED;
426 return bmt_tbl(bbt)[i].block;
427 }
428 }
429
430 error:
431 pr_info("nand: FATAL ERR: BMT pool has run out!\n");
432 return 0;
433 }
434
435 /* We hit a bad block: mark it as bad and map it to a valid block in the pool.
436 * If it was a write failure, we need to copy the data to the mapped block.
437 */
438 static bool update_bmt(u16 block, int copy_len)
439 {
440 u16 mapped_blk;
441 struct bbbt *bbt;
442
443 bbt = bmtd.bbt;
444 mapped_blk = find_valid_block_in_pool(bbt);
445 if (mapped_blk == 0)
446 return false;
447
448 /* Map new bad block to available block in pool */
449 bbt->bb_tbl[block] = mapped_blk;
450
451 /* Erase new block */
452 bbt_nand_erase(mapped_blk);
453 if (copy_len > 0)
454 bbt_nand_copy(mapped_blk, block, copy_len);
455
456 bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
457
458 return true;
459 }
460
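/* Translate a logical block number into the physical block it is currently mapped to */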
461 u16 get_mapping_block_index(int block)
462 {
463 int mapping_block;
464
465 if (block < bmtd.pool_lba)
466 mapping_block = bmtd.bbt->bb_tbl[block];
467 else
468 mapping_block = block;
469 BBT_LOG("0x%x mapped to 0x%x", block, mapping_block);
470
471 return mapping_block;
472 }
473
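/* mtd->_read_oob replacement: remap each block and retry through update_bmt() on uncorrectable read errors */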
474 static int
475 mtk_bmt_read(struct mtd_info *mtd, loff_t from,
476 struct mtd_oob_ops *ops)
477 {
478 struct mtd_oob_ops cur_ops = *ops;
479 int retry_count = 0;
480 loff_t cur_from;
481 int ret = 0;
482
483 ops->retlen = 0;
484 ops->oobretlen = 0;
485
486 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
487 int cur_ret;
488
489 u32 offset = from & (bmtd.blk_size - 1);
490 u32 block = from >> bmtd.blk_shift;
491 u32 cur_block;
492
493 cur_block = get_mapping_block_index(block);
494 cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
495
496 cur_ops.oobretlen = 0;
497 cur_ops.retlen = 0;
498 cur_ops.len = min_t(u32, mtd->erasesize - offset,
499 ops->len - ops->retlen);
500 cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
501 if (cur_ret < 0)
502 ret = cur_ret;
503 if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
504 update_bmt(block, mtd->erasesize);
505 if (retry_count++ < 10)
506 continue;
507
508 return ret;
509 }
510
511 ops->retlen += cur_ops.retlen;
512 ops->oobretlen += cur_ops.oobretlen;
513
514 cur_ops.ooboffs = 0;
515 cur_ops.datbuf += cur_ops.retlen;
516 cur_ops.oobbuf += cur_ops.oobretlen;
517 cur_ops.ooblen -= cur_ops.oobretlen;
518
519 if (!cur_ops.len)
520 cur_ops.len = mtd->erasesize - offset;
521
522 from += cur_ops.len;
523 retry_count = 0;
524 }
525
526 return ret;
527 }
528
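/* mtd->_write_oob replacement: remap each block and move to a spare pool block on write failure */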
529 static int
530 mtk_bmt_write(struct mtd_info *mtd, loff_t to,
531 struct mtd_oob_ops *ops)
532 {
533 struct mtd_oob_ops cur_ops = *ops;
534 int retry_count = 0;
535 loff_t cur_to;
536 int ret;
537
538 ops->retlen = 0;
539 ops->oobretlen = 0;
540
541 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
542 u32 offset = to & (bmtd.blk_size - 1);
543 u32 block = to >> bmtd.blk_shift;
544 u32 cur_block;
545
546 cur_block = get_mapping_block_index(block);
547 cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
548
549 cur_ops.oobretlen = 0;
550 cur_ops.retlen = 0;
551 cur_ops.len = min_t(u32, bmtd.blk_size - offset,
552 ops->len - ops->retlen);
553 ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
554 if (ret < 0) {
555 update_bmt(block, offset);
556 if (retry_count++ < 10)
557 continue;
558
559 return ret;
560 }
561
562 ops->retlen += cur_ops.retlen;
563 ops->oobretlen += cur_ops.oobretlen;
564
565 cur_ops.ooboffs = 0;
566 cur_ops.datbuf += cur_ops.retlen;
567 cur_ops.oobbuf += cur_ops.oobretlen;
568 cur_ops.ooblen -= cur_ops.oobretlen;
569
570 if (!cur_ops.len)
571 cur_ops.len = mtd->erasesize - offset;
572
573 to += cur_ops.len;
574 retry_count = 0;
575 }
576
577 return 0;
578 }
579
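/* mtd->_erase replacement: erase block by block through the mapping, remapping on erase failure */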
580 static int
581 mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
582 {
583 struct erase_info mapped_instr = {
584 .len = bmtd.blk_size,
585 };
586 int retry_count = 0;
587 u64 start_addr, end_addr;
588 int ret = 0;
589 u16 orig_block, block;
590
591 start_addr = instr->addr & (~mtd->erasesize_mask);
592 end_addr = instr->addr + instr->len;
593
594 while (start_addr < end_addr) {
595 orig_block = start_addr >> bmtd.blk_shift;
596 block = get_mapping_block_index(orig_block);
597 mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
598 ret = bmtd._erase(mtd, &mapped_instr);
599 if (ret) {
600 update_bmt(orig_block, 0);
601 if (retry_count++ < 10)
602 continue;
603 instr->fail_addr = start_addr;
604 break;
605 }
606 start_addr += mtd->erasesize;
607 retry_count = 0;
608 }
609
610 return ret;
611 }
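/* mtd->_block_isbad replacement: check the mapped physical block, remapping if it reports bad */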
612 static int
613 mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
614 {
615 int retry_count = 0;
616 u16 orig_block = ofs >> bmtd.blk_shift;
617 u16 block;
618 int ret;
619
620 retry:
621 block = get_mapping_block_index(orig_block);
622 ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
623 if (ret) {
624 update_bmt(orig_block, bmtd.blk_size);
625 if (retry_count++ < 10)
626 goto retry;
627 }
628 return ret;
629 }
630
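/* mtd->_block_markbad replacement: remap the logical block to a spare, then mark the old physical block bad */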
631 static int
632 mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
633 {
634 u16 orig_block = ofs >> bmtd.blk_shift;
635 u16 block = get_mapping_block_index(orig_block);
636 update_bmt(orig_block, bmtd.blk_size);
637 return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
638 }
639
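/* Save the original mtd callbacks and install the BMT-aware wrappers */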
640 static void
641 mtk_bmt_replace_ops(struct mtd_info *mtd)
642 {
643 bmtd._read_oob = mtd->_read_oob;
644 bmtd._write_oob = mtd->_write_oob;
645 bmtd._erase = mtd->_erase;
646 bmtd._block_isbad = mtd->_block_isbad;
647 bmtd._block_markbad = mtd->_block_markbad;
648
649 mtd->_read_oob = mtk_bmt_read;
650 mtd->_write_oob = mtk_bmt_write;
651 mtd->_erase = mtk_bmt_mtd_erase;
652 mtd->_block_isbad = mtk_bmt_block_isbad;
653 mtd->_block_markbad = mtk_bmt_block_markbad;
654 }
655
656 static int mtk_bmt_debug_mark_good(void *data, u64 val)
657 {
658 u32 block = val >> bmtd.blk_shift;
659
660 bmtd.bbt->bb_tbl[block] = block;
661 bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
662
663 return 0;
664 }
665
666 static int mtk_bmt_debug_mark_bad(void *data, u64 val)
667 {
668 u32 block = val >> bmtd.blk_shift;
669
670 update_bmt(block, bmtd.blk_size);
671
672 return 0;
673 }
674
675 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
676 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
677
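/* Expose mark_good/mark_bad controls in debugfs under the mtk-bmt directory */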
678 static void
679 mtk_bmt_add_debugfs(void)
680 {
681 struct dentry *dir;
682
683 dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
684 if (!dir)
685 return;
686
687 debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
688 debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
689 }
690
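/* Undo mtk_bmt_attach(): restore the original mtd callbacks and size, free the buffers, clear the state */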
691 void mtk_bmt_detach(struct mtd_info *mtd)
692 {
693 if (bmtd.mtd != mtd)
694 return;
695
696 if (bmtd.debugfs_dir)
697 debugfs_remove_recursive(bmtd.debugfs_dir);
698 bmtd.debugfs_dir = NULL;
699
700 kfree(nand_bbt_buf);
701 kfree(nand_data_buf);
702
703 mtd->_read_oob = bmtd._read_oob;
704 mtd->_write_oob = bmtd._write_oob;
705 mtd->_erase = bmtd._erase;
706 mtd->_block_isbad = bmtd._block_isbad;
707 mtd->_block_markbad = bmtd._block_markbad;
708 mtd->size = bmtd.total_blks << bmtd.blk_shift;
709
710 memset(&bmtd, 0, sizeof(bmtd));
711 }
712
713 /* Attach the BMT layer to an MTD device; the total block count is derived from mtd->size */
714 int mtk_bmt_attach(struct mtd_info *mtd)
715 {
716 struct device_node *np;
717 struct bbbt *bbt;
718 u32 bufsz;
719 u32 block;
720 u16 total_blocks, pmt_block;
721 int ret = 0;
722 u32 bmt_pool_size, bmt_table_size;
723
724 if (bmtd.mtd)
725 return -ENOSPC;
726
727 np = mtd_get_of_node(mtd);
728 if (!np)
729 return 0;
730
731 if (!of_property_read_bool(np, "mediatek,bmt-v2"))
732 return 0;
733
734 if (of_property_read_u32(np, "mediatek,bmt-pool-size",
735 &bmt_pool_size) != 0)
736 bmt_pool_size = 80;
737
738 if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
739 &bmtd.oob_offset) != 0)
740 bmtd.oob_offset = 0;
741
742 if (of_property_read_u32(np, "mediatek,bmt-table-size",
743 &bmt_table_size) != 0)
744 bmt_table_size = 0x2000U;
745
746 bmtd.mtd = mtd;
747 mtk_bmt_replace_ops(mtd);
748
749 bmtd.table_size = bmt_table_size;
750 bmtd.blk_size = mtd->erasesize;
751 bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
752 bmtd.pg_size = mtd->writesize;
753 bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
754 total_blocks = mtd->size >> bmtd.blk_shift;
755 pmt_block = total_blocks - bmt_pool_size - 2;
756
757 mtd->size = pmt_block << bmtd.blk_shift;
758
759 /*
760 * ---------------------------------------
761 * | PMT(2blks) | BMT POOL(bmt_pool_size blks) |
762 * ---------------------------------------
763 * ^ ^
764 * | |
765 * pmt_block pmt_block + 2blocks(pool_lba)
766 *
767 * ATTENTION!!!!!!
768 * Blocks ahead of the boundary block are mapped via bb_tbl,
769 * blocks behind it are managed in bmt_tbl
770 */
771
772 bmtd.pool_lba = (u16)(pmt_block + 2);
773 bmtd.total_blks = total_blocks;
774 bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
775
776 /* The two buffers we need */
777 bufsz = round_up(sizeof(struct bbbt) +
778 bmt_table_size * sizeof(struct bbmt), bmtd.pg_size);
779 bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
780
781 nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
782 nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
783
784 if (!nand_bbt_buf || !nand_data_buf) {
785 pr_info("nand: FATAL ERR: allocate buffer failed!\n");
786 ret = -1;
787 goto error;
788 }
789
790 memset(nand_bbt_buf, 0xff, bufsz);
791 memset(nand_data_buf, 0xff, bmtd.pg_size);
792
793 BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)",
794 nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size);
795 BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x",
796 bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max);
797
798 /* Scanning starts from the first page of the last block
799 * of the whole flash
800 */
801 bbt = scan_bmt(bmtd.total_blks - 1);
802 if (!bbt) {
803 /* BMT not found */
804 if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
805 pr_info("nand: FATAL: Too many blocks, can not support!\n");
806 ret = -1;
807 goto error;
808 }
809
810 bbt = (struct bbbt *)nand_bbt_buf;
811 memset(bmt_tbl(bbt), BMT_TBL_DEF_VAL, bmtd.table_size * sizeof(struct bbmt));
812
813 if (scan_bad_blocks(bbt)) {
814 ret = -1;
815 goto error;
816 }
817
818 /* The BMT always lives in the last valid block of the pool */
819 bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
820 block = bmt_tbl(bbt)[bmtd.bmt_blk_idx].block;
821 pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
822
823 if (bmtd.bmt_blk_idx == 0)
824 pr_info("nand: Warning: no available block in BMT pool!\n");
825 else if (bmtd.bmt_blk_idx == (u16)-1) {
826 ret = -1;
827 goto error;
828 }
829 }
830 mtk_bmt_add_debugfs();
831
832 bmtd.bbt = bbt;
833 return 0;
834
835 error:
836 mtk_bmt_detach(mtd);
837 return ret;
838 }
839
840
841 MODULE_LICENSE("GPL");
842 MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
843 MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
844