mediatek: mt7622: add Linux 5.10 support
[openwrt/staging/rmilecki.git] target/linux/mediatek/patches-5.10/330-mtk-bmt-support.patch
1 --- a/drivers/mtd/nand/Kconfig
2 +++ b/drivers/mtd/nand/Kconfig
3 @@ -15,6 +15,10 @@ config MTD_NAND_ECC
4 bool
5 depends on MTD_NAND_CORE
6
7 +config MTD_NAND_MTK_BMT
8 + bool "Support MediaTek NAND Bad-block Management Table"
9 + default n
10 +
11 endmenu
12
13 endmenu
14 --- a/drivers/mtd/nand/Makefile
15 +++ b/drivers/mtd/nand/Makefile
16 @@ -2,6 +2,7 @@
17
18 nandcore-objs := core.o bbt.o
19 obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
20 +obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o
21
22 obj-y += onenand/
23 obj-y += raw/
24 --- /dev/null
25 +++ b/drivers/mtd/nand/mtk_bmt.c
26 @@ -0,0 +1,766 @@
27 +/*
28 + * Copyright (c) 2017 MediaTek Inc.
29 + * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
30 + * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name>
31 + *
32 + * This program is free software; you can redistribute it and/or modify
33 + * it under the terms of the GNU General Public License version 2 as
34 + * published by the Free Software Foundation.
35 + *
36 + * This program is distributed in the hope that it will be useful,
37 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
38 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
39 + * GNU General Public License for more details.
40 + */
41 +
42 +#include <linux/slab.h>
43 +#include <linux/gfp.h>
44 +#include <linux/kernel.h>
45 +#include <linux/of.h>
46 +#include <linux/mtd/nand.h>
47 +#include <linux/mtd/partitions.h>
48 +#include <linux/mtd/mtk_bmt.h>
49 +#include <linux/module.h>
50 +#include <linux/debugfs.h>
51 +
52 +#define MAIN_SIGNATURE_OFFSET 0
53 +#define OOB_SIGNATURE_OFFSET 1
54 +#define BBPOOL_RATIO 2
55 +
56 +#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
57 +
58 +/* Maximum 8k blocks */
59 +#define BB_TABLE_MAX 0x2000U
60 +#define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100)
61 +#define BMT_TBL_DEF_VAL 0x0
62 +
63 +/*
64 + * Burner Bad Block Table
65 + * --------- Only SLC NAND chips are supported ----------
66 + */
67 +
68 +struct bbbt {
69 + char signature[3];
70 + /* This version is used to distinguish between the legacy and the new algorithm */
71 +#define BBMT_VERSION 2
72 + unsigned char version;
73 + /* The two tables below are written to SLC */
74 + u16 bb_tbl[BB_TABLE_MAX];
75 + struct bbmt {
76 + u16 block;
77 +#define NO_MAPPED 0
78 +#define NORMAL_MAPPED 1
79 +#define BMT_MAPPED 2
80 + u16 mapped;
81 + } bmt_tbl[BMT_TABLE_MAX];
82 +};
83 +
84 +static struct bmt_desc {
85 + struct mtd_info *mtd;
86 +
87 + int (*_read_oob) (struct mtd_info *mtd, loff_t from,
88 + struct mtd_oob_ops *ops);
89 + int (*_write_oob) (struct mtd_info *mtd, loff_t to,
90 + struct mtd_oob_ops *ops);
91 + const struct nand_ops *nand_ops;
92 +
93 + struct bbbt *bbt;
94 +
95 + struct dentry *debugfs_dir;
96 +
97 + u32 pg_size;
98 + u32 blk_size;
99 + u16 pg_shift;
100 + u16 blk_shift;
101 + /* Logical block address where the BMT pool starts */
102 + u16 pool_lba;
103 + /* Physical block address where the BMT pool starts */
104 + u16 pool_pba;
105 + /* Maximum number of bad blocks guaranteed by the vendor */
106 + u16 bb_max;
107 + /* Total number of blocks on the NAND chip */
108 + u16 total_blks;
109 + /* The BMT is stored in block bmt_tbl[bmt_blk_idx].block */
110 + u16 bmt_blk_idx;
111 + /* Number of pages needed to store 'struct bbbt' */
112 + u32 bmt_pgs;
113 +
114 + /* to compensate for driver level remapping */
115 + u8 oob_offset;
116 +} bmtd = {0};
117 +
118 +static unsigned char *nand_bbt_buf;
119 +static unsigned char *nand_data_buf;
120 +
121 +/* -------- Unit conversions -------- */
122 +static inline u32 blk_pg(u16 block)
123 +{
124 + return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift));
125 +}
126 +
127 +/* -------- Nand operations wrapper -------- */
128 +static inline int
129 +bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
130 + unsigned char *fdm, int fdm_len)
131 +{
132 + struct mtd_oob_ops ops = {
133 + .mode = MTD_OPS_PLACE_OOB,
134 + .ooboffs = bmtd.oob_offset,
135 + .oobbuf = fdm,
136 + .ooblen = fdm_len,
137 + .datbuf = dat,
138 + .len = dat_len,
139 + };
140 +
141 + return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
142 +}
143 +
144 +static inline int bbt_nand_erase(u16 block)
145 +{
146 + struct nand_device *nand = mtd_to_nanddev(bmtd.mtd);
147 + loff_t addr = (loff_t)block << bmtd.blk_shift;
148 + struct nand_pos pos;
149 +
150 + nanddev_offs_to_pos(nand, addr, &pos);
151 + return bmtd.nand_ops->erase(nand, &pos);
152 +}
153 +
154 +/* -------- Bad Blocks Management -------- */
155 +static int
156 +read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
157 +{
158 + u32 len = bmtd.bmt_pgs << bmtd.pg_shift;
159 +
160 + return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
161 +}
162 +
163 +static int write_bmt(u16 block, unsigned char *dat)
164 +{
165 + struct mtd_oob_ops ops = {
166 + .mode = MTD_OPS_PLACE_OOB,
167 + .ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
168 + .oobbuf = "bmt",
169 + .ooblen = 3,
170 + .datbuf = dat,
171 + .len = bmtd.bmt_pgs << bmtd.pg_shift,
172 + };
173 + loff_t addr = (loff_t)block << bmtd.blk_shift;
174 +
175 + return bmtd._write_oob(bmtd.mtd, addr, &ops);
176 +}
177 +
178 +static u16 find_valid_block(u16 block)
179 +{
180 + u8 fdm[4];
181 + int ret;
182 + int loop = 0;
183 +
184 +retry:
185 + if (block >= bmtd.total_blks)
186 + return 0;
187 +
188 + ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
189 + fdm, sizeof(fdm));
190 + /* Read the 1st byte of FDM to judge whether it's a bad
191 + * block or not
192 + */
193 + if (ret || fdm[0] != 0xff) {
194 + pr_info("nand: found bad block 0x%x\n", block);
195 + if (loop >= bmtd.bb_max) {
196 + pr_info("nand: FATAL ERR: too many bad blocks!!\n");
197 + return 0;
198 + }
199 +
200 + loop++;
201 + block++;
202 + goto retry;
203 + }
204 +
205 + return block;
206 +}
207 +
208 +/* Find out all bad blocks, and fill in the mapping table */
209 +static int scan_bad_blocks(struct bbbt *bbt)
210 +{
211 + int i;
212 + u16 block = 0;
213 +
214 + /* On first-time download, block 0 MUST NOT be a bad block;
215 + * this is guaranteed by the vendor
216 + */
217 + bbt->bb_tbl[0] = 0;
218 +
219 + /*
220 + * Construct the mapping table of Normal data area(non-PMT/BMTPOOL)
221 + * G - Good block; B - Bad block
222 + * ---------------------------
223 + * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
224 + * ---------------------------
225 + * What bb_tbl[i] looks like:
226 + * physical block(i):
227 + * 0 1 2 3 4 5 6 7 8 9 a b c
228 + * mapped block(bb_tbl[i]):
229 + * 0 1 3 6 7 8 9 b ......
230 + * ATTENTION:
231 + * If a new bad block (n) occurs, search bmt_tbl to find
232 + * an available block (x), and fill in bb_tbl[n] = x;
233 + */
234 + for (i = 1; i < bmtd.pool_lba; i++) {
235 + bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1);
236 + BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]);
237 + if (bbt->bb_tbl[i] == 0)
238 + return -1;
239 + }
240 +
241 + /* Physical Block start Address of BMT pool */
242 + bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1;
243 + if (bmtd.pool_pba >= bmtd.total_blks - 2) {
244 + pr_info("nand: FATAL ERR: Too many bad blocks!!\n");
245 + return -1;
246 + }
247 +
248 + BBT_LOG("pool_pba=0x%x", bmtd.pool_pba);
249 + i = 0;
250 + block = bmtd.pool_pba;
251 + /*
252 + * The bmt table is used for runtime bad block mapping
253 + * G - Good block; B - Bad block
254 + * ---------------------------
255 + * physical |G|G|B|G|B|B|G|G|G|G|B|G|B|
256 + * ---------------------------
257 + * block: 0 1 2 3 4 5 6 7 8 9 a b c
258 + * What bmt_tbl[i] looks like in initial state:
259 + * i:
260 + * 0 1 2 3 4 5 6 7
261 + * bmt_tbl[i].block:
262 + * 0 1 3 6 7 8 9 b
263 + * bmt_tbl[i].mapped:
264 + * N N N N N N N B
265 + * N - Not mapped(Available)
266 + * M - Mapped
267 + * B - BMT
268 + * ATTENTION:
269 + * The BMT always lives in the last valid block of the pool
270 + */
271 + while ((block = find_valid_block(block)) != 0) {
272 + bbt->bmt_tbl[i].block = block;
273 + bbt->bmt_tbl[i].mapped = NO_MAPPED;
274 + BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block);
275 + block++;
276 + i++;
277 + }
278 +
279 + /* i - the number of available blocks in the pool, i.e. the used length of bmt_tbl[]
280 + * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block
281 + */
282 + if (i < 1) {
283 + pr_info("nand: FATAL ERR: no space to store BMT!!\n");
284 + return -1;
285 + }
286 +
287 + bmtd.bmt_blk_idx = i - 1;
288 + bbt->bmt_tbl[bmtd.bmt_blk_idx].mapped = BMT_MAPPED;
289 +
290 + pr_info("[BBT] %d available blocks in BMT pool\n", i);
291 +
292 + return 0;
293 +}
294 +
295 +static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
296 +{
297 + struct bbbt *bbt = (struct bbbt *)buf;
298 + u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET;
299 +
300 +
301 + if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
302 + memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
303 + if (bbt->version == BBMT_VERSION)
304 + return true;
305 + }
306 + BBT_LOG("[BBT] BMT version mismatch, please upgrade preloader and uboot! sig=%02x%02x%02x, fdm=%02x%02x%02x",
307 + sig[0], sig[1], sig[2],
308 + fdm[1], fdm[2], fdm[3]);
309 + return false;
310 +}
311 +
312 +static u16 get_bmt_index(struct bbmt *bmt)
313 +{
314 + int i = 0;
315 +
316 + while (bmt[i].block != BMT_TBL_DEF_VAL) {
317 + if (bmt[i].mapped == BMT_MAPPED)
318 + return i;
319 + i++;
320 + }
321 + return 0;
322 +}
323 +
324 +static struct bbbt *scan_bmt(u16 block)
325 +{
326 + u8 fdm[4];
327 +
328 + if (block < bmtd.pool_lba)
329 + return NULL;
330 +
331 + if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm)))
332 + return scan_bmt(block - 1);
333 +
334 + if (is_valid_bmt(nand_bbt_buf, fdm)) {
335 + bmtd.bmt_blk_idx = get_bmt_index(((struct bbbt *)nand_bbt_buf)->bmt_tbl);
336 + if (bmtd.bmt_blk_idx == 0) {
337 + pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n");
338 + return NULL;
339 + }
340 + pr_info("[BBT] BMT.v2 is found at 0x%x\n", block);
341 + return (struct bbbt *)nand_bbt_buf;
342 + } else
343 + return scan_bmt(block - 1);
344 +}
345 +
346 +/* Write the Burner Bad Block Table to Nand Flash
347 + * n - write BMT to bmt_tbl[n]
348 + */
349 +static u16 upload_bmt(struct bbbt *bbt, int n)
350 +{
351 + u16 block;
352 +
353 +retry:
354 + if (n < 0 || bbt->bmt_tbl[n].mapped == NORMAL_MAPPED) {
355 + pr_info("nand: FATAL ERR: no space to store BMT!\n");
356 + return (u16)-1;
357 + }
358 +
359 + block = bbt->bmt_tbl[n].block;
360 + BBT_LOG("n = 0x%x, block = 0x%x", n, block);
361 + if (bbt_nand_erase(block)) {
362 + bbt->bmt_tbl[n].block = 0;
363 + /* erase failed, try the previous block: bmt_tbl[n - 1].block */
364 + n--;
365 + goto retry;
366 + }
367 +
368 + /* The main signature offset is fixed at 0,
369 + * the OOB signature offset is fixed at 1
370 + */
371 + memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
372 + bbt->version = BBMT_VERSION;
373 +
374 + if (write_bmt(block, (unsigned char *)bbt)) {
375 + bbt->bmt_tbl[n].block = 0;
376 +
377 + /* write failed, try the previous block in bmt_tbl[n - 1] */
378 + n--;
379 + goto retry;
380 + }
381 +
382 + /* Return the current index(n) of BMT pool (bmt_tbl[n]) */
383 + return n;
384 +}
385 +
386 +static u16 find_valid_block_in_pool(struct bbbt *bbt)
387 +{
388 + int i;
389 +
390 + if (bmtd.bmt_blk_idx == 0)
391 + goto error;
392 +
393 + for (i = 0; i < bmtd.bmt_blk_idx; i++) {
394 + if (bbt->bmt_tbl[i].block != 0 && bbt->bmt_tbl[i].mapped == NO_MAPPED) {
395 + bbt->bmt_tbl[i].mapped = NORMAL_MAPPED;
396 + return bbt->bmt_tbl[i].block;
397 + }
398 + }
399 +
400 +error:
401 + pr_info("nand: FATAL ERR: BMT pool is exhausted!\n");
402 + return 0;
403 +}
404 +
405 +/* When we hit a bad block, mark it as bad and map it to a valid block in the pool;
406 + * on a write failure the data still has to be written to the mapped block
407 + */
408 +static bool update_bmt(u16 block)
409 +{
410 + u16 mapped_blk;
411 + struct bbbt *bbt;
412 +
413 + bbt = bmtd.bbt;
414 + mapped_blk = find_valid_block_in_pool(bbt);
415 + if (mapped_blk == 0)
416 + return false;
417 +
418 + /* Map new bad block to available block in pool */
419 + bbt->bb_tbl[block] = mapped_blk;
420 + bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
421 +
422 + return true;
423 +}
424 +
425 +u16 get_mapping_block_index(int block)
426 +{
427 + int mapping_block;
428 +
429 + if (block < bmtd.pool_lba)
430 + mapping_block = bmtd.bbt->bb_tbl[block];
431 + else
432 + mapping_block = block;
433 + BBT_LOG("0x%x mapped to 0x%x", block, mapping_block);
434 +
435 + return mapping_block;
436 +}
437 +
438 +static int
439 +mtk_bmt_read(struct mtd_info *mtd, loff_t from,
440 + struct mtd_oob_ops *ops)
441 +{
442 + struct mtd_oob_ops cur_ops = *ops;
443 + int retry_count = 0;
444 + loff_t cur_from;
445 + int ret;
446 +
447 + ops->retlen = 0;
448 + ops->oobretlen = 0;
449 +
450 + while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
451 + u32 offset = from & (bmtd.blk_size - 1);
452 + u32 block = from >> bmtd.blk_shift;
453 + u32 cur_block;
454 +
455 + cur_block = get_mapping_block_index(block);
456 + cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
457 +
458 + cur_ops.oobretlen = 0;
459 + cur_ops.retlen = 0;
460 + cur_ops.len = min_t(u32, mtd->erasesize - offset,
461 + ops->len - ops->retlen);
462 + ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
463 + if (ret < 0) {
464 + update_bmt(block);
465 + if (retry_count++ < 10)
466 + continue;
467 +
468 + return ret;
469 + }
470 +
471 + ops->retlen += cur_ops.retlen;
472 + ops->oobretlen += cur_ops.oobretlen;
473 +
474 + cur_ops.datbuf += cur_ops.retlen;
475 + cur_ops.oobbuf += cur_ops.oobretlen;
476 + cur_ops.ooblen -= cur_ops.oobretlen;
477 +
478 + if (!cur_ops.len)
479 + cur_ops.len = mtd->erasesize - offset;
480 +
481 + from += cur_ops.len;
482 + retry_count = 0;
483 + }
484 +
485 + return 0;
486 +}
487 +
488 +static int
489 +mtk_bmt_write(struct mtd_info *mtd, loff_t to,
490 + struct mtd_oob_ops *ops)
491 +{
492 + struct mtd_oob_ops cur_ops = *ops;
493 + int retry_count = 0;
494 + loff_t cur_to;
495 + int ret;
496 +
497 + ops->retlen = 0;
498 + ops->oobretlen = 0;
499 +
500 + while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
501 + u32 offset = to & (bmtd.blk_size - 1);
502 + u32 block = to >> bmtd.blk_shift;
503 + u32 cur_block;
504 +
505 + cur_block = get_mapping_block_index(block);
506 + cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
507 +
508 + cur_ops.oobretlen = 0;
509 + cur_ops.retlen = 0;
510 + cur_ops.len = min_t(u32, bmtd.blk_size - offset,
511 + ops->len - ops->retlen);
512 + ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
513 + if (ret < 0) {
514 + update_bmt(block);
515 + if (retry_count++ < 10)
516 + continue;
517 +
518 + return ret;
519 + }
520 +
521 + ops->retlen += cur_ops.retlen;
522 + ops->oobretlen += cur_ops.oobretlen;
523 +
524 + cur_ops.datbuf += cur_ops.retlen;
525 + cur_ops.oobbuf += cur_ops.oobretlen;
526 + cur_ops.ooblen -= cur_ops.oobretlen;
527 +
528 + if (!cur_ops.len)
529 + cur_ops.len = mtd->erasesize - offset;
530 +
531 + to += cur_ops.len;
532 + retry_count = 0;
533 + }
534 +
535 + return 0;
536 +}
537 +
538 +
539 +
540 +static int
541 +mtk_bmt_erase(struct nand_device *nand, const struct nand_pos *pos)
542 +{
543 + struct nand_pos new_pos = *pos;
544 + int retry_count = 0;
545 + int ret;
546 +
547 +retry:
548 + new_pos.eraseblock = get_mapping_block_index(pos->eraseblock);
549 +
550 + ret = bmtd.nand_ops->erase(nand, &new_pos);
551 + if (ret) {
552 + update_bmt(pos->eraseblock);
553 + if (retry_count++ < 10)
554 + goto retry;
555 + }
556 +
557 + return ret;
558 +}
559 +
560 +static bool
561 +mtk_bmt_isbad(struct nand_device *nand, const struct nand_pos *pos)
562 +{
563 + struct nand_pos new_pos = *pos;
564 + int retry_count = 0;
565 + bool ret;
566 +
567 +retry:
568 + new_pos.eraseblock = get_mapping_block_index(pos->eraseblock);
569 +
570 + ret = bmtd.nand_ops->isbad(nand, &new_pos);
571 + if (ret) {
572 + update_bmt(pos->eraseblock);
573 + if (retry_count++ < 10)
574 + goto retry;
575 + }
576 +
577 + return ret;
578 +}
579 +
580 +static int
581 +mtk_bmt_markbad(struct nand_device *nand, const struct nand_pos *pos)
582 +{
583 + struct nand_pos new_pos = *pos;
584 +
585 + new_pos.eraseblock = get_mapping_block_index(new_pos.eraseblock);
586 + update_bmt(pos->eraseblock);
587 +
588 + return bmtd.nand_ops->markbad(nand, &new_pos);
589 +}
590 +
591 +static void
592 +mtk_bmt_replace_ops(struct mtd_info *mtd)
593 +{
594 + static const struct nand_ops mtk_bmt_nand_ops = {
595 + .erase = mtk_bmt_erase,
596 + .isbad = mtk_bmt_isbad,
597 + .markbad = mtk_bmt_markbad,
598 + };
599 + struct nand_device *nand = mtd_to_nanddev(mtd);
600 +
601 + bmtd.nand_ops = nand->ops;
602 + bmtd._read_oob = mtd->_read_oob;
603 + bmtd._write_oob = mtd->_write_oob;
604 +
605 + mtd->_read_oob = mtk_bmt_read;
606 + mtd->_write_oob = mtk_bmt_write;
607 + nand->ops = &mtk_bmt_nand_ops;
608 +}
609 +
610 +static int mtk_bmt_debug_mark_good(void *data, u64 val)
611 +{
612 + u32 block = val >> bmtd.blk_shift;
613 +
614 + bmtd.bbt->bb_tbl[block] = block;
615 + bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
616 +
617 + return 0;
618 +}
619 +
620 +static int mtk_bmt_debug_mark_bad(void *data, u64 val)
621 +{
622 + u32 block = val >> bmtd.blk_shift;
623 +
624 + update_bmt(block);
625 +
626 + return 0;
627 +}
628 +
629 +DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
630 +DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
631 +
632 +static void
633 +mtk_bmt_add_debugfs(void)
634 +{
635 + struct dentry *dir;
636 +
637 + dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
638 + if (!dir)
639 + return;
640 +
641 + debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
642 + debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
643 +}
644 +
645 +void mtk_bmt_detach(struct mtd_info *mtd)
646 +{
647 + struct nand_device *nand = mtd_to_nanddev(mtd);
648 +
649 + if (bmtd.mtd != mtd)
650 + return;
651 +
652 + if (bmtd.debugfs_dir)
653 + debugfs_remove_recursive(bmtd.debugfs_dir);
654 + bmtd.debugfs_dir = NULL;
655 +
656 + kfree(nand_bbt_buf);
657 + kfree(nand_data_buf);
658 +
659 + mtd->_read_oob = bmtd._read_oob;
660 + mtd->_write_oob = bmtd._write_oob;
661 + mtd->size = bmtd.total_blks << bmtd.blk_shift;
662 + nand->ops = bmtd.nand_ops;
663 +
664 + memset(&bmtd, 0, sizeof(bmtd));
665 +}
666 +
667 +/* total_blocks - the total number of blocks on the NAND chip, derived from mtd->size */
668 +int mtk_bmt_attach(struct mtd_info *mtd)
669 +{
670 + struct device_node *np;
671 + struct bbbt *bbt;
672 + u32 bufsz;
673 + u32 block;
674 + u16 total_blocks, pmt_block;
675 + int ret = 0;
676 + u32 bmt_pool_size;
677 +
678 + if (bmtd.mtd)
679 + return -ENOSPC;
680 +
681 + np = mtd_get_of_node(mtd);
682 + if (!np)
683 + return 0;
684 +
685 + if (!of_property_read_bool(np, "mediatek,bmt-v2"))
686 + return 0;
687 +
688 + if (of_property_read_u32(np, "mediatek,bmt-pool-size",
689 + &bmt_pool_size) != 0)
690 + bmt_pool_size = 80;
691 +
692 + if (of_property_read_u8(np, "mediatek,bmt-oob-offset",
693 + &bmtd.oob_offset) != 0)
694 + bmtd.oob_offset = 8;
695 +
696 + bmtd.mtd = mtd;
697 + mtk_bmt_replace_ops(mtd);
698 +
699 + bmtd.blk_size = mtd->erasesize;
700 + bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
701 + bmtd.pg_size = mtd->writesize;
702 + bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
703 + total_blocks = mtd->size >> bmtd.blk_shift;
704 + pmt_block = total_blocks - bmt_pool_size - 2;
705 +
706 + mtd->size = pmt_block << bmtd.blk_shift;
707 +
708 + /*
709 + * ---------------------------------------
710 + * | PMT(2blks) | BMT POOL(totalblks * 2%) |
711 + * ---------------------------------------
712 + * ^ ^
713 + * | |
714 + * pmt_block pmt_block + 2blocks(pool_lba)
715 + *
716 + * ATTENTION:
717 + * Blocks before the boundary block are mapped through bb_tbl,
718 + * blocks behind it are tracked in bmt_tbl
719 + */
720 +
721 + bmtd.pool_lba = (u16)(pmt_block + 2);
722 + bmtd.total_blks = total_blocks;
723 + bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100;
724 +
725 + /* The two buffers we need */
726 + bufsz = round_up(sizeof(struct bbbt), bmtd.pg_size);
727 + bmtd.bmt_pgs = bufsz >> bmtd.pg_shift;
728 +
729 + nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL);
730 + nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL);
731 +
732 + if (!nand_bbt_buf || !nand_data_buf) {
733 + pr_info("nand: FATAL ERR: allocate buffer failed!\n");
734 + ret = -1;
735 + goto error;
736 + }
737 +
738 + memset(nand_bbt_buf, 0xff, bufsz);
739 + memset(nand_data_buf, 0xff, bmtd.pg_size);
740 +
741 + BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)",
742 + nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size);
743 + BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x",
744 + bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max);
745 +
746 + /* Scanning starts from the first page of the last block
747 + * of the whole flash
748 + */
749 + bbt = scan_bmt(bmtd.total_blks - 1);
750 + if (!bbt) {
751 + /* BMT not found */
752 + if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) {
753 + pr_info("nand: FATAL: Too many blocks, can not support!\n");
754 + ret = -1;
755 + goto error;
756 + }
757 +
758 + bbt = (struct bbbt *)nand_bbt_buf;
759 + memset(bbt->bmt_tbl, BMT_TBL_DEF_VAL, sizeof(bbt->bmt_tbl));
760 +
761 + if (scan_bad_blocks(bbt)) {
762 + ret = -1;
763 + goto error;
764 + }
765 +
766 + /* BMT always in the last valid block in pool */
767 + bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);
768 + block = bbt->bmt_tbl[bmtd.bmt_blk_idx].block;
769 + pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block);
770 +
771 + if (bmtd.bmt_blk_idx == 0)
772 + pr_info("nand: Warning: no available block in BMT pool!\n");
773 + else if (bmtd.bmt_blk_idx == (u16)-1) {
774 + ret = -1;
775 + goto error;
776 + }
777 + }
778 + mtk_bmt_add_debugfs();
779 +
780 + bmtd.bbt = bbt;
781 + return 0;
782 +
783 +error:
784 + mtk_bmt_detach(mtd);
785 + return ret;
786 +}
787 +
788 +
789 +MODULE_LICENSE("GPL");
790 +MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
791 +MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
792 +
793 --- a/drivers/mtd/nand/spi/core.c
794 +++ b/drivers/mtd/nand/spi/core.c
795 @@ -19,6 +19,7 @@
796 #include <linux/string.h>
797 #include <linux/spi/spi.h>
798 #include <linux/spi/spi-mem.h>
799 +#include <linux/mtd/mtk_bmt.h>
800
801 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
802 {
803 @@ -1139,6 +1140,8 @@ static int spinand_probe(struct spi_mem
804 if (ret)
805 return ret;
806
807 + mtk_bmt_attach(mtd);
808 +
809 ret = mtd_device_register(mtd, NULL, 0);
810 if (ret)
811 goto err_spinand_cleanup;
812 @@ -1164,6 +1167,7 @@ static int spinand_remove(struct spi_mem
813 if (ret)
814 return ret;
815
816 + mtk_bmt_detach(mtd);
817 spinand_cleanup(spinand);
818
819 return 0;
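The spinand hooks above are the only in-tree user this patch adds, but since mtk_bmt.h provides no-op stubs when CONFIG_MTD_NAND_MTK_BMT is disabled, another MTD driver could hook in the same way: attach after the MTD is fully set up and before registration, detach on removal. A hypothetical sketch follows; every identifier except mtk_bmt_attach(), mtk_bmt_detach(), mtd_device_register() and mtd_device_unregister() is made up.

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/mtk_bmt.h>

    struct example_flash {              /* made-up driver state */
        struct mtd_info mtd;
    };

    static int example_flash_probe(struct example_flash *flash)
    {
        int ret;

        /* Attach once the MTD is fully initialised, before registration,
         * so every read/write/erase goes through the remapping layer. */
        mtk_bmt_attach(&flash->mtd);

        ret = mtd_device_register(&flash->mtd, NULL, 0);
        if (ret)
            mtk_bmt_detach(&flash->mtd);

        return ret;
    }

    static void example_flash_remove(struct example_flash *flash)
    {
        mtd_device_unregister(&flash->mtd);
        mtk_bmt_detach(&flash->mtd);
    }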
820 --- /dev/null
821 +++ b/include/linux/mtd/mtk_bmt.h
822 @@ -0,0 +1,18 @@
823 +#ifndef __MTK_BMT_H
824 +#define __MTK_BMT_H
825 +
826 +#ifdef CONFIG_MTD_NAND_MTK_BMT
827 +int mtk_bmt_attach(struct mtd_info *mtd);
828 +void mtk_bmt_detach(struct mtd_info *mtd);
829 +#else
830 +static inline int mtk_bmt_attach(struct mtd_info *mtd)
831 +{
832 + return 0;
833 +}
834 +
835 +static inline void mtk_bmt_detach(struct mtd_info *mtd)
836 +{
837 +}
838 +#endif
839 +
840 +#endif
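For completeness: the BMT only activates when the flash node in the device tree carries the mediatek,bmt-v2 property; mediatek,bmt-pool-size and mediatek,bmt-oob-offset are optional and, per the attach code earlier in the patch, default to 80 blocks and OOB byte offset 8. A hypothetical SPI-NAND node might look like the device-tree sketch below; the controller label and node layout are made up, only the mediatek,* properties come from this patch.

    &snfi {
        flash@0 {
            compatible = "spi-nand";
            reg = <0>;

            mediatek,bmt-v2;
            mediatek,bmt-pool-size = <80>;
        };
    };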