1 /*
2 * Copyright (c) 2017 MediaTek Inc.
3 * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
4 * Copyright (c) 2020-2022 Felix Fietkau <nbd@nbd.name>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16 #include <linux/module.h>
17 #include <linux/gfp.h>
18 #include <linux/slab.h>
19 #include <linux/bits.h>
20 #include "mtk_bmt.h"
21
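/*
 * Global state for the attached device. Only a single mtd device can be
 * managed at a time; mtk_bmt_attach() populates this descriptor and
 * mtk_bmt_detach() clears it again.
 */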
22 struct bmt_desc bmtd = {};
23
24 /* -------- Nand operations wrapper -------- */
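/*
 * Copy the contents of one eraseblock to another, page by page, carrying
 * along as much OOB data per page as fits into the on-stack buffer.
 * Copying stops at @max_offset or at the first page that returns no data;
 * correctable bitflips on the source are tolerated, hard errors abort.
 */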
25 int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset)
26 {
27 int pages = bmtd.blk_size >> bmtd.pg_shift;
28 loff_t src = (loff_t)src_blk << bmtd.blk_shift;
29 loff_t dest = (loff_t)dest_blk << bmtd.blk_shift;
30 loff_t offset = 0;
31 uint8_t oob[64];
32 int i, ret;
33
34 for (i = 0; i < pages; i++) {
35 struct mtd_oob_ops rd_ops = {
36 .mode = MTD_OPS_PLACE_OOB,
37 .oobbuf = oob,
38 .ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)),
39 .datbuf = bmtd.data_buf,
40 .len = bmtd.pg_size,
41 };
42 struct mtd_oob_ops wr_ops = {
43 .mode = MTD_OPS_PLACE_OOB,
44 .oobbuf = oob,
45 .datbuf = bmtd.data_buf,
46 .len = bmtd.pg_size,
47 };
48
49 if (offset >= max_offset)
50 break;
51
52 ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops);
53 if (ret < 0 && !mtd_is_bitflip(ret))
54 return ret;
55
56 if (!rd_ops.retlen)
57 break;
58
		/* write back the OOB bytes that were actually read */
		wr_ops.ooblen = rd_ops.oobretlen;
		ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops);
		if (ret < 0)
			return ret;

		offset += rd_ops.retlen;
65 }
66
67 return 0;
68 }
69
70 /* -------- Bad Blocks Management -------- */
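/*
 * Check whether @block may be remapped at all. If no
 * "mediatek,bmt-remap-range" property was given, every block is eligible
 * and the whole device is reported as the range; otherwise report the
 * matching window through @start/@end.
 */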
71 bool mapping_block_in_range(int block, int *start, int *end)
72 {
73 const __be32 *cur = bmtd.remap_range;
74 u32 addr = block << bmtd.blk_shift;
75 int i;
76
77 if (!cur || !bmtd.remap_range_len) {
78 *start = 0;
79 *end = bmtd.total_blks;
80 return true;
81 }
82
83 for (i = 0; i < bmtd.remap_range_len; i++, cur += 2) {
84 if (addr < be32_to_cpu(cur[0]) || addr >= be32_to_cpu(cur[1]))
85 continue;
86
		*start = be32_to_cpu(cur[0]) >> bmtd.blk_shift;
		*end = be32_to_cpu(cur[1]) >> bmtd.blk_shift;
89 return true;
90 }
91
92 return false;
93 }
94
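/*
 * Remap @block away from @mapped_block if it lies inside an allowed remap
 * range; the backend preserves the first @copy_len bytes of the old
 * contents.
 */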
95 static bool
96 mtk_bmt_remap_block(u32 block, u32 mapped_block, int copy_len)
97 {
98 int start, end;
99
100 if (!mapping_block_in_range(block, &start, &end))
101 return false;
102
103 return bmtd.ops->remap_block(block, mapped_block, copy_len);
104 }
105
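/*
 * Read wrapper installed in place of mtd->_read_oob. Requests are split
 * into eraseblock-sized chunks and each logical block is translated via
 * the backend's get_mapping_block(). On a hard read error the block is
 * remapped and the chunk retried (up to 10 times); blocks whose bitflip
 * count reaches mtd->bitflip_threshold are remapped proactively. Returns
 * the worst bitflip count seen on success.
 */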
106 static int
107 mtk_bmt_read(struct mtd_info *mtd, loff_t from,
108 struct mtd_oob_ops *ops)
109 {
110 struct mtd_oob_ops cur_ops = *ops;
111 int retry_count = 0;
112 loff_t cur_from;
113 int ret = 0;
114 int max_bitflips = 0;
115
116 ops->retlen = 0;
117 ops->oobretlen = 0;
118
119 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
120 int cur_ret;
121
122 u32 offset = from & (bmtd.blk_size - 1);
123 u32 block = from >> bmtd.blk_shift;
124 int cur_block;
125
126 cur_block = bmtd.ops->get_mapping_block(block);
127 if (cur_block < 0)
128 return -EIO;
129
130 cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;
131
132 cur_ops.oobretlen = 0;
133 cur_ops.retlen = 0;
134 cur_ops.len = min_t(u32, mtd->erasesize - offset,
135 ops->len - ops->retlen);
136 cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
137 if (cur_ret < 0)
138 ret = cur_ret;
139 else
140 max_bitflips = max_t(int, max_bitflips, cur_ret);
141 if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
142 if (mtk_bmt_remap_block(block, cur_block, mtd->erasesize) &&
143 retry_count++ < 10)
144 continue;
145
146 goto out;
147 }
148
149 if (cur_ret >= mtd->bitflip_threshold)
150 mtk_bmt_remap_block(block, cur_block, mtd->erasesize);
151
152 ops->retlen += cur_ops.retlen;
153 ops->oobretlen += cur_ops.oobretlen;
154
155 cur_ops.ooboffs = 0;
156 cur_ops.datbuf += cur_ops.retlen;
157 cur_ops.oobbuf += cur_ops.oobretlen;
158 cur_ops.ooblen -= cur_ops.oobretlen;
159
160 if (!cur_ops.len)
161 cur_ops.len = mtd->erasesize - offset;
162
163 from += cur_ops.len;
164 retry_count = 0;
165 }
166
167 out:
168 if (ret < 0)
169 return ret;
170
171 return max_bitflips;
172 }
173
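/*
 * Write wrapper installed in place of mtd->_write_oob. Like the read path,
 * writes go through the block mapping; on a failed write the block is
 * remapped, the data already present below the failed offset is carried
 * over, and the chunk is retried (up to 10 times).
 */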
174 static int
175 mtk_bmt_write(struct mtd_info *mtd, loff_t to,
176 struct mtd_oob_ops *ops)
177 {
178 struct mtd_oob_ops cur_ops = *ops;
179 int retry_count = 0;
180 loff_t cur_to;
181 int ret;
182
183 ops->retlen = 0;
184 ops->oobretlen = 0;
185
186 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
187 u32 offset = to & (bmtd.blk_size - 1);
188 u32 block = to >> bmtd.blk_shift;
189 int cur_block;
190
191 cur_block = bmtd.ops->get_mapping_block(block);
192 if (cur_block < 0)
193 return -EIO;
194
195 cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;
196
197 cur_ops.oobretlen = 0;
198 cur_ops.retlen = 0;
199 cur_ops.len = min_t(u32, bmtd.blk_size - offset,
200 ops->len - ops->retlen);
201 ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
202 if (ret < 0) {
203 if (mtk_bmt_remap_block(block, cur_block, offset) &&
204 retry_count++ < 10)
205 continue;
206
207 return ret;
208 }
209
210 ops->retlen += cur_ops.retlen;
211 ops->oobretlen += cur_ops.oobretlen;
212
213 cur_ops.ooboffs = 0;
214 cur_ops.datbuf += cur_ops.retlen;
215 cur_ops.oobbuf += cur_ops.oobretlen;
216 cur_ops.ooblen -= cur_ops.oobretlen;
217
218 if (!cur_ops.len)
219 cur_ops.len = mtd->erasesize - offset;
220
221 to += cur_ops.len;
222 retry_count = 0;
223 }
224
225 return 0;
226 }
227
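/*
 * Erase wrapper: erase each affected eraseblock through its current
 * mapping, remapping and retrying (up to 10 times) on failure.
 */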
228 static int
229 mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
230 {
231 struct erase_info mapped_instr = {
232 .len = bmtd.blk_size,
233 };
234 int retry_count = 0;
235 u64 start_addr, end_addr;
	int ret = 0;
237 u16 orig_block;
238 int block;
239
240 start_addr = instr->addr & (~mtd->erasesize_mask);
241 end_addr = instr->addr + instr->len;
242
243 while (start_addr < end_addr) {
244 orig_block = start_addr >> bmtd.blk_shift;
245 block = bmtd.ops->get_mapping_block(orig_block);
246 if (block < 0)
247 return -EIO;
248 mapped_instr.addr = (loff_t)block << bmtd.blk_shift;
249 ret = bmtd._erase(mtd, &mapped_instr);
250 if (ret) {
251 if (mtk_bmt_remap_block(orig_block, block, 0) &&
252 retry_count++ < 10)
253 continue;
254 instr->fail_addr = start_addr;
255 break;
256 }
257 start_addr += mtd->erasesize;
258 retry_count = 0;
259 }
260
261 return ret;
262 }
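
/*
 * Bad-block check wrapper: look up the mapped block and ask the underlying
 * driver whether it is bad; if so, remap it and check again (up to 10
 * retries).
 */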
263 static int
264 mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
265 {
266 int retry_count = 0;
267 u16 orig_block = ofs >> bmtd.blk_shift;
	int block;
	int ret;

retry:
	block = bmtd.ops->get_mapping_block(orig_block);
	if (block < 0)
		return -EIO;

	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
274 if (ret) {
275 if (mtk_bmt_remap_block(orig_block, block, bmtd.blk_size) &&
276 retry_count++ < 10)
277 goto retry;
278 }
279 return ret;
280 }
281
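/*
 * Mark-bad wrapper: remap the logical block away from the failing one
 * before marking the old physical block bad in the underlying driver.
 */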
282 static int
283 mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
284 {
285 u16 orig_block = ofs >> bmtd.blk_shift;
286 int block;
287
288 block = bmtd.ops->get_mapping_block(orig_block);
289 if (block < 0)
290 return -EIO;
291
292 mtk_bmt_remap_block(orig_block, block, bmtd.blk_size);
293
294 return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
295 }
296
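/*
 * Save the original mtd callbacks and install the remapping wrappers
 * above; mtk_bmt_detach() restores the saved callbacks.
 */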
297 static void
298 mtk_bmt_replace_ops(struct mtd_info *mtd)
299 {
300 bmtd._read_oob = mtd->_read_oob;
301 bmtd._write_oob = mtd->_write_oob;
302 bmtd._erase = mtd->_erase;
303 bmtd._block_isbad = mtd->_block_isbad;
304 bmtd._block_markbad = mtd->_block_markbad;
305
306 mtd->_read_oob = mtk_bmt_read;
307 mtd->_write_oob = mtk_bmt_write;
308 mtd->_erase = mtk_bmt_mtd_erase;
309 mtd->_block_isbad = mtk_bmt_block_isbad;
310 mtd->_block_markbad = mtk_bmt_block_markbad;
311 }
312
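/*
 * debugfs "repair": takes a flash offset, drops the existing mapping for
 * the containing block and looks the mapping up again. If a different
 * physical block is now selected, it is erased and the old contents are
 * copied over, so data survives the repair attempt.
 */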
313 static int mtk_bmt_debug_repair(void *data, u64 val)
314 {
315 int block = val >> bmtd.blk_shift;
316 int prev_block, new_block;
317
318 prev_block = bmtd.ops->get_mapping_block(block);
319 if (prev_block < 0)
320 return -EIO;
321
322 bmtd.ops->unmap_block(block);
323 new_block = bmtd.ops->get_mapping_block(block);
324 if (new_block < 0)
325 return -EIO;
326
327 if (prev_block == new_block)
328 return 0;
329
330 bbt_nand_erase(new_block);
331 bbt_nand_copy(new_block, prev_block, bmtd.blk_size);
332
333 return 0;
334 }
335
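/* debugfs "mark_good": drop the mapping so the original block is used again. */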
336 static int mtk_bmt_debug_mark_good(void *data, u64 val)
337 {
338 bmtd.ops->unmap_block(val >> bmtd.blk_shift);
339
340 return 0;
341 }
342
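/* debugfs "mark_bad": force the block at the given offset to be remapped. */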
343 static int mtk_bmt_debug_mark_bad(void *data, u64 val)
344 {
345 u32 block = val >> bmtd.blk_shift;
346 int cur_block;
347
348 cur_block = bmtd.ops->get_mapping_block(block);
349 if (cur_block < 0)
350 return -EIO;
351
352 mtk_bmt_remap_block(block, cur_block, bmtd.blk_size);
353
354 return 0;
355 }
356
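/* debugfs "debug": pass the value straight to the backend's debug hook. */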
357 static int mtk_bmt_debug(void *data, u64 val)
358 {
359 return bmtd.ops->debug(data, val);
360 }
361
362
363 DEFINE_DEBUGFS_ATTRIBUTE(fops_repair, NULL, mtk_bmt_debug_repair, "%llu\n");
364 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n");
365 DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n");
366 DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n");
367
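/*
 * The repair/mark_good/mark_bad files created below take a flash offset in
 * bytes. Illustrative usage, assuming debugfs is mounted at the usual
 * /sys/kernel/debug (the offset value is an example only):
 *
 *	echo 0x7e0000 > /sys/kernel/debug/mtk-bmt/repair
 *	echo 0x7e0000 > /sys/kernel/debug/mtk-bmt/mark_bad
 */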
368 static void
369 mtk_bmt_add_debugfs(void)
370 {
371 struct dentry *dir;
372
373 dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL);
	if (IS_ERR(dir))
		return;
376
377 debugfs_create_file_unsafe("repair", S_IWUSR, dir, NULL, &fops_repair);
378 debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good);
379 debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad);
380 debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug);
381 }
382
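/*
 * Undo mtk_bmt_attach(): remove the debugfs entries, free the buffers,
 * restore the original mtd callbacks and the full device size, and clear
 * the global descriptor.
 */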
383 void mtk_bmt_detach(struct mtd_info *mtd)
384 {
385 if (bmtd.mtd != mtd)
386 return;
387
388 if (bmtd.debugfs_dir)
389 debugfs_remove_recursive(bmtd.debugfs_dir);
390 bmtd.debugfs_dir = NULL;
391
392 kfree(bmtd.bbt_buf);
393 kfree(bmtd.data_buf);
394
395 mtd->_read_oob = bmtd._read_oob;
396 mtd->_write_oob = bmtd._write_oob;
397 mtd->_erase = bmtd._erase;
398 mtd->_block_isbad = bmtd._block_isbad;
399 mtd->_block_markbad = bmtd._block_markbad;
400 mtd->size = bmtd.total_blks << bmtd.blk_shift;
401
402 memset(&bmtd, 0, sizeof(bmtd));
403 }
404
405
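/*
 * Attach to @mtd if its device tree node selects one of the supported
 * schemes: "mediatek,bmt-v2", "mediatek,nmbm" or "mediatek,bbt". The
 * optional "mediatek,bmt-remap-range" property lists <start end> byte
 * ranges within which blocks may be remapped. Illustrative node (label and
 * values are examples only, not taken from a real board):
 *
 *	&nand {
 *		mediatek,bmt-v2;
 *		mediatek,bmt-remap-range = <0x0 0x580000>;
 *	};
 */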
406 int mtk_bmt_attach(struct mtd_info *mtd)
407 {
408 struct device_node *np;
409 int ret = 0;
410
411 if (bmtd.mtd)
412 return -ENOSPC;
413
414 np = mtd_get_of_node(mtd);
415 if (!np)
416 return 0;
417
418 if (of_property_read_bool(np, "mediatek,bmt-v2"))
419 bmtd.ops = &mtk_bmt_v2_ops;
420 else if (of_property_read_bool(np, "mediatek,nmbm"))
421 bmtd.ops = &mtk_bmt_nmbm_ops;
422 else if (of_property_read_bool(np, "mediatek,bbt"))
423 bmtd.ops = &mtk_bmt_bbt_ops;
424 else
425 return 0;
426
427 bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range",
428 &bmtd.remap_range_len);
429 bmtd.remap_range_len /= 8;
430
431 bmtd.mtd = mtd;
432 mtk_bmt_replace_ops(mtd);
433
434 bmtd.blk_size = mtd->erasesize;
435 bmtd.blk_shift = ffs(bmtd.blk_size) - 1;
436 bmtd.pg_size = mtd->writesize;
437 bmtd.pg_shift = ffs(bmtd.pg_size) - 1;
438 bmtd.total_blks = mtd->size >> bmtd.blk_shift;
439
440 bmtd.data_buf = kzalloc(bmtd.pg_size + bmtd.mtd->oobsize, GFP_KERNEL);
441 if (!bmtd.data_buf) {
442 pr_info("nand: FATAL ERR: allocate buffer failed!\n");
443 ret = -1;
444 goto error;
445 }
446
447 memset(bmtd.data_buf, 0xff, bmtd.pg_size + bmtd.mtd->oobsize);
448
449 ret = bmtd.ops->init(np);
450 if (ret)
451 goto error;
452
453 mtk_bmt_add_debugfs();
454 return 0;
455
456 error:
457 mtk_bmt_detach(mtd);
458 return ret;
459 }
460
461
462 MODULE_LICENSE("GPL");
463 MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>");
464 MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver");
465