ramips: fixup nand support on v4.9
[openwrt/staging/jow.git] / target / linux / ramips / patches-4.9 / 0039-mtd-add-mt7621-nand-support.patch
1 From 0e1c4e3c97b83b4e7da65b1c56f0a7d40736ac53 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Sun, 27 Jul 2014 11:05:17 +0100
4 Subject: [PATCH 39/53] mtd: add mt7621 nand support
5
6 Signed-off-by: John Crispin <blogic@openwrt.org>
7 ---
8 drivers/mtd/nand/Kconfig | 6 +
9 drivers/mtd/nand/Makefile | 1 +
10 drivers/mtd/nand/bmt.c | 750 ++++++++++++
11 drivers/mtd/nand/bmt.h | 80 ++
12 drivers/mtd/nand/dev-nand.c | 63 +
13 drivers/mtd/nand/mt6575_typedefs.h | 340 ++++++
14 drivers/mtd/nand/mtk_nand2.c | 2304 +++++++++++++++++++++++++++++++++++
15 drivers/mtd/nand/mtk_nand2.h | 452 +++++++
16 drivers/mtd/nand/nand_base.c | 6 +-
17 drivers/mtd/nand/nand_bbt.c | 19 +
18 drivers/mtd/nand/nand_def.h | 123 ++
19 drivers/mtd/nand/nand_device_list.h | 55 +
20 drivers/mtd/nand/partition.h | 115 ++
21 13 files changed, 4311 insertions(+), 3 deletions(-)
22 create mode 100644 drivers/mtd/nand/bmt.c
23 create mode 100644 drivers/mtd/nand/bmt.h
24 create mode 100644 drivers/mtd/nand/dev-nand.c
25 create mode 100644 drivers/mtd/nand/mt6575_typedefs.h
26 create mode 100644 drivers/mtd/nand/mtk_nand2.c
27 create mode 100644 drivers/mtd/nand/mtk_nand2.h
28 create mode 100644 drivers/mtd/nand/nand_def.h
29 create mode 100644 drivers/mtd/nand/nand_device_list.h
30 create mode 100644 drivers/mtd/nand/partition.h
31
32 Index: linux-4.9.30/drivers/mtd/nand/Kconfig
33 ===================================================================
34 --- linux-4.9.30.orig/drivers/mtd/nand/Kconfig
35 +++ linux-4.9.30/drivers/mtd/nand/Kconfig
36 @@ -569,4 +569,10 @@ config MTD_NAND_MTK
37 Enables support for NAND controller on MTK SoCs.
38 This controller is found on mt27xx, mt81xx, mt65xx SoCs.
39
40 +config MTK_MTD_NAND
41 + tristate "Support for MTK SoC NAND controller"
42 + depends on SOC_MT7621
43 + select MTD_NAND_IDS
44 + select MTD_NAND_ECC
45 +
46 endif # MTD_NAND
47 Index: linux-4.9.30/drivers/mtd/nand/Makefile
48 ===================================================================
49 --- linux-4.9.30.orig/drivers/mtd/nand/Makefile
50 +++ linux-4.9.30/drivers/mtd/nand/Makefile
51 @@ -58,5 +58,6 @@ obj-$(CONFIG_MTD_NAND_HISI504) +
52 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
53 obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
54 obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
55 +obj-$(CONFIG_MTK_MTD_NAND) += mtk_nand2.o bmt.o
56
57 nand-objs := nand_base.o nand_bbt.o nand_timings.o
58 Index: linux-4.9.30/drivers/mtd/nand/bmt.c
59 ===================================================================
60 --- /dev/null
61 +++ linux-4.9.30/drivers/mtd/nand/bmt.c
62 @@ -0,0 +1,750 @@
63 +#include "bmt.h"
64 +
65 +typedef struct
66 +{
67 + char signature[3];
68 + u8 version;
69 + u8 bad_count; // bad block count in pool
70 + u8 mapped_count; // mapped block count in pool
71 + u8 checksum;
72 + u8 reseverd[13];
73 +} phys_bmt_header;
74 +
75 +typedef struct
76 +{
77 + phys_bmt_header header;
78 + bmt_entry table[MAX_BMT_SIZE];
79 +} phys_bmt_struct;
80 +
81 +typedef struct
82 +{
83 + char signature[3];
84 +} bmt_oob_data;
85 +
86 +static char MAIN_SIGNATURE[] = "BMT";
87 +static char OOB_SIGNATURE[] = "bmt";
88 +#define SIGNATURE_SIZE (3)
89 +
90 +#define MAX_DAT_SIZE 0x1000
91 +#define MAX_OOB_SIZE 0x80
92 +
93 +static struct mtd_info *mtd_bmt;
94 +static struct nand_chip *nand_chip_bmt;
95 +#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
96 +#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
97 +
98 +#define OFFSET(block) ((block) * BLOCK_SIZE_BMT)
99 +#define PAGE_ADDR(block) ((block) * BLOCK_SIZE_BMT / PAGE_SIZE_BMT)
100 +
101 +/*********************************************************************
102 +* Flash is split into 2 parts: the system part is for normal *
103 +* system usage (size: system_block_count); the other is the replace pool *
104 +* +-------------------------------------------------+ *
105 +* | system_block_count | bmt_block_count | *
106 +* +-------------------------------------------------+ *
107 +*********************************************************************/
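+/*
+* Illustrative example (not from the original sources): with
+* system_block_count = 1016 and bmt_block_count = 8, total_block_count is
+* 1024; blocks 0..1015 are system blocks and blocks 1016..1023 form the
+* replace pool, one of which also stores the BMT table itself.
+*/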
108 +static u32 total_block_count; // block number in flash
109 +static u32 system_block_count;
110 +static int bmt_block_count; // bmt table size
111 +// static int bmt_count; // block used in bmt
112 +static int page_per_block; // pages per block
113 +
114 +static u32 bmt_block_index; // bmt block index
115 +static bmt_struct bmt; // dynamically created global bmt table
116 +
117 +static u8 dat_buf[MAX_DAT_SIZE];
118 +static u8 oob_buf[MAX_OOB_SIZE];
119 +static bool pool_erased;
120 +
121 +/***************************************************************
122 +*
123 +* Interface adaptor for preloader/uboot/kernel
124 +* These interfaces operate on physical address, read/write
125 +* physical data.
126 +*
127 +***************************************************************/
128 +int nand_read_page_bmt(u32 page, u8 * dat, u8 * oob)
129 +{
130 + return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
131 +}
132 +
133 +bool nand_block_bad_bmt(u32 offset)
134 +{
135 + return mtk_nand_block_bad_hw(mtd_bmt, offset);
136 +}
137 +
138 +bool nand_erase_bmt(u32 offset)
139 +{
140 + int status;
141 + if (offset < 0x20000)
142 + {
143 + MSG(INIT, "erase offset: 0x%x\n", offset);
144 + }
145 +
146 + status = mtk_nand_erase_hw(mtd_bmt, offset / PAGE_SIZE_BMT); // as the nand_chip structure doesn't have an erase function defined
147 + if (status & NAND_STATUS_FAIL)
148 + return false;
149 + else
150 + return true;
151 +}
152 +
153 +int mark_block_bad_bmt(u32 offset)
154 +{
155 + return mtk_nand_block_markbad_hw(mtd_bmt, offset); //mark_block_bad_hw(offset);
156 +}
157 +
158 +bool nand_write_page_bmt(u32 page, u8 * dat, u8 * oob)
159 +{
160 + if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob))
161 + return false;
162 + else
163 + return true;
164 +}
165 +
166 +/***************************************************************
167 +* *
168 +* static internal function *
169 +* *
170 +***************************************************************/
171 +static void dump_bmt_info(bmt_struct * bmt)
172 +{
173 + int i;
174 +
175 + MSG(INIT, "BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count);
176 + for (i = 0; i < bmt->mapped_count; i++)
177 + {
178 + MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index);
179 + }
180 +}
181 +
182 +static bool match_bmt_signature(u8 * dat, u8 * oob)
183 +{
184 +
185 + if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE))
186 + {
187 + return false;
188 + }
189 +
190 + if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE))
191 + {
192 + MSG(INIT, "main signature match, oob signature doesn't match, but ignore\n");
193 + }
194 + return true;
195 +}
196 +
197 +static u8 cal_bmt_checksum(phys_bmt_struct * phys_table, int bmt_size)
198 +{
199 + int i;
200 + u8 checksum = 0;
201 + u8 *dat = (u8 *) phys_table;
202 +
203 + checksum += phys_table->header.version;
204 + checksum += phys_table->header.mapped_count;
205 +
206 + dat += sizeof(phys_bmt_header);
207 + for (i = 0; i < bmt_size * sizeof(bmt_entry); i++)
208 + {
209 + checksum += dat[i];
210 + }
211 +
212 + return checksum;
213 +}
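+
+/*
+* Worked example (illustrative, not from the original sources): for a table
+* of bmt_size = 2 entries
+*   { bad_index = 0x0001, mapped_index = 0x0010 },
+*   { bad_index = 0x0002, mapped_index = 0x0011 },
+* with version = 1 and mapped_count = 2, the byte sum over the table is
+* 0x01 + 0x10 + 0x02 + 0x11 = 0x24 (a plain byte sum is independent of byte
+* order), so the stored checksum is (1 + 2 + 0x24) & 0xFF = 0x27.
+*/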
214 +
215 +
216 +static int is_block_mapped(int index)
217 +{
218 + int i;
219 + for (i = 0; i < bmt.mapped_count; i++)
220 + {
221 + if (index == bmt.table[i].mapped_index)
222 + return i;
223 + }
224 + return -1;
225 +}
226 +
227 +static bool is_page_used(u8 * dat, u8 * oob)
228 +{
229 + return ((oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF));
230 +}
231 +
232 +static bool valid_bmt_data(phys_bmt_struct * phys_table)
233 +{
234 + int i;
235 + u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count);
236 +
237 + // checksum correct?
238 + if (phys_table->header.checksum != checksum)
239 + {
240 + MSG(INIT, "BMT Data checksum error: %x %x\n", phys_table->header.checksum, checksum);
241 + return false;
242 + }
243 +
244 + MSG(INIT, "BMT Checksum is: 0x%x\n", phys_table->header.checksum);
245 +
246 + // block index correct?
247 + for (i = 0; i < phys_table->header.mapped_count; i++)
248 + {
249 + if (phys_table->table[i].bad_index >= total_block_count || phys_table->table[i].mapped_index >= total_block_count || phys_table->table[i].mapped_index < system_block_count)
250 + {
251 + MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n", phys_table->table[i].bad_index, phys_table->table[i].mapped_index);
252 + return false;
253 + }
254 + }
255 +
256 + // pass check, valid bmt.
257 + MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version);
258 + return true;
259 +}
260 +
261 +static void fill_nand_bmt_buffer(bmt_struct * bmt, u8 * dat, u8 * oob)
262 +{
263 + phys_bmt_struct phys_bmt;
264 +
265 + dump_bmt_info(bmt);
266 +
267 + // fill phys_bmt_struct structure with bmt_struct
268 + memset(&phys_bmt, 0xFF, sizeof(phys_bmt));
269 +
270 + memcpy(phys_bmt.header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE);
271 + phys_bmt.header.version = BMT_VERSION;
272 + // phys_bmt.header.bad_count = bmt->bad_count;
273 + phys_bmt.header.mapped_count = bmt->mapped_count;
274 + memcpy(phys_bmt.table, bmt->table, sizeof(bmt_entry) * bmt_block_count);
275 +
276 + phys_bmt.header.checksum = cal_bmt_checksum(&phys_bmt, bmt_block_count);
277 +
278 + memcpy(dat + MAIN_SIGNATURE_OFFSET, &phys_bmt, sizeof(phys_bmt));
279 + memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE);
280 +}
281 +
282 +// Return a valid block index if a BMT is found, else return 0
283 +static int load_bmt_data(int start, int pool_size)
284 +{
285 + int bmt_index = start + pool_size - 1; // find from the end
286 + phys_bmt_struct phys_table;
287 + int i;
288 +
289 + MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index);
290 +
291 + for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--)
292 + {
293 + if (nand_block_bad_bmt(OFFSET(bmt_index)))
294 + {
295 + MSG(INIT, "Skip bad block: %d\n", bmt_index);
296 + continue;
297 + }
298 +
299 + if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf))
300 + {
301 + MSG(INIT, "Error found when read block %d\n", bmt_index);
302 + continue;
303 + }
304 +
305 + if (!match_bmt_signature(dat_buf, oob_buf))
306 + {
307 + continue;
308 + }
309 +
310 + MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index);
311 +
312 + memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table));
313 +
314 + if (!valid_bmt_data(&phys_table))
315 + {
316 + MSG(INIT, "BMT data is not correct %d\n", bmt_index);
317 + continue;
318 + } else
319 + {
320 + bmt.mapped_count = phys_table.header.mapped_count;
321 + bmt.version = phys_table.header.version;
322 + // bmt.bad_count = phys_table.header.bad_count;
323 + memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry));
324 +
325 + MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count);
326 +
327 + for (i = 0; i < bmt.mapped_count; i++)
328 + {
329 + if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index)))
330 + {
331 + MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index);
332 + mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index));
333 + }
334 + }
335 +
336 + return bmt_index;
337 + }
338 + }
339 +
340 + MSG(INIT, "bmt block not found!\n");
341 + return 0;
342 +}
343 +
344 +/*************************************************************************
345 +* Find an available block and erase. *
346 +* start_from_end: if true, find available block from end of flash. *
347 +* else, find from the beginning of the pool *
348 +* Note: on the first call (pool_erased == 0) all unmapped blocks in *
349 +* the pool are erased before searching *
349 +*************************************************************************/
350 +static int find_available_block(bool start_from_end)
351 +{
352 + int i; // , j;
353 + int block = system_block_count;
354 + int direction;
355 + // int avail_index = 0;
356 + MSG(INIT, "Try to find_available_block, pool_erase: %d\n", pool_erased);
357 +
358 + // erase all un-mapped blocks in pool when finding available block
359 + if (!pool_erased)
360 + {
361 + MSG(INIT, "Erase all un-mapped blocks in pool\n");
362 + for (i = 0; i < bmt_block_count; i++)
363 + {
364 + if (block == bmt_block_index)
365 + {
366 + MSG(INIT, "Skip bmt block 0x%x\n", block);
367 + continue;
368 + }
369 +
370 + if (nand_block_bad_bmt(OFFSET(block + i)))
371 + {
372 + MSG(INIT, "Skip bad block 0x%x\n", block + i);
373 + continue;
374 + }
375 +//if(block==4095)
376 +//{
377 +// continue;
378 +//}
379 +
380 + if (is_block_mapped(block + i) >= 0)
381 + {
382 + MSG(INIT, "Skip mapped block 0x%x\n", block + i);
383 + continue;
384 + }
385 +
386 + if (!nand_erase_bmt(OFFSET(block + i)))
387 + {
388 + MSG(INIT, "Erase block 0x%x failed\n", block + i);
389 + mark_block_bad_bmt(OFFSET(block + i));
390 + }
391 + }
392 +
393 + pool_erased = 1;
394 + }
395 +
396 + if (start_from_end)
397 + {
398 + block = total_block_count - 1;
399 + direction = -1;
400 + } else
401 + {
402 + block = system_block_count;
403 + direction = 1;
404 + }
405 +
406 + for (i = 0; i < bmt_block_count; i++, block += direction)
407 + {
408 + if (block == bmt_block_index)
409 + {
410 + MSG(INIT, "Skip bmt block 0x%x\n", block);
411 + continue;
412 + }
413 +
414 + if (nand_block_bad_bmt(OFFSET(block)))
415 + {
416 + MSG(INIT, "Skip bad block 0x%x\n", block);
417 + continue;
418 + }
419 +
420 + if (is_block_mapped(block) >= 0)
421 + {
422 + MSG(INIT, "Skip mapped block 0x%x\n", block);
423 + continue;
424 + }
425 +
426 + MSG(INIT, "Find block 0x%x available\n", block);
427 + return block;
428 + }
429 +
430 + return 0;
431 +}
432 +
433 +static unsigned short get_bad_index_from_oob(u8 * oob_buf)
434 +{
435 + unsigned short index;
436 + memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE);
437 +
438 + return index;
439 +}
440 +
441 +void set_bad_index_to_oob(u8 * oob, u16 index)
442 +{
443 + memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index));
444 +}
445 +
446 +static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob)
447 +{
448 + int page;
449 + int error_block = offset / BLOCK_SIZE_BMT;
450 + int error_page = (offset / PAGE_SIZE_BMT) % page_per_block;
451 + int to_index;
452 +
453 + memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
454 +
455 + to_index = find_available_block(false);
456 +
457 + if (!to_index)
458 + {
459 + MSG(INIT, "Cannot find an available block for BMT\n");
460 + return 0;
461 + }
462 +
463 + { // migrate error page first
464 + MSG(INIT, "Write error page: 0x%x\n", error_page);
465 + if (!write_dat)
466 + {
467 + nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
468 + write_dat = dat_buf;
469 + }
470 + // memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
471 +
472 + if (error_block < system_block_count)
473 + set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.
474 +
475 + if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
476 + {
477 + MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
478 + mark_block_bad_bmt(to_index);
479 + return migrate_from_bad(offset, write_dat, write_oob);
480 + }
481 + }
482 +
483 + for (page = 0; page < page_per_block; page++)
484 + {
485 + if (page != error_page)
486 + {
487 + nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
488 + if (is_page_used(dat_buf, oob_buf))
489 + {
490 + if (error_block < system_block_count)
491 + {
492 + set_bad_index_to_oob(oob_buf, error_block);
493 + }
494 + MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
495 + if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
496 + {
497 + MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
498 + mark_block_bad_bmt(to_index);
499 + return migrate_from_bad(offset, write_dat, write_oob);
500 + }
501 + }
502 + }
503 + }
504 +
505 + MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);
506 +
507 + return to_index;
508 +}
509 +
510 +static bool write_bmt_to_flash(u8 * dat, u8 * oob)
511 +{
512 + bool need_erase = true;
513 + MSG(INIT, "Try to write BMT\n");
514 +
515 + if (bmt_block_index == 0)
516 + {
517 + // if we don't have an index yet, the block found below doesn't need erasing: find_available_block() has already erased it
518 + need_erase = false;
519 + if (!(bmt_block_index = find_available_block(true)))
520 + {
521 + MSG(INIT, "Cannot find an available block for BMT\n");
522 + return false;
523 + }
524 + }
525 +
526 + MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index);
527 +
528 + // write bmt to flash
529 + if (need_erase)
530 + {
531 + if (!nand_erase_bmt(OFFSET(bmt_block_index)))
532 + {
533 + MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
534 + mark_block_bad_bmt(OFFSET(bmt_block_index));
535 + // bmt.bad_count++;
536 +
537 + bmt_block_index = 0;
538 + return write_bmt_to_flash(dat, oob); // recursive call
539 + }
540 + }
541 +
542 + if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob))
543 + {
544 + MSG(INIT, "Write BMT data fail, need to write again\n");
545 + mark_block_bad_bmt(OFFSET(bmt_block_index));
546 + // bmt.bad_count++;
547 +
548 + bmt_block_index = 0;
549 + return write_bmt_to_flash(dat, oob); // recursive call
550 + }
551 +
552 + MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
553 + return true;
554 +}
555 +
556 +/*******************************************************************
557 +* Reconstruct the BMT; called when the BMT info found on flash doesn't
558 +* match the bad block info in flash.
559 +*
560 +* Return NULL for failure
561 +*******************************************************************/
562 +bmt_struct *reconstruct_bmt(bmt_struct * bmt)
563 +{
564 + int i;
565 + int index = system_block_count;
566 + unsigned short bad_index;
567 + int mapped;
568 +
569 + // init everything in BMT struct
570 + bmt->version = BMT_VERSION;
571 + bmt->bad_count = 0;
572 + bmt->mapped_count = 0;
573 +
574 + memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));
575 +
576 + for (i = 0; i < bmt_block_count; i++, index++)
577 + {
578 + if (nand_block_bad_bmt(OFFSET(index)))
579 + {
580 + MSG(INIT, "Skip bad block: 0x%x\n", index);
581 + // bmt->bad_count++;
582 + continue;
583 + }
584 +
585 + MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index));
586 + nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
587 + /* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf))
588 + {
589 + MSG(INIT, "Error when read block %d\n", bmt_block_index);
590 + continue;
591 + } */
592 +
593 + if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count)
594 + {
595 + MSG(INIT, "get bad index: 0x%x\n", bad_index);
596 + if (bad_index != 0xFFFF)
597 + MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index);
598 + continue;
599 + }
600 +
601 + MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
602 +
603 + if (!nand_block_bad_bmt(OFFSET(bad_index)))
604 + {
605 + MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
606 + continue; // no need to erase here, it will be erased later when trying to write BMT
607 + }
608 +
609 + if ((mapped = is_block_mapped(bad_index)) >= 0)
610 + {
611 + MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
612 + bmt->table[mapped].mapped_index = index; // use new one instead.
613 + } else
614 + {
615 + // add mapping to BMT
616 + bmt->table[bmt->mapped_count].bad_index = bad_index;
617 + bmt->table[bmt->mapped_count].mapped_index = index;
618 + bmt->mapped_count++;
619 + }
620 +
621 + MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
622 +
623 + }
624 +
625 + MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
626 + // dump_bmt_info(bmt);
627 +
628 + // fill NAND BMT buffer
629 + memset(oob_buf, 0xFF, sizeof(oob_buf));
630 + fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);
631 +
632 + // write BMT back
633 + if (!write_bmt_to_flash(dat_buf, oob_buf))
634 + {
635 + MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n");
636 + }
637 +
638 + return bmt;
639 +}
640 +
641 +/*******************************************************************
642 +* [BMT Interface]
643 +*
644 +* Description:
645 +* Init the BMT from NAND. Reconstruct it if it is not found or its data is corrupt.
646 +*
647 +* Parameter:
648 +* size: size of bmt and replace pool
649 +*
650 +* Return:
651 +* NULL for failure, and a bmt struct for success
652 +*******************************************************************/
653 +bmt_struct *init_bmt(struct nand_chip * chip, int size)
654 +{
655 + struct mtk_nand_host *host;
656 +
657 + if (size > 0 && size < MAX_BMT_SIZE)
658 + {
659 + MSG(INIT, "Init bmt table, size: %d\n", size);
660 + bmt_block_count = size;
661 + } else
662 + {
663 + MSG(INIT, "Invalid bmt table size: %d\n", size);
664 + return NULL;
665 + }
666 + nand_chip_bmt = chip;
667 + system_block_count = chip->chipsize >> chip->phys_erase_shift;
668 + total_block_count = bmt_block_count + system_block_count;
669 + page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT;
670 + host = (struct mtk_nand_host *)chip->priv;
671 + mtd_bmt = host->mtd;
672 +
673 + MSG(INIT, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt);
674 + MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count);
675 +
676 + // clear this flag so that unmapped blocks in the pool get erased on first use.
677 + pool_erased = 0;
678 + memset(bmt.table, 0, size * sizeof(bmt_entry));
679 + if ((bmt_block_index = load_bmt_data(system_block_count, size)))
680 + {
681 + MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index);
682 + dump_bmt_info(&bmt);
683 + return &bmt;
684 + } else
685 + {
686 + MSG(INIT, "Load bmt data fail, need re-construct!\n");
687 +#ifndef __UBOOT_NAND__ // BMT is not re-constructed in UBOOT.
688 + if (reconstruct_bmt(&bmt))
689 + return &bmt;
690 + else
691 +#endif
692 + return NULL;
693 + }
694 +}
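+
+/*
+* Typical probe-time use (illustrative sketch; the pool size constant is a
+* placeholder, not defined here):
+*
+*   g_bmt = init_bmt(nand_chip, POOL_SIZE);
+*   if (!g_bmt)
+*       return -EINVAL;  // no valid BMT found and reconstruction failed
+*/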
695 +
696 +/*******************************************************************
697 +* [BMT Interface]
698 +*
699 +* Description:
700 +* Update BMT.
701 +*
702 +* Parameter:
703 +* offset: update block/page offset.
704 +* reason: update reason, see update_reason_t for reason.
705 +* dat/oob: data and oob buffer for write fail.
706 +*
707 +* Return:
708 +* Return true for success, and false for failure.
709 +*******************************************************************/
710 +bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob)
711 +{
712 + int map_index;
713 + int orig_bad_block = -1;
714 + // int bmt_update_index;
715 + int i;
716 + int bad_index = offset / BLOCK_SIZE_BMT;
717 +
718 +#ifndef MTK_NAND_BMT
719 + return false;
720 +#endif
721 + if (reason == UPDATE_WRITE_FAIL)
722 + {
723 + MSG(INIT, "Write fail, need to migrate\n");
724 + if (!(map_index = migrate_from_bad(offset, dat, oob)))
725 + {
726 + MSG(INIT, "migrate fail\n");
727 + return false;
728 + }
729 + } else
730 + {
731 + if (!(map_index = find_available_block(false)))
732 + {
733 + MSG(INIT, "Cannot find block in pool\n");
734 + return false;
735 + }
736 + }
737 +
738 + // now let's update BMT
739 + if (bad_index >= system_block_count) // a mapped block becomes bad, find the original bad block
740 + {
741 + for (i = 0; i < bmt_block_count; i++)
742 + {
743 + if (bmt.table[i].mapped_index == bad_index)
744 + {
745 + orig_bad_block = bmt.table[i].bad_index;
746 + break;
747 + }
748 + }
749 + // bmt.bad_count++;
750 + MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
751 +
752 + bmt.table[i].mapped_index = map_index;
753 + } else
754 + {
755 + bmt.table[bmt.mapped_count].mapped_index = map_index;
756 + bmt.table[bmt.mapped_count].bad_index = bad_index;
757 + bmt.mapped_count++;
758 + }
759 +
760 + memset(oob_buf, 0xFF, sizeof(oob_buf));
761 + fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
762 + if (!write_bmt_to_flash(dat_buf, oob_buf))
763 + return false;
764 +
765 + mark_block_bad_bmt(offset);
766 +
767 + return true;
768 +}
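+
+/*
+* Illustrative caller sketch (assumption, not code from the original
+* driver): after a failed page program the driver can pass the block's
+* byte offset to update_bmt() and retry on the remapped block:
+*
+*   if (update_bmt(block_offset, UPDATE_WRITE_FAIL, data, oob))
+*       new_block = get_mapping_block_index(block_offset / BLOCK_SIZE_BMT);
+*/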
769 +
770 +/*******************************************************************
771 +* [BMT Interface]
772 +*
773 +* Description:
774 +* Given a block index, return the mapped index if it is mapped, else
775 +* return the given index.
776 +*
777 +* Parameter:
778 +* index: a block index. This value cannot exceed
779 +* system_block_count.
780 +*
781 +* Return: the mapped index, or the given index if it is not mapped
782 +*******************************************************************/
783 +u16 get_mapping_block_index(int index)
784 +{
785 + int i;
786 +#ifndef MTK_NAND_BMT
787 + return index;
788 +#endif
789 + if (index > system_block_count)
790 + {
791 + return index;
792 + }
793 +
794 + for (i = 0; i < bmt.mapped_count; i++)
795 + {
796 + if (bmt.table[i].bad_index == index)
797 + {
798 + return bmt.table[i].mapped_index;
799 + }
800 + }
801 +
802 + return index;
803 +}
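+
+/*
+* Usage sketch (illustrative): remap a logical block before issuing I/O;
+* page_per_block and page_in_block stand for the caller's own bookkeeping:
+*
+*   u16 mapped = get_mapping_block_index(logical_block);
+*   u32 phys_page = mapped * page_per_block + page_in_block;
+*/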
804 +#ifdef __KERNEL_NAND__
805 +EXPORT_SYMBOL_GPL(init_bmt);
806 +EXPORT_SYMBOL_GPL(update_bmt);
807 +EXPORT_SYMBOL_GPL(get_mapping_block_index);
808 +
809 +MODULE_LICENSE("GPL");
810 +MODULE_AUTHOR("MediaTek");
811 +MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver");
812 +#endif
813 Index: linux-4.9.30/drivers/mtd/nand/bmt.h
814 ===================================================================
815 --- /dev/null
816 +++ linux-4.9.30/drivers/mtd/nand/bmt.h
817 @@ -0,0 +1,80 @@
818 +#ifndef __BMT_H__
819 +#define __BMT_H__
820 +
821 +#include "nand_def.h"
822 +
823 +#if defined(__PRELOADER_NAND__)
824 +
825 +#include "nand.h"
826 +
827 +#elif defined(__UBOOT_NAND__)
828 +
829 +#include <linux/mtd/nand.h>
830 +#include "mtk_nand2.h"
831 +
832 +#elif defined(__KERNEL_NAND__)
833 +
834 +#include <linux/mtd/mtd.h>
835 +#include <linux/mtd/nand.h>
836 +#include <linux/module.h>
837 +#include "mtk_nand2.h"
838 +
839 +#endif
840 +
841 +
842 +#define MAX_BMT_SIZE (0x80)
843 +#define BMT_VERSION (1) // initial version
844 +
845 +#define MAIN_SIGNATURE_OFFSET (0)
846 +#define OOB_SIGNATURE_OFFSET (1)
847 +#define OOB_INDEX_OFFSET (29)
848 +#define OOB_INDEX_SIZE (2)
849 +#define FAKE_INDEX (0xAAAA)
850 +
851 +typedef struct _bmt_entry_
852 +{
853 + u16 bad_index; // bad block index
854 + u16 mapped_index; // mapping block index in the replace pool
855 +} bmt_entry;
856 +
857 +typedef enum
858 +{
859 + UPDATE_ERASE_FAIL,
860 + UPDATE_WRITE_FAIL,
861 + UPDATE_UNMAPPED_BLOCK,
862 + UPDATE_REASON_COUNT,
863 +} update_reason_t;
864 +
865 +typedef struct
866 +{
867 + bmt_entry table[MAX_BMT_SIZE];
868 + u8 version;
869 + u8 mapped_count; // mapped block count in pool
870 + u8 bad_count; // bad block count in pool. Not used in V1
871 +} bmt_struct;
872 +
873 +/***************************************************************
874 +* *
875 +* Interface BMT need to use *
876 +* *
877 +***************************************************************/
878 +extern bool mtk_nand_exec_read_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
879 +extern int mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs);
880 +extern int mtk_nand_erase_hw(struct mtd_info *mtd, int page);
881 +extern int mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t ofs);
882 +extern int mtk_nand_exec_write_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
883 +
884 +
885 +/***************************************************************
886 +* *
887 +* Different function interface for preloader/uboot/kernel *
888 +* *
889 +***************************************************************/
890 +void set_bad_index_to_oob(u8 * oob, u16 index);
891 +
892 +
893 +bmt_struct *init_bmt(struct nand_chip *nand, int size);
894 +bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob);
895 +unsigned short get_mapping_block_index(int index);
896 +
897 +#endif // #ifndef __BMT_H__
898 Index: linux-4.9.30/drivers/mtd/nand/dev-nand.c
899 ===================================================================
900 --- /dev/null
901 +++ linux-4.9.30/drivers/mtd/nand/dev-nand.c
902 @@ -0,0 +1,63 @@
903 +#include <linux/init.h>
904 +#include <linux/kernel.h>
905 +#include <linux/platform_device.h>
906 +
907 +#include "mt6575_typedefs.h"
908 +
909 +#define RALINK_NAND_CTRL_BASE 0xBE003000
910 +#define NFI_base RALINK_NAND_CTRL_BASE
911 +#define RALINK_NANDECC_CTRL_BASE 0xBE003800
912 +#define NFIECC_base RALINK_NANDECC_CTRL_BASE
913 +#define MT7621_NFI_IRQ_ID SURFBOARDINT_NAND
914 +#define MT7621_NFIECC_IRQ_ID SURFBOARDINT_NAND_ECC
915 +
916 +#define SURFBOARDINT_NAND 22
917 +#define SURFBOARDINT_NAND_ECC 23
918 +
919 +static struct resource MT7621_resource_nand[] = {
920 + {
921 + .start = NFI_base,
922 + .end = NFI_base + 0x1A0,
923 + .flags = IORESOURCE_MEM,
924 + },
925 + {
926 + .start = NFIECC_base,
927 + .end = NFIECC_base + 0x150,
928 + .flags = IORESOURCE_MEM,
929 + },
930 + {
931 + .start = MT7621_NFI_IRQ_ID,
932 + .flags = IORESOURCE_IRQ,
933 + },
934 + {
935 + .start = MT7621_NFIECC_IRQ_ID,
936 + .flags = IORESOURCE_IRQ,
937 + },
938 +};
939 +
940 +static struct platform_device MT7621_nand_dev = {
941 + .name = "MT7621-NAND",
942 + .id = 0,
943 + .num_resources = ARRAY_SIZE(MT7621_resource_nand),
944 + .resource = MT7621_resource_nand,
945 + .dev = {
946 + .platform_data = &mt7621_nand_hw,
947 + },
948 +};
949 +
950 +
951 +int __init mtk_nand_register(void)
952 +{
953 +
954 + int retval = 0;
955 +
956 + retval = platform_device_register(&MT7621_nand_dev);
957 + if (retval != 0) {
958 + printk(KERN_ERR "register nand device fail\n");
959 + return retval;
960 + }
961 +
962 +
963 + return retval;
964 +}
965 +arch_initcall(mtk_nand_register);
966 Index: linux-4.9.30/drivers/mtd/nand/mt6575_typedefs.h
967 ===================================================================
968 --- /dev/null
969 +++ linux-4.9.30/drivers/mtd/nand/mt6575_typedefs.h
970 @@ -0,0 +1,340 @@
971 +/* Copyright Statement:
972 + *
973 + * This software/firmware and related documentation ("MediaTek Software") are
974 + * protected under relevant copyright laws. The information contained herein
975 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
976 + * Without the prior written permission of MediaTek inc. and/or its licensors,
977 + * any reproduction, modification, use or disclosure of MediaTek Software,
978 + * and information contained herein, in whole or in part, shall be strictly prohibited.
979 + */
980 +/* MediaTek Inc. (C) 2010. All rights reserved.
981 + *
982 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
983 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
984 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
985 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
986 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
987 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
988 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
989 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
990 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
991 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
992 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
993 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
994 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
995 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
996 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
997 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
998 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
999 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
1000 + *
1001 + * The following software/firmware and/or related documentation ("MediaTek Software")
1002 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
1003 + * applicable license agreements with MediaTek Inc.
1004 + */
1005 +
1006 +/*****************************************************************************
1007 +* Copyright Statement:
1008 +* --------------------
1009 +* This software is protected by Copyright and the information contained
1010 +* herein is confidential. The software may not be copied and the information
1011 +* contained herein may not be used or disclosed except with the written
1012 +* permission of MediaTek Inc. (C) 2008
1013 +*
1014 +* BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
1015 +* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
1016 +* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
1017 +* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
1018 +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
1019 +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
1020 +* NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
1021 +* SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
1022 +* SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
1023 +* THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
1024 +* NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
1025 +* SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
1026 +*
1027 +* BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
1028 +* LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
1029 +* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
1030 +* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
1031 +* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
1032 +*
1033 +* THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
1034 +* WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
1035 +* LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
1036 +* RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
1037 +* THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
1038 +*
1039 +*****************************************************************************/
1040 +
1041 +#ifndef _MT6575_TYPEDEFS_H
1042 +#define _MT6575_TYPEDEFS_H
1043 +
1044 +#if defined (__KERNEL_NAND__)
1045 +#include <linux/bug.h>
1046 +#else
1047 +#define true 1
1048 +#define false 0
1049 +#define bool u8
1050 +#endif
1051 +
1052 +// ---------------------------------------------------------------------------
1053 +// Basic Type Definitions
1054 +// ---------------------------------------------------------------------------
1055 +
1056 +typedef volatile unsigned char *P_kal_uint8;
1057 +typedef volatile unsigned short *P_kal_uint16;
1058 +typedef volatile unsigned int *P_kal_uint32;
1059 +
1060 +typedef long LONG;
1061 +typedef unsigned char UBYTE;
1062 +typedef short SHORT;
1063 +
1064 +typedef signed char kal_int8;
1065 +typedef signed short kal_int16;
1066 +typedef signed int kal_int32;
1067 +typedef long long kal_int64;
1068 +typedef unsigned char kal_uint8;
1069 +typedef unsigned short kal_uint16;
1070 +typedef unsigned int kal_uint32;
1071 +typedef unsigned long long kal_uint64;
1072 +typedef char kal_char;
1073 +
1074 +typedef unsigned int *UINT32P;
1075 +typedef volatile unsigned short *UINT16P;
1076 +typedef volatile unsigned char *UINT8P;
1077 +typedef unsigned char *U8P;
1078 +
1079 +typedef volatile unsigned char *P_U8;
1080 +typedef volatile signed char *P_S8;
1081 +typedef volatile unsigned short *P_U16;
1082 +typedef volatile signed short *P_S16;
1083 +typedef volatile unsigned int *P_U32;
1084 +typedef volatile signed int *P_S32;
1085 +typedef unsigned long long *P_U64;
1086 +typedef signed long long *P_S64;
1087 +
1088 +typedef unsigned char U8;
1089 +typedef signed char S8;
1090 +typedef unsigned short U16;
1091 +typedef signed short S16;
1092 +typedef unsigned int U32;
1093 +typedef signed int S32;
1094 +typedef unsigned long long U64;
1095 +typedef signed long long S64;
1096 +//typedef unsigned char bool;
1097 +
1098 +typedef unsigned char UINT8;
1099 +typedef unsigned short UINT16;
1100 +typedef unsigned int UINT32;
1101 +typedef unsigned short USHORT;
1102 +typedef signed char INT8;
1103 +typedef signed short INT16;
1104 +typedef signed int INT32;
1105 +typedef unsigned int DWORD;
1106 +typedef void VOID;
1107 +typedef unsigned char BYTE;
1108 +typedef float FLOAT;
1109 +
1110 +typedef char *LPCSTR;
1111 +typedef short *LPWSTR;
1112 +
1113 +
1114 +// ---------------------------------------------------------------------------
1115 +// Constants
1116 +// ---------------------------------------------------------------------------
1117 +
1118 +#define IMPORT EXTERN
1119 +#ifndef __cplusplus
1120 + #define EXTERN extern
1121 +#else
1122 + #define EXTERN extern "C"
1123 +#endif
1124 +#define LOCAL static
1125 +#define GLOBAL
1126 +#define EXPORT GLOBAL
1127 +
1128 +#define EQ ==
1129 +#define NEQ !=
1130 +#define AND &&
1131 +#define OR ||
1132 +#define XOR(A,B) ((!(A) AND (B)) OR ((A) AND !(B)))
1133 +
1134 +#ifndef FALSE
1135 + #define FALSE (0)
1136 +#endif
1137 +
1138 +#ifndef TRUE
1139 + #define TRUE (1)
1140 +#endif
1141 +
1142 +#ifndef NULL
1143 + #define NULL (0)
1144 +#endif
1145 +
1146 +//enum boolean {false, true};
1147 +enum {RX, TX, NONE};
1148 +
1149 +#ifndef BOOL
1150 +typedef unsigned char BOOL;
1151 +#endif
1152 +
1153 +typedef enum {
1154 + KAL_FALSE = 0,
1155 + KAL_TRUE = 1,
1156 +} kal_bool;
1157 +
1158 +
1159 +// ---------------------------------------------------------------------------
1160 +// Type Casting
1161 +// ---------------------------------------------------------------------------
1162 +
1163 +#define AS_INT32(x) (*(INT32 *)((void*)x))
1164 +#define AS_INT16(x) (*(INT16 *)((void*)x))
1165 +#define AS_INT8(x) (*(INT8 *)((void*)x))
1166 +
1167 +#define AS_UINT32(x) (*(UINT32 *)((void*)x))
1168 +#define AS_UINT16(x) (*(UINT16 *)((void*)x))
1169 +#define AS_UINT8(x) (*(UINT8 *)((void*)x))
1170 +
1171 +
1172 +// ---------------------------------------------------------------------------
1173 +// Register Manipulations
1174 +// ---------------------------------------------------------------------------
1175 +
1176 +#define READ_REGISTER_UINT32(reg) \
1177 + (*(volatile UINT32 * const)(reg))
1178 +
1179 +#define WRITE_REGISTER_UINT32(reg, val) \
1180 + (*(volatile UINT32 * const)(reg)) = (val)
1181 +
1182 +#define READ_REGISTER_UINT16(reg) \
1183 + (*(volatile UINT16 * const)(reg))
1184 +
1185 +#define WRITE_REGISTER_UINT16(reg, val) \
1186 + (*(volatile UINT16 * const)(reg)) = (val)
1187 +
1188 +#define READ_REGISTER_UINT8(reg) \
1189 + (*(volatile UINT8 * const)(reg))
1190 +
1191 +#define WRITE_REGISTER_UINT8(reg, val) \
1192 + (*(volatile UINT8 * const)(reg)) = (val)
1193 +
1194 +#define INREG8(x) READ_REGISTER_UINT8((UINT8*)((void*)(x)))
1195 +#define OUTREG8(x, y) WRITE_REGISTER_UINT8((UINT8*)((void*)(x)), (UINT8)(y))
1196 +#define SETREG8(x, y) OUTREG8(x, INREG8(x)|(y))
1197 +#define CLRREG8(x, y) OUTREG8(x, INREG8(x)&~(y))
1198 +#define MASKREG8(x, y, z) OUTREG8(x, (INREG8(x)&~(y))|(z))
1199 +
1200 +#define INREG16(x) READ_REGISTER_UINT16((UINT16*)((void*)(x)))
1201 +#define OUTREG16(x, y) WRITE_REGISTER_UINT16((UINT16*)((void*)(x)),(UINT16)(y))
1202 +#define SETREG16(x, y) OUTREG16(x, INREG16(x)|(y))
1203 +#define CLRREG16(x, y) OUTREG16(x, INREG16(x)&~(y))
1204 +#define MASKREG16(x, y, z) OUTREG16(x, (INREG16(x)&~(y))|(z))
1205 +
1206 +#define INREG32(x) READ_REGISTER_UINT32((UINT32*)((void*)(x)))
1207 +#define OUTREG32(x, y) WRITE_REGISTER_UINT32((UINT32*)((void*)(x)), (UINT32)(y))
1208 +#define SETREG32(x, y) OUTREG32(x, INREG32(x)|(y))
1209 +#define CLRREG32(x, y) OUTREG32(x, INREG32(x)&~(y))
1210 +#define MASKREG32(x, y, z) OUTREG32(x, (INREG32(x)&~(y))|(z))
1211 +
1212 +
1213 +#define DRV_Reg8(addr) INREG8(addr)
1214 +#define DRV_WriteReg8(addr, data) OUTREG8(addr, data)
1215 +#define DRV_SetReg8(addr, data) SETREG8(addr, data)
1216 +#define DRV_ClrReg8(addr, data) CLRREG8(addr, data)
1217 +
1218 +#define DRV_Reg16(addr) INREG16(addr)
1219 +#define DRV_WriteReg16(addr, data) OUTREG16(addr, data)
1220 +#define DRV_SetReg16(addr, data) SETREG16(addr, data)
1221 +#define DRV_ClrReg16(addr, data) CLRREG16(addr, data)
1222 +
1223 +#define DRV_Reg32(addr) INREG32(addr)
1224 +#define DRV_WriteReg32(addr, data) OUTREG32(addr, data)
1225 +#define DRV_SetReg32(addr, data) SETREG32(addr, data)
1226 +#define DRV_ClrReg32(addr, data) CLRREG32(addr, data)
1227 +
1228 +// !!! DEPRECATED, WILL BE REMOVED LATER !!!
1229 +#define DRV_Reg(addr) DRV_Reg16(addr)
1230 +#define DRV_WriteReg(addr, data) DRV_WriteReg16(addr, data)
1231 +#define DRV_SetReg(addr, data) DRV_SetReg16(addr, data)
1232 +#define DRV_ClrReg(addr, data) DRV_ClrReg16(addr, data)
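+
+/*
+ * Example (illustrative; the register and bit names are placeholders, not
+ * real NFI definitions): a polled read-modify-write built from the
+ * accessors above:
+ *
+ *   DRV_SetReg16(EXAMPLE_CTRL_REG16, EXAMPLE_START_BIT);
+ *   while (!(DRV_Reg16(EXAMPLE_STA_REG16) & EXAMPLE_DONE_BIT))
+ *       ;
+ *   DRV_ClrReg16(EXAMPLE_CTRL_REG16, EXAMPLE_START_BIT);
+ */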
1233 +
1234 +
1235 +// ---------------------------------------------------------------------------
1236 +// Compiler Time Deduction Macros
1237 +// ---------------------------------------------------------------------------
1238 +
1239 +#define _MASK_OFFSET_1(x, n) ((x) & 0x1) ? (n) :
1240 +#define _MASK_OFFSET_2(x, n) _MASK_OFFSET_1((x), (n)) _MASK_OFFSET_1((x) >> 1, (n) + 1)
1241 +#define _MASK_OFFSET_4(x, n) _MASK_OFFSET_2((x), (n)) _MASK_OFFSET_2((x) >> 2, (n) + 2)
1242 +#define _MASK_OFFSET_8(x, n) _MASK_OFFSET_4((x), (n)) _MASK_OFFSET_4((x) >> 4, (n) + 4)
1243 +#define _MASK_OFFSET_16(x, n) _MASK_OFFSET_8((x), (n)) _MASK_OFFSET_8((x) >> 8, (n) + 8)
1244 +#define _MASK_OFFSET_32(x, n) _MASK_OFFSET_16((x), (n)) _MASK_OFFSET_16((x) >> 16, (n) + 16)
1245 +
1246 +#define MASK_OFFSET_ERROR (0xFFFFFFFF)
1247 +
1248 +#define MASK_OFFSET(x) (_MASK_OFFSET_32(x, 0) MASK_OFFSET_ERROR)
1249 +
1250 +
1251 +// ---------------------------------------------------------------------------
1252 +// Assertions
1253 +// ---------------------------------------------------------------------------
1254 +
1255 +#ifndef ASSERT
1256 + #define ASSERT(expr) BUG_ON(!(expr))
1257 +#endif
1258 +
1259 +#ifndef NOT_IMPLEMENTED
1260 + #define NOT_IMPLEMENTED() BUG_ON(1)
1261 +#endif
1262 +
1263 +#define STATIC_ASSERT(pred) STATIC_ASSERT_X(pred, __LINE__)
1264 +#define STATIC_ASSERT_X(pred, line) STATIC_ASSERT_XX(pred, line)
1265 +#define STATIC_ASSERT_XX(pred, line) \
1266 + extern char assertion_failed_at_##line[(pred) ? 1 : -1]
1267 +
1268 +// ---------------------------------------------------------------------------
1269 +// Resolve Compiler Warnings
1270 +// ---------------------------------------------------------------------------
1271 +
1272 +#define NOT_REFERENCED(x) { (x) = (x); }
1273 +
1274 +
1275 +// ---------------------------------------------------------------------------
1276 +// Utilities
1277 +// ---------------------------------------------------------------------------
1278 +
1279 +#define MAXIMUM(A,B) (((A)>(B))?(A):(B))
1280 +#define MINIMUM(A,B) (((A)<(B))?(A):(B))
1281 +
1282 +#define ARY_SIZE(x) (sizeof((x)) / sizeof((x[0])))
1283 +#define DVT_DELAYMACRO(u4Num) \
1284 +{ \
1285 + UINT32 u4Count = 0 ; \
1286 + for (u4Count = 0; u4Count < u4Num; u4Count++ ); \
1287 +} \
1288 +
1289 +#define A68351B 0
1290 +#define B68351B 1
1291 +#define B68351D 2
1292 +#define B68351E 3
1293 +#define UNKNOWN_IC_VERSION 0xFF
1294 +
1295 +/* NAND driver */
1296 +struct mtk_nand_host_hw {
1297 + unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
1298 + unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
1299 + unsigned int nfi_cs_num; /* NFI_CS_NUM */
1300 + unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
1301 + unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
1302 + unsigned int nand_ecc_size;
1303 + unsigned int nand_ecc_bytes;
1304 + unsigned int nand_ecc_mode;
1305 +};
1306 +extern struct mtk_nand_host_hw mt7621_nand_hw;
1307 +extern unsigned int CFG_BLOCKSIZE;
1308 +
1309 +#endif // _MT6575_TYPEDEFS_H
1310 +
1311 Index: linux-4.9.30/drivers/mtd/nand/mtk_nand2.c
1312 ===================================================================
1313 --- /dev/null
1314 +++ linux-4.9.30/drivers/mtd/nand/mtk_nand2.c
1315 @@ -0,0 +1,2363 @@
1316 +/******************************************************************************
1317 +* mtk_nand2.c - MTK NAND Flash Device Driver
1318 + *
1319 +* Copyright 2009-2012 MediaTek Co.,Ltd.
1320 + *
1321 +* DESCRIPTION:
1322 +* This file provides NAND-related functions for other drivers
1323 + *
1324 +* modification history
1325 +* ----------------------------------------
1326 +* v3.0, 11 Feb 2010, mtk
1327 +* ----------------------------------------
1328 +******************************************************************************/
1329 +#include "nand_def.h"
1330 +#include <linux/slab.h>
1331 +#include <linux/init.h>
1332 +#include <linux/module.h>
1333 +#include <linux/delay.h>
1334 +#include <linux/errno.h>
1335 +#include <linux/sched.h>
1336 +#include <linux/types.h>
1337 +#include <linux/wait.h>
1338 +#include <linux/spinlock.h>
1339 +#include <linux/interrupt.h>
1340 +#include <linux/mtd/mtd.h>
1341 +#include <linux/mtd/nand.h>
1342 +#include <linux/mtd/partitions.h>
1343 +#include <linux/mtd/nand_ecc.h>
1344 +#include <linux/dma-mapping.h>
1345 +#include <linux/jiffies.h>
1346 +#include <linux/platform_device.h>
1347 +#include <linux/proc_fs.h>
1348 +#include <linux/time.h>
1349 +#include <linux/mm.h>
1350 +#include <asm/io.h>
1351 +#include <asm/cacheflush.h>
1352 +#include <asm/uaccess.h>
1353 +#include <linux/miscdevice.h>
1354 +#include "mtk_nand2.h"
1355 +#include "nand_device_list.h"
1356 +
1357 +#include "bmt.h"
1358 +#include "partition.h"
1359 +
1360 +unsigned int CFG_BLOCKSIZE;
1361 +
1362 +static int shift_on_bbt = 0;
1363 +extern void nand_bbt_set(struct mtd_info *mtd, int page, int flag);
1364 +extern int nand_bbt_get(struct mtd_info *mtd, int page);
1365 +int mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page);
1366 +
1367 +static const char * const probe_types[] = { "cmdlinepart", "ofpart", NULL };
1368 +
1369 +#define NAND_CMD_STATUS_MULTI 0x71
1370 +
1371 +void show_stack(struct task_struct *tsk, unsigned long *sp);
1372 +extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
1373 +extern void mt_irq_set_polarity(unsigned int irq,unsigned int polarity);
1374 +
1375 +struct mtk_nand_host mtk_nand_host; /* include mtd_info and nand_chip structs */
1376 +struct mtk_nand_host_hw mt7621_nand_hw = {
1377 + .nfi_bus_width = 8,
1378 + .nfi_access_timing = NFI_DEFAULT_ACCESS_TIMING,
1379 + .nfi_cs_num = NFI_CS_NUM,
1380 + .nand_sec_size = 512,
1381 + .nand_sec_shift = 9,
1382 + .nand_ecc_size = 2048,
1383 + .nand_ecc_bytes = 32,
1384 + .nand_ecc_mode = NAND_ECC_HW,
1385 +};
1386 +
1387 +
1388 +/*******************************************************************************
1389 + * Global Variable Definition
1390 + *******************************************************************************/
1391 +
1392 +#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \
1393 + do { \
1394 + DRV_WriteReg(NFI_CMD_REG16,cmd);\
1395 + while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\
1396 + DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\
1397 + DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\
1398 + DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<<ADDR_ROW_NOB_SHIFT));\
1399 + while (DRV_Reg32(NFI_STA_REG32) & STA_ADDR_STATE);\
1400 + } while (0)
1401 +
1402 +//-------------------------------------------------------------------------------
1403 +static struct NAND_CMD g_kCMD;
1404 +static u32 g_u4ChipVer;
1405 +bool g_bInitDone;
1406 +static bool g_bcmdstatus;
1407 +static u32 g_value = 0;
1408 +static int g_page_size;
1409 +
1410 +BOOL g_bHwEcc = true;
1411 +
1412 +
1413 +static u8 *local_buffer_16_align; // 16 byte aligned buffer, for HW issue
1414 +static u8 local_buffer[4096 + 512];
1415 +
1416 +extern void nand_release_device(struct mtd_info *mtd);
1417 +extern int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state);
1418 +
1419 +#if defined(MTK_NAND_BMT)
1420 +static bmt_struct *g_bmt;
1421 +#endif
1422 +struct mtk_nand_host *host;
1423 +extern struct mtd_partition g_pasStatic_Partition[];
1424 +int part_num = NUM_PARTITIONS;
1425 +int manu_id;
1426 +int dev_id;
1427 +
1428 +/* this constant was taken from linux/nand/nand.h v 3.14
1429 + * in later versions it seems it was removed in order to save a bit of space
1430 + */
1431 +#define NAND_MAX_OOBSIZE 774
1432 +static u8 local_oob_buf[NAND_MAX_OOBSIZE];
1433 +
1434 +static u8 nand_badblock_offset = 0;
1435 +
1436 +void nand_enable_clock(void)
1437 +{
1438 + //enable_clock(MT65XX_PDN_PERI_NFI, "NAND");
1439 +}
1440 +
1441 +void nand_disable_clock(void)
1442 +{
1443 + //disable_clock(MT65XX_PDN_PERI_NFI, "NAND");
1444 +}
1445 +
1446 +struct nand_ecclayout {
1447 + __u32 eccbytes;
1448 + __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
1449 + __u32 oobavail;
1450 + struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
1451 +};
1452 +
1453 +static struct nand_ecclayout *layout;
1454 +
1455 +static struct nand_ecclayout nand_oob_16 = {
1456 + .eccbytes = 8,
1457 + .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
1458 + .oobfree = {{1, 6}, {0, 0}}
1459 +};
1460 +
1461 +struct nand_ecclayout nand_oob_64 = {
1462 + .eccbytes = 32,
1463 + .eccpos = {32, 33, 34, 35, 36, 37, 38, 39,
1464 + 40, 41, 42, 43, 44, 45, 46, 47,
1465 + 48, 49, 50, 51, 52, 53, 54, 55,
1466 + 56, 57, 58, 59, 60, 61, 62, 63},
1467 + .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 6}, {0, 0}}
1468 +};
1469 +
1470 +struct nand_ecclayout nand_oob_128 = {
1471 + .eccbytes = 64,
1472 + .eccpos = {
1473 + 64, 65, 66, 67, 68, 69, 70, 71,
1474 + 72, 73, 74, 75, 76, 77, 78, 79,
1475 + 80, 81, 82, 83, 84, 85, 86, 87,
1476 + 88, 89, 90, 91, 92, 93, 94, 95,
1477 + 96, 97, 98, 99, 100, 101, 102, 103,
1478 + 104, 105, 106, 107, 108, 109, 110, 111,
1479 + 112, 113, 114, 115, 116, 117, 118, 119,
1480 + 120, 121, 122, 123, 124, 125, 126, 127},
1481 + .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 7}, {33, 7}, {41, 7}, {49, 7}, {57, 6}}
1482 +};
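+
+/*
+ * Reading the layouts above (illustrative note): for nand_oob_64 the 32 ECC
+ * bytes occupy OOB offsets 32..63, and the oobfree ranges leave OOB bytes
+ * 1-7, 9-15, 17-23 and 25-30 available for filesystem metadata; OOB byte 0
+ * is kept for the bad-block marker (nand_badblock_offset above).
+ */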
1483 +
1484 +flashdev_info devinfo;
1485 +
1486 +void dump_nfi(void)
1487 +{
1488 +}
1489 +
1490 +void dump_ecc(void)
1491 +{
1492 +}
1493 +
1494 +u32
1495 +nand_virt_to_phys_add(u32 va)
1496 +{
1497 + u32 pageOffset = (va & (PAGE_SIZE - 1));
1498 + pgd_t *pgd;
1499 + pmd_t *pmd;
1500 + pte_t *pte;
1501 + u32 pa;
1502 +
1503 + if (virt_addr_valid(va))
1504 + return __virt_to_phys(va);
1505 +
1506 + if (NULL == current) {
1507 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR ,current is NULL! \n");
1508 + return 0;
1509 + }
1510 +
1511 + if (NULL == current->mm) {
1512 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
1513 + return 0;
1514 + }
1515 +
1516 + pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
1517 + if (pgd_none(*pgd) || pgd_bad(*pgd)) {
1518 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pgd invalid! \n", va);
1519 + return 0;
1520 + }
1521 +
1522 + pmd = pmd_offset((pud_t *)pgd, va);
1523 + if (pmd_none(*pmd) || pmd_bad(*pmd)) {
1524 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pmd invalid! \n", va);
1525 + return 0;
1526 + }
1527 +
1528 + pte = pte_offset_map(pmd, va);
1529 + if (pte_present(*pte)) {
1530 + pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset;
1531 + return pa;
1532 + }
1533 +
1534 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR va=0x%x, pte invalid! \n", va);
1535 + return 0;
1536 +}
1537 +EXPORT_SYMBOL(nand_virt_to_phys_add);
1538 +
1539 +bool
1540 +get_device_info(u16 id, u32 ext_id, flashdev_info * pdevinfo)
1541 +{
1542 + u32 index;
1543 + for (index = 0; gen_FlashTable[index].id != 0; index++) {
1544 + if (id == gen_FlashTable[index].id && ext_id == gen_FlashTable[index].ext_id) {
1545 + pdevinfo->id = gen_FlashTable[index].id;
1546 + pdevinfo->ext_id = gen_FlashTable[index].ext_id;
1547 + pdevinfo->blocksize = gen_FlashTable[index].blocksize;
1548 + pdevinfo->addr_cycle = gen_FlashTable[index].addr_cycle;
1549 + pdevinfo->iowidth = gen_FlashTable[index].iowidth;
1550 + pdevinfo->timmingsetting = gen_FlashTable[index].timmingsetting;
1551 + pdevinfo->advancedmode = gen_FlashTable[index].advancedmode;
1552 + pdevinfo->pagesize = gen_FlashTable[index].pagesize;
1553 + pdevinfo->sparesize = gen_FlashTable[index].sparesize;
1554 + pdevinfo->totalsize = gen_FlashTable[index].totalsize;
1555 + memcpy(pdevinfo->devciename, gen_FlashTable[index].devciename, sizeof(pdevinfo->devciename));
1556 + printk(KERN_INFO "Device found in MTK table, ID: %x, EXT_ID: %x\n", id, ext_id);
1557 +
1558 + goto find;
1559 + }
1560 + }
1561 +
1562 +find:
1563 + if (0 == pdevinfo->id) {
1564 + printk(KERN_INFO "Device not found, ID: %x\n", id);
1565 + return false;
1566 + } else {
1567 + return true;
1568 + }
1569 +}
1570 +
1571 +static void
1572 +ECC_Config(struct mtk_nand_host_hw *hw,u32 ecc_bit)
1573 +{
1574 + u32 u4ENCODESize;
1575 + u32 u4DECODESize;
1576 + u32 ecc_bit_cfg = ECC_CNFG_ECC4;
1577 +
1578 + switch(ecc_bit){
1579 + case 4:
1580 + ecc_bit_cfg = ECC_CNFG_ECC4;
1581 + break;
1582 + case 8:
1583 + ecc_bit_cfg = ECC_CNFG_ECC8;
1584 + break;
1585 + case 10:
1586 + ecc_bit_cfg = ECC_CNFG_ECC10;
1587 + break;
1588 + case 12:
1589 + ecc_bit_cfg = ECC_CNFG_ECC12;
1590 + break;
1591 + default:
1592 + break;
1593 + }
1594 + DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
1595 + do {
1596 + } while (!DRV_Reg16(ECC_DECIDLE_REG16));
1597 +
1598 + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
1599 + do {
1600 + } while (!DRV_Reg32(ECC_ENCIDLE_REG32));
1601 +
1602 + /* setup FDM register base */
1603 + DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32);
1604 +
1605 + /* Sector + FDM */
1606 + u4ENCODESize = (hw->nand_sec_size + 8) << 3;
1607 + /* Sector + FDM + YAFFS2 meta data bits */
1608 + u4DECODESize = ((hw->nand_sec_size + 8) << 3) + ecc_bit * 13;
1609 +
1610 + /* configure ECC decoder && encoder */
1611 + DRV_WriteReg32(ECC_DECCNFG_REG32, ecc_bit_cfg | DEC_CNFG_NFI | DEC_CNFG_EMPTY_EN | (u4DECODESize << DEC_CNFG_CODE_SHIFT));
1612 +
1613 + DRV_WriteReg32(ECC_ENCCNFG_REG32, ecc_bit_cfg | ENC_CNFG_NFI | (u4ENCODESize << ENC_CNFG_MSG_SHIFT));
1614 + NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL);
1615 +}
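+
+/*
+ * Worked example (illustrative): with the mt7621 defaults above
+ * (nand_sec_size = 512) and 4-bit ECC, the encoder message size is
+ * (512 + 8) * 8 = 4160 bits and the decoder code size is
+ * 4160 + 4 * 13 = 4212 bits; these are the two shifted values written to
+ * ECC_ENCCNFG_REG32 and ECC_DECCNFG_REG32.
+ */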
1616 +
1617 +static void
1618 +ECC_Decode_Start(void)
1619 +{
1620 + while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
1621 + ;
1622 + DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN);
1623 +}
1624 +
1625 +static void
1626 +ECC_Decode_End(void)
1627 +{
1628 + while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
1629 + ;
1630 + DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
1631 +}
1632 +
1633 +static void
1634 +ECC_Encode_Start(void)
1635 +{
1636 + while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE))
1637 + ;
1638 + mb();
1639 + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN);
1640 +}
1641 +
1642 +static void
1643 +ECC_Encode_End(void)
1644 +{
1645 + /* wait for the ECC encoder to return to idle */
1646 + while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ;
1647 + mb();
1648 + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
1649 +}
1650 +
1651 +static bool
1652 +mtk_nand_check_bch_error(struct mtd_info *mtd, u8 * pDataBuf, u32 u4SecIndex, u32 u4PageAddr)
1653 +{
1654 + bool bRet = true;
1655 + u16 u2SectorDoneMask = 1 << u4SecIndex;
1656 + u32 u4ErrorNumDebug, i, u4ErrNum;
1657 + u32 timeout = 0xFFFF;
1658 + // int el;
1659 + u32 au4ErrBitLoc[6];
1660 + u32 u4ErrByteLoc, u4BitOffset;
1661 + u32 u4ErrBitLoc1th, u4ErrBitLoc2nd;
1662 +
1663 + //4 // Wait for Decode Done
1664 + while (0 == (u2SectorDoneMask & DRV_Reg16(ECC_DECDONE_REG16))) {
1665 + timeout--;
1666 + if (0 == timeout)
1667 + return false;
1668 + }
1669 + /* We will manually correct the error bits in the last sector, not all the sectors of the page! */
1670 + memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc));
1671 + u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM_REG32);
1672 + u4ErrNum = DRV_Reg32(ECC_DECENUM_REG32) >> (u4SecIndex << 2);
1673 + u4ErrNum &= 0xF;
1674 +
1675 + if (u4ErrNum) {
1676 + if (0xF == u4ErrNum) {
1677 + mtd->ecc_stats.failed++;
1678 + bRet = false;
1679 + printk(KERN_ERR"mtk_nand: UnCorrectable at PageAddr=%d\n", u4PageAddr);
1680 + } else {
1681 + for (i = 0; i < ((u4ErrNum + 1) >> 1); ++i) {
1682 + au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i);
1683 + u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x1FFF;
1684 + if (u4ErrBitLoc1th < 0x1000) {
1685 + u4ErrByteLoc = u4ErrBitLoc1th / 8;
1686 + u4BitOffset = u4ErrBitLoc1th % 8;
1687 + pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
1688 + mtd->ecc_stats.corrected++;
1689 + } else {
1690 + mtd->ecc_stats.failed++;
1691 + }
1692 + u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x1FFF;
1693 + if (0 != u4ErrBitLoc2nd) {
1694 + if (u4ErrBitLoc2nd < 0x1000) {
1695 + u4ErrByteLoc = u4ErrBitLoc2nd / 8;
1696 + u4BitOffset = u4ErrBitLoc2nd % 8;
1697 + pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
1698 + mtd->ecc_stats.corrected++;
1699 + } else {
1700 + mtd->ecc_stats.failed++;
1701 + //printk(KERN_ERR"UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]);
1702 + }
1703 + }
1704 + }
1705 + }
1706 + if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex)))
1707 + bRet = false;
1708 + }
1709 + return bRet;
1710 +}
1711 +
1712 +static bool
1713 +mtk_nand_RFIFOValidSize(u16 u2Size)
1714 +{
1715 + u32 timeout = 0xFFFF;
1716 + while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size) {
1717 + timeout--;
1718 + if (0 == timeout)
1719 + return false;
1720 + }
1721 + return true;
1722 +}
1723 +
1724 +static bool
1725 +mtk_nand_WFIFOValidSize(u16 u2Size)
1726 +{
1727 + u32 timeout = 0xFFFF;
1728 +
1729 + while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size) {
1730 + timeout--;
1731 + if (0 == timeout)
1732 + return false;
1733 + }
1734 + return true;
1735 +}
1736 +
1737 +static bool
1738 +mtk_nand_status_ready(u32 u4Status)
1739 +{
1740 + u32 timeout = 0xFFFF;
1741 +
1742 + while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0) {
1743 + timeout--;
1744 + if (0 == timeout)
1745 + return false;
1746 + }
1747 + return true;
1748 +}
1749 +
1750 +static bool
1751 +mtk_nand_reset(void)
1752 +{
1753 + int timeout = 0xFFFF;
1754 + if (DRV_Reg16(NFI_MASTERSTA_REG16)) {
1755 + mb();
1756 + DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
1757 + while (DRV_Reg16(NFI_MASTERSTA_REG16)) {
1758 + timeout--;
1759 + if (!timeout)
1760 + MSG(INIT, "Wait for NFI_MASTERSTA timeout\n");
1761 + }
1762 + }
1763 + /* issue reset operation */
1764 + mb();
1765 + DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
1766 +
1767 + return mtk_nand_status_ready(STA_NFI_FSM_MASK | STA_NAND_BUSY) && mtk_nand_RFIFOValidSize(0) && mtk_nand_WFIFOValidSize(0);
1768 +}
1769 +
1770 +static void
1771 +mtk_nand_set_mode(u16 u2OpMode)
1772 +{
1773 + u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16);
1774 + u2Mode &= ~CNFG_OP_MODE_MASK;
1775 + u2Mode |= u2OpMode;
1776 + DRV_WriteReg16(NFI_CNFG_REG16, u2Mode);
1777 +}
1778 +
1779 +static void
1780 +mtk_nand_set_autoformat(bool bEnable)
1781 +{
1782 + if (bEnable)
1783 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
1784 + else
1785 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
1786 +}
1787 +
1788 +static void
1789 +mtk_nand_configure_fdm(u16 u2FDMSize)
1790 +{
1791 + NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK);
1792 + NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT);
1793 + NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT);
1794 +}
1795 +
1796 +static void
1797 +mtk_nand_configure_lock(void)
1798 +{
1799 + u32 u4WriteColNOB = 2;
1800 + u32 u4WriteRowNOB = 3;
1801 + u32 u4EraseColNOB = 0;
1802 + u32 u4EraseRowNOB = 3;
1803 + DRV_WriteReg16(NFI_LOCKANOB_REG16,
1804 + (u4WriteColNOB << PROG_CADD_NOB_SHIFT) | (u4WriteRowNOB << PROG_RADD_NOB_SHIFT) | (u4EraseColNOB << ERASE_CADD_NOB_SHIFT) | (u4EraseRowNOB << ERASE_RADD_NOB_SHIFT));
1805 +
1806 + if (CHIPVER_ECO_1 == g_u4ChipVer) {
1807 + int i;
1808 + for (i = 0; i < 16; ++i) {
1809 + DRV_WriteReg32(NFI_LOCK00ADD_REG32 + (i << 1), 0xFFFFFFFF);
1810 + DRV_WriteReg32(NFI_LOCK00FMT_REG32 + (i << 1), 0xFFFFFFFF);
1811 + }
1812 + //DRV_WriteReg16(NFI_LOCKANOB_REG16, 0x0);
1813 + DRV_WriteReg32(NFI_LOCKCON_REG32, 0xFFFFFFFF);
1814 + DRV_WriteReg16(NFI_LOCK_REG16, NFI_LOCK_ON);
1815 + }
1816 +}
1817 +
1818 +static bool
1819 +mtk_nand_pio_ready(void)
1820 +{
1821 + int count = 0;
1822 + while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)) {
1823 + count++;
1824 + if (count > 0xffff) {
1825 + printk("PIO_DIRDY timeout\n");
1826 + return false;
1827 + }
1828 + }
1829 +
1830 + return true;
1831 +}
1832 +
1833 +static bool
1834 +mtk_nand_set_command(u16 command)
1835 +{
1836 + mb();
1837 + DRV_WriteReg16(NFI_CMD_REG16, command);
1838 + return mtk_nand_status_ready(STA_CMD_STATE);
1839 +}
1840 +
1841 +static bool
1842 +mtk_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB)
1843 +{
1844 + mb();
1845 + DRV_WriteReg32(NFI_COLADDR_REG32, u4ColAddr);
1846 + DRV_WriteReg32(NFI_ROWADDR_REG32, u4RowAddr);
1847 + DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT));
1848 + return mtk_nand_status_ready(STA_ADDR_STATE);
1849 +}
1850 +
1851 +static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
1852 +{
1853 + if (ctrl & NAND_ALE) {
1854 + mtk_nand_set_address(dat, 0, 1, 0);
1855 + } else if (ctrl & NAND_CLE) {
1856 + mtk_nand_reset();
1857 + mtk_nand_set_mode(0x6000);
1858 + mtk_nand_set_command(dat);
1859 + }
1860 +}
1861 +
1862 +static bool
1863 +mtk_nand_check_RW_count(u16 u2WriteSize)
1864 +{
1865 + u32 timeout = 0xFFFF;
1866 + u16 u2SecNum = u2WriteSize >> 9;
1867 +
1868 + while (ADDRCNTR_CNTR(DRV_Reg16(NFI_ADDRCNTR_REG16)) < u2SecNum) {
1869 + timeout--;
1870 + if (0 == timeout) {
1871 + printk(KERN_INFO "[%s] timeout\n", __FUNCTION__);
1872 + return false;
1873 + }
1874 + }
1875 + return true;
1876 +}
1877 +
1878 +static bool
1879 +mtk_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, bool full, u8 * buf)
1880 +{
1881 + /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
1882 + bool bRet = false;
1883 + u16 sec_num = 1 << (nand->page_shift - 9);
1884 + u32 col_addr = u4ColAddr;
1885 + u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
1886 + if (nand->options & NAND_BUSWIDTH_16)
1887 + col_addr /= 2;
1888 +
1889 + if (!mtk_nand_reset())
1890 + goto cleanup;
1891 + if (g_bHwEcc) {
1892 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1893 + } else {
1894 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1895 + }
1896 +
1897 + mtk_nand_set_mode(CNFG_OP_READ);
1898 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
1899 + DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
1900 +
1901 + if (full) {
1902 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1903 +
1904 + if (g_bHwEcc)
1905 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1906 + else
1907 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1908 + } else {
1909 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1910 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1911 + }
1912 +
1913 + mtk_nand_set_autoformat(full);
1914 + if (full)
1915 + if (g_bHwEcc)
1916 + ECC_Decode_Start();
1917 + if (!mtk_nand_set_command(NAND_CMD_READ0))
1918 + goto cleanup;
1919 + if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
1920 + goto cleanup;
1921 + if (!mtk_nand_set_command(NAND_CMD_READSTART))
1922 + goto cleanup;
1923 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
1924 + goto cleanup;
1925 +
1926 + bRet = true;
1927 +
1928 +cleanup:
1929 + return bRet;
1930 +}
1931 +
1932 +static bool
1933 +mtk_nand_ready_for_write(struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 * buf)
1934 +{
1935 + bool bRet = false;
1936 + u32 sec_num = 1 << (nand->page_shift - 9);
1937 + u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
1938 + if (nand->options & NAND_BUSWIDTH_16)
1939 + col_addr /= 2;
1940 +
1941 + /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
1942 + if (!mtk_nand_reset())
1943 + return false;
1944 +
1945 + mtk_nand_set_mode(CNFG_OP_PRGM);
1946 +
1947 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
1948 +
1949 + DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
1950 +
1951 + if (full) {
1952 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1953 + if (g_bHwEcc)
1954 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1955 + else
1956 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1957 + } else {
1958 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1959 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1960 + }
1961 +
1962 + mtk_nand_set_autoformat(full);
1963 +
1964 + if (full)
1965 + if (g_bHwEcc)
1966 + ECC_Encode_Start();
1967 +
1968 + if (!mtk_nand_set_command(NAND_CMD_SEQIN))
1969 + goto cleanup;
1970 + // FIXME: handle any kind of address cycle
1971 + if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
1972 + goto cleanup;
1973 +
1974 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
1975 + goto cleanup;
1976 +
1977 + bRet = true;
1978 +
1979 +cleanup:
1980 + return bRet;
1981 +}
1982 +
1983 +static bool
1984 +mtk_nand_check_dececc_done(u32 u4SecNum)
1985 +{
1986 + u32 timeout, dec_mask;
1987 +
1988 + timeout = 0xffff;
1989 + dec_mask = (1 << u4SecNum) - 1;
1990 + while ((dec_mask != DRV_Reg(ECC_DECDONE_REG16)) && timeout > 0)
1991 + timeout--;
1992 + if (timeout == 0) {
1993 + MSG(VERIFY, "ECC_DECDONE: timeout\n");
1994 + return false;
1995 + }
1996 + return true;
1997 +}
1998 +
1999 +static bool
2000 +mtk_nand_mcu_read_data(u8 * buf, u32 length)
2001 +{
2002 + int timeout = 0xffff;
2003 + u32 i;
2004 + u32 *buf32 = (u32 *) buf;
2005 + if ((u32) buf % 4 || length % 4)
2006 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2007 + else
2008 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2009 +
2010 + //DRV_WriteReg32(NFI_STRADDR_REG32, 0);
2011 + mb();
2012 + NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BRD);
2013 +
2014 + if ((u32) buf % 4 || length % 4) {
2015 + for (i = 0; (i < (length)) && (timeout > 0);) {
2016 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2017 + *buf++ = (u8) DRV_Reg32(NFI_DATAR_REG32);
2018 + i++;
2019 + } else {
2020 + timeout--;
2021 + }
2022 + if (0 == timeout) {
2023 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2024 + dump_nfi();
2025 + return false;
2026 + }
2027 + }
2028 + } else {
2029 + for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
2030 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2031 + *buf32++ = DRV_Reg32(NFI_DATAR_REG32);
2032 + i++;
2033 + } else {
2034 + timeout--;
2035 + }
2036 + if (0 == timeout) {
2037 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2038 + dump_nfi();
2039 + return false;
2040 + }
2041 + }
2042 + }
2043 + return true;
2044 +}
2045 +
2046 +static bool
2047 +mtk_nand_read_page_data(struct mtd_info *mtd, u8 * pDataBuf, u32 u4Size)
2048 +{
2049 + return mtk_nand_mcu_read_data(pDataBuf, u4Size);
2050 +}
2051 +
2052 +static bool
2053 +mtk_nand_mcu_write_data(struct mtd_info *mtd, const u8 * buf, u32 length)
2054 +{
2055 + u32 timeout = 0xFFFF;
2056 + u32 i;
2057 + u32 *pBuf32;
2058 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2059 + mb();
2060 + NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BWR);
2061 + pBuf32 = (u32 *) buf;
2062 +
2063 + if ((u32) buf % 4 || length % 4)
2064 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2065 + else
2066 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2067 +
2068 + if ((u32) buf % 4 || length % 4) {
2069 + for (i = 0; (i < (length)) && (timeout > 0);) {
2070 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2071 + DRV_WriteReg32(NFI_DATAW_REG32, *buf++);
2072 + i++;
2073 + } else {
2074 + timeout--;
2075 + }
2076 + if (0 == timeout) {
2077 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2078 + dump_nfi();
2079 + return false;
2080 + }
2081 + }
2082 + } else {
2083 + for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
2084 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2085 + DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++);
2086 + i++;
2087 + } else {
2088 + timeout--;
2089 + }
2090 + if (0 == timeout) {
2091 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2092 + dump_nfi();
2093 + return false;
2094 + }
2095 + }
2096 + }
2097 +
2098 + return true;
2099 +}
2100 +
2101 +static bool
2102 +mtk_nand_write_page_data(struct mtd_info *mtd, u8 * buf, u32 size)
2103 +{
2104 + return mtk_nand_mcu_write_data(mtd, buf, size);
2105 +}
2106 +
2107 +static void
2108 +mtk_nand_read_fdm_data(u8 * pDataBuf, u32 u4SecNum)
2109 +{
2110 + u32 i;
2111 + u32 *pBuf32 = (u32 *) pDataBuf;
2112 +
2113 + if (pBuf32) {
2114 + for (i = 0; i < u4SecNum; ++i) {
2115 + *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1));
2116 + *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1));
2117 + }
2118 + }
2119 +}
2120 +
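+/*
+ * Copy the per-sector FDM (OOB) bytes into the NFI FDM registers before a
+ * program operation.  A checksum over the free OOB regions is appended
+ * right after the last free region unless the OOB is entirely 0xFF.
+ */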
2121 +static u8 fdm_buf[64];
2122 +static void
2123 +mtk_nand_write_fdm_data(struct nand_chip *chip, u8 * pDataBuf, u32 u4SecNum)
2124 +{
2125 + u32 i, j;
2126 + u8 checksum = 0;
2127 + bool empty = true;
2128 + struct nand_oobfree *free_entry;
2129 + u32 *pBuf32;
2130 +
2131 + memcpy(fdm_buf, pDataBuf, u4SecNum * 8);
2132 +
2133 + free_entry = layout->oobfree;
2134 + for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++) {
2135 + for (j = 0; j < free_entry[i].length; j++) {
2136 + if (pDataBuf[free_entry[i].offset + j] != 0xFF)
2137 + empty = false;
2138 + checksum ^= pDataBuf[free_entry[i].offset + j];
2139 + }
2140 + }
2141 +
2142 + if (!empty) {
2143 + fdm_buf[free_entry[i - 1].offset + free_entry[i - 1].length] = checksum;
2144 + }
2145 +
2146 + pBuf32 = (u32 *) fdm_buf;
2147 + for (i = 0; i < u4SecNum; ++i) {
2148 + DRV_WriteReg32(NFI_FDM0L_REG32 + (i << 1), *pBuf32++);
2149 + DRV_WriteReg32(NFI_FDM0M_REG32 + (i << 1), *pBuf32++);
2150 + }
2151 +}
2152 +
2153 +static void
2154 +mtk_nand_stop_read(void)
2155 +{
2156 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
2157 + mtk_nand_reset();
2158 + if (g_bHwEcc)
2159 + ECC_Decode_End();
2160 + DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
2161 +}
2162 +
2163 +static void
2164 +mtk_nand_stop_write(void)
2165 +{
2166 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
2167 + if (g_bHwEcc)
2168 + ECC_Encode_End();
2169 + DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
2170 +}
2171 +
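+/*
+ * Read one full page: set the controller up for a PIO read, pull the data
+ * in 512-byte sectors (checking and correcting each sector with the HW BCH
+ * engine when enabled), then fetch the FDM bytes from the NFI registers.
+ */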
2172 +bool
2173 +mtk_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
2174 +{
2175 + u8 *buf;
2176 + bool bRet = true;
2177 + struct nand_chip *nand = mtd->priv;
2178 + u32 u4SecNum = u4PageSize >> 9;
2179 +
2180 + if (((u32) pPageBuf % 16) && local_buffer_16_align)
2181 + buf = local_buffer_16_align;
2182 + else
2183 + buf = pPageBuf;
2184 + if (mtk_nand_ready_for_read(nand, u4RowAddr, 0, true, buf)) {
2185 + int j;
2186 + for (j = 0 ; j < u4SecNum; j++) {
2187 + if (!mtk_nand_read_page_data(mtd, buf+j*512, 512))
2188 + bRet = false;
2189 + if(g_bHwEcc && !mtk_nand_check_dececc_done(j+1))
2190 + bRet = false;
2191 + if(g_bHwEcc && !mtk_nand_check_bch_error(mtd, buf+j*512, j, u4RowAddr))
2192 + bRet = false;
2193 + }
2194 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
2195 + bRet = false;
2196 +
2197 + mtk_nand_read_fdm_data(pFDMBuf, u4SecNum);
2198 + mtk_nand_stop_read();
2199 + }
2200 +
2201 + if (buf == local_buffer_16_align)
2202 + memcpy(pPageBuf, buf, u4PageSize);
2203 +
2204 + return bRet;
2205 +}
2206 +
2207 +int
2208 +mtk_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
2209 +{
2210 + struct nand_chip *chip = mtd->priv;
2211 + u32 u4SecNum = u4PageSize >> 9;
2212 + u8 *buf;
2213 + u8 status;
2214 +
2215 + MSG(WRITE, "mtk_nand_exec_write_page, page: 0x%x\n", u4RowAddr);
2216 +
2217 + if (((u32) pPageBuf % 16) && local_buffer_16_align) {
2218 + printk(KERN_INFO "Data buffer not 16 bytes aligned: %p\n", pPageBuf);
2219 + memcpy(local_buffer_16_align, pPageBuf, mtd->writesize);
2220 + buf = local_buffer_16_align;
2221 + } else
2222 + buf = pPageBuf;
2223 +
2224 + if (mtk_nand_ready_for_write(chip, u4RowAddr, 0, true, buf)) {
2225 + mtk_nand_write_fdm_data(chip, pFDMBuf, u4SecNum);
2226 + (void)mtk_nand_write_page_data(mtd, buf, u4PageSize);
2227 + (void)mtk_nand_check_RW_count(u4PageSize);
2228 + mtk_nand_stop_write();
2229 + (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
2230 + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ;
2231 + }
2232 +
2233 + status = chip->waitfunc(mtd, chip);
2234 + if (status & NAND_STATUS_FAIL)
2235 + return -EIO;
2236 + return 0;
2237 +}
2238 +
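+/*
+ * Work out the first and last physical block of the static partition that
+ * contains 'block'.  The implicit entry after the table (i == part_num)
+ * covers whatever is left of the chip.
+ */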
2239 +static int
2240 +get_start_end_block(struct mtd_info *mtd, int block, int *start_blk, int *end_blk)
2241 +{
2242 + struct nand_chip *chip = mtd->priv;
2243 + int i;
2244 +
2245 + *start_blk = 0;
2246 + for (i = 0; i <= part_num; i++)
2247 + {
2248 + if (i == part_num)
2249 + {
2250 + // try the last remaining partition
2251 + *end_blk = (chip->chipsize >> chip->phys_erase_shift) - 1;
2252 + if (*start_blk <= *end_blk)
2253 + {
2254 + if ((block >= *start_blk) && (block <= *end_blk))
2255 + break;
2256 + }
2257 + }
2258 + // skip the full-size (ALL) partition entry
2259 + else if (g_pasStatic_Partition[i].size == MTDPART_SIZ_FULL)
2260 + {
2261 + continue;
2262 + }
2263 + *end_blk = *start_blk + (g_pasStatic_Partition[i].size >> chip->phys_erase_shift) - 1;
2264 + if ((block >= *start_blk) && (block <= *end_blk))
2265 + break;
2266 + *start_blk = *end_blk + 1;
2267 + }
2268 + if (*start_blk > *end_blk)
2269 + {
2270 + return -1;
2271 + }
2272 + return 0;
2273 +}
2274 +
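+/*
+ * Map a logical block to the n-th good block of its partition by skipping
+ * blocks marked bad in the in-memory BBT.  If the partition runs out of
+ * good blocks, the mapping falls back to bad blocks at the partition tail.
+ */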
2275 +static int
2276 +block_remap(struct mtd_info *mtd, int block)
2277 +{
2278 + struct nand_chip *chip = mtd->priv;
2279 + int start_blk, end_blk;
2280 + int j, block_offset;
2281 + int bad_block = 0;
2282 +
2283 + if (chip->bbt == NULL) {
2284 + printk("ERROR!! no bbt table for block_remap\n");
2285 + return -1;
2286 + }
2287 +
2288 + if (get_start_end_block(mtd, block, &start_blk, &end_blk) < 0) {
2289 + printk("ERROR!! can not find start_blk and end_blk\n");
2290 + return -1;
2291 + }
2292 +
2293 + block_offset = block - start_blk;
2294 + for (j = start_blk; j <= end_blk;j++) {
2295 + if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) == 0x0) {
2296 + if (!block_offset)
2297 + break;
2298 + block_offset--;
2299 + } else {
2300 + bad_block++;
2301 + }
2302 + }
2303 + if (j <= end_blk) {
2304 + return j;
2305 + } else {
2306 + // no good block left; fall back to a bad block at the end of the partition
2307 + for (j = end_blk; bad_block > 0; j--)
2308 + {
2309 + if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) != 0x0)
2310 + {
2311 + bad_block--;
2312 + if (bad_block <= block_offset)
2313 + return j;
2314 + }
2315 + }
2316 + }
2317 +
2318 + printk("Error!! block_remap error\n");
2319 + return -1;
2320 +}
2321 +
2322 +int
2323 +check_block_remap(struct mtd_info *mtd, int block)
2324 +{
2325 + if (shift_on_bbt)
2326 + return block_remap(mtd, block);
2327 + else
2328 + return block;
2329 +}
2330 +EXPORT_SYMBOL(check_block_remap);
2331 +
2332 +
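+/*
+ * Called after a page write failed: pick the next good block in the same
+ * partition, erase it, copy over the pages already written to the failing
+ * block plus the page that just failed, and mark the old block bad.  The
+ * routine recurses if the replacement block fails as well.
+ */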
2333 +static int
2334 +write_next_on_fail(struct mtd_info *mtd, char *write_buf, int page, int * to_blk)
2335 +{
2336 + struct nand_chip *chip = mtd->priv;
2337 + int i, j, to_page = 0, first_page;
2338 + char *buf, *oob;
2339 + int start_blk = 0, end_blk;
2340 + int mapped_block;
2341 + int page_per_block_bit = chip->phys_erase_shift - chip->page_shift;
2342 + int block = page >> page_per_block_bit;
2343 +
2344 + // find next available block in the same MTD partition
2345 + mapped_block = block_remap(mtd, block);
2346 + if (mapped_block == -1)
2347 + return NAND_STATUS_FAIL;
2348 +
2349 + get_start_end_block(mtd, block, &start_blk, &end_blk);
2350 +
2351 + buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL | GFP_DMA);
2352 + if (buf == NULL)
2353 + return -1;
2354 +
2355 + oob = buf + mtd->writesize;
2356 + for ((*to_blk) = block + 1; (*to_blk) <= end_blk ; (*to_blk)++) {
2357 + if (nand_bbt_get(mtd, (*to_blk) << page_per_block_bit) == 0) {
2358 + int status;
2359 + status = mtk_nand_erase_hw(mtd, (*to_blk) << page_per_block_bit);
2360 + if (status & NAND_STATUS_FAIL) {
2361 + mtk_nand_block_markbad_hw(mtd, (*to_blk) << chip->phys_erase_shift);
2362 + nand_bbt_set(mtd, (*to_blk) << page_per_block_bit, 0x3);
2363 + } else {
2364 + /* good block */
2365 + to_page = (*to_blk) << page_per_block_bit;
2366 + break;
2367 + }
2368 + }
2369 + }
2370 +
2371 + if (!to_page) {
2372 + kfree(buf);
2373 + return -1;
2374 + }
2375 +
2376 + first_page = (page >> page_per_block_bit) << page_per_block_bit;
2377 + for (i = 0; i < (1 << page_per_block_bit); i++) {
2378 + if ((first_page + i) != page) {
2379 + mtk_nand_read_oob_hw(mtd, chip, (first_page+i));
2380 + for (j = 0; j < mtd->oobsize; j++)
2381 + if (chip->oob_poi[j] != (unsigned char)0xff)
2382 + break;
2383 + if (j < mtd->oobsize) {
2384 + mtk_nand_exec_read_page(mtd, (first_page+i), mtd->writesize, buf, oob);
2385 + memset(oob, 0xff, mtd->oobsize);
2386 + if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)buf, oob) != 0) {
2387 + int ret, new_blk = 0;
2388 + nand_bbt_set(mtd, to_page, 0x3);
2389 + ret = write_next_on_fail(mtd, buf, to_page + i, &new_blk);
2390 + if (ret) {
2391 + kfree(buf);
2392 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2393 + return ret;
2394 + }
2395 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2396 + *to_blk = new_blk;
2397 + to_page = ((*to_blk) << page_per_block_bit);
2398 + }
2399 + }
2400 + } else {
2401 + memset(chip->oob_poi, 0xff, mtd->oobsize);
2402 + if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)write_buf, chip->oob_poi) != 0) {
2403 + int ret, new_blk = 0;
2404 + nand_bbt_set(mtd, to_page, 0x3);
2405 + ret = write_next_on_fail(mtd, write_buf, to_page + i, &new_blk);
2406 + if (ret) {
2407 + kfree(buf);
2408 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2409 + return ret;
2410 + }
2411 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2412 + *to_blk = new_blk;
2413 + to_page = ((*to_blk) << page_per_block_bit);
2414 + }
2415 + }
2416 + }
2417 +
2418 + kfree(buf);
2419 +
2420 + return 0;
2421 +}
2422 +
2423 +static int
2424 +mtk_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset,
2425 + int data_len, const u8 * buf, int oob_required, int page, int cached, int raw)
2426 +{
2427 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2428 + int block = page / page_per_block;
2429 + u16 page_in_block = page % page_per_block;
2430 + int mapped_block = block;
2431 +
2432 +#if defined(MTK_NAND_BMT)
2433 + mapped_block = get_mapping_block_index(block);
2434 + // write bad index into oob
2435 + if (mapped_block != block)
2436 + set_bad_index_to_oob(chip->oob_poi, block);
2437 + else
2438 + set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
2439 +#else
2440 + if (shift_on_bbt) {
2441 + mapped_block = block_remap(mtd, block);
2442 + if (mapped_block == -1)
2443 + return NAND_STATUS_FAIL;
2444 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2445 + return NAND_STATUS_FAIL;
2446 + }
2447 +#endif
2448 + do {
2449 + if (mtk_nand_exec_write_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *)buf, chip->oob_poi)) {
2450 + MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
2451 +#if defined(MTK_NAND_BMT)
2452 + if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) {
2453 + MSG(INIT, "Update BMT success\n");
2454 + return 0;
2455 + } else {
2456 + MSG(INIT, "Update BMT fail\n");
2457 + return -EIO;
2458 + }
2459 +#else
2460 + {
2461 + int new_blk;
2462 + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
2463 + if (write_next_on_fail(mtd, (char *)buf, page_in_block + mapped_block * page_per_block, &new_blk) != 0)
2464 + {
2465 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2466 + return NAND_STATUS_FAIL;
2467 + }
2468 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2469 + break;
2470 + }
2471 +#endif
2472 + } else
2473 + break;
2474 + } while(1);
2475 +
2476 + return 0;
2477 +}
2478 +
2479 +static void
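+/*
+ * Minimal cmdfunc: read/program commands only record the row/column address
+ * in g_kCMD and are executed later by the read_buf/write_buf/PAGEPROG paths,
+ * while erase, status, reset and READID are issued to the NFI directly.
+ */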
2480 +mtk_nand_command_bp(struct mtd_info *mtd, unsigned int command, int column, int page_addr)
2481 +{
2482 + struct nand_chip *nand = mtd->priv;
2483 +
2484 + switch (command) {
2485 + case NAND_CMD_SEQIN:
2486 + memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB));
2487 + g_kCMD.pDataBuf = NULL;
2488 + g_kCMD.u4RowAddr = page_addr;
2489 + g_kCMD.u4ColAddr = column;
2490 + break;
2491 +
2492 + case NAND_CMD_PAGEPROG:
2493 + if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[nand_badblock_offset])) {
2494 + u8 *pDataBuf = g_kCMD.pDataBuf ? g_kCMD.pDataBuf : nand->buffers->databuf;
2495 + mtk_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf, g_kCMD.au1OOB);
2496 + g_kCMD.u4RowAddr = (u32) - 1;
2497 + g_kCMD.u4OOBRowAddr = (u32) - 1;
2498 + }
2499 + break;
2500 +
2501 + case NAND_CMD_READOOB:
2502 + g_kCMD.u4RowAddr = page_addr;
2503 + g_kCMD.u4ColAddr = column + mtd->writesize;
2504 + break;
2505 +
2506 + case NAND_CMD_READ0:
2507 + g_kCMD.u4RowAddr = page_addr;
2508 + g_kCMD.u4ColAddr = column;
2509 + break;
2510 +
2511 + case NAND_CMD_ERASE1:
2512 + nand->state=FL_ERASING;
2513 + (void)mtk_nand_reset();
2514 + mtk_nand_set_mode(CNFG_OP_ERASE);
2515 + (void)mtk_nand_set_command(NAND_CMD_ERASE1);
2516 + (void)mtk_nand_set_address(0, page_addr, 0, devinfo.addr_cycle - 2);
2517 + break;
2518 +
2519 + case NAND_CMD_ERASE2:
2520 + (void)mtk_nand_set_command(NAND_CMD_ERASE2);
2521 + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
2522 + ;
2523 + break;
2524 +
2525 + case NAND_CMD_STATUS:
2526 + (void)mtk_nand_reset();
2527 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2528 + mtk_nand_set_mode(CNFG_OP_SRD);
2529 + mtk_nand_set_mode(CNFG_READ_EN);
2530 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2531 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2532 + (void)mtk_nand_set_command(NAND_CMD_STATUS);
2533 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
2534 + mb();
2535 + DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT));
2536 + g_bcmdstatus = true;
2537 + break;
2538 +
2539 + case NAND_CMD_RESET:
2540 + (void)mtk_nand_reset();
2541 + DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_RST_DONE_EN);
2542 + (void)mtk_nand_set_command(NAND_CMD_RESET);
2543 + DRV_WriteReg16(NFI_BASE+0x44, 0xF1);
2544 + while(!(DRV_Reg16(NFI_INTR_REG16)&INTR_RST_DONE_EN))
2545 + ;
2546 + break;
2547 +
2548 + case NAND_CMD_READID:
2549 + mtk_nand_reset();
2550 + /* Disable HW ECC */
2551 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2552 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2553 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN | CNFG_BYTE_RW);
2554 + (void)mtk_nand_reset();
2555 + mb();
2556 + mtk_nand_set_mode(CNFG_OP_SRD);
2557 + (void)mtk_nand_set_command(NAND_CMD_READID);
2558 + (void)mtk_nand_set_address(0, 0, 1, 0);
2559 + DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD);
2560 + while (DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE)
2561 + ;
2562 + break;
2563 +
2564 + default:
2565 + BUG();
2566 + break;
2567 + }
2568 +}
2569 +
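+/*
+ * The first deselect (chip == -1) performs one-off setup: derive the ECC
+ * strength and spare size per 512-byte sector from the OOB size, program
+ * NFI_PAGEFMT accordingly and configure the ECC engine.
+ */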
2570 +static void
2571 +mtk_nand_select_chip(struct mtd_info *mtd, int chip)
2572 +{
2573 + if ((chip == -1) && (false == g_bInitDone)) {
2574 + struct nand_chip *nand = mtd->priv;
2575 + struct mtk_nand_host *host = nand->priv;
2576 + struct mtk_nand_host_hw *hw = host->hw;
2577 + u32 spare_per_sector = mtd->oobsize / (mtd->writesize / 512);
2578 + u32 ecc_bit = 4;
2579 + u32 spare_bit = PAGEFMT_SPARE_16;
2580 +
2581 + if (spare_per_sector >= 28) {
2582 + spare_bit = PAGEFMT_SPARE_28;
2583 + ecc_bit = 12;
2584 + spare_per_sector = 28;
2585 + } else if (spare_per_sector >= 27) {
2586 + spare_bit = PAGEFMT_SPARE_27;
2587 + ecc_bit = 8;
2588 + spare_per_sector = 27;
2589 + } else if (spare_per_sector >= 26) {
2590 + spare_bit = PAGEFMT_SPARE_26;
2591 + ecc_bit = 8;
2592 + spare_per_sector = 26;
2593 + } else if (spare_per_sector >= 16) {
2594 + spare_bit = PAGEFMT_SPARE_16;
2595 + ecc_bit = 4;
2596 + spare_per_sector = 16;
2597 + } else {
2598 + MSG(INIT, "[NAND]: NFI not support oobsize: %x\n", spare_per_sector);
2599 + ASSERT(0);
2600 + }
2601 + mtd->oobsize = spare_per_sector*(mtd->writesize/512);
2602 + MSG(INIT, "[NAND]select ecc bit:%d, sparesize :%d spare_per_sector=%d\n",ecc_bit,mtd->oobsize,spare_per_sector);
2603 + /* Setup PageFormat */
2604 + if (4096 == mtd->writesize) {
2605 + NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K);
2606 + nand->cmdfunc = mtk_nand_command_bp;
2607 + } else if (2048 == mtd->writesize) {
2608 + NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K);
2609 + nand->cmdfunc = mtk_nand_command_bp;
2610 + }
2611 + ECC_Config(hw,ecc_bit);
2612 + g_bInitDone = true;
2613 + }
2614 + switch (chip) {
2615 + case -1:
2616 + break;
2617 + case 0:
2618 + case 1:
2619 + /* Jun Shen, 2011.04.13 */
2620 + /* Note: MT6577 EVB NAND is mounted on CS0, but FPGA is CS1 */
2621 + DRV_WriteReg16(NFI_CSEL_REG16, chip);
2622 + /* Jun Shen, 2011.04.13 */
2623 + break;
2624 + }
2625 +}
2626 +
2627 +static uint8_t
2628 +mtk_nand_read_byte(struct mtd_info *mtd)
2629 +{
2630 + uint8_t retval = 0;
2631 +
2632 + if (!mtk_nand_pio_ready()) {
2633 + printk("pio ready timeout\n");
2634 + retval = false;
2635 + }
2636 +
2637 + if (g_bcmdstatus) {
2638 + retval = DRV_Reg8(NFI_DATAR_REG32);
2639 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
2640 + mtk_nand_reset();
2641 + if (g_bHwEcc) {
2642 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2643 + } else {
2644 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2645 + }
2646 + g_bcmdstatus = false;
2647 + } else
2648 + retval = DRV_Reg8(NFI_DATAR_REG32);
2649 +
2650 + return retval;
2651 +}
2652 +
2653 +static void
2654 +mtk_nand_read_buf(struct mtd_info *mtd, uint8_t * buf, int len)
2655 +{
2656 + struct nand_chip *nand = (struct nand_chip *)mtd->priv;
2657 + struct NAND_CMD *pkCMD = &g_kCMD;
2658 + u32 u4ColAddr = pkCMD->u4ColAddr;
2659 + u32 u4PageSize = mtd->writesize;
2660 +
2661 + if (u4ColAddr < u4PageSize) {
2662 + if ((u4ColAddr == 0) && (len >= u4PageSize)) {
2663 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, pkCMD->au1OOB);
2664 + if (len > u4PageSize) {
2665 + u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB));
2666 + memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size);
2667 + }
2668 + } else {
2669 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
2670 + memcpy(buf, nand->buffers->databuf + u4ColAddr, len);
2671 + }
2672 + pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
2673 + } else {
2674 + u32 u4Offset = u4ColAddr - u4PageSize;
2675 + u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB));
2676 + if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr) {
2677 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
2678 + pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
2679 + }
2680 + memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size);
2681 + }
2682 + pkCMD->u4ColAddr += len;
2683 +}
2684 +
2685 +static void
2686 +mtk_nand_write_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
2687 +{
2688 + struct NAND_CMD *pkCMD = &g_kCMD;
2689 + u32 u4ColAddr = pkCMD->u4ColAddr;
2690 + u32 u4PageSize = mtd->writesize;
2691 + int i4Size, i;
2692 +
2693 + if (u4ColAddr >= u4PageSize) {
2694 + u32 u4Offset = u4ColAddr - u4PageSize;
2695 + u8 *pOOB = pkCMD->au1OOB + u4Offset;
2696 + i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset));
2697 + for (i = 0; i < i4Size; i++) {
2698 + pOOB[i] &= buf[i];
2699 + }
2700 + } else {
2701 + pkCMD->pDataBuf = (u8 *) buf;
2702 + }
2703 +
2704 + pkCMD->u4ColAddr += len;
2705 +}
2706 +
2707 +static int
2708 +mtk_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t * buf, int oob_required, int page)
2709 +{
2710 + mtk_nand_write_buf(mtd, buf, mtd->writesize);
2711 + mtk_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
2712 + return 0;
2713 +}
2714 +
2715 +static int
2716 +mtk_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t * buf, int oob_required, int page)
2717 +{
2718 + struct NAND_CMD *pkCMD = &g_kCMD;
2719 + u32 u4ColAddr = pkCMD->u4ColAddr;
2720 + u32 u4PageSize = mtd->writesize;
2721 +
2722 + if (u4ColAddr == 0) {
2723 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi);
2724 + pkCMD->u4ColAddr += u4PageSize + mtd->oobsize;
2725 + }
2726 +
2727 + return 0;
2728 +}
2729 +
2730 +static int
2731 +mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 * buf, int page)
2732 +{
2733 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2734 + int block = page / page_per_block;
2735 + u16 page_in_block = page % page_per_block;
2736 + int mapped_block = block;
2737 +
2738 +#if defined (MTK_NAND_BMT)
2739 + mapped_block = get_mapping_block_index(block);
2740 + if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block,
2741 + mtd->writesize, buf, chip->oob_poi))
2742 + return 0;
2743 +#else
2744 + if (shift_on_bbt) {
2745 + mapped_block = block_remap(mtd, block);
2746 + if (mapped_block == -1)
2747 + return NAND_STATUS_FAIL;
2748 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2749 + return NAND_STATUS_FAIL;
2750 + }
2751 +
2752 + if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, buf, chip->oob_poi))
2753 + return 0;
2754 + else
2755 + return -EIO;
2756 +#endif
2757 +}
2758 +
2759 +int
2760 +mtk_nand_erase_hw(struct mtd_info *mtd, int page)
2761 +{
2762 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2763 +
2764 + chip->erase(mtd, page);
2765 +
2766 + return chip->waitfunc(mtd, chip);
2767 +}
2768 +
2769 +static int
2770 +mtk_nand_erase(struct mtd_info *mtd, int page)
2771 +{
2772 + // get mapping
2773 + struct nand_chip *chip = mtd->priv;
2774 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2775 + int page_in_block = page % page_per_block;
2776 + int block = page / page_per_block;
2777 + int mapped_block = block;
2778 +
2779 +#if defined(MTK_NAND_BMT)
2780 + mapped_block = get_mapping_block_index(block);
2781 +#else
2782 + if (shift_on_bbt) {
2783 + mapped_block = block_remap(mtd, block);
2784 + if (mapped_block == -1)
2785 + return NAND_STATUS_FAIL;
2786 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2787 + return NAND_STATUS_FAIL;
2788 + }
2789 +#endif
2790 +
2791 + do {
2792 + int status = mtk_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block);
2793 +
2794 + if (status & NAND_STATUS_FAIL) {
2795 +#if defined (MTK_NAND_BMT)
2796 + if (update_bmt( (page_in_block + mapped_block * page_per_block) << chip->page_shift,
2797 + UPDATE_ERASE_FAIL, NULL, NULL))
2798 + {
2799 + MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block);
2800 + return 0;
2801 + } else {
2802 + MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block);
2803 + return NAND_STATUS_FAIL;
2804 + }
2805 +#else
2806 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2807 + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
2808 + if (shift_on_bbt) {
2809 + mapped_block = block_remap(mtd, block);
2810 + if (mapped_block == -1)
2811 + return NAND_STATUS_FAIL;
2812 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2813 + return NAND_STATUS_FAIL;
2814 + } else
2815 + return NAND_STATUS_FAIL;
2816 +#endif
2817 + } else
2818 + break;
2819 + } while(1);
2820 +
2821 + return 0;
2822 +}
2823 +
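+/*
+ * Read the raw spare area of a page.  The spare bytes of each 512-byte
+ * sector sit directly behind that sector's data, so the routine either
+ * issues one READ0 per sector or, when the chip supports random data
+ * output and more than one sector is requested, uses 05h/E0h column jumps
+ * to walk the sectors within a single read.
+ */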
2824 +static int
2825 +mtk_nand_read_oob_raw(struct mtd_info *mtd, uint8_t * buf, int page_addr, int len)
2826 +{
2827 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2828 + u32 col_addr = 0;
2829 + u32 sector = 0;
2830 + int res = 0;
2831 + u32 colnob = 2, rawnob = devinfo.addr_cycle - 2;
2832 + int randomread = 0;
2833 + int read_len = 0;
2834 + int sec_num = 1<<(chip->page_shift-9);
2835 + int spare_per_sector = mtd->oobsize/sec_num;
2836 +
2837 + if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
2838 + printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
2839 + return -EINVAL;
2840 + }
2841 + if (len > spare_per_sector)
2842 + randomread = 1;
2843 + if (!randomread || !(devinfo.advancedmode & RAMDOM_READ)) {
2844 + while (len > 0) {
2845 + read_len = min(len, spare_per_sector);
2846 + col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); // TODO: Fix this hard-code 16
2847 + if (!mtk_nand_ready_for_read(chip, page_addr, col_addr, false, NULL)) {
2848 + printk(KERN_WARNING "mtk_nand_ready_for_read return failed\n");
2849 + res = -EIO;
2850 + goto error;
2851 + }
2852 + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2853 + printk(KERN_WARNING "mtk_nand_mcu_read_data return failed\n");
2854 + res = -EIO;
2855 + goto error;
2856 + }
2857 + mtk_nand_check_RW_count(read_len);
2858 + mtk_nand_stop_read();
2859 + sector++;
2860 + len -= read_len;
2861 + }
2862 + } else {
2863 + col_addr = NAND_SECTOR_SIZE;
2864 + if (chip->options & NAND_BUSWIDTH_16)
2865 + col_addr /= 2;
2866 + if (!mtk_nand_reset())
2867 + goto error;
2868 + mtk_nand_set_mode(0x6000);
2869 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
2870 + DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
2871 +
2872 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2873 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2874 +
2875 + mtk_nand_set_autoformat(false);
2876 +
2877 + if (!mtk_nand_set_command(NAND_CMD_READ0))
2878 + goto error;
2879 + // FIXME: handle any kind of address cycle
2880 + if (!mtk_nand_set_address(col_addr, page_addr, colnob, rawnob))
2881 + goto error;
2882 + if (!mtk_nand_set_command(NAND_CMD_READSTART))
2883 + goto error;
2884 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
2885 + goto error;
2886 + read_len = min(len, spare_per_sector);
2887 + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2888 + printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
2889 + res = -EIO;
2890 + goto error;
2891 + }
2892 + sector++;
2893 + len -= read_len;
2894 + mtk_nand_stop_read();
2895 + while (len > 0) {
2896 + read_len = min(len, spare_per_sector);
2897 + if (!mtk_nand_set_command(0x05))
2898 + goto error;
2899 + col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector);
2900 + if (chip->options & NAND_BUSWIDTH_16)
2901 + col_addr /= 2;
2902 + DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);
2903 + DRV_WriteReg16(NFI_ADDRNOB_REG16, 2);
2904 + DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
2905 + if (!mtk_nand_status_ready(STA_ADDR_STATE))
2906 + goto error;
2907 + if (!mtk_nand_set_command(0xE0))
2908 + goto error;
2909 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
2910 + goto error;
2911 + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2912 + printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
2913 + res = -EIO;
2914 + goto error;
2915 + }
2916 + mtk_nand_stop_read();
2917 + sector++;
2918 + len -= read_len;
2919 + }
2920 + }
2921 +error:
2922 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
2923 + return res;
2924 +}
2925 +
2926 +static int
2927 +mtk_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t * buf, int page_addr, int len)
2928 +{
2929 + struct nand_chip *chip = mtd->priv;
2930 + u32 col_addr = 0;
2931 + u32 sector = 0;
2932 + int write_len = 0;
2933 + int status;
2934 + int sec_num = 1<<(chip->page_shift-9);
2935 + int spare_per_sector = mtd->oobsize/sec_num;
2936 +
2937 + if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
2938 + printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
2939 + return -EINVAL;
2940 + }
2941 +
2942 + while (len > 0) {
2943 + write_len = min(len, spare_per_sector);
2944 + col_addr = sector * (NAND_SECTOR_SIZE + spare_per_sector) + NAND_SECTOR_SIZE;
2945 + if (!mtk_nand_ready_for_write(chip, page_addr, col_addr, false, NULL))
2946 + return -EIO;
2947 + if (!mtk_nand_mcu_write_data(mtd, buf + sector * spare_per_sector, write_len))
2948 + return -EIO;
2949 + (void)mtk_nand_check_RW_count(write_len);
2950 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
2951 + (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
2952 + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
2953 + ;
2954 + status = chip->waitfunc(mtd, chip);
2955 + if (status & NAND_STATUS_FAIL) {
2956 + printk(KERN_INFO "status: %d\n", status);
2957 + return -EIO;
2958 + }
2959 + len -= write_len;
2960 + sector++;
2961 + }
2962 +
2963 + return 0;
2964 +}
2965 +
2966 +static int
2967 +mtk_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
2968 +{
2969 + int i, iter;
2970 + int sec_num = 1<<(chip->page_shift-9);
2971 + int spare_per_sector = mtd->oobsize/sec_num;
2972 +
2973 + memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
2974 +
2975 + // copy ecc data
2976 + for (i = 0; i < layout->eccbytes; i++) {
2977 + iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
2978 + local_oob_buf[iter] = chip->oob_poi[layout->eccpos[i]];
2979 + }
2980 +
2981 + // copy FDM data
2982 + for (i = 0; i < sec_num; i++)
2983 + memcpy(&local_oob_buf[i * spare_per_sector], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR);
2984 +
2985 + return mtk_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize);
2986 +}
2987 +
2988 +static int mtk_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
2989 +{
2990 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2991 + int block = page / page_per_block;
2992 + u16 page_in_block = page % page_per_block;
2993 + int mapped_block = block;
2994 +
2995 +#if defined(MTK_NAND_BMT)
2996 + mapped_block = get_mapping_block_index(block);
2997 + // write bad index into oob
2998 + if (mapped_block != block)
2999 + set_bad_index_to_oob(chip->oob_poi, block);
3000 + else
3001 + set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
3002 +#else
3003 + if (shift_on_bbt)
3004 + {
3005 + mapped_block = block_remap(mtd, block);
3006 + if (mapped_block == -1)
3007 + return NAND_STATUS_FAIL;
3008 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
3009 + return NAND_STATUS_FAIL;
3010 + }
3011 +#endif
3012 + do {
3013 + if (mtk_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */)) {
3014 + MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
3015 +#if defined(MTK_NAND_BMT)
3016 + if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift,
3017 + UPDATE_WRITE_FAIL, NULL, chip->oob_poi))
3018 + {
3019 + MSG(INIT, "Update BMT success\n");
3020 + return 0;
3021 + } else {
3022 + MSG(INIT, "Update BMT fail\n");
3023 + return -EIO;
3024 + }
3025 +#else
3026 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
3027 + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
3028 + if (shift_on_bbt) {
3029 + mapped_block = block_remap(mtd, mapped_block);
3030 + if (mapped_block == -1)
3031 + return NAND_STATUS_FAIL;
3032 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
3033 + return NAND_STATUS_FAIL;
3034 + } else {
3035 + return NAND_STATUS_FAIL;
3036 + }
3037 +#endif
3038 + } else
3039 + break;
3040 + } while (1);
3041 +
3042 + return 0;
3043 +}
3044 +
3045 +int
3046 +mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset)
3047 +{
3048 + struct nand_chip *chip = mtd->priv;
3049 + int block = (int)offset >> chip->phys_erase_shift;
3050 + int page = block * (1 << (chip->phys_erase_shift - chip->page_shift));
3051 + u8 buf[8];
3052 +
3053 + memset(buf, 0xFF, 8);
3054 + buf[0] = 0;
3055 + return mtk_nand_write_oob_raw(mtd, buf, page, 8);
3056 +}
3057 +
3058 +static int
3059 +mtk_nand_block_markbad(struct mtd_info *mtd, loff_t offset)
3060 +{
3061 + struct nand_chip *chip = mtd->priv;
3062 + int block = (int)offset >> chip->phys_erase_shift;
3063 + int ret;
3064 + int mapped_block = block;
3065 +
3066 + nand_get_device(chip, mtd, FL_WRITING);
3067 +
3068 +#if defined(MTK_NAND_BMT)
3069 + mapped_block = get_mapping_block_index(block);
3070 + ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
3071 +#else
3072 + if (shift_on_bbt) {
3073 + mapped_block = block_remap(mtd, block);
3074 + if (mapped_block == -1) {
3075 + printk("NAND mark bad failed\n");
3076 + nand_release_device(mtd);
3077 + return NAND_STATUS_FAIL;
3078 + }
3079 + }
3080 + ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
3081 +#endif
3082 + nand_release_device(mtd);
3083 +
3084 + return ret;
3085 +}
3086 +
3087 +int
3088 +mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
3089 +{
3090 + int i;
3091 + u8 iter = 0;
3092 +
3093 + int sec_num = 1<<(chip->page_shift-9);
3094 + int spare_per_sector = mtd->oobsize/sec_num;
3095 +
3096 + if (mtk_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize)) {
3097 + printk(KERN_ERR "[%s]mtk_nand_read_oob_raw return failed\n", __FUNCTION__);
3098 + return -EIO;
3099 + }
3100 +
3101 + // adjust from the ECC physical layout to the memory layout
3102 + /*********************************************************/
3103 + /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */
3104 + /* 8B | 8B | 8B | 8B | 8B | 8B | 8B | 8B */
3105 + /*********************************************************/
3106 +
3107 + memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
3108 + // copy ecc data
3109 + for (i = 0; i < layout->eccbytes; i++) {
3110 + iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
3111 + chip->oob_poi[layout->eccpos[i]] = local_oob_buf[iter];
3112 + }
3113 +
3114 + // copy FDM data
3115 + for (i = 0; i < sec_num; i++) {
3116 + memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR], &local_oob_buf[i * spare_per_sector], OOB_AVAI_PER_SECTOR);
3117 + }
3118 +
3119 + return 0;
3120 +}
3121 +
3122 +static int
3123 +mtk_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
3124 +{
3125 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3126 + int block = page / page_per_block;
3127 + u16 page_in_block = page % page_per_block;
3128 + int mapped_block = block;
3129 +
3130 +#if defined (MTK_NAND_BMT)
3131 + mapped_block = get_mapping_block_index(block);
3132 + mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block);
3133 +#else
3134 + if (shift_on_bbt) {
3135 + mapped_block = block_remap(mtd, block);
3136 + if (mapped_block == -1)
3137 + return NAND_STATUS_FAIL;
3138 + // reading the oob is still allowed even if the block is bad
3139 + }
3140 + if (mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block)!=0)
3141 + return -1;
3142 +#endif
3143 + return 0;
3144 +}
3145 +
3146 +int
3147 +mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs)
3148 +{
3149 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3150 + int page_addr = (int)(ofs >> chip->page_shift);
3151 + unsigned int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3152 + unsigned char oob_buf[8];
3153 +
3154 + page_addr &= ~(page_per_block - 1);
3155 + if (mtk_nand_read_oob_raw(mtd, oob_buf, page_addr, sizeof(oob_buf))) {
3156 + printk(KERN_WARNING "mtk_nand_read_oob_raw return error\n");
3157 + return 1;
3158 + }
3159 +
3160 + if (oob_buf[0] != 0xff) {
3161 + printk(KERN_WARNING "Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", page_addr, oob_buf[0]);
3162 + // dump_nfi();
3163 + return 1;
3164 + }
3165 +
3166 + return 0;
3167 +}
3168 +
3169 +static int
3170 +mtk_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
3171 +{
3172 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3173 + int block = (int)ofs >> chip->phys_erase_shift;
3174 + int mapped_block = block;
3175 + int ret;
3176 +
3177 +#if defined(MTK_NAND_BMT)
3178 + mapped_block = get_mapping_block_index(block);
3179 +#else
3180 + if (shift_on_bbt) {
3181 + mapped_block = block_remap(mtd, block);
3182 + }
3183 +#endif
3184 +
3185 + ret = mtk_nand_block_bad_hw(mtd, mapped_block << chip->phys_erase_shift);
3186 +#if defined (MTK_NAND_BMT)
3187 + if (ret) {
3188 + MSG(INIT, "Unmapped bad block: 0x%x\n", mapped_block);
3189 + if (update_bmt(mapped_block << chip->phys_erase_shift, UPDATE_UNMAPPED_BLOCK, NULL, NULL)) {
3190 + MSG(INIT, "Update BMT success\n");
3191 + ret = 0;
3192 + } else {
3193 + MSG(INIT, "Update BMT fail\n");
3194 + ret = 1;
3195 + }
3196 + }
3197 +#endif
3198 +
3199 + return ret;
3200 +}
3201 +
3202 +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
3203 +char gacBuf[4096 + 288];
3204 +
3205 +static int
3206 +mtk_nand_verify_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
3207 +{
3208 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3209 + struct NAND_CMD *pkCMD = &g_kCMD;
3210 + u32 u4PageSize = mtd->writesize;
3211 + u32 *pSrc, *pDst;
3212 + int i;
3213 +
3214 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize);
3215 +
3216 + pSrc = (u32 *) buf;
3217 + pDst = (u32 *) gacBuf;
3218 + len = len / sizeof(u32);
3219 + for (i = 0; i < len; ++i) {
3220 + if (*pSrc != *pDst) {
3221 + MSG(VERIFY, "mtk_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr);
3222 + return -1;
3223 + }
3224 + pSrc++;
3225 + pDst++;
3226 + }
3227 +
3228 + pSrc = (u32 *) chip->oob_poi;
3229 + pDst = (u32 *) (gacBuf + u4PageSize);
3230 +
3231 + if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) || (pSrc[2] != pDst[2]) || (pSrc[3] != pDst[3]) || (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5])) {
3232 + // TODO: Ask Designer Why?
3233 + //(pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7]))
3234 + MSG(VERIFY, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr);
3235 + MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc[0], pSrc[1], pSrc[2], pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]);
3236 + MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst[0], pDst[1], pDst[2], pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]);
3237 + return -1;
3238 + }
3239 + return 0;
3240 +}
3241 +#endif
3242 +
3243 +static void
3244 +mtk_nand_init_hw(struct mtk_nand_host *host) {
3245 + struct mtk_nand_host_hw *hw = host->hw;
3246 + u32 data;
3247 +
3248 + data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
3249 + data &= ~((0x3<<18)|(0x3<<16));
3250 + data |= ((0x2<<18) |(0x2<<16));
3251 + DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
3252 +
3253 + MSG(INIT, "Enable NFI Clock\n");
3254 + nand_enable_clock();
3255 +
3256 + g_bInitDone = false;
3257 + g_kCMD.u4OOBRowAddr = (u32) - 1;
3258 +
3259 + /* Set default NFI access timing control */
3260 + DRV_WriteReg32(NFI_ACCCON_REG32, hw->nfi_access_timing);
3261 + DRV_WriteReg16(NFI_CNFG_REG16, 0);
3262 + DRV_WriteReg16(NFI_PAGEFMT_REG16, 0);
3263 +
3264 + /* Reset the state machine and data FIFO, because flushing FIFO */
3265 + (void)mtk_nand_reset();
3266 +
3267 + /* Set the ECC engine */
3268 + if (hw->nand_ecc_mode == NAND_ECC_HW) {
3269 + MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME);
3270 + if (g_bHwEcc)
3271 + NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
3272 + ECC_Config(host->hw,4);
3273 + mtk_nand_configure_fdm(8);
3274 + mtk_nand_configure_lock();
3275 + }
3276 +
3277 + NFI_SET_REG16(NFI_IOCON_REG16, 0x47);
3278 +}
3279 +
3280 +static int mtk_nand_dev_ready(struct mtd_info *mtd)
3281 +{
3282 + return !(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
3283 +}
3284 +
3285 +#define FACT_BBT_BLOCK_NUM 32 // use the last 32 blocks for the factory bbt table
3286 +#define FACT_BBT_OOB_SIGNATURE 1
3287 +#define FACT_BBT_SIGNATURE_LEN 7
3288 +const u8 oob_signature[] = "mtknand";
3289 +static u8 *fact_bbt = 0;
3290 +static u32 bbt_size = 0;
3291 +
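+/*
+ * Factory bad block table: the last FACT_BBT_BLOCK_NUM blocks are scanned
+ * for a page whose OOB carries the "mtknand" signature; the page data of
+ * the first match is copied into fact_bbt.
+ */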
3292 +static int
3293 +read_fact_bbt(struct mtd_info *mtd, unsigned int page)
3294 +{
3295 + struct nand_chip *chip = mtd->priv;
3296 +
3297 + // read oob
3298 + if (mtk_nand_read_oob_hw(mtd, chip, page)==0)
3299 + {
3300 + if (chip->oob_poi[nand_badblock_offset] != 0xFF)
3301 + {
3302 + printk("Bad Block on Page %x\n", page);
3303 + return -1;
3304 + }
3305 + if (memcmp(&chip->oob_poi[FACT_BBT_OOB_SIGNATURE], oob_signature, FACT_BBT_SIGNATURE_LEN) != 0)
3306 + {
3307 + printk("compare signature failed %x\n", page);
3308 + return -1;
3309 + }
3310 + if (mtk_nand_exec_read_page(mtd, page, mtd->writesize, chip->buffers->databuf, chip->oob_poi))
3311 + {
3312 + printk("Signature matched and data read!\n");
3313 + memcpy(fact_bbt, chip->buffers->databuf, (bbt_size <= mtd->writesize)? bbt_size:mtd->writesize);
3314 + return 0;
3315 + }
3316 +
3317 + }
3318 + printk("failed at page %x\n", page);
3319 + return -1;
3320 +}
3321 +
3322 +static int
3323 +load_fact_bbt(struct mtd_info *mtd)
3324 +{
3325 + struct nand_chip *chip = mtd->priv;
3326 + int i;
3327 + u32 total_block;
3328 +
3329 + total_block = 1 << (chip->chip_shift - chip->phys_erase_shift);
3330 + bbt_size = total_block >> 2;
3331 +
3332 + if ((!fact_bbt) && (bbt_size))
3333 + fact_bbt = (u8 *)kmalloc(bbt_size, GFP_KERNEL);
3334 + if (!fact_bbt)
3335 + return -1;
3336 +
3337 + for (i = total_block - 1; i >= (total_block - FACT_BBT_BLOCK_NUM); i--)
3338 + {
3339 + if (read_fact_bbt(mtd, i << (chip->phys_erase_shift - chip->page_shift)) == 0)
3340 + {
3341 + printk("load_fact_bbt success %d\n", i);
3342 + return 0;
3343 + }
3344 +
3345 + }
3346 + printk("load_fact_bbt failed\n");
3347 + return -1;
3348 +}
3349 +
3350 +static int oob_mtk_ooblayout_ecc(struct mtd_info *mtd, int section,
3351 + struct mtd_oob_region *oobregion)
3352 +{
3353 + oobregion->length = 8;
3354 + oobregion->offset = layout->eccpos[section * 8];
3355 +
3356 + return 0;
3357 +}
3358 +
3359 +static int oob_mtk_ooblayout_free(struct mtd_info *mtd, int section,
3360 + struct mtd_oob_region *oobregion)
3361 +{
3362 + if (section >= (layout->eccbytes / 8)) {
3363 + return -ERANGE;
3364 + }
3365 + oobregion->offset = layout->oobfree[section].offset;
3366 + oobregion->length = layout->oobfree[section].length;
3367 +
3368 + return 0;
3369 +}
3370 +
3371 +
3372 +static const struct mtd_ooblayout_ops oob_mtk_ops = {
3373 + .ecc = oob_mtk_ooblayout_ecc,
3374 + .free = oob_mtk_ooblayout_free,
3375 +};
3376 +
3377 +static int
3378 +mtk_nand_probe(struct platform_device *pdev)
3379 +{
3380 + struct mtd_part_parser_data ppdata;
3381 + struct mtk_nand_host_hw *hw;
3382 + struct nand_chip *nand_chip;
3383 + struct mtd_info *mtd;
3384 + u8 ext_id1, ext_id2, ext_id3;
3385 + int err = 0;
3386 + int id;
3387 + u32 ext_id;
3388 + int i;
3389 + u32 data;
3390 +
3391 + data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
3392 + data &= ~((0x3<<18)|(0x3<<16));
3393 + data |= ((0x2<<18) |(0x2<<16));
3394 + DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
3395 +
3396 + hw = &mt7621_nand_hw;
3397 + BUG_ON(!hw);
3398 + /* Allocate memory for the device structure (and zero it) */
3399 + host = kzalloc(sizeof(struct mtk_nand_host), GFP_KERNEL);
3400 + if (!host) {
3401 + MSG(INIT, "mtk_nand: failed to allocate device structure.\n");
3402 + return -ENOMEM;
3403 + }
3404 +
3405 + /* Allocate memory for 16 byte aligned buffer */
3406 + local_buffer_16_align = local_buffer + 16 - ((u32) local_buffer % 16);
3407 + printk(KERN_INFO "Allocate 16 byte aligned buffer: %p\n", local_buffer_16_align);
3408 + host->hw = hw;
3409 +
3410 + /* init mtd data structure */
3411 + nand_chip = &host->nand_chip;
3412 + nand_chip->priv = host; /* link the private data structures */
3413 +
3414 + mtd = host->mtd = &nand_chip->mtd;
3415 + mtd->priv = nand_chip;
3416 + mtd->owner = THIS_MODULE;
3417 + mtd->name = "MT7621-NAND";
3418 +
3419 + hw->nand_ecc_mode = NAND_ECC_HW;
3420 +
3421 + /* Set address of NAND IO lines */
3422 + nand_chip->IO_ADDR_R = (void __iomem *)NFI_DATAR_REG32;
3423 + nand_chip->IO_ADDR_W = (void __iomem *)NFI_DATAW_REG32;
3424 + nand_chip->chip_delay = 20; /* 20us command delay time */
3425 + nand_chip->ecc.mode = hw->nand_ecc_mode; /* enable ECC */
3426 + nand_chip->ecc.strength = 1;
3427 + nand_chip->read_byte = mtk_nand_read_byte;
3428 + nand_chip->read_buf = mtk_nand_read_buf;
3429 + nand_chip->write_buf = mtk_nand_write_buf;
3430 +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
3431 + nand_chip->verify_buf = mtk_nand_verify_buf;
3432 +#endif
3433 + nand_chip->select_chip = mtk_nand_select_chip;
3434 + nand_chip->dev_ready = mtk_nand_dev_ready;
3435 + nand_chip->cmdfunc = mtk_nand_command_bp;
3436 + nand_chip->ecc.read_page = mtk_nand_read_page_hwecc;
3437 + nand_chip->ecc.write_page = mtk_nand_write_page_hwecc;
3438 +
3439 + mtd_set_ooblayout(mtd, &oob_mtk_ops);
3440 + nand_chip->ecc.size = hw->nand_ecc_size; //2048
3441 + nand_chip->ecc.bytes = hw->nand_ecc_bytes; //32
3442 +
3443 + // For BMT, we need to revise driver architecture
3444 + nand_chip->write_page = mtk_nand_write_page;
3445 + nand_chip->ecc.write_oob = mtk_nand_write_oob;
3446 + nand_chip->block_markbad = mtk_nand_block_markbad; // need to add nand_get_device()/nand_release_device().
3447 + nand_chip->erase_mtk = mtk_nand_erase;
3448 + nand_chip->read_page = mtk_nand_read_page;
3449 + nand_chip->ecc.read_oob = mtk_nand_read_oob;
3450 + nand_chip->block_bad = mtk_nand_block_bad;
3451 + nand_chip->cmd_ctrl = mtk_nfc_cmd_ctrl;
3452 +
3453 + //Qwert:Add for Uboot
3454 + mtk_nand_init_hw(host);
3455 + /* Select the device */
3456 + nand_chip->select_chip(mtd, NFI_DEFAULT_CS);
3457 +
3458 + /*
3459 + * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
3460 + * after power-up
3461 + */
3462 + nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
3463 +
3464 + memset(&devinfo, 0 , sizeof(flashdev_info));
3465 +
3466 + /* Send the command for reading device ID */
3467 +
3468 + nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3469 +
3470 + /* Read manufacturer and device IDs */
3471 + manu_id = nand_chip->read_byte(mtd);
3472 + dev_id = nand_chip->read_byte(mtd);
3473 + id = dev_id | (manu_id << 8);
3474 + ext_id1 = nand_chip->read_byte(mtd);
3475 + ext_id2 = nand_chip->read_byte(mtd);
3476 + ext_id3 = nand_chip->read_byte(mtd);
3477 + ext_id = ext_id1 << 16 | ext_id2 << 8 | ext_id3;
3478 + if (!get_device_info(id, ext_id, &devinfo)) {
3479 + u32 chip_mode = RALINK_REG(RALINK_SYSCTL_BASE+0x010)&0x0F;
3480 + MSG(INIT, "Not Support this Device! \r\n");
3481 + memset(&devinfo, 0 , sizeof(flashdev_info));
3482 + MSG(INIT, "chip_mode=%08X\n",chip_mode);
3483 +
3484 + /* apply bootstrap first */
3485 + devinfo.addr_cycle = 5;
3486 + devinfo.iowidth = 8;
3487 +
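+ /*
+  * Fall back to a geometry derived from the boot-strap pins. Units used
+  * below: pagesize and sparesize in bytes, blocksize in KiB
+  * (erasesize = blocksize << 10), totalsize in MiB (chipsize = totalsize << 20).
+  */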
3488 + switch (chip_mode) {
3489 + case 10:
3490 + devinfo.pagesize = 2048;
3491 + devinfo.sparesize = 128;
3492 + devinfo.totalsize = 128;
3493 + devinfo.blocksize = 128;
3494 + break;
3495 + case 11:
3496 + devinfo.pagesize = 4096;
3497 + devinfo.sparesize = 128;
3498 + devinfo.totalsize = 1024;
3499 + devinfo.blocksize = 256;
3500 + break;
3501 + case 12:
3502 + devinfo.pagesize = 4096;
3503 + devinfo.sparesize = 224;
3504 + devinfo.totalsize = 2048;
3505 + devinfo.blocksize = 512;
3506 + break;
3507 + default:
3508 + case 1:
3509 + devinfo.pagesize = 2048;
3510 + devinfo.sparesize = 64;
3511 + devinfo.totalsize = 128;
3512 + devinfo.blocksize = 128;
3513 + break;
3514 + }
3515 +
3516 + devinfo.timmingsetting = NFI_DEFAULT_ACCESS_TIMING;
3517 + devinfo.devciename[0] = 'U';
3518 + devinfo.advancedmode = 0;
3519 + }
3520 + mtd->writesize = devinfo.pagesize;
3521 + mtd->erasesize = (devinfo.blocksize<<10);
3522 + mtd->oobsize = devinfo.sparesize;
3523 +
3524 + nand_chip->chipsize = (devinfo.totalsize<<20);
3525 + nand_chip->page_shift = ffs(mtd->writesize) - 1;
3526 + nand_chip->pagemask = (nand_chip->chipsize >> nand_chip->page_shift) - 1;
3527 + nand_chip->phys_erase_shift = ffs(mtd->erasesize) - 1;
3528 + nand_chip->chip_shift = ffs(nand_chip->chipsize) - 1;
3529 + nand_chip->cmd_ctrl = mtk_nfc_cmd_ctrl;
3530 +
3531 + /* Allocate the driver-owned buffers: ecccalc, ecccode and the data/OOB buffer share a single allocation */
3532 + {
3533 + struct nand_buffers *nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize + mtd->oobsize * 3, GFP_KERNEL);
3534 + if (!nbuf) {
3535 + return -ENOMEM;
3536 + }
3537 + nbuf->ecccalc = (uint8_t *)(nbuf + 1);
3538 + nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
3539 + nbuf->databuf = nbuf->ecccode + mtd->oobsize;
3540 +
3541 + nand_chip->buffers = nbuf;
3542 + nand_chip->options |= NAND_OWN_BUFFERS;
3543 + }
3544 +
3545 + nand_chip->oob_poi = nand_chip->buffers->databuf + mtd->writesize;
3546 + nand_chip->badblockpos = 0;
3547 +
3548 + if (devinfo.pagesize == 4096)
3549 + layout = &nand_oob_128;
3550 + else if (devinfo.pagesize == 2048)
3551 + layout = &nand_oob_64;
3552 + else if (devinfo.pagesize == 512)
3553 + layout = &nand_oob_16;
3554 +
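+ /*
+  * Build the OOB layout: the first OOB_AVAI_PER_SECTOR (8) bytes of each
+  * 512-byte sector's spare area stay free, everything after that is used
+  * for ECC.
+  */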
3555 + layout->eccbytes = devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE);
3556 + for (i = 0; i < layout->eccbytes; i++)
3557 + layout->eccpos[i]=OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE)+i;
3558 +
3559 + MSG(INIT, "Support this Device in MTK table! %x \r\n", id);
3560 + hw->nfi_bus_width = devinfo.iowidth;
3561 + DRV_WriteReg32(NFI_ACCCON_REG32, devinfo.timmingsetting);
3562 +
3563 + /* 16-bit bus width */
3564 + if (hw->nfi_bus_width == 16) {
3565 + MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME);
3566 + nand_chip->options |= NAND_BUSWIDTH_16;
3567 + }
3568 + mtd->oobsize = devinfo.sparesize;
3569 + hw->nfi_cs_num = 1;
3570 +
3571 + /* Scan to find existence of the device */
3572 + if (nand_scan(mtd, hw->nfi_cs_num)) {
3573 + MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME);
3574 + err = -ENXIO;
3575 + goto out;
3576 + }
3577 +
3578 + g_page_size = mtd->writesize;
3579 + platform_set_drvdata(pdev, host);
3580 + if (hw->nfi_bus_width == 16) {
3581 + NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
3582 + }
3583 +
3584 + nand_chip->select_chip(mtd, 0);
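+ /*
+  * With BMT enabled, hide the last BMT_POOL_SIZE erase blocks from MTD;
+  * they are reserved as the bad-block remapping pool managed by bmt.c.
+  */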
3585 +#if defined(MTK_NAND_BMT)
3586 + nand_chip->chipsize -= (BMT_POOL_SIZE) << nand_chip->phys_erase_shift;
3587 +#endif
3588 + mtd->size = nand_chip->chipsize;
3589 +
3590 + CFG_BLOCKSIZE = mtd->erasesize;
3591 +
3592 +#if defined(MTK_NAND_BMT)
3593 + if (!g_bmt) {
3594 + if (!(g_bmt = init_bmt(nand_chip, BMT_POOL_SIZE))) {
3595 + MSG(INIT, "Error: init bmt failed\n");
3596 + return 0;
3597 + }
3598 + }
3599 +#endif
3600 +
3601 + nand_set_flash_node(nand_chip, pdev->dev.of_node);
3602 + err = mtd_device_parse_register(mtd, probe_types, &ppdata,
3603 + NULL, 0);
3604 + if (!err) {
3605 + MSG(INIT, "[mtk_nand] probe successfully!\n");
3606 + nand_disable_clock();
3607 + shift_on_bbt = 1;
3608 + if (load_fact_bbt(mtd) == 0) {
3609 + int i;
3610 + for (i = 0; i < 0x100; i++)
3611 + nand_chip->bbt[i] |= fact_bbt[i];
3612 + }
3613 +
3614 + return err;
3615 + }
3616 +
3617 +out:
3618 + MSG(INIT, "[NFI] mtk_nand_probe fail, err = %d!\n", err);
3619 + nand_release(mtd);
3620 + platform_set_drvdata(pdev, NULL);
3621 + if ( NULL != nand_chip->buffers) {
3622 + kfree(nand_chip->buffers);
3623 + }
3624 + kfree(host);
3625 + nand_disable_clock();
3626 + return err;
3627 +}
3628 +
3629 +static int
3630 +mtk_nand_remove(struct platform_device *pdev)
3631 +{
3632 + struct mtk_nand_host *host = platform_get_drvdata(pdev);
3633 + struct mtd_info *mtd = host->mtd;
3634 + struct nand_chip *nand_chip = &host->nand_chip;
3635 +
3636 + nand_release(mtd);
3637 + if ( NULL != nand_chip->buffers) {
3638 + kfree(nand_chip->buffers);
3639 + }
3640 + kfree(host);
3641 + nand_disable_clock();
3642 +
3643 + return 0;
3644 +}
3645 +
3646 +static const struct of_device_id mt7621_nand_match[] = {
3647 + { .compatible = "mtk,mt7621-nand" },
3648 + {},
3649 +};
3650 +MODULE_DEVICE_TABLE(of, mt7621_nand_match);
3651 +
3652 +static struct platform_driver mtk_nand_driver = {
3653 + .probe = mtk_nand_probe,
3654 + .remove = mtk_nand_remove,
3655 + .driver = {
3656 + .name = "MT7621-NAND",
3657 + .owner = THIS_MODULE,
3658 + .of_match_table = mt7621_nand_match,
3659 + },
3660 +};
3661 +
3662 +static int __init
3663 +mtk_nand_init(void)
3664 +{
3665 + printk("MediaTek Nand driver init, version %s\n", VERSION);
3666 +
3667 + return platform_driver_register(&mtk_nand_driver);
3668 +}
3669 +
3670 +static void __exit
3671 +mtk_nand_exit(void)
3672 +{
3673 + platform_driver_unregister(&mtk_nand_driver);
3674 +}
3675 +
3676 +module_init(mtk_nand_init);
3677 +module_exit(mtk_nand_exit);
3678 +MODULE_LICENSE("GPL");
3679 Index: linux-4.9.30/drivers/mtd/nand/mtk_nand2.h
3680 ===================================================================
3681 --- /dev/null
3682 +++ linux-4.9.30/drivers/mtd/nand/mtk_nand2.h
3683 @@ -0,0 +1,452 @@
3684 +#ifndef __MTK_NAND_H
3685 +#define __MTK_NAND_H
3686 +
3687 +#define RALINK_NAND_CTRL_BASE 0xBE003000
3688 +#define RALINK_SYSCTL_BASE 0xBE000000
3689 +#define RALINK_NANDECC_CTRL_BASE 0xBE003800
3690 +/*******************************************************************************
3691 + * NFI Register Definition
3692 + *******************************************************************************/
3693 +
3694 +#define NFI_CNFG_REG16 ((volatile P_U16)(NFI_BASE+0x0000))
3695 +#define NFI_PAGEFMT_REG16 ((volatile P_U16)(NFI_BASE+0x0004))
3696 +#define NFI_CON_REG16 ((volatile P_U16)(NFI_BASE+0x0008))
3697 +#define NFI_ACCCON_REG32 ((volatile P_U32)(NFI_BASE+0x000C))
3698 +#define NFI_INTR_EN_REG16 ((volatile P_U16)(NFI_BASE+0x0010))
3699 +#define NFI_INTR_REG16 ((volatile P_U16)(NFI_BASE+0x0014))
3700 +
3701 +#define NFI_CMD_REG16 ((volatile P_U16)(NFI_BASE+0x0020))
3702 +
3703 +#define NFI_ADDRNOB_REG16 ((volatile P_U16)(NFI_BASE+0x0030))
3704 +#define NFI_COLADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0034))
3705 +#define NFI_ROWADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0038))
3706 +
3707 +#define NFI_STRDATA_REG16 ((volatile P_U16)(NFI_BASE+0x0040))
3708 +
3709 +#define NFI_DATAW_REG32 ((volatile P_U32)(NFI_BASE+0x0050))
3710 +#define NFI_DATAR_REG32 ((volatile P_U32)(NFI_BASE+0x0054))
3711 +#define NFI_PIO_DIRDY_REG16 ((volatile P_U16)(NFI_BASE+0x0058))
3712 +
3713 +#define NFI_STA_REG32 ((volatile P_U32)(NFI_BASE+0x0060))
3714 +#define NFI_FIFOSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0064))
3715 +#define NFI_LOCKSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0068))
3716 +
3717 +#define NFI_ADDRCNTR_REG16 ((volatile P_U16)(NFI_BASE+0x0070))
3718 +
3719 +#define NFI_STRADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0080))
3720 +#define NFI_BYTELEN_REG16 ((volatile P_U16)(NFI_BASE+0x0084))
3721 +
3722 +#define NFI_CSEL_REG16 ((volatile P_U16)(NFI_BASE+0x0090))
3723 +#define NFI_IOCON_REG16 ((volatile P_U16)(NFI_BASE+0x0094))
3724 +
3725 +#define NFI_FDM0L_REG32 ((volatile P_U32)(NFI_BASE+0x00A0))
3726 +#define NFI_FDM0M_REG32 ((volatile P_U32)(NFI_BASE+0x00A4))
3727 +
3728 +#define NFI_LOCK_REG16 ((volatile P_U16)(NFI_BASE+0x0100))
3729 +#define NFI_LOCKCON_REG32 ((volatile P_U32)(NFI_BASE+0x0104))
3730 +#define NFI_LOCKANOB_REG16 ((volatile P_U16)(NFI_BASE+0x0108))
3731 +#define NFI_LOCK00ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0110))
3732 +#define NFI_LOCK00FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0114))
3733 +#define NFI_LOCK01ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0118))
3734 +#define NFI_LOCK01FMT_REG32 ((volatile P_U32)(NFI_BASE+0x011C))
3735 +#define NFI_LOCK02ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0120))
3736 +#define NFI_LOCK02FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0124))
3737 +#define NFI_LOCK03ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0128))
3738 +#define NFI_LOCK03FMT_REG32 ((volatile P_U32)(NFI_BASE+0x012C))
3739 +#define NFI_LOCK04ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0130))
3740 +#define NFI_LOCK04FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0134))
3741 +#define NFI_LOCK05ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0138))
3742 +#define NFI_LOCK05FMT_REG32 ((volatile P_U32)(NFI_BASE+0x013C))
3743 +#define NFI_LOCK06ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0140))
3744 +#define NFI_LOCK06FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0144))
3745 +#define NFI_LOCK07ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0148))
3746 +#define NFI_LOCK07FMT_REG32 ((volatile P_U32)(NFI_BASE+0x014C))
3747 +#define NFI_LOCK08ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0150))
3748 +#define NFI_LOCK08FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0154))
3749 +#define NFI_LOCK09ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0158))
3750 +#define NFI_LOCK09FMT_REG32 ((volatile P_U32)(NFI_BASE+0x015C))
3751 +#define NFI_LOCK10ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0160))
3752 +#define NFI_LOCK10FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0164))
3753 +#define NFI_LOCK11ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0168))
3754 +#define NFI_LOCK11FMT_REG32 ((volatile P_U32)(NFI_BASE+0x016C))
3755 +#define NFI_LOCK12ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0170))
3756 +#define NFI_LOCK12FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0174))
3757 +#define NFI_LOCK13ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0178))
3758 +#define NFI_LOCK13FMT_REG32 ((volatile P_U32)(NFI_BASE+0x017C))
3759 +#define NFI_LOCK14ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0180))
3760 +#define NFI_LOCK14FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0184))
3761 +#define NFI_LOCK15ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0188))
3762 +#define NFI_LOCK15FMT_REG32 ((volatile P_U32)(NFI_BASE+0x018C))
3763 +
3764 +#define NFI_FIFODATA0_REG32 ((volatile P_U32)(NFI_BASE+0x0190))
3765 +#define NFI_FIFODATA1_REG32 ((volatile P_U32)(NFI_BASE+0x0194))
3766 +#define NFI_FIFODATA2_REG32 ((volatile P_U32)(NFI_BASE+0x0198))
3767 +#define NFI_FIFODATA3_REG32 ((volatile P_U32)(NFI_BASE+0x019C))
3768 +#define NFI_MASTERSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0210))
3769 +
3770 +
3771 +/*******************************************************************************
3772 + * NFI Register Field Definition
3773 + *******************************************************************************/
3774 +
3775 +/* NFI_CNFG */
3776 +#define CNFG_AHB (0x0001)
3777 +#define CNFG_READ_EN (0x0002)
3778 +#define CNFG_DMA_BURST_EN (0x0004)
3779 +#define CNFG_BYTE_RW (0x0040)
3780 +#define CNFG_HW_ECC_EN (0x0100)
3781 +#define CNFG_AUTO_FMT_EN (0x0200)
3782 +#define CNFG_OP_IDLE (0x0000)
3783 +#define CNFG_OP_READ (0x1000)
3784 +#define CNFG_OP_SRD (0x2000)
3785 +#define CNFG_OP_PRGM (0x3000)
3786 +#define CNFG_OP_ERASE (0x4000)
3787 +#define CNFG_OP_RESET (0x5000)
3788 +#define CNFG_OP_CUST (0x6000)
3789 +#define CNFG_OP_MODE_MASK (0x7000)
3790 +#define CNFG_OP_MODE_SHIFT (12)
3791 +
3792 +/* NFI_PAGEFMT */
3793 +#define PAGEFMT_512 (0x0000)
3794 +#define PAGEFMT_2K (0x0001)
3795 +#define PAGEFMT_4K (0x0002)
3796 +
3797 +#define PAGEFMT_PAGE_MASK (0x0003)
3798 +
3799 +#define PAGEFMT_DBYTE_EN (0x0008)
3800 +
3801 +#define PAGEFMT_SPARE_16 (0x0000)
3802 +#define PAGEFMT_SPARE_26 (0x0001)
3803 +#define PAGEFMT_SPARE_27 (0x0002)
3804 +#define PAGEFMT_SPARE_28 (0x0003)
3805 +#define PAGEFMT_SPARE_MASK (0x0030)
3806 +#define PAGEFMT_SPARE_SHIFT (4)
3807 +
3808 +#define PAGEFMT_FDM_MASK (0x0F00)
3809 +#define PAGEFMT_FDM_SHIFT (8)
3810 +
3811 +#define PAGEFMT_FDM_ECC_MASK (0xF000)
3812 +#define PAGEFMT_FDM_ECC_SHIFT (12)
3813 +
3814 +/* NFI_CON */
3815 +#define CON_FIFO_FLUSH (0x0001)
3816 +#define CON_NFI_RST (0x0002)
3817 +#define CON_NFI_SRD (0x0010)
3818 +
3819 +#define CON_NFI_NOB_MASK (0x0060)
3820 +#define CON_NFI_NOB_SHIFT (5)
3821 +
3822 +#define CON_NFI_BRD (0x0100)
3823 +#define CON_NFI_BWR (0x0200)
3824 +
3825 +#define CON_NFI_SEC_MASK (0xF000)
3826 +#define CON_NFI_SEC_SHIFT (12)
3827 +
3828 +/* NFI_ACCCON */
3829 +#define ACCCON_SETTING ()
3830 +
3831 +/* NFI_INTR_EN */
3832 +#define INTR_RD_DONE_EN (0x0001)
3833 +#define INTR_WR_DONE_EN (0x0002)
3834 +#define INTR_RST_DONE_EN (0x0004)
3835 +#define INTR_ERASE_DONE_EN (0x0008)
3836 +#define INTR_BSY_RTN_EN (0x0010)
3837 +#define INTR_ACC_LOCK_EN (0x0020)
3838 +#define INTR_AHB_DONE_EN (0x0040)
3839 +#define INTR_ALL_INTR_DE (0x0000)
3840 +#define INTR_ALL_INTR_EN (0x007F)
3841 +
3842 +/* NFI_INTR */
3843 +#define INTR_RD_DONE (0x0001)
3844 +#define INTR_WR_DONE (0x0002)
3845 +#define INTR_RST_DONE (0x0004)
3846 +#define INTR_ERASE_DONE (0x0008)
3847 +#define INTR_BSY_RTN (0x0010)
3848 +#define INTR_ACC_LOCK (0x0020)
3849 +#define INTR_AHB_DONE (0x0040)
3850 +
3851 +/* NFI_ADDRNOB */
3852 +#define ADDR_COL_NOB_MASK (0x0003)
3853 +#define ADDR_COL_NOB_SHIFT (0)
3854 +#define ADDR_ROW_NOB_MASK (0x0030)
3855 +#define ADDR_ROW_NOB_SHIFT (4)
3856 +
3857 +/* NFI_STA */
3858 +#define STA_READ_EMPTY (0x00001000)
3859 +#define STA_ACC_LOCK (0x00000010)
3860 +#define STA_CMD_STATE (0x00000001)
3861 +#define STA_ADDR_STATE (0x00000002)
3862 +#define STA_DATAR_STATE (0x00000004)
3863 +#define STA_DATAW_STATE (0x00000008)
3864 +
3865 +#define STA_NAND_FSM_MASK (0x1F000000)
3866 +#define STA_NAND_BUSY (0x00000100)
3867 +#define STA_NAND_BUSY_RETURN (0x00000200)
3868 +#define STA_NFI_FSM_MASK (0x000F0000)
3869 +#define STA_NFI_OP_MASK (0x0000000F)
3870 +
3871 +/* NFI_FIFOSTA */
3872 +#define FIFO_RD_EMPTY (0x0040)
3873 +#define FIFO_RD_FULL (0x0080)
3874 +#define FIFO_WR_FULL (0x8000)
3875 +#define FIFO_WR_EMPTY (0x4000)
3876 +#define FIFO_RD_REMAIN(x) (0x1F&(x))
3877 +#define FIFO_WR_REMAIN(x) ((0x1F00&(x))>>8)
3878 +
3879 +/* NFI_ADDRCNTR */
3880 +#define ADDRCNTR_CNTR(x) ((0xF000&(x))>>12)
3881 +#define ADDRCNTR_OFFSET(x) (0x03FF&(x))
3882 +
3883 +/* NFI_LOCK */
3884 +#define NFI_LOCK_ON (0x0001)
3885 +
3886 +/* NFI_LOCKANOB */
3887 +#define PROG_RADD_NOB_MASK (0x7000)
3888 +#define PROG_RADD_NOB_SHIFT (12)
3889 +#define PROG_CADD_NOB_MASK (0x0300)
3890 +#define PROG_CADD_NOB_SHIFT (8)
3891 +#define ERASE_RADD_NOB_MASK (0x0070)
3892 +#define ERASE_RADD_NOB_SHIFT (4)
3893 +#define ERASE_CADD_NOB_MASK (0x0007)
3894 +#define ERASE_CADD_NOB_SHIFT (0)
3895 +
3896 +/*******************************************************************************
3897 + * ECC Register Definition
3898 + *******************************************************************************/
3899 +
3900 +#define ECC_ENCCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0000))
3901 +#define ECC_ENCCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0004))
3902 +#define ECC_ENCDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0008))
3903 +#define ECC_ENCIDLE_REG32 ((volatile P_U32)(NFIECC_BASE+0x000C))
3904 +#define ECC_ENCPAR0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0010))
3905 +#define ECC_ENCPAR1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0014))
3906 +#define ECC_ENCPAR2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0018))
3907 +#define ECC_ENCPAR3_REG32 ((volatile P_U32)(NFIECC_BASE+0x001C))
3908 +#define ECC_ENCPAR4_REG32 ((volatile P_U32)(NFIECC_BASE+0x0020))
3909 +#define ECC_ENCSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0024))
3910 +#define ECC_ENCIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0028))
3911 +#define ECC_ENCIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x002C))
3912 +
3913 +#define ECC_DECCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0100))
3914 +#define ECC_DECCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0104))
3915 +#define ECC_DECDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0108))
3916 +#define ECC_DECIDLE_REG16 ((volatile P_U16)(NFIECC_BASE+0x010C))
3917 +#define ECC_DECFER_REG16 ((volatile P_U16)(NFIECC_BASE+0x0110))
3918 +#define ECC_DECENUM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0114))
3919 +#define ECC_DECDONE_REG16 ((volatile P_U16)(NFIECC_BASE+0x0118))
3920 +#define ECC_DECEL0_REG32 ((volatile P_U32)(NFIECC_BASE+0x011C))
3921 +#define ECC_DECEL1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0120))
3922 +#define ECC_DECEL2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0124))
3923 +#define ECC_DECEL3_REG32 ((volatile P_U32)(NFIECC_BASE+0x0128))
3924 +#define ECC_DECEL4_REG32 ((volatile P_U32)(NFIECC_BASE+0x012C))
3925 +#define ECC_DECEL5_REG32 ((volatile P_U32)(NFIECC_BASE+0x0130))
3926 +#define ECC_DECIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0134))
3927 +#define ECC_DECIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x0138))
3928 +#define ECC_FDMADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x013C))
3929 +#define ECC_DECFSM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0140))
3930 +#define ECC_SYNSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0144))
3931 +#define ECC_DECNFIDI_REG32 ((volatile P_U32)(NFIECC_BASE+0x0148))
3932 +#define ECC_SYN0_REG32 ((volatile P_U32)(NFIECC_BASE+0x014C))
3933 +
3934 +/*******************************************************************************
3935 + * ECC register definition
3936 + *******************************************************************************/
3937 +/* ECC_ENCON */
3938 +#define ENC_EN (0x0001)
3939 +#define ENC_DE (0x0000)
3940 +
3941 +/* ECC_ENCCNFG */
3942 +#define ECC_CNFG_ECC4 (0x0000)
3943 +#define ECC_CNFG_ECC6 (0x0001)
3944 +#define ECC_CNFG_ECC8 (0x0002)
3945 +#define ECC_CNFG_ECC10 (0x0003)
3946 +#define ECC_CNFG_ECC12 (0x0004)
3947 +#define ECC_CNFG_ECC_MASK (0x00000007)
3948 +
3949 +#define ENC_CNFG_NFI (0x0010)
3950 +#define ENC_CNFG_MODE_MASK (0x0010)
3951 +
3952 +#define ENC_CNFG_META6 (0x10300000)
3953 +#define ENC_CNFG_META8 (0x10400000)
3954 +
3955 +#define ENC_CNFG_MSG_MASK (0x1FFF0000)
3956 +#define ENC_CNFG_MSG_SHIFT (0x10)
3957 +
3958 +/* ECC_ENCIDLE */
3959 +#define ENC_IDLE (0x0001)
3960 +
3961 +/* ECC_ENCSTA */
3962 +#define STA_FSM (0x001F)
3963 +#define STA_COUNT_PS (0xFF10)
3964 +#define STA_COUNT_MS (0x3FFF0000)
3965 +
3966 +/* ECC_ENCIRQEN */
3967 +#define ENC_IRQEN (0x0001)
3968 +
3969 +/* ECC_ENCIRQSTA */
3970 +#define ENC_IRQSTA (0x0001)
3971 +
3972 +/* ECC_DECCON */
3973 +#define DEC_EN (0x0001)
3974 +#define DEC_DE (0x0000)
3975 +
3976 +/* ECC_ENCCNFG */
3977 +#define DEC_CNFG_ECC4 (0x0000)
3978 +//#define DEC_CNFG_ECC6 (0x0001)
3979 +//#define DEC_CNFG_ECC12 (0x0002)
3980 +#define DEC_CNFG_NFI (0x0010)
3981 +//#define DEC_CNFG_META6 (0x10300000)
3982 +//#define DEC_CNFG_META8 (0x10400000)
3983 +
3984 +#define DEC_CNFG_FER (0x01000)
3985 +#define DEC_CNFG_EL (0x02000)
3986 +#define DEC_CNFG_CORRECT (0x03000)
3987 +#define DEC_CNFG_TYPE_MASK (0x03000)
3988 +
3989 +#define DEC_CNFG_EMPTY_EN (0x80000000)
3990 +
3991 +#define DEC_CNFG_CODE_MASK (0x1FFF0000)
3992 +#define DEC_CNFG_CODE_SHIFT (0x10)
3993 +
3994 +/* ECC_DECIDLE */
3995 +#define DEC_IDLE (0x0001)
3996 +
3997 +/* ECC_DECFER */
3998 +#define DEC_FER0 (0x0001)
3999 +#define DEC_FER1 (0x0002)
4000 +#define DEC_FER2 (0x0004)
4001 +#define DEC_FER3 (0x0008)
4002 +#define DEC_FER4 (0x0010)
4003 +#define DEC_FER5 (0x0020)
4004 +#define DEC_FER6 (0x0040)
4005 +#define DEC_FER7 (0x0080)
4006 +
4007 +/* ECC_DECENUM */
4008 +#define ERR_NUM0 (0x0000000F)
4009 +#define ERR_NUM1 (0x000000F0)
4010 +#define ERR_NUM2 (0x00000F00)
4011 +#define ERR_NUM3 (0x0000F000)
4012 +#define ERR_NUM4 (0x000F0000)
4013 +#define ERR_NUM5 (0x00F00000)
4014 +#define ERR_NUM6 (0x0F000000)
4015 +#define ERR_NUM7 (0xF0000000)
4016 +
4017 +/* ECC_DECDONE */
4018 +#define DEC_DONE0 (0x0001)
4019 +#define DEC_DONE1 (0x0002)
4020 +#define DEC_DONE2 (0x0004)
4021 +#define DEC_DONE3 (0x0008)
4022 +#define DEC_DONE4 (0x0010)
4023 +#define DEC_DONE5 (0x0020)
4024 +#define DEC_DONE6 (0x0040)
4025 +#define DEC_DONE7 (0x0080)
4026 +
4027 +/* ECC_DECIRQEN */
4028 +#define DEC_IRQEN (0x0001)
4029 +
4030 +/* ECC_DECIRQSTA */
4031 +#define DEC_IRQSTA (0x0001)
4032 +
4033 +#define CHIPVER_ECO_1 (0x8a00)
4034 +#define CHIPVER_ECO_2 (0x8a01)
4035 +
4036 +//#define NAND_PFM
4037 +
4038 +/*******************************************************************************
4039 + * Data Structure Definition
4040 + *******************************************************************************/
4041 +struct mtk_nand_host
4042 +{
4043 + struct nand_chip nand_chip;
4044 + struct mtd_info *mtd;
4045 + struct mtk_nand_host_hw *hw;
4046 +};
4047 +
4048 +struct NAND_CMD
4049 +{
4050 + u32 u4ColAddr;
4051 + u32 u4RowAddr;
4052 + u32 u4OOBRowAddr;
4053 + u8 au1OOB[288];
4054 + u8* pDataBuf;
4055 +#ifdef NAND_PFM
4056 + u32 pureReadOOB;
4057 + u32 pureReadOOBNum;
4058 +#endif
4059 +};
4060 +
4061 +/*
4062 + * ECC layout control structure. Exported to userspace for
4063 + * diagnosis and to allow creation of raw images
4064 +struct nand_ecclayout {
4065 + uint32_t eccbytes;
4066 + uint32_t eccpos[64];
4067 + uint32_t oobavail;
4068 + struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES];
4069 +};
4070 +*/
4071 +#define __DEBUG_NAND 1 /* Debug information on/off */
4072 +
4073 +/* Debug message event */
4074 +#define DBG_EVT_NONE 0x00000000 /* No event */
4075 +#define DBG_EVT_INIT 0x00000001 /* Initial related event */
4076 +#define DBG_EVT_VERIFY 0x00000002 /* Verify buffer related event */
4077 +#define DBG_EVT_PERFORMANCE 0x00000004 /* Performance related event */
4078 +#define DBG_EVT_READ 0x00000008 /* Read related event */
4079 +#define DBG_EVT_WRITE 0x00000010 /* Write related event */
4080 +#define DBG_EVT_ERASE 0x00000020 /* Erase related event */
4081 +#define DBG_EVT_BADBLOCK 0x00000040 /* Badblock related event */
4082 +#define DBG_EVT_POWERCTL 0x00000080 /* Suspend/Resume related event */
4083 +
4084 +#define DBG_EVT_ALL 0xffffffff
4085 +
4086 +#define DBG_EVT_MASK (DBG_EVT_INIT)
4087 +
4088 +#if __DEBUG_NAND
4089 +#define MSG(evt, fmt, args...) \
4090 +do { \
4091 + if ((DBG_EVT_##evt) & DBG_EVT_MASK) { \
4092 + printk(fmt, ##args); \
4093 + } \
4094 +} while(0)
4095 +
4096 +#define MSG_FUNC_ENTRY(f) MSG(FUC, "<FUN_ENT>: %s\n", __FUNCTION__)
4097 +#else
4098 +#define MSG(evt, fmt, args...) do{}while(0)
4099 +#define MSG_FUNC_ENTRY(f) do{}while(0)
4100 +#endif
4101 +
4102 +#define RAMDOM_READ 1<<0
4103 +#define CACHE_READ 1<<1
4104 +
4105 +typedef struct
4106 +{
4107 + u16 id; // device id + manufacturer id
4108 + u32 ext_id;
4109 + u8 addr_cycle;
4110 + u8 iowidth;
4111 + u16 totalsize;
4112 + u16 blocksize;
4113 + u16 pagesize;
4114 + u16 sparesize;
4115 + u32 timmingsetting;
4116 + char devciename[14];
4117 + u32 advancedmode; //
4118 +}flashdev_info,*pflashdev_info;
4119 +
4120 +/* NAND driver */
4121 +#if 0
4122 +struct mtk_nand_host_hw {
4123 + unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
4124 + unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
4125 + unsigned int nfi_cs_num; /* NFI_CS_NUM */
4126 + unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
4127 + unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
4128 + unsigned int nand_ecc_size;
4129 + unsigned int nand_ecc_bytes;
4130 + unsigned int nand_ecc_mode;
4131 +};
4132 +extern struct mtk_nand_host_hw mt7621_nand_hw;
4133 +extern u32 CFG_BLOCKSIZE;
4134 +#endif
4135 +#endif
4136 Index: linux-4.9.30/drivers/mtd/nand/nand_base.c
4137 ===================================================================
4138 --- linux-4.9.30.orig/drivers/mtd/nand/nand_base.c
4139 +++ linux-4.9.30/drivers/mtd/nand/nand_base.c
4140 @@ -47,7 +47,7 @@
4141 #include <linux/mtd/partitions.h>
4142 #include <linux/of.h>
4143
4144 -static int nand_get_device(struct mtd_info *mtd, int new_state);
4145 +int nand_get_device(struct mtd_info *mtd, int new_state);
4146
4147 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
4148 struct mtd_oob_ops *ops);
4149 @@ -233,7 +233,7 @@ static int check_offs_len(struct mtd_inf
4150 *
4151 * Release chip lock and wake up anyone waiting on the device.
4152 */
4153 -static void nand_release_device(struct mtd_info *mtd)
4154 +void nand_release_device(struct mtd_info *mtd)
4155 {
4156 struct nand_chip *chip = mtd_to_nand(mtd);
4157
4158 @@ -915,7 +915,7 @@ static void panic_nand_get_device(struct
4159 *
4160 * Get the device and lock it for exclusive access
4161 */
4162 -static int
4163 +int
4164 nand_get_device(struct mtd_info *mtd, int new_state)
4165 {
4166 struct nand_chip *chip = mtd_to_nand(mtd);
4167 Index: linux-4.9.30/drivers/mtd/nand/nand_bbt.c
4168 ===================================================================
4169 --- linux-4.9.30.orig/drivers/mtd/nand/nand_bbt.c
4170 +++ linux-4.9.30/drivers/mtd/nand/nand_bbt.c
4171 @@ -1215,6 +1215,25 @@ err:
4172 return res;
4173 }
4174
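+/*
+ * Helpers added by this patch so the MT7621 NAND driver can read and update
+ * the in-memory bad block table directly; the table keeps two bits per
+ * erase block.
+ */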
4175 +void nand_bbt_set(struct mtd_info *mtd, int page, int flag)
4176 +{
4177 + struct nand_chip *this = mtd->priv;
4178 + int block;
4179 +
4180 + block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
4181 + this->bbt[block >> 3] &= ~(0x03 << (block & 0x6));
4182 + this->bbt[block >> 3] |= (flag & 0x3) << (block & 0x6);
4183 +}
4184 +
4185 +int nand_bbt_get(struct mtd_info *mtd, int page)
4186 +{
4187 + struct nand_chip *this = mtd->priv;
4188 + int block;
4189 +
4190 + block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
4191 + return (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
4192 +}
4193 +
4194 /**
4195 * nand_update_bbt - update bad block table(s)
4196 * @mtd: MTD device structure
4197 Index: linux-4.9.30/drivers/mtd/nand/nand_def.h
4198 ===================================================================
4199 --- /dev/null
4200 +++ linux-4.9.30/drivers/mtd/nand/nand_def.h
4201 @@ -0,0 +1,123 @@
4202 +#ifndef __NAND_DEF_H__
4203 +#define __NAND_DEF_H__
4204 +
4205 +#define VERSION "v2.1 Fix AHB virt2phys error"
4206 +#define MODULE_NAME "# MTK NAND #"
4207 +#define PROCNAME "driver/nand"
4208 +
4209 +#undef TESTTIME
4210 +//#define __UBOOT_NAND__ 1
4211 +#define __KERNEL_NAND__ 1
4212 +//#define __PRELOADER_NAND__ 1
4213 +//#define PMT 1
4214 +//#define _MTK_NAND_DUMMY_DRIVER
4215 +//#define CONFIG_BADBLOCK_CHECK 1
4216 +//#ifdef CONFIG_BADBLOCK_CHECK
4217 +//#define MTK_NAND_BMT 1
4218 +//#endif
4219 +#define ECC_ENABLE 1
4220 +#define MANUAL_CORRECT 1
4221 +//#define __INTERNAL_USE_AHB_MODE__ (0)
4222 +#define SKIP_BAD_BLOCK
4223 +#define FACT_BBT
4224 +
4225 +#ifndef NAND_OTP_SUPPORT
4226 +#define NAND_OTP_SUPPORT 0
4227 +#endif
4228 +
4229 +/*******************************************************************************
4230 + * Macro definition
4231 + *******************************************************************************/
4232 +//#define NFI_SET_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) | (value)))
4233 +//#define NFI_SET_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) | (value)))
4234 +//#define NFI_CLN_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) & (~(value))))
4235 +//#define NFI_CLN_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) & (~(value))))
4236 +
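+/*
+ * Read-modify-write register helpers. The single-expression variants above
+ * are kept commented out for reference; the do/while forms below stage the
+ * new value in g_value (a global presumably defined in mtk_nand2.c) before
+ * writing it back.
+ */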
4237 +#if defined (__KERNEL_NAND__)
4238 +#define NFI_SET_REG32(reg, value) \
4239 +do { \
4240 + g_value = (DRV_Reg32(reg) | (value));\
4241 + DRV_WriteReg32(reg, g_value); \
4242 +} while(0)
4243 +
4244 +#define NFI_SET_REG16(reg, value) \
4245 +do { \
4246 + g_value = (DRV_Reg16(reg) | (value));\
4247 + DRV_WriteReg16(reg, g_value); \
4248 +} while(0)
4249 +
4250 +#define NFI_CLN_REG32(reg, value) \
4251 +do { \
4252 + g_value = (DRV_Reg32(reg) & (~(value)));\
4253 + DRV_WriteReg32(reg, g_value); \
4254 +} while(0)
4255 +
4256 +#define NFI_CLN_REG16(reg, value) \
4257 +do { \
4258 + g_value = (DRV_Reg16(reg) & (~(value)));\
4259 + DRV_WriteReg16(reg, g_value); \
4260 +} while(0)
4261 +#endif
4262 +
4263 +#define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state)
4264 +#define NFI_WAIT_TO_READY() do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY))
4265 +
4266 +
4267 +#define NAND_SECTOR_SIZE (512)
4268 +#define OOB_PER_SECTOR (16)
4269 +#define OOB_AVAI_PER_SECTOR (8)
4270 +
4271 +#ifndef PART_SIZE_BMTPOOL
4272 +#define BMT_POOL_SIZE (80)
4273 +#else
4274 +#define BMT_POOL_SIZE (PART_SIZE_BMTPOOL)
4275 +#endif
4276 +
4277 +#define PMT_POOL_SIZE (2)
4278 +
4279 +#define TIMEOUT_1 0x1fff
4280 +#define TIMEOUT_2 0x8ff
4281 +#define TIMEOUT_3 0xffff
4282 +#define TIMEOUT_4 0xffff//5000 //PIO
4283 +
4284 +
4285 +/* temporary definitions */
4286 +#if !defined (__KERNEL_NAND__)
4287 +#define KERN_INFO
4288 +#define KERN_WARNING
4289 +#define KERN_ERR
4290 +#define PAGE_SIZE (4096)
4291 +#endif
4292 +#define AddStorageTrace //AddStorageTrace
4293 +#define STORAGE_LOGGER_MSG_NAND 0
4294 +#define NFI_BASE RALINK_NAND_CTRL_BASE
4295 +#define NFIECC_BASE RALINK_NANDECC_CTRL_BASE
4296 +
4297 +#ifdef __INTERNAL_USE_AHB_MODE__
4298 +#define MT65xx_POLARITY_LOW 0
4299 +#define MT65XX_PDN_PERI_NFI 0
4300 +#define MT65xx_EDGE_SENSITIVE 0
4301 +#define MT6575_NFI_IRQ_ID (58)
4302 +#endif
4303 +
4304 +#if defined (__KERNEL_NAND__)
4305 +#define RALINK_REG(x) (*((volatile u32 *)(x)))
4306 +#define __virt_to_phys(x) virt_to_phys((volatile void*)x)
4307 +#else
4308 +#define CONFIG_MTD_NAND_VERIFY_WRITE (1)
4309 +#define printk printf
4310 +#define ra_dbg printf
4311 +#define BUG() //BUG()
4312 +#define BUG_ON(x) //BUG_ON()
4313 +#define NUM_PARTITIONS 1
4314 +#endif
4315 +
4316 +#define NFI_DEFAULT_ACCESS_TIMING (0x30C77fff) //(0x44333)
4317 +
4318 +// U-Boot only supports one chip select
4319 +#define NFI_CS_NUM (1)
4320 +#define NFI_DEFAULT_CS (0)
4321 +
4322 +#include "mt6575_typedefs.h"
4323 +
4324 +#endif /* __NAND_DEF_H__ */
4325 Index: linux-4.9.30/drivers/mtd/nand/nand_device_list.h
4326 ===================================================================
4327 --- /dev/null
4328 +++ linux-4.9.30/drivers/mtd/nand/nand_device_list.h
4329 @@ -0,0 +1,55 @@
4330 +/* Copyright Statement:
4331 + *
4332 + * This software/firmware and related documentation ("MediaTek Software") are
4333 + * protected under relevant copyright laws. The information contained herein
4334 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
4335 + * Without the prior written permission of MediaTek inc. and/or its licensors,
4336 + * any reproduction, modification, use or disclosure of MediaTek Software,
4337 + * and information contained herein, in whole or in part, shall be strictly prohibited.
4338 + */
4339 +/* MediaTek Inc. (C) 2010. All rights reserved.
4340 + *
4341 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
4342 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
4343 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
4344 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
4345 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
4346 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
4347 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
4348 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
4349 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
4350 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
4351 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
4352 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
4353 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
4354 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
4355 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
4356 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
4357 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
4358 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
4359 + *
4360 + * The following software/firmware and/or related documentation ("MediaTek Software")
4361 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
4362 + * applicable license agreements with MediaTek Inc.
4363 + */
4364 +
4365 +#ifndef __NAND_DEVICE_LIST_H__
4366 +#define __NAND_DEVICE_LIST_H__
4367 +
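+/*
+ * Column order follows flashdev_info: id (manufacturer << 8 | device),
+ * ext_id, addr_cycle, iowidth, totalsize (MiB), blocksize (KiB),
+ * pagesize (bytes), sparesize (bytes), timing setting, device name,
+ * advanced mode.
+ */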
4368 +static const flashdev_info gen_FlashTable[]={
4369 + {0x20BC, 0x105554, 5, 16, 512, 128, 2048, 64, 0x1123, "EHD013151MA_5", 0},
4370 + {0xECBC, 0x005554, 5, 16, 512, 128, 2048, 64, 0x1123, "K524G2GACB_A0", 0},
4371 + {0x2CBC, 0x905556, 5, 16, 512, 128, 2048, 64, 0x21044333, "MT29C4G96MAZA", 0},
4372 + {0xADBC, 0x905554, 5, 16, 512, 128, 2048, 64, 0x10801011, "H9DA4GH4JJAMC", 0},
4373 + {0x01F1, 0x801D01, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "S34ML01G100TF", 0},
4374 + {0x92F1, 0x8095FF, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81A", 0},
4375 + {0xECD3, 0x519558, 5, 8, 1024, 128, 2048, 64, 0x44333, "K9K8G8000", 0},
4376 + {0xC2F1, 0x801DC2, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "MX30LF1G08AA", 0},
4377 + {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 224, 0x00C25332, "TC58NVG3S0F", 0},
4378 + {0x01DA, 0x909546, 5, 8, 256, 128, 2048, 128, 0x30C77fff, "S34ML02G200TF", 0},
4379 + {0x01DC, 0x909556, 5, 8, 512, 128, 2048, 128, 0x30C77fff, "S34ML04G200TF", 0},
4380 + {0x0000, 0x000000, 0, 0, 0, 0, 0, 0, 0, "xxxxxxxxxx", 0},
4381 +};
4382 +
4383 +
4384 +#endif
4385 Index: linux-4.9.30/drivers/mtd/nand/partition.h
4386 ===================================================================
4387 --- /dev/null
4388 +++ linux-4.9.30/drivers/mtd/nand/partition.h
4389 @@ -0,0 +1,115 @@
4390 +/* Copyright Statement:
4391 + *
4392 + * This software/firmware and related documentation ("MediaTek Software") are
4393 + * protected under relevant copyright laws. The information contained herein
4394 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
4395 + * Without the prior written permission of MediaTek inc. and/or its licensors,
4396 + * any reproduction, modification, use or disclosure of MediaTek Software,
4397 + * and information contained herein, in whole or in part, shall be strictly prohibited.
4398 + */
4399 +/* MediaTek Inc. (C) 2010. All rights reserved.
4400 + *
4401 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
4402 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
4403 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
4404 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
4405 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
4406 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
4407 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
4408 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
4409 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
4410 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
4411 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
4412 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
4413 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
4414 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
4415 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
4416 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
4417 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
4418 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
4419 + *
4420 + * The following software/firmware and/or related documentation ("MediaTek Software")
4421 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
4422 + * applicable license agreements with MediaTek Inc.
4423 + */
4424 +
4425 +#include <linux/mtd/mtd.h>
4426 +#include <linux/mtd/nand.h>
4427 +#include <linux/mtd/partitions.h>
4428 +
4429 +#define RECONFIG_PARTITION_SIZE 1
4430 +
4431 +#define MTD_BOOT_PART_SIZE 0x80000
4432 +#define MTD_CONFIG_PART_SIZE 0x20000
4433 +#define MTD_FACTORY_PART_SIZE 0x20000
4434 +
4435 +extern unsigned int CFG_BLOCKSIZE;
4436 +#define LARGE_MTD_BOOT_PART_SIZE (CFG_BLOCKSIZE<<2)
4437 +#define LARGE_MTD_CONFIG_PART_SIZE (CFG_BLOCKSIZE<<2)
4438 +#define LARGE_MTD_FACTORY_PART_SIZE (CFG_BLOCKSIZE<<1)
4439 +
4440 +/*=======================================================================*/
4441 +/* NAND PARTITION Mapping */
4442 +/*=======================================================================*/
4443 +//#ifdef CONFIG_MTD_PARTITIONS
4444 +static struct mtd_partition g_pasStatic_Partition[] = {
4445 + {
4446 + name: "ALL",
4447 + size: MTDPART_SIZ_FULL,
4448 + offset: 0,
4449 + },
4450 + /* Put your own partition definitions here */
4451 + {
4452 + name: "Bootloader",
4453 + size: MTD_BOOT_PART_SIZE,
4454 + offset: 0,
4455 + }, {
4456 + name: "Config",
4457 + size: MTD_CONFIG_PART_SIZE,
4458 + offset: MTDPART_OFS_APPEND
4459 + }, {
4460 + name: "Factory",
4461 + size: MTD_FACTORY_PART_SIZE,
4462 + offset: MTDPART_OFS_APPEND
4463 +#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
4464 + }, {
4465 + name: "Kernel",
4466 + size: MTD_KERN_PART_SIZE,
4467 + offset: MTDPART_OFS_APPEND,
4468 + }, {
4469 + name: "RootFS",
4470 + size: MTD_ROOTFS_PART_SIZE,
4471 + offset: MTDPART_OFS_APPEND,
4472 +#ifdef CONFIG_ROOTFS_IN_FLASH_NO_PADDING
4473 + }, {
4474 + name: "Kernel_RootFS",
4475 + size: MTD_KERN_PART_SIZE + MTD_ROOTFS_PART_SIZE,
4476 + offset: MTD_BOOT_PART_SIZE + MTD_CONFIG_PART_SIZE + MTD_FACTORY_PART_SIZE,
4477 +#endif
4478 +#else //CONFIG_RT2880_ROOTFS_IN_RAM
4479 + }, {
4480 + name: "Kernel",
4481 + size: 0x10000,
4482 + offset: MTDPART_OFS_APPEND,
4483 +#endif
4484 +#ifdef CONFIG_DUAL_IMAGE
4485 + }, {
4486 + name: "Kernel2",
4487 + size: MTD_KERN2_PART_SIZE,
4488 + offset: MTD_KERN2_PART_OFFSET,
4489 +#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
4490 + }, {
4491 + name: "RootFS2",
4492 + size: MTD_ROOTFS2_PART_SIZE,
4493 + offset: MTD_ROOTFS2_PART_OFFSET,
4494 +#endif
4495 +#endif
4496 + }
4497 +
4498 +};
4499 +
4500 +#define NUM_PARTITIONS ARRAY_SIZE(g_pasStatic_Partition)
4501 +extern int part_num; // = NUM_PARTITIONS;
4502 +//#endif
4503 +#undef RECONFIG_PARTITION_SIZE
4504 +