hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/rkflash/sfc_nand_mtd_bbt.c
@@ -21,12 +21,118 @@
 #define BBT_DBG(args...)
 #endif
 
+#define BBT_VERSION_INVALID             (0xFFFFFFFFU)
+#define BBT_VERSION_BLOCK_ABNORMAL      (BBT_VERSION_INVALID - 1)
+#define BBT_VERSION_MAX                 (BBT_VERSION_INVALID - 8)
 struct nanddev_bbt_info {
         u8 pattern[4];
         unsigned int version;
+        u32 hash;
 };
 
 static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+
+#if defined(BBT_DEBUG) && defined(BBT_DEBUG_DUMP)
+static void bbt_dbg_hex(char *s, void *buf, u32 len)
+{
+        print_hex_dump(KERN_WARNING, s, DUMP_PREFIX_OFFSET, 4, 4, buf, len, 0);
+}
+#endif
+
+static u32 js_hash(u8 *buf, u32 len)
+{
+        u32 hash = 0x47C6A7E6;
+        u32 i;
+
+        for (i = 0; i < len; i++)
+                hash ^= ((hash << 5) + buf[i] + (hash >> 2));
+
+        return hash;
+}
+
+static bool bbt_check_hash(u8 *buf, u32 len, u32 hash_cmp)
+{
+        u32 hash;
+
+        /* compatible with no-hash version */
+        if (hash_cmp == 0 || hash_cmp == 0xFFFFFFFF)
+                return 1;
+
+        hash = js_hash(buf, len);
+        if (hash != hash_cmp)
+                return 0;
+
+        return 1;
+}
+
+static u32 bbt_nand_isbad_bypass(struct snand_mtd_dev *nand, u32 block)
+{
+        struct mtd_info *mtd = snanddev_to_mtd(nand);
+
+        return sfc_nand_isbad_mtd(mtd, block * mtd->erasesize);
+}
+
+static int bbt_mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+        int i, ret = 0, bbt_page_num, page_addr, block;
+        u8 *temp_buf;
+
+        bbt_page_num = ops->len >> mtd->writesize_shift;
+        block = from >> mtd->erasesize_shift;
+
+        temp_buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+        if (!temp_buf)
+                return -ENOMEM;
+
+        page_addr = (u32)(block << (mtd->erasesize_shift - mtd->writesize_shift));
+        for (i = 0; i < bbt_page_num; i++) {
+                ret = sfc_nand_read_page_raw(0, page_addr + i, (u32 *)temp_buf);
+                if (ret < 0) {
+                        pr_err("%s fail %d\n", __func__, ret);
+                        ret = -EIO;
+                        goto out;
+                }
+
+                memcpy(ops->datbuf + i * mtd->writesize, temp_buf, mtd->writesize);
+                memcpy(ops->oobbuf + i * mtd->oobsize, temp_buf + mtd->writesize, mtd->oobsize);
+        }
+
+out:
+        kfree(temp_buf);
+
+        return ret;
+}
+
+static int bbt_mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+        int i, ret = 0, bbt_page_num, page_addr, block;
+        u8 *temp_buf;
+
+        bbt_page_num = ops->len >> mtd->writesize_shift;
+        block = to >> mtd->erasesize_shift;
+
+        temp_buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+        if (!temp_buf)
+                return -ENOMEM;
+
+        page_addr = (u32)(block << (mtd->erasesize_shift - mtd->writesize_shift));
+        for (i = 0; i < bbt_page_num; i++) {
+                memcpy(temp_buf, ops->datbuf + i * mtd->writesize, mtd->writesize);
+                memcpy(temp_buf + mtd->writesize, ops->oobbuf + i * mtd->oobsize, mtd->oobsize);
+
+                ret = sfc_nand_prog_page_raw(0, page_addr + i, (u32 *)temp_buf);
+                if (ret < 0) {
+                        pr_err("%s fail %d\n", __func__, ret);
+                        ret = -EIO;
+                        goto out;
+                }
+        }
+
+out:
+        kfree(temp_buf);
+
+        return ret;
+}
 
 /**
  * nanddev_read_bbt() - Read the BBT (Bad Block Table)
@@ -37,7 +143,7 @@
  *
  * Initialize the in-memory BBT.
  *
- * Return: 0 in case of success, a negative error code otherwise.
+ * Return: a positive value on success, 0 for abnormal data, a negative error code otherwise.
  */
 static int nanddev_read_bbt(struct snand_mtd_dev *nand, u32 block, bool update)
 {
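The checksum introduced above is a JS-style string hash seeded with 0x47C6A7E6, run over the raw BBT buffer. A minimal host-side sketch (the file name and sample data are illustrative, not part of the driver) that reproduces the same value, e.g. when inspecting a BBT block dumped from flash:

/* js_hash_host.c - standalone sketch mirroring js_hash() from this patch */
#include <stdint.h>
#include <stdio.h>

static uint32_t js_hash(const uint8_t *buf, uint32_t len)
{
        uint32_t hash = 0x47C6A7E6;
        uint32_t i;

        for (i = 0; i < len; i++)
                hash ^= ((hash << 5) + buf[i] + (hash >> 2));

        return hash;
}

int main(void)
{
        /* sample buffer only; a real check would hash the dumped BBT data */
        uint8_t sample[] = { 'B', 'b', 't', '0', 1, 0, 0, 0 };

        printf("js_hash = 0x%08x\n", (unsigned int)js_hash(sample, sizeof(sample)));
        return 0;
}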
@@ -46,13 +152,12 @@
         unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
                                            BITS_PER_LONG) * sizeof(*nand->bbt.cache);
         struct mtd_info *mtd = snanddev_to_mtd(nand);
-        u8 *data_buf, *oob_buf, *temp_buf;
+        u8 *data_buf, *oob_buf;
         struct nanddev_bbt_info *bbt_info;
         struct mtd_oob_ops ops;
         u32 bbt_page_num;
         int ret = 0;
         unsigned int version = 0;
-        u32 page_addr, i;
 
         if (!nand->bbt.cache)
                 return -ENOMEM;
@@ -85,36 +190,64 @@
         ops.ooboffs = 0;
 
         /* Store one entry for each block */
-        temp_buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
-        if (!temp_buf) {
-                kfree(data_buf);
-                kfree(oob_buf);
-
-                return -ENOMEM;
+        ret = bbt_mtd_read_oob(mtd, block * mtd->erasesize, &ops);
+        if (ret && ret != -EUCLEAN) {
+                pr_err("read_bbt blk=%d fail=%d update=%d\n", block, ret, update);
+                ret = 0;
+                version = BBT_VERSION_BLOCK_ABNORMAL;
+                goto out;
+        } else {
+                ret = 0;
         }
-        page_addr = (u32)(block << (mtd->erasesize_shift - mtd->writesize_shift));
-        for (i = 0; i < bbt_page_num; i++) {
-                ret = sfc_nand_read_page_raw(0, page_addr + i, (u32 *)temp_buf);
-                if (ret < 0) {
-                        pr_err("%s fail %d\n", __func__, ret);
-                        ret = -EIO;
-                        kfree(temp_buf);
-                        goto out;
-                }
-
-                memcpy(ops.datbuf + i * mtd->writesize, temp_buf, mtd->writesize);
-                memcpy(ops.oobbuf + i * mtd->oobsize, temp_buf + mtd->writesize, mtd->oobsize);
+
+        /* bad block or good block without bbt */
+        if (memcmp(bbt_pattern, bbt_info->pattern, 4)) {
+                ret = 0;
+                goto out;
         }
-        kfree(temp_buf);
 
-        if (oob_buf[0] != 0xff && !memcmp(bbt_pattern, bbt_info->pattern, 4))
-                version = bbt_info->version;
+        /* good block with abnormal bbt */
+        if (oob_buf[0] == 0xff ||
+            !bbt_check_hash(data_buf, nbytes + sizeof(struct nanddev_bbt_info) - 4, bbt_info->hash)) {
+                pr_err("read_bbt check fail blk=%d ret=%d update=%d\n", block, ret, update);
+                ret = 0;
+                version = BBT_VERSION_BLOCK_ABNORMAL;
+                goto out;
+        }
 
-        BBT_DBG("read_bbt from blk=%d tag=%d ver=%d\n", block, update, version);
+        /* good block with good bbt */
+        version = bbt_info->version;
+        BBT_DBG("read_bbt from blk=%d ver=%d update=%d\n", block, version, update);
         if (update && version > nand->bbt.version) {
                 memcpy(nand->bbt.cache, data_buf, nbytes);
                 nand->bbt.version = version;
         }
+
+#if defined(BBT_DEBUG) && defined(BBT_DEBUG_DUMP)
+        bbt_dbg_hex("bbt", data_buf, nbytes + sizeof(struct nanddev_bbt_info));
+        if (version) {
+                u8 *temp_buf = kzalloc(bbt_page_num * mtd->writesize, GFP_KERNEL);
+                bool in_scan = nand->bbt.option & NANDDEV_BBT_SCANNED;
+
+                if (!temp_buf)
+                        goto out;
+
+                memcpy(temp_buf, nand->bbt.cache, nbytes);
+                memcpy(nand->bbt.cache, data_buf, nbytes);
+
+                if (!in_scan)
+                        nand->bbt.option |= NANDDEV_BBT_SCANNED;
+                for (block = 0; block < nblocks; block++) {
+                        ret = snanddev_bbt_get_block_status(nand, block);
+                        if (ret != NAND_BBT_BLOCK_GOOD)
+                                BBT_DBG("bad block[0x%x], ret=%d\n", block, ret);
+                }
+                if (!in_scan)
+                        nand->bbt.option &= ~NANDDEV_BBT_SCANNED;
+                memcpy(nand->bbt.cache, temp_buf, nbytes);
+                kfree(temp_buf);
+                ret = 0;
+        }
+#endif
 
 out:
         kfree(data_buf);
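In the rewritten read path above, a candidate BBT block falls into one of three cases before the cache is touched: no table (pattern missing, which covers bad blocks and never-written blocks), abnormal (raw read failure, erased OOB marker, or hash mismatch, reported as BBT_VERSION_BLOCK_ABNORMAL), or valid (version taken from the stored nanddev_bbt_info). A condensed, illustrative restatement of that decision, not driver API:

/* Illustration only: how nanddev_read_bbt() classifies a scanned block. */
enum bbt_block_state {
        BBT_BLOCK_NO_TABLE,     /* pattern "Bbt0" missing */
        BBT_BLOCK_ABNORMAL,     /* read error, erased OOB byte or bad hash */
        BBT_BLOCK_VALID,        /* caller may trust bbt_info->version */
};

static enum bbt_block_state classify_bbt_block(int read_ret, const u8 *oob,
                                               u8 *data, u32 hashed_len,
                                               const struct nanddev_bbt_info *info)
{
        if (read_ret && read_ret != -EUCLEAN)
                return BBT_BLOCK_ABNORMAL;
        if (memcmp(bbt_pattern, info->pattern, 4))
                return BBT_BLOCK_NO_TABLE;
        if (oob[0] == 0xff || !bbt_check_hash(data, hashed_len, info->hash))
                return BBT_BLOCK_ABNORMAL;
        return BBT_BLOCK_VALID;
}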
@@ -130,12 +263,11 @@
         unsigned int nbytes = DIV_ROUND_UP(nblocks * bits_per_block,
                                            BITS_PER_LONG) * sizeof(*nand->bbt.cache);
         struct mtd_info *mtd = snanddev_to_mtd(nand);
-        u8 *data_buf, *oob_buf, *temp_buf;
+        u8 *data_buf, *oob_buf;
         struct nanddev_bbt_info *bbt_info;
         struct mtd_oob_ops ops;
         u32 bbt_page_num;
-        int ret = 0;
-        u32 page_addr, i;
+        int ret = 0, version;
 
         BBT_DBG("write_bbt to blk=%d ver=%d\n", block, nand->bbt.version);
         if (!nand->bbt.cache)
@@ -164,6 +296,7 @@
         memcpy(data_buf, nand->bbt.cache, nbytes);
         memcpy(bbt_info, bbt_pattern, 4);
         bbt_info->version = nand->bbt.version;
+        bbt_info->hash = js_hash(data_buf, nbytes + sizeof(struct nanddev_bbt_info) - 4);
 
         /* Store one entry for each block */
         ret = sfc_nand_erase_mtd(mtd, block * mtd->erasesize);
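The hash stored here is checked again by nanddev_read_bbt() over the same span: the packed bad-block bitmap plus the pattern and version fields, but not the 4-byte hash word itself, which is why both call sites pass nbytes + sizeof(struct nanddev_bbt_info) - 4. A sketch of the on-flash layout, assuming no padding inside struct nanddev_bbt_info:

/*
 * data_buf written to the BBT block by nanddev_write_bbt():
 *
 *   [ bbt bitmap: nbytes ][ pattern "Bbt0": 4 ][ version: 4 ][ hash: 4 ]
 *   |<--------------- covered by js_hash() --------------->|
 */
u32 hashed_len = nbytes + sizeof(struct nanddev_bbt_info) - 4;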
@@ -171,34 +304,27 @@
                 goto out;
 
         memset(&ops, 0, sizeof(struct mtd_oob_ops));
+        ops.mode = MTD_OPS_PLACE_OOB;
         ops.datbuf = data_buf;
         ops.len = bbt_page_num * mtd->writesize;
         ops.oobbuf = oob_buf;
         ops.ooblen = bbt_page_num * mtd->oobsize;
         ops.ooboffs = 0;
-
-        temp_buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
-        if (!temp_buf) {
-                kfree(data_buf);
-                kfree(oob_buf);
-
-                return -ENOMEM;
+        ret = bbt_mtd_write_oob(mtd, block * mtd->erasesize, &ops);
+        if (ret) {
+                sfc_nand_erase_mtd(mtd, block * mtd->erasesize);
+                goto out;
         }
-        page_addr = (u32)(block << (mtd->erasesize_shift - mtd->writesize_shift));
-        for (i = 0; i < bbt_page_num; i++) {
-                memcpy(temp_buf, ops.datbuf + i * mtd->writesize, mtd->writesize);
-                memcpy(temp_buf + mtd->writesize, ops.oobbuf + i * mtd->oobsize, mtd->oobsize);
 
-                ret = sfc_nand_prog_page_raw(0, page_addr + i, (u32 *)temp_buf);
-                if (ret < 0) {
-                        pr_err("%s fail %d\n", __func__, ret);
-                        ret = -EIO;
-                        kfree(temp_buf);
-                        goto out;
-                }
+        version = nanddev_read_bbt(nand, block, false);
+        if (version != bbt_info->version) {
+                pr_err("bbt_write fail, blk=%d recheck fail %d-%d\n",
+                       block, version, bbt_info->version);
+                sfc_nand_erase_mtd(mtd, block * mtd->erasesize);
+                ret = -EIO;
+        } else {
+                ret = 0;
         }
-        kfree(temp_buf);
-
 out:
         kfree(data_buf);
         kfree(oob_buf);
@@ -211,13 +337,29 @@
         unsigned int nblocks = snanddev_neraseblocks(nand);
         struct mtd_info *mtd = snanddev_to_mtd(nand);
         u32 start_block, block;
+        unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
+        unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
+                                           BITS_PER_LONG);
 
         start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
 
         for (block = 0; block < nblocks; block++) {
-                if (sfc_nand_isbad_mtd(mtd, block * mtd->erasesize))
+                if (sfc_nand_isbad_mtd(mtd, block * mtd->erasesize)) {
+                        if (bbt_nand_isbad_bypass(nand, 0)) {
+                                memset(nand->bbt.cache, 0, nwords * sizeof(*nand->bbt.cache));
+                                pr_err("bbt_format fail, test good block %d fail\n", 0);
+                                return -EIO;
+                        }
+
+                        if (!bbt_nand_isbad_bypass(nand, block)) {
+                                memset(nand->bbt.cache, 0, nwords * sizeof(*nand->bbt.cache));
+                                pr_err("bbt_format fail, test bad block %d fail\n", block);
+                                return -EIO;
+                        }
+
                         snanddev_bbt_set_block_status(nand, block,
-                                              NAND_BBT_BLOCK_FACTORY_BAD);
+                                                      NAND_BBT_BLOCK_FACTORY_BAD);
+                }
         }
 
         for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
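For context, nwords above sizes the in-memory status bitmap in longs, matching the DIV_ROUND_UP() sizing used for nand->bbt.cache elsewhere in the file, so the memset() fully clears the cache before bailing out. A worked example under assumed geometry (the numbers are illustrative; the real values come from the chip and from fls(NAND_BBT_BLOCK_NUM_STATUS)):

/* Assumed: 2048 erase blocks, 3 status bits per block, 64-bit longs. */
unsigned int bits_per_block = 3;        /* fls(NAND_BBT_BLOCK_NUM_STATUS) */
unsigned int nblocks = 2048;
unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block, BITS_PER_LONG);
/* 6144 bits / 64 = 96 longs; the memset() then clears 96 * 8 = 768 bytes */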
@@ -243,13 +385,33 @@
 
         nand->bbt.option |= NANDDEV_BBT_SCANNED;
         if (nand->bbt.version == 0) {
-                nanddev_bbt_format(nand);
+                ret = nanddev_bbt_format(nand);
+                if (ret) {
+                        nand->bbt.option = 0;
+                        pr_err("%s format fail\n", __func__);
+
+                        return ret;
+                }
+
                 ret = snanddev_bbt_update(nand);
                 if (ret) {
                         nand->bbt.option = 0;
-                        pr_err("%s fail\n", __func__);
+                        pr_err("%s update fail\n", __func__);
+
+                        return ret;
                 }
         }
+
+#if defined(BBT_DEBUG)
+        pr_err("scan_bbt success\n");
+        if (nand->bbt.version) {
+                for (block = 0; block < nblocks; block++) {
+                        ret = snanddev_bbt_get_block_status(nand, block);
+                        if (ret != NAND_BBT_BLOCK_GOOD)
+                                BBT_DBG("bad block[0x%x], ret=%d\n", block, ret);
+                }
+        }
+#endif
 
         return ret;
 }
@@ -304,32 +466,32 @@
 int snanddev_bbt_update(struct snand_mtd_dev *nand)
 {
 #ifdef CONFIG_MTD_NAND_BBT_USING_FLASH
+        struct mtd_info *mtd = snanddev_to_mtd(nand);
+
         if (nand->bbt.cache &&
             nand->bbt.option & NANDDEV_BBT_USE_FLASH) {
                 unsigned int nblocks = snanddev_neraseblocks(nand);
                 u32 bbt_version[NANDDEV_BBT_SCAN_MAXBLOCKS];
                 int start_block, block;
                 u32 min_version, block_des;
-                int ret, count = 0;
+                int ret, count = 0, status;
 
                 start_block = nblocks - NANDDEV_BBT_SCAN_MAXBLOCKS;
                 for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
-                        ret = snanddev_bbt_get_block_status(nand, start_block + block);
-                        if (ret == NAND_BBT_BLOCK_FACTORY_BAD) {
-                                bbt_version[block] = 0xFFFFFFFF;
-                                continue;
-                        }
-                        ret = nanddev_read_bbt(nand, start_block + block,
-                                               false);
-                        if (ret < 0)
-                                bbt_version[block] = 0xFFFFFFFF;
-                        else if (ret == 0)
-                                bbt_version[block] = 0;
+                        status = snanddev_bbt_get_block_status(nand, start_block + block);
+                        ret = nanddev_read_bbt(nand, start_block + block, false);
+
+                        if (ret == 0 && status == NAND_BBT_BLOCK_FACTORY_BAD)
+                                bbt_version[block] = BBT_VERSION_INVALID;
+                        else if (ret == -EIO)
+                                bbt_version[block] = BBT_VERSION_INVALID;
+                        else if (ret == BBT_VERSION_BLOCK_ABNORMAL)
+                                bbt_version[block] = ret;
                         else
                                 bbt_version[block] = ret;
                 }
 get_min_ver:
-                min_version = 0xFFFFFFFF;
+                min_version = BBT_VERSION_MAX;
                 block_des = 0;
                 for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
                         if (bbt_version[block] < min_version) {
@@ -338,25 +500,37 @@
                         }
                 }
 
+                /* Overwrite the BBT_VERSION_BLOCK_ABNORMAL block */
+                if (nand->bbt.version < min_version)
+                        nand->bbt.version = min_version + 4;
+
                 if (block_des > 0) {
                         nand->bbt.version++;
                         ret = nanddev_write_bbt(nand, block_des);
-                        bbt_version[block_des - start_block] = 0xFFFFFFFF;
                         if (ret) {
-                                pr_err("%s blk= %d ret= %d\n", __func__,
-                                       block_des, ret);
-                                goto get_min_ver;
-                        } else {
-                                count++;
-                                if (count < 2)
-                                        goto get_min_ver;
-                                BBT_DBG("%s success\n", __func__);
-                        }
-                } else {
-                        pr_err("%s failed\n", __func__);
+                                pr_err("bbt_update fail, blk=%d ret= %d\n", block_des, ret);
 
-                        return -1;
+                                return -1;
+                        }
+
+                        bbt_version[block_des - start_block] = BBT_VERSION_INVALID;
+                        count++;
+                        if (count < 2)
+                                goto get_min_ver;
+                        BBT_DBG("bbt_update success\n");
+                } else {
+                        pr_err("bbt_update failed\n");
+                        ret = -1;
                 }
+
+                for (block = 0; block < NANDDEV_BBT_SCAN_MAXBLOCKS; block++) {
+                        if (bbt_version[block] == BBT_VERSION_BLOCK_ABNORMAL) {
+                                block_des = start_block + block;
+                                sfc_nand_erase_mtd(mtd, block_des * mtd->erasesize);
+                        }
+                }
+
+                return ret;
         }
 #endif
         return 0;
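The update policy above keeps one version slot per scan block: a readable table contributes its stored version, a block with no table contributes 0, a corrupt or unreadable one is tagged BBT_VERSION_BLOCK_ABNORMAL, and factory-bad or freshly rewritten blocks are parked at BBT_VERSION_INVALID. Because the selection threshold is BBT_VERSION_MAX, only ordinary versions can be chosen, so each round overwrites the oldest usable copy, two copies are written per update (count < 2), and abnormal blocks are erased afterwards. A small host-side model of the selection step (illustrative only; SCAN_MAXBLOCKS = 4 is an assumed value of NANDDEV_BBT_SCAN_MAXBLOCKS):

/* Host-side sketch of the "pick the oldest scan block" loop. */
#include <stdint.h>

#define BBT_VERSION_INVALID             0xFFFFFFFFU
#define BBT_VERSION_BLOCK_ABNORMAL      (BBT_VERSION_INVALID - 1)
#define BBT_VERSION_MAX                 (BBT_VERSION_INVALID - 8)
#define SCAN_MAXBLOCKS                  4

/* Returns the index to overwrite next, or -1 if no scan block is usable. */
static int pick_oldest(const uint32_t version[SCAN_MAXBLOCKS])
{
        uint32_t min_version = BBT_VERSION_MAX;
        int block, pick = -1;

        for (block = 0; block < SCAN_MAXBLOCKS; block++) {
                if (version[block] < min_version) {
                        min_version = version[block];
                        pick = block;
                }
        }
        return pick;    /* INVALID and ABNORMAL entries are never picked */
}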