forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/mtd/ubi/wl.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) International Business Machines Corp., 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
- * the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
  */
@@ -278,6 +265,27 @@
 }
 
 /**
+ * in_pq - check if a wear-leveling entry is present in the protection queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns non-zero if @e is in the protection queue and zero
+ * if it is not.
+ */
+static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+	struct ubi_wl_entry *p;
+	int i;
+
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+		list_for_each_entry(p, &ubi->pq[i], u.list)
+			if (p == e)
+				return 1;
+
+	return 0;
+}
+
+/**
  * prot_queue_add - add physical eraseblock to the protection queue.
  * @ubi: UBI device description object
  * @e: the physical eraseblock to add
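
The in_pq() helper added above is a plain linear scan across all UBI_PROT_QUEUE_LEN buckets of the protection queue. Below is a minimal user-space sketch of the same membership test, using a hand-rolled singly linked list in place of the kernel's list_head/list_for_each_entry machinery; every name in the sketch is illustrative, not taken from wl.c.

#include <stdio.h>
#include <stddef.h>

#define PROT_QUEUE_LEN 10  /* stand-in for UBI_PROT_QUEUE_LEN */

struct wl_entry {
	int pnum;               /* physical eraseblock number */
	struct wl_entry *next;  /* next entry in the same queue bucket */
};

/* pq[i] holds entries that become usable again after i more "ticks". */
static struct wl_entry *pq[PROT_QUEUE_LEN];

/* Return non-zero if @e is linked into any protection-queue bucket. */
static int in_pq(const struct wl_entry *e)
{
	const struct wl_entry *p;
	int i;

	for (i = 0; i < PROT_QUEUE_LEN; ++i)
		for (p = pq[i]; p; p = p->next)
			if (p == e)
				return 1;

	return 0;
}

int main(void)
{
	struct wl_entry a = { .pnum = 42, .next = NULL };
	struct wl_entry b = { .pnum = 7, .next = NULL };

	pq[3] = &a;  /* protect PEB 42 for three more "ticks" */

	printf("a in pq: %d\n", in_pq(&a));  /* prints 1 */
	printf("b in pq: %d\n", in_pq(&b));  /* prints 0 */
	return 0;
}

Like the kernel helper, the sketch is linear in the queue length; as the rest of this diff shows, the check only runs on the scrubbing and self-check paths, so that cost is acceptable.
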
@@ -311,7 +319,7 @@
 					  struct rb_root *root, int diff)
 {
 	struct rb_node *p;
-	struct ubi_wl_entry *e, *prev_e = NULL;
+	struct ubi_wl_entry *e;
 	int max;
 
 	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
@@ -326,7 +334,6 @@
 			p = p->rb_left;
 		else {
 			p = p->rb_right;
-			prev_e = e;
 			e = e1;
 		}
 	}
@@ -680,8 +687,21 @@
 }
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
+	e1 = find_anchor_wl_entry(&ubi->used);
+	if (e1 && ubi->fm_anchor &&
+	    (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+		ubi->fm_do_produce_anchor = 1;
+		/*
+		 * fm_anchor is no longer considered a good anchor.
+		 * NULL assignment also prevents multiple wear level checks
+		 * of this PEB.
+		 */
+		wl_tree_add(ubi->fm_anchor, &ubi->free);
+		ubi->fm_anchor = NULL;
+		ubi->free_count++;
+	}
+
 	if (ubi->fm_do_produce_anchor) {
-		e1 = find_anchor_wl_entry(&ubi->used);
 		if (!e1)
 			goto out_cancel;
 		e2 = get_peb_for_wl(ubi);
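
The hunk above retires a worn fastmap anchor: once the anchor PEB's erase counter has drifted at least UBI_WL_THRESHOLD above the lowest-EC anchor candidate in the used tree, the PEB is handed back to the free tree and fm_do_produce_anchor is set so wear leveling produces a fresher anchor. The following self-contained sketch mirrors that decision flow with stand-in types and a stubbed free tree; all names here are hypothetical, only the threshold comparison comes from the diff.

#include <stdbool.h>
#include <stdio.h>

#define WL_THRESHOLD 4096  /* stand-in for UBI_WL_THRESHOLD */

struct wl_entry {
	int pnum;  /* physical eraseblock number */
	int ec;    /* erase counter */
};

struct dev_state {
	struct wl_entry *fm_anchor;  /* current fastmap anchor, or NULL */
	bool fm_do_produce_anchor;   /* ask wear leveling for a new anchor */
	int free_count;              /* number of PEBs in the free tree */
};

/* Stub: in wl.c this role is played by wl_tree_add(e, &ubi->free). */
static void add_to_free_tree(struct wl_entry *e)
{
	printf("PEB %d (EC %d) returned to the free tree\n", e->pnum, e->ec);
}

/*
 * Mirror of the new check: retire the anchor once its erase counter
 * exceeds the best candidate's by WL_THRESHOLD or more.
 */
static void maybe_retire_anchor(struct dev_state *s, struct wl_entry *best_used)
{
	if (best_used && s->fm_anchor &&
	    s->fm_anchor->ec - best_used->ec >= WL_THRESHOLD) {
		s->fm_do_produce_anchor = true;
		add_to_free_tree(s->fm_anchor);
		s->fm_anchor = NULL;  /* also stops repeated checks of this PEB */
		s->free_count++;
	}
}

int main(void)
{
	struct wl_entry anchor = { .pnum = 0, .ec = 5000 };
	struct wl_entry candidate = { .pnum = 17, .ec = 100 };
	struct dev_state s = { .fm_anchor = &anchor };

	maybe_retire_anchor(&s, &candidate);
	printf("produce new anchor: %d, free_count: %d\n",
	       s.fm_do_produce_anchor, s.free_count);
	return 0;
}
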
@@ -1066,7 +1086,12 @@
 	if (!err) {
 		spin_lock(&ubi->wl_lock);
 
-		if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
+		if (!ubi->fm_disabled && !ubi->fm_anchor &&
+		    e->pnum < UBI_FM_MAX_START) {
+			/*
+			 * Abort anchor production, if needed it will be
+			 * enabled again in the wear leveling started below.
+			 */
 			ubi->fm_anchor = e;
 			ubi->fm_do_produce_anchor = 0;
 		} else {
@@ -1412,6 +1437,150 @@
 	return err;
 }
 
+static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+	if (in_wl_tree(e, &ubi->scrub))
+		return false;
+	else if (in_wl_tree(e, &ubi->erroneous))
+		return false;
+	else if (ubi->move_from == e)
+		return false;
+	else if (ubi->move_to == e)
+		return false;
+
+	return true;
+}
+
+/**
+ * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ * @force: don't read the block, assume bitflips happened and take action.
+ *
+ * This function reads the given eraseblock and checks if bitflips occurred.
+ * In case of bitflips, the eraseblock is scheduled for scrubbing.
+ * If scrubbing is forced with @force, the eraseblock is not read,
+ * but scheduled for scrubbing right away.
+ *
+ * Returns:
+ * %EINVAL, PEB is out of range
+ * %ENOENT, PEB is no longer used by UBI
+ * %EBUSY, PEB cannot be checked now or a check is currently running on it
+ * %EAGAIN, bit flips happened but scrubbing is currently not possible
+ * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
+ * %0, no bit flips detected
+ */
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
+{
+	int err = 0;
+	struct ubi_wl_entry *e;
+
+	if (pnum < 0 || pnum >= ubi->peb_count) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Pause all parallel work, otherwise it can happen that the
+	 * erase worker frees a wl entry under us.
+	 */
+	down_write(&ubi->work_sem);
+
+	/*
+	 * Make sure that the wl entry does not change state while
+	 * inspecting it.
+	 */
+	spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+	if (!e) {
+		spin_unlock(&ubi->wl_lock);
+		err = -ENOENT;
+		goto out_resume;
+	}
+
+	/*
+	 * Does it make sense to check this PEB?
+	 */
+	if (!scrub_possible(ubi, e)) {
+		spin_unlock(&ubi->wl_lock);
+		err = -EBUSY;
+		goto out_resume;
+	}
+	spin_unlock(&ubi->wl_lock);
+
+	if (!force) {
+		mutex_lock(&ubi->buf_mutex);
+		err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+		mutex_unlock(&ubi->buf_mutex);
+	}
+
+	if (force || err == UBI_IO_BITFLIPS) {
+		/*
+		 * Okay, bit flips happened, let's figure out what we can do.
+		 */
+		spin_lock(&ubi->wl_lock);
+
+		/*
+		 * Recheck. We released wl_lock, UBI might have killed the
+		 * wl entry under us.
+		 */
+		e = ubi->lookuptbl[pnum];
+		if (!e) {
+			spin_unlock(&ubi->wl_lock);
+			err = -ENOENT;
+			goto out_resume;
+		}
+
+		/*
+		 * Need to re-check state
+		 */
+		if (!scrub_possible(ubi, e)) {
+			spin_unlock(&ubi->wl_lock);
+			err = -EBUSY;
+			goto out_resume;
+		}
+
+		if (in_pq(ubi, e)) {
+			prot_queue_del(ubi, e->pnum);
+			wl_tree_add(e, &ubi->scrub);
+			spin_unlock(&ubi->wl_lock);
+
+			err = ensure_wear_leveling(ubi, 1);
+		} else if (in_wl_tree(e, &ubi->used)) {
+			rb_erase(&e->u.rb, &ubi->used);
+			wl_tree_add(e, &ubi->scrub);
+			spin_unlock(&ubi->wl_lock);
+
+			err = ensure_wear_leveling(ubi, 1);
+		} else if (in_wl_tree(e, &ubi->free)) {
+			rb_erase(&e->u.rb, &ubi->free);
+			ubi->free_count--;
+			spin_unlock(&ubi->wl_lock);
+
+			/*
+			 * This PEB is empty, we can schedule it for
+			 * erasure right away. No wear leveling needed.
+			 */
+			err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
+					     force ? 0 : 1, true);
+		} else {
+			spin_unlock(&ubi->wl_lock);
+			err = -EAGAIN;
+		}
+
+		if (!err && !force)
+			err = -EUCLEAN;
+	} else {
+		err = 0;
+	}
+
+out_resume:
+	up_write(&ubi->work_sem);
+out:
+
+	return err;
+}
+
 /**
  * tree_destroy - destroy an RB-tree.
  * @ubi: UBI device description object
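
Since ubi_bitflip_check() reports its outcome purely through the return codes documented in its kernel-doc, a caller only needs a small dispatch over them. The following user-space-compilable mapping is illustrative; only the errno names come from the "Returns:" list above, and describe_bitflip_check() is a hypothetical helper, not part of the driver.

#include <errno.h>
#include <stdio.h>

/*
 * Map the return codes documented for ubi_bitflip_check() to
 * human-readable outcomes. The switch mirrors the kernel-doc;
 * check_result in main() stands in for a real call into the driver.
 */
static const char *describe_bitflip_check(int err)
{
	switch (err) {
	case 0:
		return "no bit flips detected";
	case -EINVAL:
		return "PEB is out of range";
	case -ENOENT:
		return "PEB is no longer used by UBI";
	case -EBUSY:
		return "PEB cannot be checked now, try again later";
	case -EAGAIN:
		return "bit flips happened, but scrubbing is not possible now";
	case -EUCLEAN:
		return "bit flips happened, PEB scheduled for scrubbing";
	default:
		return "unexpected error";
	}
}

int main(void)
{
	int check_result = -EUCLEAN;  /* pretend the driver found bit flips */

	printf("%s\n", describe_bitflip_check(check_result));
	return 0;
}

Note that -EUCLEAN is the success-with-action case here: bit flips were found and the PEB was queued for scrubbing, so a caller should usually treat it as informational rather than as a failure.
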
@@ -1731,7 +1900,8 @@
 		goto out_free;
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
-	ubi_ensure_anchor_pebs(ubi);
+	if (!ubi->ro_mode && !ubi->fm_disabled)
+		ubi_ensure_anchor_pebs(ubi);
 #endif
 	return 0;
 
@@ -1857,16 +2027,11 @@
 static int self_check_in_pq(const struct ubi_device *ubi,
 			    struct ubi_wl_entry *e)
 {
-	struct ubi_wl_entry *p;
-	int i;
-
 	if (!ubi_dbg_chk_gen(ubi))
 		return 0;
 
-	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
-		list_for_each_entry(p, &ubi->pq[i], u.list)
-			if (p == e)
-				return 0;
+	if (in_pq(ubi, e))
+		return 0;
 
 	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
 		e->pnum, e->ec);