commit 2f7c68cb55ecb7331f2381deb497c27155f32faf
Date: 2024-01-03

--- a/kernel/drivers/mtd/ubi/wl.c
+++ b/kernel/drivers/mtd/ubi/wl.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) International Business Machines Corp., 2006
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
  */
@@ -278,6 +265,27 @@
 }
 
 /**
+ * in_pq - check if a wear-leveling entry is present in the protection queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns non-zero if @e is in the protection queue and zero
+ * if it is not.
+ */
+static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+        struct ubi_wl_entry *p;
+        int i;
+
+        for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+                list_for_each_entry(p, &ubi->pq[i], u.list)
+                        if (p == e)
+                                return 1;
+
+        return 0;
+}
+
+/**
  * prot_queue_add - add physical eraseblock to the protection queue.
  * @ubi: UBI device description object
  * @e: the physical eraseblock to add
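Note that in_pq() is a pure membership test and takes no locks itself. A minimal usage sketch (not taken from this patch, but mirroring what ubi_bitflip_check() does further down): the caller holds ubi->wl_lock across the test and the follow-up action:

        spin_lock(&ubi->wl_lock);
        if (in_pq(ubi, e)) {
                /* move the PEB from the protection queue to the scrub tree */
                prot_queue_del(ubi, e->pnum);
                wl_tree_add(e, &ubi->scrub);
        }
        spin_unlock(&ubi->wl_lock);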
@@ -311,7 +319,7 @@
                           struct rb_root *root, int diff)
 {
         struct rb_node *p;
-        struct ubi_wl_entry *e, *prev_e = NULL;
+        struct ubi_wl_entry *e;
         int max;
 
         e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
@@ -326,7 +334,6 @@
                         p = p->rb_left;
                 else {
                         p = p->rb_right;
-                        prev_e = e;
                         e = e1;
                 }
         }
@@ -568,6 +575,7 @@
  * @vol_id: the volume ID that last used this PEB
  * @lnum: the last used logical eraseblock number for the PEB
  * @torture: if the physical eraseblock has to be tortured
+ * @nested: denotes whether the work_sem is already held
  *
  * This function returns zero in case of success and a %-ENOMEM in case of
  * failure.
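The new @nested flag tells schedule_erase() whether ubi->work_sem is already held, so it can skip taking it again. A rough sketch of the dispatch inside schedule_erase(), assuming the upstream schedule_ubi_work()/__schedule_ubi_work() helpers (only the former takes work_sem for reading):

        if (nested)
                __schedule_ubi_work(ubi, wl_wrk); /* caller already holds work_sem */
        else
                schedule_ubi_work(ubi, wl_wrk);   /* acquires work_sem itself */

This is why the erase worker re-schedules with nested=true below, and why ubi_bitflip_check(), which holds work_sem for writing, must do the same.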
@@ -680,8 +688,21 @@
         }
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
+        e1 = find_anchor_wl_entry(&ubi->used);
+        if (e1 && ubi->fm_anchor &&
+            (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+                ubi->fm_do_produce_anchor = 1;
+                /*
+                 * fm_anchor is no longer considered a good anchor.
+                 * NULL assignment also prevents multiple wear level checks
+                 * of this PEB.
+                 */
+                wl_tree_add(ubi->fm_anchor, &ubi->free);
+                ubi->fm_anchor = NULL;
+                ubi->free_count++;
+        }
+
         if (ubi->fm_do_produce_anchor) {
-                e1 = find_anchor_wl_entry(&ubi->used);
                 if (!e1)
                         goto out_cancel;
                 e2 = get_peb_for_wl(ubi);
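To make the threshold test concrete, assuming the default CONFIG_MTD_UBI_WL_THRESHOLD of 4096 (the value is configurable): if the least-worn anchor candidate in the used tree has ec = 1000 while the current fm_anchor has ec = 5100, then 5100 - 1000 = 4100 >= 4096, so the over-worn anchor is returned to the free tree and fm_do_produce_anchor makes the code below pick a fresher anchor PEB.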
@@ -865,8 +886,11 @@
 
         err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
         if (err) {
-                if (e2)
+                if (e2) {
+                        spin_lock(&ubi->wl_lock);
                         wl_entry_destroy(ubi, e2);
+                        spin_unlock(&ubi->wl_lock);
+                }
                 goto out_ro;
         }
 
@@ -948,11 +972,11 @@
         spin_lock(&ubi->wl_lock);
         ubi->move_from = ubi->move_to = NULL;
         ubi->move_to_put = ubi->wl_scheduled = 0;
+        wl_entry_destroy(ubi, e1);
+        wl_entry_destroy(ubi, e2);
         spin_unlock(&ubi->wl_lock);
 
         ubi_free_vid_buf(vidb);
-        wl_entry_destroy(ubi, e1);
-        wl_entry_destroy(ubi, e2);
 
 out_ro:
         ubi_ro_mode(ubi);
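This hunk, together with the erase-worker changes below, moves every wl_entry_destroy() call under ubi->wl_lock. The reason becomes visible in ubi_bitflip_check(), added later in this patch: it dereferences ubi->lookuptbl[pnum] under wl_lock, so entries must not be unpublished and freed without that lock held. For reference, the helper looks roughly like this (a sketch of the existing function, not part of this diff):

        static void wl_entry_destroy(struct ubi_device *ubi,
                                     struct ubi_wl_entry *e)
        {
                /* unpublish the entry, then free it */
                ubi->lookuptbl[e->pnum] = NULL;
                kmem_cache_free(ubi_wl_entry_slab, e);
        }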
@@ -1043,8 +1067,6 @@
  * __erase_worker - physical eraseblock erase worker function.
  * @ubi: UBI device description object
  * @wl_wrk: the work object
- * @shutdown: non-zero if the worker has to free memory and exit
- *            because the WL sub-system is shutting down
  *
  * This function erases a physical eraseblock and perform torture testing if
  * needed. It also takes care about marking the physical eraseblock bad if
@@ -1066,7 +1088,12 @@
         if (!err) {
                 spin_lock(&ubi->wl_lock);
 
-                if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
+                if (!ubi->fm_disabled && !ubi->fm_anchor &&
+                    e->pnum < UBI_FM_MAX_START) {
+                        /*
+                         * Abort anchor production; if needed, it will be
+                         * enabled again by the wear leveling started below.
+                         */
                         ubi->fm_anchor = e;
                         ubi->fm_do_produce_anchor = 0;
                 } else {
@@ -1094,16 +1121,20 @@
                 int err1;
 
                 /* Re-schedule the LEB for erasure */
-                err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
+                err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
                 if (err1) {
+                        spin_lock(&ubi->wl_lock);
                         wl_entry_destroy(ubi, e);
+                        spin_unlock(&ubi->wl_lock);
                         err = err1;
                         goto out_ro;
                 }
                 return err;
         }
 
+        spin_lock(&ubi->wl_lock);
         wl_entry_destroy(ubi, e);
+        spin_unlock(&ubi->wl_lock);
         if (err != -EIO)
                 /*
                  * If this is not %-EIO, we have no idea what to do. Scheduling
@@ -1219,6 +1250,18 @@
 retry:
         spin_lock(&ubi->wl_lock);
         e = ubi->lookuptbl[pnum];
+        if (!e) {
+                /*
+                 * This wl entry has been removed due to some errors by another
+                 * process (e.g. the wear-leveling worker); that process
+                 * (except __erase_worker, which cannot run concurrently with
+                 * ubi_wl_put_peb) will have set the UBI device read-only,
+                 * so just ignore this wl entry.
+                 */
+                spin_unlock(&ubi->wl_lock);
+                up_read(&ubi->fm_protect);
+                return 0;
+        }
         if (e == ubi->move_from) {
                 /*
                  * User is putting the physical eraseblock which was selected to
@@ -1408,6 +1451,150 @@
          */
         down_write(&ubi->work_sem);
         up_write(&ubi->work_sem);
+
+        return err;
+}
+
+static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+        if (in_wl_tree(e, &ubi->scrub))
+                return false;
+        else if (in_wl_tree(e, &ubi->erroneous))
+                return false;
+        else if (ubi->move_from == e)
+                return false;
+        else if (ubi->move_to == e)
+                return false;
+
+        return true;
+}
+
+/**
+ * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ * @force: don't read the block, assume bitflips happened and take action.
+ *
+ * This function reads the given eraseblock and checks if bitflips occurred.
+ * In case of bitflips, the eraseblock is scheduled for scrubbing.
+ * If scrubbing is forced with @force, the eraseblock is not read,
+ * but scheduled for scrubbing right away.
+ *
+ * Returns:
+ * %EINVAL, PEB is out of range
+ * %ENOENT, PEB is no longer used by UBI
+ * %EBUSY, PEB cannot be checked now or a check is currently running on it
+ * %EAGAIN, bit flips happened but scrubbing is currently not possible
+ * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
+ * %0, no bit flips detected
+ */
+int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
+{
+        int err = 0;
+        struct ubi_wl_entry *e;
+
+        if (pnum < 0 || pnum >= ubi->peb_count) {
+                err = -EINVAL;
+                goto out;
+        }
+
+        /*
+         * Pause all parallel work, otherwise it can happen that the
+         * erase worker frees a wl entry under us.
+         */
+        down_write(&ubi->work_sem);
+
+        /*
+         * Make sure that the wl entry does not change state while
+         * inspecting it.
+         */
+        spin_lock(&ubi->wl_lock);
+        e = ubi->lookuptbl[pnum];
+        if (!e) {
+                spin_unlock(&ubi->wl_lock);
+                err = -ENOENT;
+                goto out_resume;
+        }
+
+        /*
+         * Does it make sense to check this PEB?
+         */
+        if (!scrub_possible(ubi, e)) {
+                spin_unlock(&ubi->wl_lock);
+                err = -EBUSY;
+                goto out_resume;
+        }
+        spin_unlock(&ubi->wl_lock);
+
+        if (!force) {
+                mutex_lock(&ubi->buf_mutex);
+                err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+                mutex_unlock(&ubi->buf_mutex);
+        }
+
+        if (force || err == UBI_IO_BITFLIPS) {
+                /*
+                 * Okay, bit flip happened, let's figure out what we can do.
+                 */
+                spin_lock(&ubi->wl_lock);
+
+                /*
+                 * Recheck. We released wl_lock, UBI might have killed the
+                 * wl entry under us.
+                 */
+                e = ubi->lookuptbl[pnum];
+                if (!e) {
+                        spin_unlock(&ubi->wl_lock);
+                        err = -ENOENT;
+                        goto out_resume;
+                }
+
+                /*
+                 * Need to re-check state
+                 */
+                if (!scrub_possible(ubi, e)) {
+                        spin_unlock(&ubi->wl_lock);
+                        err = -EBUSY;
+                        goto out_resume;
+                }
+
+                if (in_pq(ubi, e)) {
+                        prot_queue_del(ubi, e->pnum);
+                        wl_tree_add(e, &ubi->scrub);
+                        spin_unlock(&ubi->wl_lock);
+
+                        err = ensure_wear_leveling(ubi, 1);
+                } else if (in_wl_tree(e, &ubi->used)) {
+                        rb_erase(&e->u.rb, &ubi->used);
+                        wl_tree_add(e, &ubi->scrub);
+                        spin_unlock(&ubi->wl_lock);
+
+                        err = ensure_wear_leveling(ubi, 1);
+                } else if (in_wl_tree(e, &ubi->free)) {
+                        rb_erase(&e->u.rb, &ubi->free);
+                        ubi->free_count--;
+                        spin_unlock(&ubi->wl_lock);
+
+                        /*
+                         * This PEB is empty, we can schedule it for
+                         * erasure right away. No wear leveling needed.
+                         */
+                        err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
+                                             force ? 0 : 1, true);
+                } else {
+                        spin_unlock(&ubi->wl_lock);
+                        err = -EAGAIN;
+                }
+
+                if (!err && !force)
+                        err = -EUCLEAN;
+        } else {
+                err = 0;
+        }
+
+out_resume:
+        up_write(&ubi->work_sem);
+out:
 
         return err;
 }
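A hedged caller sketch (the wrapper name and its use are illustrative, not from this patch) showing how the return codes documented above might be consumed; note that they come back negated, as the function body shows:

        /* hypothetical helper: check one PEB, tell the caller whether to retry */
        static int example_check_peb(struct ubi_device *ubi, int pnum)
        {
                int err = ubi_bitflip_check(ubi, pnum, 0);

                switch (err) {
                case 0:         /* PEB read back clean */
                case -EUCLEAN:  /* bitflips found, scrubbing already scheduled */
                        return 0;
                case -EAGAIN:   /* bitflips found, but scrubbing not possible yet */
                case -EBUSY:    /* PEB busy, e.g. wear leveling touches it */
                default:        /* -EINVAL or -ENOENT */
                        return err;
                }
        }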
@@ -1731,7 +1918,8 @@
                 goto out_free;
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
-        ubi_ensure_anchor_pebs(ubi);
+        if (!ubi->ro_mode && !ubi->fm_disabled)
+                ubi_ensure_anchor_pebs(ubi);
 #endif
         return 0;
 
@@ -1857,16 +2045,11 @@
 static int self_check_in_pq(const struct ubi_device *ubi,
                             struct ubi_wl_entry *e)
 {
-        struct ubi_wl_entry *p;
-        int i;
-
         if (!ubi_dbg_chk_gen(ubi))
                 return 0;
 
-        for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
-                list_for_each_entry(p, &ubi->pq[i], u.list)
-                        if (p == e)
-                                return 0;
+        if (in_pq(ubi, e))
+                return 0;
 
         ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
                 e->pnum, e->ec);