forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 072de836f53be56a70cecf70b43ae43b7ce17376
kernel/drivers/md/dm-bufio.c
@@ -66,7 +66,7 @@
 
 /*
  * Linking of buffers:
- * All buffers are linked to cache_hash with their hash_list field.
+ * All buffers are linked to buffer_tree with their node field.
  *
  * Clean buffers that are not being written (B_WRITING not set)
  * are linked to lru[LIST_CLEAN] with their lru_list field.
@@ -108,7 +108,10 @@
 	int async_write_error;
 
 	struct list_head client_list;
+
 	struct shrinker shrinker;
+	struct work_struct shrink_work;
+	atomic_long_t need_shrink;
 };
 
 /*
@@ -153,7 +156,7 @@
 	void (*end_io)(struct dm_buffer *, blk_status_t);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 #define MAX_STACK 10
-	struct stack_trace stack_trace;
+	unsigned int stack_len;
 	unsigned long stack_entries[MAX_STACK];
 #endif
 };
@@ -238,11 +241,7 @@
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 static void buffer_record_stack(struct dm_buffer *b)
 {
-	b->stack_trace.nr_entries = 0;
-	b->stack_trace.max_entries = MAX_STACK;
-	b->stack_trace.entries = b->stack_entries;
-	b->stack_trace.skip = 2;
-	save_stack_trace(&b->stack_trace);
+	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
 }
 #endif
 
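Note: the old struct stack_trace bookkeeping is replaced by the kernel's newer stacktrace API. A minimal sketch of the save/print pairing this file now uses (assuming CONFIG_STACKTRACE; everything except the two API calls is an illustrative name, not from the diff):

#include <linux/stacktrace.h>

#define MAX_STACK 10

static unsigned long demo_entries[MAX_STACK];
static unsigned int demo_len;

static void demo_record(void)
{
	/* skip the two innermost frames, mirroring buffer_record_stack() above */
	demo_len = stack_trace_save(demo_entries, MAX_STACK, 2);
}

static void demo_report(void)
{
	/* one leading space per printed frame, as in the leak report later in this patch */
	stack_trace_print(demo_entries, demo_len, 1);
}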
@@ -260,10 +259,33 @@
 		if (b->block == block)
 			return b;
 
-		n = (b->block < block) ? n->rb_left : n->rb_right;
+		n = block < b->block ? n->rb_left : n->rb_right;
 	}
 
 	return NULL;
+}
+
+static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
+{
+	struct rb_node *n = c->buffer_tree.rb_node;
+	struct dm_buffer *b;
+	struct dm_buffer *best = NULL;
+
+	while (n) {
+		b = container_of(n, struct dm_buffer, node);
+
+		if (b->block == block)
+			return b;
+
+		if (block <= b->block) {
+			n = n->rb_left;
+			best = b;
+		} else {
+			n = n->rb_right;
+		}
+	}
+
+	return best;
 }
 
 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
@@ -280,8 +302,8 @@
 		}
 
 		parent = *new;
-		new = (found->block < b->block) ?
-			&((*new)->rb_left) : &((*new)->rb_right);
+		new = b->block < found->block ?
+			&found->node.rb_left : &found->node.rb_right;
 	}
 
 	rb_link_node(&b->node, parent, new);
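Note: __find() keeps its exact-match lookup, while the new __find_next() remembers the best candidate seen so far, so it returns the buffer at the smallest block greater than or equal to the requested one (used later by dm_bufio_forget_buffers()). A userspace sketch of the same descent on a hypothetical plain binary search tree, for illustration only:

#include <stdio.h>
#include <stddef.h>

struct node {
	unsigned long block;
	struct node *left, *right;
};

/* return the node with the smallest key >= block, or NULL if none */
static struct node *find_next(struct node *n, unsigned long block)
{
	struct node *best = NULL;

	while (n) {
		if (n->block == block)
			return n;
		if (block <= n->block) {
			best = n;	/* candidate; keep looking for a smaller one */
			n = n->left;
		} else {
			n = n->right;
		}
	}
	return best;
}

int main(void)
{
	/* tree keyed on block numbers: 5 at the root, 2 and 9 below it */
	struct node n2 = { 2, NULL, NULL }, n9 = { 9, NULL, NULL };
	struct node n5 = { 5, &n2, &n9 };

	printf("%lu\n", find_next(&n5, 4)->block);	/* prints 5 */
	printf("%p\n", (void *)find_next(&n5, 10));	/* NULL: no block >= 10 */
	return 0;
}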
@@ -404,13 +426,13 @@
 	 */
 	if (gfp_mask & __GFP_NORETRY) {
 		unsigned noio_flag = memalloc_noio_save();
-		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+		void *ptr = __vmalloc(c->block_size, gfp_mask);
 
 		memalloc_noio_restore(noio_flag);
 		return ptr;
 	}
 
-	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+	return __vmalloc(c->block_size, gfp_mask);
 }
 
 /*
@@ -459,7 +481,7 @@
 	}
 
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-	memset(&b->stack_trace, 0, sizeof(b->stack_trace));
+	b->stack_len = 0;
 #endif
 	return b;
 }
@@ -476,7 +498,7 @@
 }
 
 /*
- * Link buffer to the hash list and clean or dirty queue.
+ * Link buffer to the buffer tree and clean or dirty queue.
  */
 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
 {
@@ -493,7 +515,7 @@
 }
 
 /*
- * Unlink buffer from the hash list and dirty or clean queue.
+ * Unlink buffer from the buffer tree and dirty or clean queue.
  */
 static void __unlink_buffer(struct dm_buffer *b)
 {
@@ -635,6 +657,19 @@
 	submit_bio(bio);
 }
 
+static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
+{
+	sector_t sector;
+
+	if (likely(c->sectors_per_block_bits >= 0))
+		sector = block << c->sectors_per_block_bits;
+	else
+		sector = block * (c->block_size >> SECTOR_SHIFT);
+	sector += c->start;
+
+	return sector;
+}
+
 static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
 {
 	unsigned n_sectors;
@@ -643,11 +678,7 @@
 
 	b->end_io = end_io;
 
-	if (likely(b->c->sectors_per_block_bits >= 0))
-		sector = b->block << b->c->sectors_per_block_bits;
-	else
-		sector = b->block * (b->c->block_size >> SECTOR_SHIFT);
-	sector += b->c->start;
+	sector = block_to_sector(b->c, b->block);
 
 	if (rw != REQ_OP_WRITE) {
 		n_sectors = b->c->block_size >> SECTOR_SHIFT;
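Note: submit_io() now delegates the block-to-sector conversion to the new block_to_sector() helper, which dm_bufio_issue_discard() below reuses. A userspace sketch of the same arithmetic with illustrative values (512-byte sectors, 4 KiB blocks, a client whose data starts at sector 2048):

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

int main(void)
{
	uint64_t block_size = 4096;
	int sectors_per_block_bits = 3;		/* log2(4096 >> SECTOR_SHIFT) */
	uint64_t start = 2048;			/* client offset, in sectors */
	uint64_t block = 100;
	uint64_t sector;

	if (sectors_per_block_bits >= 0)	/* power-of-two block size: shift */
		sector = block << sectors_per_block_bits;
	else					/* otherwise: multiply */
		sector = block * (block_size >> SECTOR_SHIFT);
	sector += start;

	/* 100 << 3 = 800, plus the start offset 2048 -> sector 2848 */
	printf("block %llu -> sector %llu\n",
	       (unsigned long long)block, (unsigned long long)sector);
	return 0;
}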
@@ -972,7 +1003,7 @@
 
 	/*
 	 * We've had a period where the mutex was unlocked, so need to
-	 * recheck the hash table.
+	 * recheck the buffer tree.
 	 */
 	b = __find(c, block);
 	if (b) {
@@ -1306,7 +1337,7 @@
 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
 
 /*
- * Use dm-io to send and empty barrier flush the device.
+ * Use dm-io to send an empty barrier to flush the device.
  */
 int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
@@ -1330,12 +1361,36 @@
 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
 
 /*
+ * Use dm-io to send a discard request to flush the device.
+ */
+int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
+{
+	struct dm_io_request io_req = {
+		.bi_op = REQ_OP_DISCARD,
+		.bi_op_flags = REQ_SYNC,
+		.mem.type = DM_IO_KMEM,
+		.mem.ptr.addr = NULL,
+		.client = c->dm_io,
+	};
+	struct dm_io_region io_reg = {
+		.bdev = c->bdev,
+		.sector = block_to_sector(c, block),
+		.count = block_to_sector(c, count),
+	};
+
+	BUG_ON(dm_bufio_in_request());
+
+	return dm_io(&io_req, 1, &io_reg, NULL);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
+
+/*
  * We first delete any other buffer that may be at that new location.
  *
  * Then, we write the buffer to the original location if it was dirty.
  *
  * Then, if we are the only one who is holding the buffer, relink the buffer
- * in the hash queue for the new location.
+ * in the buffer tree for the new location.
  *
  * If there was someone else holding the buffer, we write it to the new
  * location but not relink it, because that other user needs to have the buffer
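Note: dm_bufio_issue_discard() converts the block range to sectors with block_to_sector() and issues a synchronous REQ_OP_DISCARD through dm-io. A hedged sketch of a hypothetical caller (process context only, since the helper BUG_ONs when called from an I/O request; the wrapper name is illustrative):

/* hypothetical helper, illustration only */
static int demo_discard_metadata(struct dm_bufio_client *c,
				 sector_t first_block, sector_t n_blocks)
{
	/* returns 0 on success or an error from dm_io() */
	return dm_bufio_issue_discard(c, first_block, n_blocks);
}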
@@ -1405,6 +1460,14 @@
 }
 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
 
+static void forget_buffer_locked(struct dm_buffer *b)
+{
+	if (likely(!b->hold_count) && likely(!b->state)) {
+		__unlink_buffer(b);
+		__free_buffer_wake(b);
+	}
+}
+
 /*
  * Free the given buffer.
  *
@@ -1418,14 +1481,35 @@
 	dm_bufio_lock(c);
 
 	b = __find(c, block);
-	if (b && likely(!b->hold_count) && likely(!b->state)) {
-		__unlink_buffer(b);
-		__free_buffer_wake(b);
-	}
+	if (b)
+		forget_buffer_locked(b);
 
 	dm_bufio_unlock(c);
 }
 EXPORT_SYMBOL_GPL(dm_bufio_forget);
+
+void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
+{
+	struct dm_buffer *b;
+	sector_t end_block = block + n_blocks;
+
+	while (block < end_block) {
+		dm_bufio_lock(c);
+
+		b = __find_next(c, block);
+		if (b) {
+			block = b->block + 1;
+			forget_buffer_locked(b);
+		}
+
+		dm_bufio_unlock(c);
+
+		if (!b)
+			break;
+	}
+
+}
+EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
 
 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
 {
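Note: dm_bufio_forget_buffers() walks the tree with __find_next(), taking and dropping the client lock per buffer, and frees each clean, unreferenced buffer it finds starting at 'block'. A hedged sketch of how a hypothetical caller might pair it with the discard helper added earlier in this patch:

/*
 * Hypothetical helper, illustration only: discard a range of metadata
 * blocks on the device and drop any cached, unreferenced copies, so that
 * subsequent dm_bufio_read() calls go back to the media.
 */
static int demo_punch_range(struct dm_bufio_client *c,
			    sector_t block, sector_t n_blocks)
{
	int r = dm_bufio_issue_discard(c, block, n_blocks);

	if (r)
		return r;

	dm_bufio_forget_buffers(c, block, n_blocks);
	return 0;
}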
@@ -1509,8 +1593,9 @@
 			DMERR("leaked buffer %llx, hold count %u, list %d",
 			      (unsigned long long)b->block, b->hold_count, i);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-			print_stack_trace(&b->stack_trace, 1);
-			b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
+			stack_trace_print(b->stack_entries, b->stack_len, 1);
+			/* mark unclaimed to avoid BUG_ON below */
+			b->hold_count = 0;
 #endif
 		}
 
@@ -1562,8 +1647,7 @@
 	return retain_bytes;
 }
 
-static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
-			    gfp_t gfp_mask)
+static void __scan(struct dm_bufio_client *c)
 {
 	int l;
 	struct dm_buffer *b, *tmp;
@@ -1574,42 +1658,58 @@
 
 	for (l = 0; l < LIST_SIZE; l++) {
 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
-			if (__try_evict_buffer(b, gfp_mask))
+			if (count - freed <= retain_target)
+				atomic_long_set(&c->need_shrink, 0);
+			if (!atomic_long_read(&c->need_shrink))
+				return;
+			if (__try_evict_buffer(b, GFP_KERNEL)) {
+				atomic_long_dec(&c->need_shrink);
 				freed++;
-			if (!--nr_to_scan || ((count - freed) <= retain_target))
-				return freed;
+			}
 			cond_resched();
 		}
 	}
-	return freed;
 }
 
-static unsigned long
-dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+static void shrink_work(struct work_struct *w)
+{
+	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
+
+	dm_bufio_lock(c);
+	__scan(c);
+	dm_bufio_unlock(c);
+}
+
+static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct dm_bufio_client *c;
-	unsigned long freed;
 
 	c = container_of(shrink, struct dm_bufio_client, shrinker);
-	if (sc->gfp_mask & __GFP_FS)
-		dm_bufio_lock(c);
-	else if (!dm_bufio_trylock(c))
-		return SHRINK_STOP;
+	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
+	queue_work(dm_bufio_wq, &c->shrink_work);
 
-	freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
-	dm_bufio_unlock(c);
-	return freed;
+	return sc->nr_to_scan;
 }
 
-static unsigned long
-dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
 	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
 			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
 	unsigned long retain_target = get_retain_buffers(c);
+	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
 
-	return (count < retain_target) ? 0 : (count - retain_target);
+	if (unlikely(count < retain_target))
+		count = 0;
+	else
+		count -= retain_target;
+
+	if (unlikely(count < queued_for_cleanup))
+		count = 0;
+	else
+		count -= queued_for_cleanup;
+
+	return count;
 }
 
 /*
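Note: with this change dm_bufio_shrink_scan() no longer evicts anything itself: it adds the request to need_shrink, queues shrink_work, and reports sc->nr_to_scan as freed, so memory reclaim never has to take (or trylock) the client mutex. dm_bufio_shrink_count() compensates by subtracting both the retain target and the work already queued, clamping at zero. A userspace sketch of that accounting with illustrative numbers:

#include <stdio.h>

/* mirrors the clamped subtraction in dm_bufio_shrink_count() */
static unsigned long reportable(unsigned long count,
				unsigned long retain_target,
				unsigned long queued_for_cleanup)
{
	count = count < retain_target ? 0 : count - retain_target;
	count = count < queued_for_cleanup ? 0 : count - queued_for_cleanup;
	return count;
}

int main(void)
{
	/* 1000 buffers cached, 256 always retained, 300 already queued -> 444 */
	printf("%lu\n", reportable(1000, 256, 300));
	/* more work queued than is left to free -> report 0 */
	printf("%lu\n", reportable(1000, 256, 900));
	return 0;
}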
@@ -1700,6 +1800,9 @@
 		__free_buffer_wake(b);
 	}
 
+	INIT_WORK(&c->shrink_work, shrink_work);
+	atomic_long_set(&c->need_shrink, 0);
+
 	c->shrinker.count_objects = dm_bufio_shrink_count;
 	c->shrinker.scan_objects = dm_bufio_shrink_scan;
 	c->shrinker.seeks = 1;
@@ -1745,6 +1848,7 @@
 	drop_buffers(c);
 
 	unregister_shrinker(&c->shrinker);
+	flush_work(&c->shrink_work);
 
 	mutex_lock(&dm_bufio_clients_lock);
 
@@ -1941,7 +2045,7 @@
 	dm_bufio_allocated_vmalloc = 0;
 	dm_bufio_current_allocated = 0;
 
-	mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
+	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
 			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
 
 	if (mem > ULONG_MAX)
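Note: totalram_pages and totalhigh_pages are accessor functions on newer kernels, hence the added parentheses; the memory-cap arithmetic itself is unchanged. A userspace sketch of the mult_frac() idiom it relies on, which computes x * numer / denom without forming the full x * numer intermediate product (the percentage below is illustrative; the real value is DM_BUFIO_MEMORY_PERCENT in the source):

#include <stdio.h>
#include <stdint.h>

/* split x into quotient and remainder by denom to avoid the big product */
static uint64_t mult_frac_u64(uint64_t x, uint64_t numer, uint64_t denom)
{
	uint64_t quot = x / denom;
	uint64_t rem = x % denom;

	return quot * numer + rem * numer / denom;
}

int main(void)
{
	uint64_t lowmem_pages = 1048576;	/* e.g. 4 GiB of lowmem with 4 KiB pages */
	uint64_t percent = 2;			/* illustrative, not the real constant */

	/* cap in bytes: pages * percent / 100, shifted by PAGE_SHIFT (12 here) */
	printf("%llu\n", (unsigned long long)
	       (mult_frac_u64(lowmem_pages, percent, 100) << 12));
	return 0;
}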