2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/md/raid1-10.c
@@ -3,12 +3,42 @@
 #define RESYNC_BLOCK_SIZE (64*1024)
 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
 
+/*
+ * Number of guaranteed raid bios in case of extreme VM load:
+ */
+#define NR_RAID_BIOS 256
+
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error. To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio *)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context. So we record
+ * the success by setting devs[n].bio to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
+
+/* When there are this many requests queue to be written by
+ * the raid thread, we become 'congested' to provide back-pressure
+ * for writeback.
+ */
+static int max_queued_requests = 1024;
+
 /* for managing resync I/O pages */
 struct resync_pages {
 	void *raid_bio;
 	struct page *pages[RESYNC_PAGES];
 };
 
+static void rbio_pool_free(void *rbio, void *data)
+{
+	kfree(rbio);
+}
+
 static inline int resync_alloc_pages(struct resync_pages *rp,
 				     gfp_t gfp_flags)
 {
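
The IO_BLOCKED / IO_MADE_GOOD / BIO_SPECIAL() definitions added above encode per-device states as small integer values cast to struct bio pointers, so a 'bios' slot can hold either a real bio or a marker. The following is a minimal user-space sketch of that idiom only; it is not code from this patch, and handle_slot() and its messages are invented for illustration.

#include <stdio.h>

struct bio { int dummy; };

#define IO_BLOCKED   ((struct bio *)1)
#define IO_MADE_GOOD ((struct bio *)2)
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* Decide what a per-device bio slot means before dereferencing it. */
static void handle_slot(struct bio *b)
{
	if (!b)
		printf("slot unused\n");
	else if (b == IO_BLOCKED)
		printf("read redirected, device not failed\n");
	else if (b == IO_MADE_GOOD)
		printf("write to known bad block succeeded, clear marking from process context\n");
	else
		printf("real bio at %p\n", (void *)b);
}

int main(void)
{
	struct bio real = { 0 };

	handle_slot(NULL);
	handle_slot(IO_BLOCKED);
	handle_slot(IO_MADE_GOOD);
	handle_slot(&real);	/* BIO_SPECIAL(&real) is false, safe to use */
	return 0;
}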
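rbio_pool_free() has the signature of the kernel's mempool_free_t, and NR_RAID_BIOS is the guaranteed minimum mentioned in its comment, which suggests the two pair up when a raid bio mempool is created. A hedged sketch of that pairing follows; struct example_rbio, example_rbio_alloc() and example_init() are hypothetical stand-ins and not part of this patch.

#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct example_rbio {		/* hypothetical element type */
	int sectors;
};

/* mempool_alloc_t-shaped allocator for the sketch */
static void *example_rbio_alloc(gfp_t gfp_flags, void *data)
{
	return kzalloc(sizeof(struct example_rbio), gfp_flags);
}

static mempool_t *example_pool;

static int example_init(void)
{
	/*
	 * Reserve NR_RAID_BIOS elements so allocation can still make
	 * progress under extreme VM load; rbio_pool_free() from the
	 * patch above releases elements back with kfree().
	 */
	example_pool = mempool_create(NR_RAID_BIOS, example_rbio_alloc,
				      rbio_pool_free, NULL);
	return example_pool ? 0 : -ENOMEM;
}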