.. | .. |
---|
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)

/*
 * Number of guaranteed raid bios in case of extreme VM load:
 */
#define NR_RAID_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

/* True when a 'bios' slot holds one of the sentinel values above (or NULL)
 * rather than a real in-flight bio. Argument is parenthesized so complex
 * expressions expand safely.
 */
#define BIO_SPECIAL(bio) ((unsigned long)(bio) <= 2)

/* When there are this many requests queued to be written by
 * the raid thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;
6 | 31 | /* for managing resync I/O pages */ |
---|
7 | 32 | struct resync_pages { |
---|
8 | 33 | void *raid_bio; |
---|
9 | 34 | struct page *pages[RESYNC_PAGES]; |
---|
10 | 35 | }; |
---|
11 | 36 | |
---|
/* mempool element destructor: release a raid bio previously handed out by
 * the pool's alloc callback. @data is the pool's private context, unused
 * here.
 */
static void rbio_pool_free(void *rbio, void *data)
{
	kfree(rbio);
}
12 | 42 | static inline int resync_alloc_pages(struct resync_pages *rp, |
---|
13 | 43 | gfp_t gfp_flags) |
---|
14 | 44 | { |
---|