@@ -26,7 +26,7 @@
         struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
         struct bio *bio = &b->bio;
 
-        bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
+        bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
 
         return bio;
 }
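
This hunk sizes the inline bio_vec array for metadata bios by the metadata
bucket rather than the data bucket, so a large data bucket size no longer
inflates the inline vec count for metadata I/O. meta_bucket_pages() itself is
not part of this diff; the following is a minimal sketch of the assumed
helpers, where the macro bodies are assumptions rather than the kernel's
exact definitions:

/* Sketch only: macro bodies assumed, not taken from this diff. */
#define PAGE_SECTORS            (PAGE_SIZE >> 9)

/* pages spanned by one (data) bucket */
#define bucket_pages(c)         ((c)->sb.bucket_size / PAGE_SECTORS)

/*
 * Assumption: the metadata bucket page count is capped at the largest
 * contiguous allocation the page allocator can serve, so a single bio
 * with inline vecs can always cover a whole metadata bucket.
 */
#define meta_bucket_pages(sb)   min_t(unsigned int,                     \
                                      (sb)->bucket_size / PAGE_SECTORS, \
                                      MAX_ORDER_NR_PAGES)
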
@@ -65,14 +65,14 @@
          * we shouldn't count failed REQ_RAHEAD bio to dc->io_errors.
          */
         if (bio->bi_opf & REQ_RAHEAD) {
-                pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore",
+                pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore\n",
                                     dc->backing_dev_name);
                 return;
         }
 
         errors = atomic_add_return(1, &dc->io_errors);
         if (errors < dc->error_limit)
-                pr_err("%s: IO error on backing device, unrecoverable",
+                pr_err("%s: IO error on backing device, unrecoverable\n",
                         dc->backing_dev_name);
         else
                 bch_cached_dev_error(dc);
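
The pr_*() hunks here and below only append a terminating "\n". Kernel
convention is that every printk-family message ends with an explicit newline;
without one, printk treats the text as a potential continuation line, so it
can be flushed late or glued onto an unrelated message. A tiny userspace
analogue (the "bcache0" string is illustrative) of how unterminated messages
run together:

#include <stdio.h>

int main(void)
{
        /* no trailing '\n': the two messages appear as one line */
        fprintf(stderr, "%s: IO error on backing device, unrecoverable", "bcache0");
        fprintf(stderr, "%s: Read-ahead I/O failed on backing device, ignore", "bcache0");
        fputc('\n', stderr);    /* only now does the line terminate */
        return 0;
}
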
@@ -123,12 +123,12 @@
                 errors >>= IO_ERROR_SHIFT;
 
                 if (errors < ca->set->error_limit)
-                        pr_err("%s: IO error on %s%s",
+                        pr_err("%s: IO error on %s%s\n",
                                 ca->cache_dev_name, m,
                                 is_read ? ", recovering." : ".");
                 else
                         bch_cache_set_error(ca->set,
-                                            "%s: too many IO errors %s",
+                                            "%s: too many IO errors %s\n",
                                             ca->cache_dev_name, m);
         }
 }
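
This last hunk sits inside bcache's cache-device error accounting, where the
counter appears to be kept in fixed point (one error occupies the bits above
IO_ERROR_SHIFT) so a fractional decay can run on the low bits; that is why
the hunk begins with errors >>= IO_ERROR_SHIFT before comparing against the
limit. A self-contained sketch of that pattern; the shift value and the decay
factor are illustrative assumptions, not taken from this diff:

#include <stdio.h>

#define IO_ERROR_SHIFT  20      /* assumption: fixed-point scale, one error == 1 << 20 */

static unsigned int io_errors;

static void count_io_error(void)
{
        io_errors += 1u << IO_ERROR_SHIFT;      /* one whole error, in the high bits */
}

static void decay_io_errors(void)
{
        /* illustrative multiplicative decay: keep 127/128 of the count;
         * the fractional remainder survives in the low bits */
        io_errors = (unsigned int)(((unsigned long long)io_errors * 127) / 128);
}

int main(void)
{
        unsigned int limit = 8;
        unsigned int errors;
        int i;

        for (i = 0; i < 10; i++)
                count_io_error();
        decay_io_errors();

        errors = io_errors >> IO_ERROR_SHIFT;   /* back to whole errors */
        printf("errors=%u -> %s\n", errors,
               errors < limit ? "recovering" : "too many IO errors");
        return 0;
}
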