forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/md/bcache/io.c
@@ -26,7 +26,7 @@
 	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
 	struct bio *bio = &b->bio;
 
-	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
+	bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb));
 
 	return bio;
 }
@@ -65,14 +65,14 @@
 	 * we shouldn't count failed REQ_RAHEAD bio to dc->io_errors.
 	 */
 	if (bio->bi_opf & REQ_RAHEAD) {
-		pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore",
+		pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore\n",
				    dc->backing_dev_name);
 		return;
 	}
 
 	errors = atomic_add_return(1, &dc->io_errors);
 	if (errors < dc->error_limit)
-		pr_err("%s: IO error on backing device, unrecoverable",
+		pr_err("%s: IO error on backing device, unrecoverable\n",
			dc->backing_dev_name);
 	else
 		bch_cached_dev_error(dc);
@@ -123,12 +123,12 @@
 		errors >>= IO_ERROR_SHIFT;
 
 		if (errors < ca->set->error_limit)
-			pr_err("%s: IO error on %s%s",
+			pr_err("%s: IO error on %s%s\n",
				ca->cache_dev_name, m,
				is_read ? ", recovering." : ".");
 		else
 			bch_cache_set_error(ca->set,
-					    "%s: too many IO errors %s",
+					    "%s: too many IO errors %s\n",
					    ca->cache_dev_name, m);
 	}
 }
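
Note on the first hunk: the bio for metadata I/O is now sized from meta_bucket_pages() rather than bucket_pages(), so a very large data bucket no longer dictates the size of metadata buffers. The remaining hunks only append "\n" to the printk format strings so each message terminates its own log line. Below is a minimal, self-contained user-space sketch of the sizing idea; the *_sketch names and the META_MAX_PAGES cap are illustrative assumptions, not the kernel's actual definitions.

/*
 * Illustrative sketch only: metadata buffers are sized from the on-disk
 * bucket size, but capped so an oversized data bucket does not force an
 * oversized allocation. Names and the cap value are assumptions.
 */
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_SECTORS	(PAGE_SIZE / 512u)
#define META_MAX_PAGES	2048u	/* assumed cap: 8MB worth of 4K pages */

struct cache_sb_sketch {
	unsigned int bucket_size;	/* bucket size in 512-byte sectors */
};

/* pages needed to hold one full data bucket */
static unsigned int bucket_pages_sketch(const struct cache_sb_sketch *sb)
{
	return sb->bucket_size / PAGE_SECTORS;
}

/* pages used for metadata buffers: same as above, but bounded */
static unsigned int meta_bucket_pages_sketch(const struct cache_sb_sketch *sb)
{
	unsigned int pages = bucket_pages_sketch(sb);

	return pages < META_MAX_PAGES ? pages : META_MAX_PAGES;
}

int main(void)
{
	struct cache_sb_sketch sb = { .bucket_size = 32768 };	/* 16MB bucket */

	printf("data bucket pages: %u\n", bucket_pages_sketch(&sb));
	printf("meta bucket pages: %u\n", meta_bucket_pages_sketch(&sb));
	return 0;
}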