.. | .. |
---|
77 | 77 | #include <linux/falloc.h> |
---|
78 | 78 | #include <linux/uio.h> |
---|
79 | 79 | #include <linux/ioprio.h> |
---|
| 80 | +#include <linux/blk-cgroup.h> |
---|
80 | 81 | |
---|
81 | 82 | #include "loop.h" |
---|
82 | 83 | |
---|
.. | .. |
---|
228 | 229 | } |
---|
229 | 230 | |
---|
230 | 231 | /** |
---|
231 | | - * loop_validate_block_size() - validates the passed in block size |
---|
232 | | - * @bsize: size to validate |
---|
233 | | - */ |
---|
234 | | -static int |
---|
235 | | -loop_validate_block_size(unsigned short bsize) |
---|
236 | | -{ |
---|
237 | | - if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) |
---|
238 | | - return -EINVAL; |
---|
239 | | - |
---|
240 | | - return 0; |
---|
241 | | -} |
---|
242 | | - |
---|
243 | | -/** |
---|
244 | 232 | * loop_set_size() - sets device size and notifies userspace |
---|
245 | 233 | * @lo: struct loop_device to set the size for |
---|
246 | 234 | * @size: new size of the loop device |
---|
.. | .. |
---|
252 | 240 | { |
---|
253 | 241 | struct block_device *bdev = lo->lo_device; |
---|
254 | 242 | |
---|
255 | | - set_capacity(lo->lo_disk, size); |
---|
256 | | - bd_set_size(bdev, size << SECTOR_SHIFT); |
---|
257 | | - /* let user-space know about the new size */ |
---|
258 | | - kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); |
---|
| 243 | + bd_set_nr_sectors(bdev, size); |
---|
| 244 | + |
---|
| 245 | + if (!set_capacity_revalidate_and_notify(lo->lo_disk, size, false)) |
---|
| 246 | + kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); |
---|
259 | 247 | } |
---|
260 | 248 | |
---|
261 | 249 | static inline int |
---|
.. | .. |
---|
281 | 269 | struct iov_iter i; |
---|
282 | 270 | ssize_t bw; |
---|
283 | 271 | |
---|
284 | | - iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); |
---|
| 272 | + iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len); |
---|
285 | 273 | |
---|
286 | 274 | file_start_write(file); |
---|
287 | 275 | bw = vfs_iter_write(file, &i, ppos, 0); |
---|
.. | .. |
---|
359 | 347 | ssize_t len; |
---|
360 | 348 | |
---|
361 | 349 | rq_for_each_segment(bvec, rq, iter) { |
---|
362 | | - iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len); |
---|
| 350 | + iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len); |
---|
363 | 351 | len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); |
---|
364 | 352 | if (len < 0) |
---|
365 | 353 | return len; |
---|
.. | .. |
---|
400 | 388 | b.bv_offset = 0; |
---|
401 | 389 | b.bv_len = bvec.bv_len; |
---|
402 | 390 | |
---|
403 | | - iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len); |
---|
| 391 | + iov_iter_bvec(&i, READ, &b, 1, b.bv_len); |
---|
404 | 392 | len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); |
---|
405 | 393 | if (len < 0) { |
---|
406 | 394 | ret = len; |
---|
.. | .. |
---|
474 | 462 | if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || |
---|
475 | 463 | req_op(rq) != REQ_OP_READ) { |
---|
476 | 464 | if (cmd->ret < 0) |
---|
477 | | - ret = BLK_STS_IOERR; |
---|
| 465 | + ret = errno_to_blk_status(cmd->ret); |
---|
478 | 466 | goto end_io; |
---|
479 | 467 | } |
---|
480 | 468 | |
---|
.. | .. |
---|
509 | 497 | return; |
---|
510 | 498 | kfree(cmd->bvec); |
---|
511 | 499 | cmd->bvec = NULL; |
---|
512 | | - blk_mq_complete_request(rq); |
---|
| 500 | + if (likely(!blk_should_fake_timeout(rq->q))) |
---|
| 501 | + blk_mq_complete_request(rq); |
---|
513 | 502 | } |
---|
514 | 503 | |
---|
515 | 504 | static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2) |
---|
.. | .. |
---|
526 | 515 | loff_t pos, bool rw) |
---|
527 | 516 | { |
---|
528 | 517 | struct iov_iter iter; |
---|
| 518 | + struct req_iterator rq_iter; |
---|
529 | 519 | struct bio_vec *bvec; |
---|
530 | 520 | struct request *rq = blk_mq_rq_from_pdu(cmd); |
---|
531 | 521 | struct bio *bio = rq->bio; |
---|
532 | 522 | struct file *file = lo->lo_backing_file; |
---|
| 523 | + struct bio_vec tmp; |
---|
533 | 524 | unsigned int offset; |
---|
534 | | - int segments = 0; |
---|
| 525 | + int nr_bvec = 0; |
---|
535 | 526 | int ret; |
---|
536 | 527 | |
---|
537 | | - if (rq->bio != rq->biotail) { |
---|
538 | | - struct req_iterator iter; |
---|
539 | | - struct bio_vec tmp; |
---|
| 528 | + rq_for_each_bvec(tmp, rq, rq_iter) |
---|
| 529 | + nr_bvec++; |
---|
540 | 530 | |
---|
541 | | - __rq_for_each_bio(bio, rq) |
---|
542 | | - segments += bio_segments(bio); |
---|
543 | | - bvec = kmalloc_array(segments, sizeof(struct bio_vec), |
---|
| 531 | + if (rq->bio != rq->biotail) { |
---|
| 532 | + |
---|
| 533 | + bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec), |
---|
544 | 534 | GFP_NOIO); |
---|
545 | 535 | if (!bvec) |
---|
546 | 536 | return -EIO; |
---|
.. | .. |
---|
549 | 539 | /* |
---|
550 | 540 | * The bios of the request may be started from the middle of |
---|
551 | 541 | * the 'bvec' because of bio splitting, so we can't directly |
---|
552 | | - * copy bio->bi_iov_vec to new bvec. The rq_for_each_segment |
---|
| 542 | + * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec |
---|
553 | 543 | * API will take care of all details for us. |
---|
554 | 544 | */ |
---|
555 | | - rq_for_each_segment(tmp, rq, iter) { |
---|
| 545 | + rq_for_each_bvec(tmp, rq, rq_iter) { |
---|
556 | 546 | *bvec = tmp; |
---|
557 | 547 | bvec++; |
---|
558 | 548 | } |
---|
.. | .. |
---|
566 | 556 | */ |
---|
567 | 557 | offset = bio->bi_iter.bi_bvec_done; |
---|
568 | 558 | bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); |
---|
569 | | - segments = bio_segments(bio); |
---|
570 | 559 | } |
---|
571 | 560 | atomic_set(&cmd->ref, 2); |
---|
572 | 561 | |
---|
573 | | - iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, |
---|
574 | | - segments, blk_rq_bytes(rq)); |
---|
| 562 | + iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq)); |
---|
575 | 563 | iter.iov_offset = offset; |
---|
576 | 564 | |
---|
577 | 565 | cmd->iocb.ki_pos = pos; |
---|
.. | .. |
---|
640 | 628 | default: |
---|
641 | 629 | WARN_ON_ONCE(1); |
---|
642 | 630 | return -EIO; |
---|
643 | | - break; |
---|
644 | 631 | } |
---|
645 | 632 | } |
---|
646 | 633 | |
---|
/*
 * Re-evaluate whether direct I/O should be used for the backing file.
 * Direct I/O is requested either because the backing file was opened
 * with O_DIRECT or because userspace explicitly enabled it (use_dio).
 * The actual decision (alignment checks etc.) is made by
 * __loop_update_dio().
 */
static inline void loop_update_dio(struct loop_device *lo)
{
	__loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
				lo->use_dio);
}
---|
652 | 639 | |
---|
653 | 640 | static void loop_reread_partitions(struct loop_device *lo, |
---|
.. | .. |
---|
655 | 642 | { |
---|
656 | 643 | int rc; |
---|
657 | 644 | |
---|
658 | | - rc = blkdev_reread_part(bdev); |
---|
| 645 | + mutex_lock(&bdev->bd_mutex); |
---|
| 646 | + rc = bdev_disk_changed(bdev, false); |
---|
| 647 | + mutex_unlock(&bdev->bd_mutex); |
---|
659 | 648 | if (rc) |
---|
660 | 649 | pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n", |
---|
661 | 650 | __func__, lo->lo_number, lo->lo_file_name, rc); |
---|
.. | .. |
---|
808 | 797 | |
---|
/* sysfs: report the byte offset into the backing file (attribute "offset"). */
static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}
---|
813 | 802 | |
---|
/* sysfs: report the size limit in bytes (0 means no limit). */
static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}
---|
818 | 807 | |
---|
819 | 808 | static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) |
---|
820 | 809 | { |
---|
821 | 810 | int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); |
---|
822 | 811 | |
---|
823 | | - return sprintf(buf, "%s\n", autoclear ? "1" : "0"); |
---|
| 812 | + return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0"); |
---|
824 | 813 | } |
---|
825 | 814 | |
---|
826 | 815 | static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) |
---|
827 | 816 | { |
---|
828 | 817 | int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); |
---|
829 | 818 | |
---|
830 | | - return sprintf(buf, "%s\n", partscan ? "1" : "0"); |
---|
| 819 | + return sysfs_emit(buf, "%s\n", partscan ? "1" : "0"); |
---|
831 | 820 | } |
---|
832 | 821 | |
---|
/* sysfs: "1" if direct I/O to the backing file is in effect, else "0". */
static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);

	return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
}
---|
839 | 828 | |
---|
840 | 829 | LOOP_ATTR_RO(backing_file); |
---|
.. | .. |
---|
932 | 921 | |
---|
/*
 * Entry point for the per-device worker thread.
 *
 * PF_MEMALLOC_NOIO forces GFP_NOIO semantics for allocations made while
 * servicing requests, preventing recursion back into this device's I/O
 * path under memory pressure.  PF_LOCAL_THROTTLE exempts the worker from
 * global writeback throttling (NOTE(review): presumably so writes to the
 * backing file are not throttled against the loop device itself — confirm
 * against the flag's definition).
 */
static int loop_kthread_worker_fn(void *worker_ptr)
{
	current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
	return kthread_worker_fn(worker_ptr);
}
---|
938 | 927 | |
---|
.. | .. |
---|
945 | 934 | return -ENOMEM; |
---|
946 | 935 | set_user_nice(lo->worker_task, MIN_NICE); |
---|
947 | 936 | return 0; |
---|
| 937 | +} |
---|
| 938 | + |
---|
/*
 * Propagate the rotational attribute of the device backing the file to
 * the loop queue.  If the backing filesystem has no underlying block
 * device (e.g. tmpfs), the loop device is treated as non-rotational.
 */
static void loop_update_rotational(struct loop_device *lo)
{
	struct file *file = lo->lo_backing_file;
	struct inode *file_inode = file->f_mapping->host;
	struct block_device *file_bdev = file_inode->i_sb->s_bdev;
	struct request_queue *q = lo->lo_queue;
	bool nonrot = true;

	/* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
	if (file_bdev)
		nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));

	if (nonrot)
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}
---|
949 | 956 | |
---|
950 | 957 | static int |
---|
.. | .. |
---|
1022 | 1029 | if (err) |
---|
1023 | 1030 | return err; |
---|
1024 | 1031 | |
---|
| 1032 | + /* Avoid assigning overflow values */ |
---|
| 1033 | + if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX) |
---|
| 1034 | + return -EOVERFLOW; |
---|
| 1035 | + |
---|
1025 | 1036 | lo->lo_offset = info->lo_offset; |
---|
1026 | 1037 | lo->lo_sizelimit = info->lo_sizelimit; |
---|
| 1038 | + |
---|
1027 | 1039 | memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); |
---|
1028 | 1040 | memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); |
---|
1029 | 1041 | lo->lo_file_name[LO_NAME_SIZE-1] = 0; |
---|
.. | .. |
---|
1055 | 1067 | struct file *file; |
---|
1056 | 1068 | struct inode *inode; |
---|
1057 | 1069 | struct address_space *mapping; |
---|
| 1070 | + struct block_device *claimed_bdev = NULL; |
---|
1058 | 1071 | int error; |
---|
1059 | 1072 | loff_t size; |
---|
1060 | 1073 | bool partscan; |
---|
.. | .. |
---|
1068 | 1081 | if (!file) |
---|
1069 | 1082 | goto out; |
---|
1070 | 1083 | |
---|
| 1084 | + /* |
---|
| 1085 | + * If we don't hold exclusive handle for the device, upgrade to it |
---|
| 1086 | + * here to avoid changing device under exclusive owner. |
---|
| 1087 | + */ |
---|
| 1088 | + if (!(mode & FMODE_EXCL)) { |
---|
| 1089 | + claimed_bdev = bdev->bd_contains; |
---|
| 1090 | + error = bd_prepare_to_claim(bdev, claimed_bdev, loop_configure); |
---|
| 1091 | + if (error) |
---|
| 1092 | + goto out_putf; |
---|
| 1093 | + } |
---|
| 1094 | + |
---|
1071 | 1095 | error = mutex_lock_killable(&loop_ctl_mutex); |
---|
1072 | 1096 | if (error) |
---|
1073 | | - goto out_putf; |
---|
| 1097 | + goto out_bdev; |
---|
1074 | 1098 | |
---|
1075 | 1099 | error = -EBUSY; |
---|
1076 | 1100 | if (lo->lo_state != Lo_unbound) |
---|
.. | .. |
---|
1089 | 1113 | } |
---|
1090 | 1114 | |
---|
1091 | 1115 | if (config->block_size) { |
---|
1092 | | - error = loop_validate_block_size(config->block_size); |
---|
| 1116 | + error = blk_validate_block_size(config->block_size); |
---|
1093 | 1117 | if (error) |
---|
1094 | 1118 | goto out_unlock; |
---|
1095 | 1119 | } |
---|
.. | .. |
---|
1106 | 1130 | if (error) |
---|
1107 | 1131 | goto out_unlock; |
---|
1108 | 1132 | |
---|
1109 | | - error = 0; |
---|
1110 | | - |
---|
1111 | 1133 | set_device_ro(bdev, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0); |
---|
1112 | 1134 | |
---|
1113 | 1135 | lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO; |
---|
.. | .. |
---|
1121 | 1143 | |
---|
1122 | 1144 | if (config->block_size) |
---|
1123 | 1145 | bsize = config->block_size; |
---|
1124 | | - else if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) |
---|
| 1146 | + else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev) |
---|
1125 | 1147 | /* In case of direct I/O, match underlying block size */ |
---|
1126 | 1148 | bsize = bdev_logical_block_size(inode->i_sb->s_bdev); |
---|
1127 | 1149 | else |
---|
.. | .. |
---|
1131 | 1153 | blk_queue_physical_block_size(lo->lo_queue, bsize); |
---|
1132 | 1154 | blk_queue_io_min(lo->lo_queue, bsize); |
---|
1133 | 1155 | |
---|
| 1156 | + loop_config_discard(lo); |
---|
| 1157 | + loop_update_rotational(lo); |
---|
1134 | 1158 | loop_update_dio(lo); |
---|
1135 | 1159 | loop_sysfs_init(lo); |
---|
1136 | 1160 | |
---|
.. | .. |
---|
1144 | 1168 | if (part_shift) |
---|
1145 | 1169 | lo->lo_flags |= LO_FLAGS_PARTSCAN; |
---|
1146 | 1170 | partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; |
---|
| 1171 | + if (partscan) |
---|
| 1172 | + lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; |
---|
1147 | 1173 | |
---|
1148 | 1174 | /* Grab the block_device to prevent its destruction after we |
---|
1149 | 1175 | * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev). |
---|
.. | .. |
---|
1152 | 1178 | mutex_unlock(&loop_ctl_mutex); |
---|
1153 | 1179 | if (partscan) |
---|
1154 | 1180 | loop_reread_partitions(lo, bdev); |
---|
| 1181 | + if (claimed_bdev) |
---|
| 1182 | + bd_abort_claiming(bdev, claimed_bdev, loop_configure); |
---|
1155 | 1183 | return 0; |
---|
1156 | 1184 | |
---|
1157 | 1185 | out_unlock: |
---|
1158 | 1186 | mutex_unlock(&loop_ctl_mutex); |
---|
| 1187 | +out_bdev: |
---|
| 1188 | + if (claimed_bdev) |
---|
| 1189 | + bd_abort_claiming(bdev, claimed_bdev, loop_configure); |
---|
1159 | 1190 | out_putf: |
---|
1160 | 1191 | fput(file); |
---|
1161 | 1192 | out: |
---|
.. | .. |
---|
1184 | 1215 | err = -EINVAL; |
---|
1185 | 1216 | goto out_unlock; |
---|
1186 | 1217 | } |
---|
| 1218 | + |
---|
| 1219 | + if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags)) |
---|
| 1220 | + blk_queue_write_cache(lo->lo_queue, false, false); |
---|
1187 | 1221 | |
---|
1188 | 1222 | /* freeze request queue during the transition */ |
---|
1189 | 1223 | blk_mq_freeze_queue(lo->lo_queue); |
---|
.. | .. |
---|
1214 | 1248 | set_capacity(lo->lo_disk, 0); |
---|
1215 | 1249 | loop_sysfs_exit(lo); |
---|
1216 | 1250 | if (bdev) { |
---|
1217 | | - bd_set_size(bdev, 0); |
---|
| 1251 | + bd_set_nr_sectors(bdev, 0); |
---|
1218 | 1252 | /* let user-space know about this change */ |
---|
1219 | 1253 | kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); |
---|
1220 | 1254 | } |
---|
.. | .. |
---|
1237 | 1271 | * must be at least one and it can only become zero when the |
---|
1238 | 1272 | * current holder is released. |
---|
1239 | 1273 | */ |
---|
1240 | | - if (release) |
---|
1241 | | - err = __blkdev_reread_part(bdev); |
---|
1242 | | - else |
---|
1243 | | - err = blkdev_reread_part(bdev); |
---|
| 1274 | + if (!release) |
---|
| 1275 | + mutex_lock(&bdev->bd_mutex); |
---|
| 1276 | + err = bdev_disk_changed(bdev, false); |
---|
| 1277 | + if (!release) |
---|
| 1278 | + mutex_unlock(&bdev->bd_mutex); |
---|
1244 | 1279 | if (err) |
---|
1245 | 1280 | pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", |
---|
1246 | 1281 | __func__, lo_number, err); |
---|
.. | .. |
---|
1342 | 1377 | blk_mq_freeze_queue(lo->lo_queue); |
---|
1343 | 1378 | |
---|
1344 | 1379 | if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) { |
---|
1345 | | - /* If any pages were dirtied after kill_bdev(), try again */ |
---|
| 1380 | + /* If any pages were dirtied after invalidate_bdev(), try again */ |
---|
1346 | 1381 | err = -EAGAIN; |
---|
1347 | 1382 | pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", |
---|
1348 | 1383 | __func__, lo->lo_number, lo->lo_file_name, |
---|
.. | .. |
---|
1574 | 1609 | if (lo->lo_state != Lo_bound) |
---|
1575 | 1610 | return -ENXIO; |
---|
1576 | 1611 | |
---|
1577 | | - err = loop_validate_block_size(arg); |
---|
| 1612 | + err = blk_validate_block_size(arg); |
---|
1578 | 1613 | if (err) |
---|
1579 | 1614 | return err; |
---|
1580 | 1615 | |
---|
.. | .. |
---|
1684 | 1719 | case LOOP_SET_BLOCK_SIZE: |
---|
1685 | 1720 | if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN)) |
---|
1686 | 1721 | return -EPERM; |
---|
1687 | | - /* Fall through */ |
---|
| 1722 | + fallthrough; |
---|
1688 | 1723 | default: |
---|
1689 | 1724 | err = lo_simple_ioctl(lo, cmd, arg); |
---|
1690 | 1725 | break; |
---|
.. | .. |
---|
1832 | 1867 | case LOOP_SET_STATUS64: |
---|
1833 | 1868 | case LOOP_CONFIGURE: |
---|
1834 | 1869 | arg = (unsigned long) compat_ptr(arg); |
---|
1835 | | - /* fall through */ |
---|
| 1870 | + fallthrough; |
---|
1836 | 1871 | case LOOP_SET_FD: |
---|
1837 | 1872 | case LOOP_CHANGE_FD: |
---|
1838 | 1873 | case LOOP_SET_BLOCK_SIZE: |
---|
.. | .. |
---|
1984 | 2019 | |
---|
1985 | 2020 | /* always use the first bio's css */ |
---|
1986 | 2021 | #ifdef CONFIG_BLK_CGROUP |
---|
1987 | | - if (cmd->use_aio && rq->bio && rq->bio->bi_css) { |
---|
1988 | | - cmd->css = rq->bio->bi_css; |
---|
| 2022 | + if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) { |
---|
| 2023 | + cmd->css = &bio_blkcg(rq->bio)->css; |
---|
1989 | 2024 | css_get(cmd->css); |
---|
1990 | 2025 | } else |
---|
1991 | 2026 | #endif |
---|
.. | .. |
---|
2011 | 2046 | failed: |
---|
2012 | 2047 | /* complete non-aio request */ |
---|
2013 | 2048 | if (!cmd->use_aio || ret) { |
---|
2014 | | - cmd->ret = ret ? -EIO : 0; |
---|
2015 | | - blk_mq_complete_request(rq); |
---|
| 2049 | + if (ret == -EOPNOTSUPP) |
---|
| 2050 | + cmd->ret = ret; |
---|
| 2051 | + else |
---|
| 2052 | + cmd->ret = ret ? -EIO : 0; |
---|
| 2053 | + if (likely(!blk_should_fake_timeout(rq->q))) |
---|
| 2054 | + blk_mq_complete_request(rq); |
---|
2016 | 2055 | } |
---|
2017 | 2056 | } |
---|
2018 | 2057 | |
---|
.. | .. |
---|
2070 | 2109 | lo->tag_set.queue_depth = 128; |
---|
2071 | 2110 | lo->tag_set.numa_node = NUMA_NO_NODE; |
---|
2072 | 2111 | lo->tag_set.cmd_size = sizeof(struct loop_cmd); |
---|
2073 | | - lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; |
---|
| 2112 | + lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING | |
---|
| 2113 | + BLK_MQ_F_NO_SCHED_BY_DEFAULT; |
---|
2074 | 2114 | lo->tag_set.driver_data = lo; |
---|
2075 | 2115 | |
---|
2076 | 2116 | err = blk_mq_alloc_tag_set(&lo->tag_set); |
---|
.. | .. |
---|
2078 | 2118 | goto out_free_idr; |
---|
2079 | 2119 | |
---|
2080 | 2120 | lo->lo_queue = blk_mq_init_queue(&lo->tag_set); |
---|
2081 | | - if (IS_ERR_OR_NULL(lo->lo_queue)) { |
---|
| 2121 | + if (IS_ERR(lo->lo_queue)) { |
---|
2082 | 2122 | err = PTR_ERR(lo->lo_queue); |
---|
2083 | 2123 | goto out_cleanup_tags; |
---|
2084 | 2124 | } |
---|