@@ -151,7 +151,7 @@
 #include <linux/delay.h>
 #include <linux/hdreg.h>
 #include <linux/cdrom.h>	/* for the eject ioctl */
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/blkpg.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
@@ -236,9 +236,16 @@
 	int alt_geom;
 	char name[PD_NAMELEN];	/* pda, pdb, etc ... */
 	struct gendisk *gd;
+	struct blk_mq_tag_set tag_set;
+	struct list_head rq_list;
 };
 
 static struct pd_unit pd[PD_UNITS];
+
+struct pd_req {
+	/* for REQ_OP_DRV_IN: */
+	enum action (*func)(struct pd_unit *disk);
+};
 
 static char pd_scratch[512];	/* scratch block buffer */
 
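The two additions above are the heart of the conversion's data model: each unit gets its own blk_mq_tag_set plus a list head for parking requests, and struct pd_req becomes the per-request payload that blk-mq allocates behind every struct request (sized by tag_set.cmd_size, set in pd_probe_drive() below) and that the driver recovers with blk_mq_rq_to_pdu(). A minimal sketch of that payload pattern, using invented names (my_req, my_queue_rq) rather than anything from this patch:

	#include <linux/blk-mq.h>

	/* Hypothetical per-request payload, mirroring struct pd_req. */
	struct my_req {
		int retries;
	};

	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
	{
		/* cmd_size extra bytes live directly behind the request. */
		struct my_req *req = blk_mq_rq_to_pdu(bd->rq);

		req->retries = 0;
		blk_mq_start_request(bd->rq);
		return BLK_STS_OK;
	}

This replaces the old rq->special pointer, whose removal the later hunks complete.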
@@ -399,9 +406,17 @@
 		if (++pd_queue == PD_UNITS)
 			pd_queue = 0;
 		if (q) {
-			pd_req = blk_fetch_request(q);
-			if (pd_req)
-				break;
+			struct pd_unit *disk = q->queuedata;
+
+			if (list_empty(&disk->rq_list))
+				continue;
+
+			pd_req = list_first_entry(&disk->rq_list,
+						  struct request,
+						  queuelist);
+			list_del_init(&pd_req->queuelist);
+			blk_mq_start_request(pd_req);
+			break;
 		}
 	} while (pd_queue != old_pos);
 
@@ -412,7 +427,6 @@
 {
 	while (1) {
 		enum action res;
-		unsigned long saved_flags;
 		int stop = 0;
 
 		if (!phase) {
@@ -426,27 +440,32 @@
 			pd_claimed = 1;
 			if (!pi_schedule_claimed(pi_current, run_fsm))
 				return;
-			/* fall through */
+			fallthrough;
 		case 1:
 			pd_claimed = 2;
 			pi_current->proto->connect(pi_current);
 		}
 
 		switch(res = phase()) {
-		case Ok: case Fail:
+		case Ok: case Fail: {
+			blk_status_t err;
+
+			err = res == Ok ? 0 : BLK_STS_IOERR;
 			pi_disconnect(pi_current);
 			pd_claimed = 0;
 			phase = NULL;
-			spin_lock_irqsave(&pd_lock, saved_flags);
-			if (!__blk_end_request_cur(pd_req,
-					res == Ok ? 0 : BLK_STS_IOERR)) {
-				if (!set_next_request())
-					stop = 1;
+			spin_lock_irq(&pd_lock);
+			if (!blk_update_request(pd_req, err,
+					blk_rq_cur_bytes(pd_req))) {
+				__blk_mq_end_request(pd_req, err);
+				pd_req = NULL;
+				stop = !set_next_request();
 			}
-			spin_unlock_irqrestore(&pd_lock, saved_flags);
+			spin_unlock_irq(&pd_lock);
 			if (stop)
 				return;
-			/* fall through */
+		}
+			fallthrough;
 		case Hold:
 			schedule_fsm();
 			return;
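The completion logic above leans on an idiom worth spelling out: blk_update_request() consumes the given byte count from the request and returns true while segments remain, and only once it returns false is the request finished with __blk_mq_end_request(). In isolation (the helper name is invented for illustration):

	/* Returns true once rq is fully completed, false if work remains. */
	static bool complete_cur_chunk(struct request *rq, blk_status_t err)
	{
		if (blk_update_request(rq, err, blk_rq_cur_bytes(rq)))
			return false;	/* more segments outstanding */
		__blk_mq_end_request(rq, err);
		return true;
	}

This is also why pd_req is cleared and set_next_request() runs only on the fully-completed branch: the single in-flight slot frees up only when the whole request is done.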
@@ -488,8 +507,9 @@
 
 static enum action pd_special(void)
 {
-	enum action (*func)(struct pd_unit *) = pd_req->special;
-	return func(pd_current);
+	struct pd_req *req = blk_mq_rq_to_pdu(pd_req);
+
+	return req->func(pd_current);
 }
 
 static int pd_next_buf(void)
@@ -505,11 +525,17 @@
 	if (pd_count)
 		return 0;
 	spin_lock_irqsave(&pd_lock, saved_flags);
-	__blk_end_request_cur(pd_req, 0);
-	pd_count = blk_rq_cur_sectors(pd_req);
-	pd_buf = bio_data(pd_req->bio);
+	if (!blk_update_request(pd_req, 0, blk_rq_cur_bytes(pd_req))) {
+		__blk_mq_end_request(pd_req, 0);
+		pd_req = NULL;
+		pd_count = 0;
+		pd_buf = NULL;
+	} else {
+		pd_count = blk_rq_cur_sectors(pd_req);
+		pd_buf = bio_data(pd_req->bio);
+	}
 	spin_unlock_irqrestore(&pd_lock, saved_flags);
-	return 0;
+	return !pd_count;
 }
 
 static unsigned long pd_timeout;
@@ -726,27 +752,35 @@
 
 /* end of io request engine */
 
-static void do_pd_request(struct request_queue * q)
+static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx,
+				const struct blk_mq_queue_data *bd)
 {
-	if (pd_req)
-		return;
-	pd_req = blk_fetch_request(q);
-	if (!pd_req)
-		return;
+	struct pd_unit *disk = hctx->queue->queuedata;
 
-	schedule_fsm();
+	spin_lock_irq(&pd_lock);
+	if (!pd_req) {
+		pd_req = bd->rq;
+		blk_mq_start_request(pd_req);
+	} else
+		list_add_tail(&bd->rq->queuelist, &disk->rq_list);
+	spin_unlock_irq(&pd_lock);
+
+	run_fsm();
+	return BLK_STS_OK;
 }
 
 static int pd_special_command(struct pd_unit *disk,
 		enum action (*func)(struct pd_unit *disk))
 {
 	struct request *rq;
+	struct pd_req *req;
 
 	rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
+	req = blk_mq_rq_to_pdu(rq);
 
-	rq->special = func;
+	req->func = func;
 	blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
 	blk_put_request(rq);
 	return 0;
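pd_queue_rq() keeps at most one request in flight globally (pd_req) and parks everything else on the unit's rq_list for set_next_request() to pick up, since a single parallel-port FSM serves all units. pd_special_command() shows the replacement for the removed rq->special: the handler now travels in the request PDU, and blk_execute_rq() runs it synchronously through the normal request path. A caller would use it roughly like this (sketch; pd_eject is assumed to be one of the driver's enum action handlers, it is not shown in these hunks):

	/* Hypothetical caller, e.g. from an eject ioctl path. */
	static int example_eject(struct pd_unit *disk)
	{
		return pd_special_command(disk, pd_eject);
	}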
@@ -840,6 +874,7 @@
 	.open		= pd_open,
 	.release	= pd_release,
 	.ioctl		= pd_ioctl,
+	.compat_ioctl	= pd_ioctl,
 	.getgeo		= pd_getgeo,
 	.check_events	= pd_check_events,
 	.revalidate_disk= pd_revalidate
@@ -847,23 +882,46 @@
 
 /* probing */
 
+static const struct blk_mq_ops pd_mq_ops = {
+	.queue_rq	= pd_queue_rq,
+};
+
 static void pd_probe_drive(struct pd_unit *disk)
 {
-	struct gendisk *p = alloc_disk(1 << PD_BITS);
+	struct gendisk *p;
+
+	p = alloc_disk(1 << PD_BITS);
 	if (!p)
 		return;
+
 	strcpy(p->disk_name, disk->name);
 	p->fops = &pd_fops;
 	p->major = major;
 	p->first_minor = (disk - pd) << PD_BITS;
+	p->events = DISK_EVENT_MEDIA_CHANGE;
 	disk->gd = p;
 	p->private_data = disk;
-	p->queue = blk_init_queue(do_pd_request, &pd_lock);
-	if (!p->queue) {
-		disk->gd = NULL;
-		put_disk(p);
+
+	memset(&disk->tag_set, 0, sizeof(disk->tag_set));
+	disk->tag_set.ops = &pd_mq_ops;
+	disk->tag_set.cmd_size = sizeof(struct pd_req);
+	disk->tag_set.nr_hw_queues = 1;
+	disk->tag_set.nr_maps = 1;
+	disk->tag_set.queue_depth = 2;
+	disk->tag_set.numa_node = NUMA_NO_NODE;
+	disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+
+	if (blk_mq_alloc_tag_set(&disk->tag_set))
+		return;
+
+	p->queue = blk_mq_init_queue(&disk->tag_set);
+	if (IS_ERR(p->queue)) {
+		blk_mq_free_tag_set(&disk->tag_set);
+		p->queue = NULL;
 		return;
 	}
+
+	p->queue->queuedata = disk;
 	blk_queue_max_hw_sectors(p->queue, cluster);
 	blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
 
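Two of the tag-set parameters deserve a note. queue_depth = 2 lets blk-mq hand the driver a second request while one is in flight (the extra one is parked on rq_list), and BLK_MQ_F_BLOCKING tells the core that ->queue_rq may block, so pd_queue_rq() is invoked from a context where sleeping is safe. The tag set must also outlive the queue: as the last hunk below shows, teardown frees it only after blk_cleanup_queue(). A sketch of the pairing, mirroring that hunk:

	/* Teardown order: queue first, then the tag set it references. */
	blk_cleanup_queue(p->queue);
	blk_mq_free_tag_set(&disk->tag_set);
	put_disk(p);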
@@ -895,6 +953,7 @@
 		disk->standby = parm[D_SBY];
 		if (parm[D_PRT])
 			pd_drive_count++;
+		INIT_LIST_HEAD(&disk->rq_list);
 	}
 
 	par_drv = pi_register_driver(name);
@@ -972,6 +1031,7 @@
 			disk->gd = NULL;
 			del_gendisk(p);
 			blk_cleanup_queue(p->queue);
+			blk_mq_free_tag_set(&disk->tag_set);
 			put_disk(p);
 			pi_release(disk->pi);
 		}