commit 9370bb92b2d16684ee45cf24e879c93c509162da (2024-12-19)
--- a/kernel/drivers/block/paride/pd.c
+++ b/kernel/drivers/block/paride/pd.c
@@ -151,7 +151,7 @@
 #include <linux/delay.h>
 #include <linux/hdreg.h>
 #include <linux/cdrom.h>	/* for the eject ioctl */
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/blkpg.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
@@ -236,9 +236,16 @@
 	int alt_geom;
 	char name[PD_NAMELEN];	/* pda, pdb, etc ... */
 	struct gendisk *gd;
+	struct blk_mq_tag_set tag_set;
+	struct list_head rq_list;
 };
 
 static struct pd_unit pd[PD_UNITS];
+
+struct pd_req {
+	/* for REQ_OP_DRV_IN: */
+	enum action (*func)(struct pd_unit *disk);
+};
 
 static char pd_scratch[512];	/* scratch block buffer */
 
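Under blk-mq the block layer allocates driver-private data (the "PDU") behind every struct request: the tag_set.cmd_size field set up in pd_probe_drive() below reserves sizeof(struct pd_req) per request, and blk_mq_rq_to_pdu() returns a pointer to it. A minimal sketch of the pattern, using the identifiers this patch introduces (mirroring what pd_special_command() does further down):

	/* blk-mq placed a struct pd_req directly after the request
	 * header; blk_mq_rq_to_pdu() just steps past that header */
	struct pd_req *req = blk_mq_rq_to_pdu(rq);

	req->func = pd_media_check;	/* stash the command for pd_special() */

This replaces the old rq->special pointer, which is not available to blk-mq drivers.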
@@ -399,9 +406,17 @@
 		if (++pd_queue == PD_UNITS)
 			pd_queue = 0;
 		if (q) {
-			pd_req = blk_fetch_request(q);
-			if (pd_req)
-				break;
+			struct pd_unit *disk = q->queuedata;
+
+			if (list_empty(&disk->rq_list))
+				continue;
+
+			pd_req = list_first_entry(&disk->rq_list,
+							struct request,
+							queuelist);
+			list_del_init(&pd_req->queuelist);
+			blk_mq_start_request(pd_req);
+			break;
 		}
 	} while (pd_queue != old_pos);
 
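There is no blk_fetch_request() in blk-mq, so requests that cannot be started immediately are parked on the per-unit rq_list (see pd_queue_rq() further down), and set_next_request() round-robins across the units to pick the next one, preserving the old one-request-in-flight engine. Both pd_req and every rq_list are guarded by pd_lock, so the calling convention (as used by run_fsm() below) is:

	spin_lock_irq(&pd_lock);
	if (!set_next_request())
		stop = 1;	/* every unit's list was empty: FSM goes idle */
	spin_unlock_irq(&pd_lock);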
@@ -412,7 +427,6 @@
 {
 	while (1) {
 		enum action res;
-		unsigned long saved_flags;
 		int stop = 0;
 
 		if (!phase) {
@@ -426,27 +440,32 @@
 			pd_claimed = 1;
 			if (!pi_schedule_claimed(pi_current, run_fsm))
 				return;
-			/* fall through */
+			fallthrough;
 		case 1:
 			pd_claimed = 2;
 			pi_current->proto->connect(pi_current);
 		}
 
 		switch(res = phase()) {
-		case Ok: case Fail:
+		case Ok: case Fail: {
+			blk_status_t err;
+
+			err = res == Ok ? 0 : BLK_STS_IOERR;
 			pi_disconnect(pi_current);
 			pd_claimed = 0;
 			phase = NULL;
-			spin_lock_irqsave(&pd_lock, saved_flags);
-			if (!__blk_end_request_cur(pd_req,
-					res == Ok ? 0 : BLK_STS_IOERR)) {
-				if (!set_next_request())
-					stop = 1;
+			spin_lock_irq(&pd_lock);
+			if (!blk_update_request(pd_req, err,
+					blk_rq_cur_bytes(pd_req))) {
+				__blk_mq_end_request(pd_req, err);
+				pd_req = NULL;
+				stop = !set_next_request();
 			}
-			spin_unlock_irqrestore(&pd_lock, saved_flags);
+			spin_unlock_irq(&pd_lock);
 			if (stop)
 				return;
-			/* fall through */
+			}
+			fallthrough;
 		case Hold:
 			schedule_fsm();
 			return;
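The removed __blk_end_request_cur() has no blk-mq equivalent; its replacement is the two-step pattern above. blk_update_request() retires blk_rq_cur_bytes() worth of the request and returns true while segments remain; only when it returns false is the request handed back with __blk_mq_end_request(). The general shape, as a sketch:

	if (!blk_update_request(rq, BLK_STS_OK, nr_bytes)) {
		/* nothing left: return the request to the block layer */
		__blk_mq_end_request(rq, BLK_STS_OK);
	} else {
		/* partial completion: rq->bio now points at the next segment */
	}

The irqsave variant (and the saved_flags local removed in the previous hunk) also goes away, since run_fsm() is no longer entered with interrupts disabled; plain spin_lock_irq()/spin_unlock_irq() suffices.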
@@ -488,8 +507,9 @@
 
 static enum action pd_special(void)
 {
-	enum action (*func)(struct pd_unit *) = pd_req->special;
-	return func(pd_current);
+	struct pd_req *req = blk_mq_rq_to_pdu(pd_req);
+
+	return req->func(pd_current);
 }
 
 static int pd_next_buf(void)
@@ -505,11 +525,17 @@
 	if (pd_count)
 		return 0;
 	spin_lock_irqsave(&pd_lock, saved_flags);
-	__blk_end_request_cur(pd_req, 0);
-	pd_count = blk_rq_cur_sectors(pd_req);
-	pd_buf = bio_data(pd_req->bio);
+	if (!blk_update_request(pd_req, 0, blk_rq_cur_bytes(pd_req))) {
+		__blk_mq_end_request(pd_req, 0);
+		pd_req = NULL;
+		pd_count = 0;
+		pd_buf = NULL;
+	} else {
+		pd_count = blk_rq_cur_sectors(pd_req);
+		pd_buf = bio_data(pd_req->bio);
+	}
 	spin_unlock_irqrestore(&pd_lock, saved_flags);
-	return 0;
+	return !pd_count;
 }
 
 static unsigned long pd_timeout;
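pd_next_buf() can now retire the whole request mid-transfer, so it has to report that: the new return value !pd_count tells the caller whether a current buffer still exists. The transfer loops in the driver already use the pattern:

	if (pd_next_buf())
		break;	/* out of buffers, or request completed underneath us */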
@@ -726,27 +752,35 @@
 
 /* end of io request engine */
 
-static void do_pd_request(struct request_queue * q)
+static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx,
+				const struct blk_mq_queue_data *bd)
 {
-	if (pd_req)
-		return;
-	pd_req = blk_fetch_request(q);
-	if (!pd_req)
-		return;
+	struct pd_unit *disk = hctx->queue->queuedata;
 
-	schedule_fsm();
+	spin_lock_irq(&pd_lock);
+	if (!pd_req) {
+		pd_req = bd->rq;
+		blk_mq_start_request(pd_req);
+	} else
+		list_add_tail(&bd->rq->queuelist, &disk->rq_list);
+	spin_unlock_irq(&pd_lock);
+
+	run_fsm();
+	return BLK_STS_OK;
 }
 
 static int pd_special_command(struct pd_unit *disk,
 		      enum action (*func)(struct pd_unit *disk))
 {
 	struct request *rq;
+	struct pd_req *req;
 
 	rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
+	req = blk_mq_rq_to_pdu(rq);
 
-	rq->special = func;
+	req->func = func;
 	blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
 	blk_put_request(rq);
 	return 0;
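pd_queue_rq() never reports busy: with pd_lock held it either makes bd->rq the active request or parks it on the unit's rq_list, then kicks the FSM. Internal commands keep flowing through the same queue; the existing callers elsewhere in the driver look like:

	/* e.g. from pd_open() and the CDROMEJECT ioctl: */
	pd_special_command(disk, pd_door_lock);
	pd_special_command(disk, pd_eject);

blk_execute_rq() pushes such a request through pd_queue_rq() like any other I/O, and pd_special() later fishes the function pointer back out of the PDU.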
@@ -840,6 +874,7 @@
 	.open		= pd_open,
 	.release	= pd_release,
 	.ioctl		= pd_ioctl,
+	.compat_ioctl	= pd_ioctl,
 	.getgeo		= pd_getgeo,
 	.check_events	= pd_check_events,
 	.revalidate_disk= pd_revalidate
847882
848883 /* probing */
849884
885
+static const struct blk_mq_ops pd_mq_ops = {
886
+ .queue_rq = pd_queue_rq,
887
+};
888
+
850889 static void pd_probe_drive(struct pd_unit *disk)
851890 {
852
- struct gendisk *p = alloc_disk(1 << PD_BITS);
891
+ struct gendisk *p;
892
+
893
+ p = alloc_disk(1 << PD_BITS);
853894 if (!p)
854895 return;
896
+
855897 strcpy(p->disk_name, disk->name);
856898 p->fops = &pd_fops;
857899 p->major = major;
858900 p->first_minor = (disk - pd) << PD_BITS;
901
+ p->events = DISK_EVENT_MEDIA_CHANGE;
859902 disk->gd = p;
860903 p->private_data = disk;
861
- p->queue = blk_init_queue(do_pd_request, &pd_lock);
862
- if (!p->queue) {
863
- disk->gd = NULL;
864
- put_disk(p);
904
+
905
+ memset(&disk->tag_set, 0, sizeof(disk->tag_set));
906
+ disk->tag_set.ops = &pd_mq_ops;
907
+ disk->tag_set.cmd_size = sizeof(struct pd_req);
908
+ disk->tag_set.nr_hw_queues = 1;
909
+ disk->tag_set.nr_maps = 1;
910
+ disk->tag_set.queue_depth = 2;
911
+ disk->tag_set.numa_node = NUMA_NO_NODE;
912
+ disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
913
+
914
+ if (blk_mq_alloc_tag_set(&disk->tag_set))
915
+ return;
916
+
917
+ p->queue = blk_mq_init_queue(&disk->tag_set);
918
+ if (IS_ERR(p->queue)) {
919
+ blk_mq_free_tag_set(&disk->tag_set);
920
+ p->queue = NULL;
865921 return;
866922 }
923
+
924
+ p->queue->queuedata = disk;
867925 blk_queue_max_hw_sectors(p->queue, cluster);
868926 blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
869927
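The tag set is filled in by hand rather than via the single-queue helper blk_mq_init_sq_queue() (where available) because that helper cannot set cmd_size, which this driver needs for its struct pd_req PDU. The values encode the design: one hardware queue with queue_depth = 2 per unit, which allows one request to be in flight while a second sits parked on rq_list; BLK_MQ_F_BLOCKING because ->queue_rq() drives run_fsm() synchronously; and BLK_MQ_F_SHOULD_MERGE to keep request merging enabled. Note also that blk_mq_init_queue() reports failure with ERR_PTR() rather than NULL, and that a failed queue must free the tag set it was built from, as the error path above does.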
@@ -895,6 +953,7 @@
 		disk->standby = parm[D_SBY];
 		if (parm[D_PRT])
 			pd_drive_count++;
+		INIT_LIST_HEAD(&disk->rq_list);
 	}
 
 	par_drv = pi_register_driver(name);
@@ -972,6 +1031,7 @@
 			disk->gd = NULL;
 			del_gendisk(p);
 			blk_cleanup_queue(p->queue);
+			blk_mq_free_tag_set(&disk->tag_set);
 			put_disk(p);
 			pi_release(disk->pi);
 		}
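Teardown mirrors the setup order in pd_probe_drive(): the queue created by blk_mq_init_queue() is destroyed with blk_cleanup_queue() before the tag set it references is released with blk_mq_free_tag_set(), and only then is the gendisk reference dropped with put_disk().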