From 102a0743326a03cd1a1202ceda21e175b7d3575c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:20:52 +0000
Subject: [PATCH] paride/pd: convert to blk-mq

Switch the pd driver from the legacy request_fn path to blk-mq: give each
unit a blk_mq_tag_set and a pd_queue_rq() handler, park deferred requests
on a per-unit rq_list, and carry the special-command function pointer in
the request PDU via blk_mq_rq_to_pdu() instead of rq->special. Completion
now goes through blk_update_request()/__blk_mq_end_request(). Also wire up
.compat_ioctl, report DISK_EVENT_MEDIA_CHANGE, and use the fallthrough
pseudo-keyword.
---
 kernel/drivers/block/paride/pd.c | 126 +++++++++++++++++++++++++++++++-----------
 1 file changed, 93 insertions(+), 33 deletions(-)

diff --git a/kernel/drivers/block/paride/pd.c b/kernel/drivers/block/paride/pd.c
index 7cf9475..a7af4f2 100644
--- a/kernel/drivers/block/paride/pd.c
+++ b/kernel/drivers/block/paride/pd.c
@@ -151,7 +151,7 @@
 #include <linux/delay.h>
 #include <linux/hdreg.h>
 #include <linux/cdrom.h>	/* for the eject ioctl */
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/blkpg.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
@@ -236,9 +236,16 @@
 	int alt_geom;
 	char name[PD_NAMELEN];	/* pda, pdb, etc ... */
 	struct gendisk *gd;
+	struct blk_mq_tag_set tag_set;
+	struct list_head rq_list;
 };
 
 static struct pd_unit pd[PD_UNITS];
+
+struct pd_req {
+	/* for REQ_OP_DRV_IN: */
+	enum action (*func)(struct pd_unit *disk);
+};
 
 static char pd_scratch[512];	/* scratch block buffer */
 
@@ -399,9 +406,17 @@
 		if (++pd_queue == PD_UNITS)
 			pd_queue = 0;
 		if (q) {
-			pd_req = blk_fetch_request(q);
-			if (pd_req)
-				break;
+			struct pd_unit *disk = q->queuedata;
+
+			if (list_empty(&disk->rq_list))
+				continue;
+
+			pd_req = list_first_entry(&disk->rq_list,
+							struct request,
+							queuelist);
+			list_del_init(&pd_req->queuelist);
+			blk_mq_start_request(pd_req);
+			break;
 		}
 	} while (pd_queue != old_pos);
 
@@ -412,7 +427,6 @@
 {
 	while (1) {
 		enum action res;
-		unsigned long saved_flags;
 		int stop = 0;
 
 		if (!phase) {
@@ -426,27 +440,32 @@
 			pd_claimed = 1;
 			if (!pi_schedule_claimed(pi_current, run_fsm))
 				return;
-			/* fall through */
+			fallthrough;
 		case 1:
 			pd_claimed = 2;
 			pi_current->proto->connect(pi_current);
 		}
 
 		switch(res = phase()) {
-		case Ok: case Fail:
+		case Ok: case Fail: {
+			blk_status_t err;
+
+			err = res == Ok ? 0 : BLK_STS_IOERR;
 			pi_disconnect(pi_current);
 			pd_claimed = 0;
 			phase = NULL;
-			spin_lock_irqsave(&pd_lock, saved_flags);
-			if (!__blk_end_request_cur(pd_req,
-					res == Ok ? 0 : BLK_STS_IOERR)) {
-				if (!set_next_request())
-					stop = 1;
+			spin_lock_irq(&pd_lock);
+			if (!blk_update_request(pd_req, err,
+					blk_rq_cur_bytes(pd_req))) {
+				__blk_mq_end_request(pd_req, err);
+				pd_req = NULL;
+				stop = !set_next_request();
 			}
-			spin_unlock_irqrestore(&pd_lock, saved_flags);
+			spin_unlock_irq(&pd_lock);
 			if (stop)
 				return;
-			/* fall through */
+		}
+			fallthrough;
 		case Hold:
 			schedule_fsm();
 			return;
@@ -488,8 +507,9 @@
 
 static enum action pd_special(void)
 {
-	enum action (*func)(struct pd_unit *) = pd_req->special;
-	return func(pd_current);
+	struct pd_req *req = blk_mq_rq_to_pdu(pd_req);
+
+	return req->func(pd_current);
 }
 
 static int pd_next_buf(void)
@@ -505,11 +525,17 @@
 	if (pd_count)
 		return 0;
 	spin_lock_irqsave(&pd_lock, saved_flags);
-	__blk_end_request_cur(pd_req, 0);
-	pd_count = blk_rq_cur_sectors(pd_req);
-	pd_buf = bio_data(pd_req->bio);
+	if (!blk_update_request(pd_req, 0, blk_rq_cur_bytes(pd_req))) {
+		__blk_mq_end_request(pd_req, 0);
+		pd_req = NULL;
+		pd_count = 0;
+		pd_buf = NULL;
+	} else {
+		pd_count = blk_rq_cur_sectors(pd_req);
+		pd_buf = bio_data(pd_req->bio);
+	}
 	spin_unlock_irqrestore(&pd_lock, saved_flags);
-	return 0;
+	return !pd_count;
 }
 
 static unsigned long pd_timeout;
@@ -726,27 +752,35 @@
 
 /* end of io request engine */
 
-static void do_pd_request(struct request_queue * q)
+static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx,
+				const struct blk_mq_queue_data *bd)
 {
-	if (pd_req)
-		return;
-	pd_req = blk_fetch_request(q);
-	if (!pd_req)
-		return;
+	struct pd_unit *disk = hctx->queue->queuedata;
 
-	schedule_fsm();
+	spin_lock_irq(&pd_lock);
+	if (!pd_req) {
+		pd_req = bd->rq;
+		blk_mq_start_request(pd_req);
+	} else
+		list_add_tail(&bd->rq->queuelist, &disk->rq_list);
+	spin_unlock_irq(&pd_lock);
+
+	run_fsm();
+	return BLK_STS_OK;
 }
 
 static int pd_special_command(struct pd_unit *disk,
 		      enum action (*func)(struct pd_unit *disk))
 {
 	struct request *rq;
+	struct pd_req *req;
 
 	rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
+	req = blk_mq_rq_to_pdu(rq);
 
-	rq->special = func;
+	req->func = func;
 	blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
 	blk_put_request(rq);
 	return 0;
@@ -840,6 +874,7 @@
 	.open		= pd_open,
 	.release	= pd_release,
 	.ioctl		= pd_ioctl,
+	.compat_ioctl	= pd_ioctl,
 	.getgeo		= pd_getgeo,
 	.check_events	= pd_check_events,
 	.revalidate_disk= pd_revalidate
@@ -847,23 +882,46 @@
 
 /* probing */
 
+static const struct blk_mq_ops pd_mq_ops = {
+	.queue_rq	= pd_queue_rq,
+};
+
 static void pd_probe_drive(struct pd_unit *disk)
 {
-	struct gendisk *p = alloc_disk(1 << PD_BITS);
+	struct gendisk *p;
+
+	p = alloc_disk(1 << PD_BITS);
 	if (!p)
 		return;
+
 	strcpy(p->disk_name, disk->name);
 	p->fops = &pd_fops;
 	p->major = major;
 	p->first_minor = (disk - pd) << PD_BITS;
+	p->events = DISK_EVENT_MEDIA_CHANGE;
 	disk->gd = p;
 	p->private_data = disk;
-	p->queue = blk_init_queue(do_pd_request, &pd_lock);
-	if (!p->queue) {
-		disk->gd = NULL;
-		put_disk(p);
+
+	memset(&disk->tag_set, 0, sizeof(disk->tag_set));
+	disk->tag_set.ops = &pd_mq_ops;
+	disk->tag_set.cmd_size = sizeof(struct pd_req);
+	disk->tag_set.nr_hw_queues = 1;
+	disk->tag_set.nr_maps = 1;
+	disk->tag_set.queue_depth = 2;
+	disk->tag_set.numa_node = NUMA_NO_NODE;
+	disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+
+	if (blk_mq_alloc_tag_set(&disk->tag_set))
+		return;
+
+	p->queue = blk_mq_init_queue(&disk->tag_set);
+	if (IS_ERR(p->queue)) {
+		blk_mq_free_tag_set(&disk->tag_set);
+		p->queue = NULL;
+		return;
+	}
+
+	p->queue->queuedata = disk;
 	blk_queue_max_hw_sectors(p->queue, cluster);
 	blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
 
@@ -895,6 +953,7 @@
 		disk->standby = parm[D_SBY];
 		if (parm[D_PRT])
 			pd_drive_count++;
+		INIT_LIST_HEAD(&disk->rq_list);
 	}
 
 	par_drv = pi_register_driver(name);
@@ -972,6 +1031,7 @@
 		disk->gd = NULL;
 		del_gendisk(p);
 		blk_cleanup_queue(p->queue);
+		blk_mq_free_tag_set(&disk->tag_set);
 		put_disk(p);
 		pi_release(disk->pi);
 	}
-- 
Gitblit v1.6.2
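
For reference, the conversion above rests on two blk-mq idioms: a per-request
driver payload sized by tag_set.cmd_size and recovered with blk_mq_rq_to_pdu()
(replacing the removed rq->special pointer), and a queue_rq() callback that
either starts a request or parks it for later. The fragment below is only a
minimal sketch of that pattern, assuming the same era of the blk-mq API the
patch targets (blk_mq_alloc_tag_set()/blk_mq_init_queue()); the sk_* names are
hypothetical and are not part of pd.c.

/*
 * Hedged sketch of the blk-mq pattern used by the patch: per-request PDU via
 * cmd_size + blk_mq_rq_to_pdu(), a queue_rq handler, and tag-set setup.
 * Hypothetical "sk" driver names; not part of pd.c.
 */
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/string.h>

struct sk_cmd {				/* lives in the request PDU */
	int (*func)(void *ctx);		/* analogous to pd_req::func */
};

static blk_status_t sk_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct sk_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* no rq->special */

	blk_mq_start_request(rq);
	/* ...drive the hardware here; cmd->func would handle REQ_OP_DRV_IN... */
	(void)cmd;
	blk_mq_end_request(rq, BLK_STS_OK);		/* complete in-line */
	return BLK_STS_OK;
}

static const struct blk_mq_ops sk_mq_ops = {
	.queue_rq	= sk_queue_rq,
};

/* Allocate a tag set and request queue the same way pd_probe_drive() does. */
static int sk_init_queue(struct blk_mq_tag_set *set, struct request_queue **q)
{
	memset(set, 0, sizeof(*set));
	set->ops = &sk_mq_ops;
	set->cmd_size = sizeof(struct sk_cmd);	/* reserves the per-rq PDU */
	set->nr_hw_queues = 1;
	set->queue_depth = 2;
	set->numa_node = NUMA_NO_NODE;
	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;

	if (blk_mq_alloc_tag_set(set))
		return -ENOMEM;

	*q = blk_mq_init_queue(set);
	if (IS_ERR(*q)) {
		blk_mq_free_tag_set(set);
		return PTR_ERR(*q);
	}
	return 0;
}

The queue_depth of 2 mirrors the patch's choice: one request in flight in the
FSM plus one parked per unit on rq_list, which matches how set_next_request()
and pd_queue_rq() hand work back and forth.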