forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/block/aoe/aoecmd.c
....@@ -7,7 +7,7 @@
77 #include <linux/ata.h>
88 #include <linux/slab.h>
99 #include <linux/hdreg.h>
10 -#include <linux/blkdev.h>
10 +#include <linux/blk-mq.h>
1111 #include <linux/skbuff.h>
1212 #include <linux/netdevice.h>
1313 #include <linux/genhd.h>
....@@ -813,24 +813,13 @@
813813 out:
814814 if ((d->flags & DEVFL_KICKME) && d->blkq) {
815815 d->flags &= ~DEVFL_KICKME;
816 - d->blkq->request_fn(d->blkq);
816 + blk_mq_run_hw_queues(d->blkq, true);
817817 }
818818
819819 d->timer.expires = jiffies + TIMERTICK;
820820 add_timer(&d->timer);
821821
822822 spin_unlock_irqrestore(&d->lock, flags);
823 -}
824 -
825 -static unsigned long
826 -rqbiocnt(struct request *r)
827 -{
828 - struct bio *bio;
829 - unsigned long n = 0;
830 -
831 - __rq_for_each_bio(bio, r)
832 - n++;
833 - return n;
834823 }
835824
836825 static void
....@@ -847,6 +836,7 @@
847836 {
848837 struct request *rq;
849838 struct request_queue *q;
839 + struct aoe_req *req;
850840 struct buf *buf;
851841 struct bio *bio;
852842
....@@ -857,13 +847,19 @@
857847 return d->ip.buf;
858848 rq = d->ip.rq;
859849 if (rq == NULL) {
860 - rq = blk_peek_request(q);
850 + rq = list_first_entry_or_null(&d->rq_list, struct request,
851 + queuelist);
861852 if (rq == NULL)
862853 return NULL;
863 - blk_start_request(rq);
854 + list_del_init(&rq->queuelist);
855 + blk_mq_start_request(rq);
864856 d->ip.rq = rq;
865857 d->ip.nxbio = rq->bio;
866 - rq->special = (void *) rqbiocnt(rq);
858 +
859 + req = blk_mq_rq_to_pdu(rq);
860 + req->nr_bios = 0;
861 + __rq_for_each_bio(bio, rq)
862 + req->nr_bios++;
867863 }
868864 buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
869865 if (buf == NULL) {
....@@ -904,9 +900,7 @@
904900 ssize = get_capacity(d->gd);
905901 bd = bdget_disk(d->gd, 0);
906902 if (bd) {
907 - inode_lock(bd->bd_inode);
908 - i_size_write(bd->bd_inode, (loff_t)ssize<<9);
909 - inode_unlock(bd->bd_inode);
903 + bd_set_nr_sectors(bd, ssize);
910904 bdput(bd);
911905 }
912906 spin_lock_irq(&d->lock);
....@@ -1045,6 +1039,7 @@
10451039 struct bio *bio;
10461040 int bok;
10471041 struct request_queue *q;
1042 + blk_status_t err = BLK_STS_OK;
10481043
10491044 q = d->blkq;
10501045 if (rq == d->ip.rq)
....@@ -1052,26 +1047,27 @@
10521047 do {
10531048 bio = rq->bio;
10541049 bok = !fastfail && !bio->bi_status;
1055 - } while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
1050 + if (!bok)
1051 + err = BLK_STS_IOERR;
1052 + } while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
1053 +
1054 + __blk_mq_end_request(rq, err);
10561055
10571056 /* cf. http://lkml.org/lkml/2006/10/31/28 */
10581057 if (!fastfail)
1059 - __blk_run_queue(q);
1058 + blk_mq_run_hw_queues(q, true);
10601059 }
10611060
10621061 static void
10631062 aoe_end_buf(struct aoedev *d, struct buf *buf)
10641063 {
1065 - struct request *rq;
1066 - unsigned long n;
1064 + struct request *rq = buf->rq;
1065 + struct aoe_req *req = blk_mq_rq_to_pdu(rq);
10671066
10681067 if (buf == d->ip.buf)
10691068 d->ip.buf = NULL;
1070 - rq = buf->rq;
10711069 mempool_free(buf, d->bufpool);
1072 - n = (unsigned long) rq->special;
1073 - rq->special = (void *) --n;
1074 - if (n == 0)
1070 + if (--req->nr_bios == 0)
10751071 aoe_end_request(d, rq, 0);
10761072 }
10771073
....@@ -1137,7 +1133,7 @@
11371133 break;
11381134 }
11391135 bvcpy(skb, f->buf->bio, f->iter, n);
1140 - /* fall through */
1136 + fallthrough;
11411137 case ATA_CMD_PIO_WRITE:
11421138 case ATA_CMD_PIO_WRITE_EXT:
11431139 spin_lock_irq(&d->lock);