```diff
@@ -7,7 +7,7 @@
 #include <linux/ata.h>
 #include <linux/slab.h>
 #include <linux/hdreg.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/genhd.h>
```
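These hunks appear to come from the ATA-over-Ethernet (aoe) driver's command path, being converted from the legacy `request_fn` model to blk-mq. The later hunks fetch a per-request payload with `blk_mq_rq_to_pdu()` and use `req->nr_bios`; that space has to be reserved when the tag set is allocated. The sketch below shows that setup in minimal form; it is not the driver's actual init code, and the `example_*` names, queue depth, flags, and the exact layout of `struct aoe_req` (only `nr_bios` is visible in this diff) are assumptions.

```c
#include <linux/blk-mq.h>

/* Assumed PDU layout: only nr_bios appears in the diff; the real
 * struct aoe_req in the driver's header may hold more fields. */
struct aoe_req {
	int nr_bios;		/* bios not yet completed for this request */
};

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	/* a real ->queue_rq would hand bd->rq to the driver here */
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
};

static struct blk_mq_tag_set example_tag_set;

/* cmd_size reserves room for struct aoe_req behind every request;
 * this is the memory blk_mq_rq_to_pdu() returns in the hunks below. */
static int example_init_tag_set(void)
{
	example_tag_set.ops = &example_mq_ops;
	example_tag_set.nr_hw_queues = 1;		/* illustrative */
	example_tag_set.queue_depth = 128;		/* illustrative */
	example_tag_set.numa_node = NUMA_NO_NODE;
	example_tag_set.cmd_size = sizeof(struct aoe_req);
	example_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	return blk_mq_alloc_tag_set(&example_tag_set);
}
```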
```diff
@@ -813,24 +813,13 @@
 out:
 	if ((d->flags & DEVFL_KICKME) && d->blkq) {
 		d->flags &= ~DEVFL_KICKME;
-		d->blkq->request_fn(d->blkq);
+		blk_mq_run_hw_queues(d->blkq, true);
 	}
 
 	d->timer.expires = jiffies + TIMERTICK;
 	add_timer(&d->timer);
 
 	spin_unlock_irqrestore(&d->lock, flags);
-}
-
-static unsigned long
-rqbiocnt(struct request *r)
-{
-	struct bio *bio;
-	unsigned long n = 0;
-
-	__rq_for_each_bio(bio, r)
-		n++;
-	return n;
 }
 
 static void
```
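A blk-mq `request_queue` has no `->request_fn`, so the `DEVFL_KICKME` path now restarts the hardware queues with `blk_mq_run_hw_queues(q, true)`; the `true` requests an asynchronous run, which suits timer context with `d->lock` held. The `rqbiocnt()` helper is deleted outright: the bio count it used to stash in `rq->special` now lives in the per-request PDU, as the `nextbuf` hunk below shows.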
```diff
@@ -847,6 +836,7 @@
 {
 	struct request *rq;
 	struct request_queue *q;
+	struct aoe_req *req;
 	struct buf *buf;
 	struct bio *bio;
 
```
```diff
@@ -857,13 +847,19 @@
 		return d->ip.buf;
 	rq = d->ip.rq;
 	if (rq == NULL) {
-		rq = blk_peek_request(q);
+		rq = list_first_entry_or_null(&d->rq_list, struct request,
+					      queuelist);
 		if (rq == NULL)
 			return NULL;
-		blk_start_request(rq);
+		list_del_init(&rq->queuelist);
+		blk_mq_start_request(rq);
 		d->ip.rq = rq;
 		d->ip.nxbio = rq->bio;
-		rq->special = (void *) rqbiocnt(rq);
+
+		req = blk_mq_rq_to_pdu(rq);
+		req->nr_bios = 0;
+		__rq_for_each_bio(bio, rq)
+			req->nr_bios++;
 	}
 	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
 	if (buf == NULL) {
```
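`blk_peek_request()`/`blk_start_request()` exist only for the legacy path; under blk-mq the block layer pushes requests into the driver through its `->queue_rq` hook, so the driver stages them on its own `d->rq_list` and this consumer pops them with `list_first_entry_or_null()`. The producer side is not part of this diff; the sketch below is an assumed counterpart (names, fields, and locking are illustrative, and the real `->queue_rq` would also handle error cases such as a downed device).

```c
#include <linux/blk-mq.h>
#include <linux/list.h>

/* Illustrative device: only the fields this diff touches (lock, rq_list);
 * the real struct aoedev has many more members. */
struct example_aoedev {
	spinlock_t lock;
	struct list_head rq_list;	/* staged requests, consumed by nextbuf() */
};

/* Assumed ->queue_rq counterpart: stage the request on the device's list
 * so the existing worker can pull it off as shown in the hunk above. */
static blk_status_t example_stage_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct example_aoedev *d = hctx->queue->queuedata;

	spin_lock_irq(&d->lock);
	list_add_tail(&bd->rq->queuelist, &d->rq_list);
	/* a real driver would kick its work function here */
	spin_unlock_irq(&d->lock);
	return BLK_STS_OK;
}
```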
```diff
@@ -904,9 +900,7 @@
 	ssize = get_capacity(d->gd);
 	bd = bdget_disk(d->gd, 0);
 	if (bd) {
-		inode_lock(bd->bd_inode);
-		i_size_write(bd->bd_inode, (loff_t)ssize<<9);
-		inode_unlock(bd->bd_inode);
+		bd_set_nr_sectors(bd, ssize);
 		bdput(bd);
 	}
 	spin_lock_irq(&d->lock);
```
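The open-coded `inode_lock()`/`i_size_write()`/`inode_unlock()` sequence becomes a call to the `bd_set_nr_sectors()` helper, which updates the block device's inode size from a sector count under a lock owned by the block layer rather than the inode lock. Roughly, and hedged since the exact body depends on the kernel version providing the helper, it amounts to:

```c
/* Approximate shape of the helper, for orientation only. */
static void bd_set_nr_sectors_like(struct block_device *bdev, sector_t sectors)
{
	spin_lock(&bdev->bd_size_lock);
	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
	spin_unlock(&bdev->bd_size_lock);
}
```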
```diff
@@ -1045,6 +1039,7 @@
 	struct bio *bio;
 	int bok;
 	struct request_queue *q;
+	blk_status_t err = BLK_STS_OK;
 
 	q = d->blkq;
 	if (rq == d->ip.rq)
```
```diff
@@ -1052,26 +1047,27 @@
 	do {
 		bio = rq->bio;
 		bok = !fastfail && !bio->bi_status;
-	} while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
+		if (!bok)
+			err = BLK_STS_IOERR;
+	} while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
+
+	__blk_mq_end_request(rq, err);
 
 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
 	if (!fastfail)
-		__blk_run_queue(q);
+		blk_mq_run_hw_queues(q, true);
 }
 
 static void
 aoe_end_buf(struct aoedev *d, struct buf *buf)
 {
-	struct request *rq;
-	unsigned long n;
+	struct request *rq = buf->rq;
+	struct aoe_req *req = blk_mq_rq_to_pdu(rq);
 
 	if (buf == d->ip.buf)
 		d->ip.buf = NULL;
-	rq = buf->rq;
 	mempool_free(buf, d->bufpool);
-	n = (unsigned long) rq->special;
-	rq->special = (void *) --n;
-	if (n == 0)
+	if (--req->nr_bios == 0)
 		aoe_end_request(d, rq, 0);
 }
 
```
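`__blk_end_request()` is a legacy-only API. Its blk-mq replacement is split in two: `blk_update_request()` retires the given number of bytes and returns true while part of the request is still outstanding, and `__blk_mq_end_request()` then finishes the request with the accumulated status (`err` latches any per-bio failure). The plain `blk_mq_end_request()` is not used here because it would run `blk_update_request()` over the whole request again. A compact sketch of the same pattern outside this driver, with `chunk_bytes` and `status` as placeholders:

```c
#include <linux/blk-mq.h>

/* Sketch: complete a request one chunk at a time, then end it. */
static void example_end_request(struct request *rq, unsigned int chunk_bytes,
				blk_status_t status)
{
	while (blk_update_request(rq, status, chunk_bytes))
		;	/* more of the request remains; loop again */
	__blk_mq_end_request(rq, status);
}
```

Likewise, `aoe_end_buf()` now decrements the bio count in the per-request PDU instead of abusing the removed `rq->special` pointer as a counter.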
```diff
@@ -1137,7 +1133,7 @@
 			break;
 		}
 		bvcpy(skb, f->buf->bio, f->iter, n);
-		/* fall through */
+		fallthrough;
 	case ATA_CMD_PIO_WRITE:
 	case ATA_CMD_PIO_WRITE_EXT:
 		spin_lock_irq(&d->lock);
```
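The `/* fall through */` comment becomes the kernel's `fallthrough` pseudo-keyword, which expands to the compiler's fallthrough attribute where available, so `-Wimplicit-fallthrough` is satisfied by the compiler instead of by comment parsing. A trivial usage sketch (the names here are illustrative):

```c
#include <linux/kernel.h>	/* pulls in the fallthrough definition */

static int example_switch(int op)
{
	int v = 0;

	switch (op) {
	case 0:
		v += 1;
		fallthrough;	/* intentional: case 0 also does case 1's work */
	case 1:
		v += 2;
		break;
	default:
		v = -1;
		break;
	}
	return v;
}
```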