2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/target/target_core_user.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
  * Copyright (C) 2014 Red Hat, Inc.
  * Copyright (C) 2015 Arrikto, Inc.
  * Copyright (C) 2017 Chinamobile, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #include <linux/spinlock.h>
@@ -130,6 +118,7 @@
 #define TCMU_DEV_BIT_OPEN 0
 #define TCMU_DEV_BIT_BROKEN 1
 #define TCMU_DEV_BIT_BLOCKED 2
+#define TCMU_DEV_BIT_TMR_NOTIFY 3
 	unsigned long flags;
 
 	struct uio_info uio_info;
@@ -149,6 +138,7 @@
 
 	struct mutex cmdr_lock;
 	struct list_head qfull_queue;
+	struct list_head tmr_queue;
 
 	uint32_t dbi_max;
 	uint32_t dbi_thresh;
@@ -187,15 +177,26 @@
 	/* Can't use se_cmd when cleaning up expired cmds, because if
 	   cmd has been completed then accessing se_cmd is off limits */
 	uint32_t dbi_cnt;
+	uint32_t dbi_bidi_cnt;
 	uint32_t dbi_cur;
 	uint32_t *dbi;
+
+	uint32_t data_len_bidi;
 
 	unsigned long deadline;
 
 #define TCMU_CMD_BIT_EXPIRED 0
-#define TCMU_CMD_BIT_INFLIGHT 1
 	unsigned long flags;
 };
+
+struct tcmu_tmr {
+	struct list_head queue_entry;
+
+	uint8_t tmr_type;
+	uint32_t tmr_cmd_cnt;
+	int16_t tmr_cmd_ids[];
+};
+
 /*
  * To avoid dead lock the mutex lock order should always be:
  *
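
The new struct tcmu_tmr above ends in a flexible array member (tmr_cmd_ids[]), so a TMR record and its command-id list share a single allocation; tcmu_tmr_notify() later in this diff allocates it with kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), ...). A minimal standalone sketch of the same pattern in plain userspace C (names and values here are illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tmr_example {
	uint8_t  tmr_type;
	uint32_t cmd_cnt;
	int16_t  cmd_ids[];	/* flexible array member, sized at alloc time */
};

static struct tmr_example *tmr_alloc(uint8_t type, const int16_t *ids,
				     uint32_t cnt)
{
	/* one allocation covers the header plus cnt trailing ids */
	struct tmr_example *tmr =
		malloc(sizeof(*tmr) + cnt * sizeof(tmr->cmd_ids[0]));

	if (!tmr)
		return NULL;
	tmr->tmr_type = type;
	tmr->cmd_cnt = cnt;
	memcpy(tmr->cmd_ids, ids, cnt * sizeof(tmr->cmd_ids[0]));
	return tmr;
}

int main(void)
{
	int16_t ids[] = { 3, 7, 11 };
	struct tmr_example *tmr = tmr_alloc(1, ids, 3);

	if (tmr) {
		printf("type %u, %u ids, first id %d\n",
		       (unsigned)tmr->tmr_type, (unsigned)tmr->cmd_cnt,
		       tmr->cmd_ids[0]);
		free(tmr);
	}
	return 0;
}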
@@ -244,7 +245,7 @@
 static int tcmu_get_global_max_data_area(char *buffer,
 					 const struct kernel_param *kp)
 {
-	return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+	return sprintf(buffer, "%d\n", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
 }
 
 static const struct kernel_param_ops tcmu_global_max_data_area_op = {
@@ -438,29 +439,29 @@
 	return 0;
 }
 
-static const struct genl_ops tcmu_genl_ops[] = {
+static const struct genl_small_ops tcmu_genl_ops[] = {
 	{
 		.cmd = TCMU_CMD_SET_FEATURES,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 		.flags = GENL_ADMIN_PERM,
-		.policy = tcmu_attr_policy,
 		.doit = tcmu_genl_set_features,
 	},
 	{
 		.cmd = TCMU_CMD_ADDED_DEVICE_DONE,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 		.flags = GENL_ADMIN_PERM,
-		.policy = tcmu_attr_policy,
 		.doit = tcmu_genl_add_dev_done,
 	},
 	{
 		.cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 		.flags = GENL_ADMIN_PERM,
-		.policy = tcmu_attr_policy,
 		.doit = tcmu_genl_rm_dev_done,
 	},
 	{
 		.cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 		.flags = GENL_ADMIN_PERM,
-		.policy = tcmu_attr_policy,
 		.doit = tcmu_genl_reconfig_dev_done,
 	},
 };
@@ -472,11 +473,12 @@
 	.name = "TCM-USER",
 	.version = 2,
 	.maxattr = TCMU_ATTR_MAX,
+	.policy = tcmu_attr_policy,
 	.mcgrps = tcmu_mcgrps,
 	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
 	.netnsok = true,
-	.ops = tcmu_genl_ops,
-	.n_ops = ARRAY_SIZE(tcmu_genl_ops),
+	.small_ops = tcmu_genl_ops,
+	.n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
 };
 
 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
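
Two things change in the genetlink hunks above: the per-op .policy moves to a single family-wide .policy, and the ops table becomes genl_small_ops with n_small_ops derived via ARRAY_SIZE, so the count can never drift out of sync with the table. The sizing idiom itself is ordinary C; a standalone sketch (op numbers and handlers are made up for illustration):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))	/* same idiom as the kernel macro */

typedef int (*doit_fn)(int arg);

static int do_set(int arg)  { return printf("set %d\n", arg); }
static int do_done(int arg) { return printf("done %d\n", arg); }

struct op { int cmd; doit_fn doit; };

/* counting the table with ARRAY_SIZE keeps the op count correct automatically,
 * which is what .n_small_ops = ARRAY_SIZE(tcmu_genl_ops) does above */
static const struct op ops[] = {
	{ .cmd = 1, .doit = do_set },
	{ .cmd = 2, .doit = do_done },
};

int main(void)
{
	for (unsigned i = 0; i < ARRAY_SIZE(ops); i++)
		ops[i].doit((int)i);
	return 0;
}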
@@ -493,15 +495,16 @@
 		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
 }
 
-static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
-					struct tcmu_cmd *tcmu_cmd)
+static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
+				       struct tcmu_cmd *tcmu_cmd,
+				       int prev_dbi, int *iov_cnt)
 {
 	struct page *page;
 	int ret, dbi;
 
 	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
 	if (dbi == udev->dbi_thresh)
-		return false;
+		return -1;
 
 	page = radix_tree_lookup(&udev->data_blocks, dbi);
 	if (!page) {
@@ -510,7 +513,7 @@
 			schedule_delayed_work(&tcmu_unmap_work, 0);
 
 		/* try to get new page from the mm */
-		page = alloc_page(GFP_KERNEL);
+		page = alloc_page(GFP_NOIO);
 		if (!page)
 			goto err_alloc;
 
@@ -525,24 +528,30 @@
 	set_bit(dbi, udev->data_bitmap);
 	tcmu_cmd_set_dbi(tcmu_cmd, dbi);
 
-	return true;
+	if (dbi != prev_dbi + 1)
+		*iov_cnt += 1;
+
+	return dbi;
 err_insert:
 	__free_page(page);
 err_alloc:
 	atomic_dec(&global_db_count);
-	return false;
+	return -1;
 }
 
-static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
-				  struct tcmu_cmd *tcmu_cmd)
+static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
+				 struct tcmu_cmd *tcmu_cmd, int dbi_cnt)
 {
-	int i;
+	/* start value of dbi + 1 must not be a valid dbi */
+	int dbi = -2;
+	int i, iov_cnt = 0;
 
-	for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
-		if (!tcmu_get_empty_block(udev, tcmu_cmd))
-			return false;
+	for (i = 0; i < dbi_cnt; i++) {
+		dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, &iov_cnt);
+		if (dbi < 0)
+			return -1;
 	}
-	return true;
+	return iov_cnt;
 }
 
 static inline struct page *
@@ -557,25 +566,58 @@
 	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
 }
 
-static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
 {
-	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
-	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+	int i, len;
+	struct se_cmd *se_cmd = cmd->se_cmd;
+
+	cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
 
 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
 		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-		data_length += round_up(se_cmd->t_bidi_data_sg->length,
-				DATA_BLOCK_SIZE);
+		for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
+			len += se_cmd->t_bidi_data_sg[i].length;
+		cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, DATA_BLOCK_SIZE);
+		cmd->dbi_cnt += cmd->dbi_bidi_cnt;
+		cmd->data_len_bidi = len;
 	}
-
-	return data_length;
 }
 
-static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+			    struct iovec **iov, int prev_dbi, int *remain)
 {
-	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+	/* Get the next dbi */
+	int dbi = tcmu_cmd_get_dbi(cmd);
+	/* Do not add more than DATA_BLOCK_SIZE to iov */
+	int len = min_t(int, DATA_BLOCK_SIZE, *remain);
 
-	return data_length / DATA_BLOCK_SIZE;
+	*remain -= len;
+	/*
+	 * The following code will gather and map the blocks to the same iovec
+	 * when the blocks are all next to each other.
+	 */
+	if (dbi != prev_dbi + 1) {
+		/* dbi is not next to previous dbi, so start new iov */
+		if (prev_dbi >= 0)
+			(*iov)++;
+		/* write offset relative to mb_addr */
+		(*iov)->iov_base = (void __user *)
+				(udev->data_off + dbi * DATA_BLOCK_SIZE);
+	}
+	(*iov)->iov_len += len;
+
+	return dbi;
+}
+
+static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+			    struct iovec **iov, int data_length)
+{
+	/* start value of dbi + 1 must not be a valid dbi */
+	int dbi = -2;
+
+	/* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
+	while (data_length > 0)
+		dbi = new_block_to_iov(udev, cmd, iov, dbi, &data_length);
 }
 
 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
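
new_block_to_iov() above coalesces consecutive data-block indices into one iovec and only starts a new iovec on a gap; the sentinel prev_dbi = -2 guarantees the very first block can never look adjacent to a previous one. A standalone sketch of that coalescing logic (block size and data offset are arbitrary here, not the driver's values):

#include <stdio.h>
#include <stdint.h>
#include <sys/uio.h>

#define DATA_BLOCK_SIZE 4096	/* assumption: stand-in for the driver's block size */

/*
 * Mirror of the new_block_to_iov() idea: a block index (dbi) adjacent to the
 * previous one extends the current iovec; a gap closes it and opens a new one.
 */
static int add_block(struct iovec **iov, int *iov_cnt, int prev_dbi,
		     int dbi, size_t len, uintptr_t data_off)
{
	if (dbi != prev_dbi + 1) {
		if (prev_dbi >= 0)
			(*iov)++;	/* close the previous iovec */
		(*iov)->iov_base = (void *)(data_off +
					    (uintptr_t)dbi * DATA_BLOCK_SIZE);
		(*iov)->iov_len = 0;
		(*iov_cnt)++;
	}
	(*iov)->iov_len += len;
	return dbi;
}

int main(void)
{
	struct iovec iovs[4] = { { 0 } }, *iov = &iovs[0];
	int dbis[] = { 3, 4, 5, 9 };	/* 3..5 coalesce, 9 starts a new iovec */
	int prev = -2, cnt = 0;		/* -2: "prev + 1" is never a valid dbi */

	for (unsigned i = 0; i < sizeof(dbis) / sizeof(dbis[0]); i++)
		prev = add_block(&iov, &cnt, prev, dbis[i],
				 DATA_BLOCK_SIZE, 0x1000);

	printf("iov_cnt = %d\n", cnt);	/* prints 2 */
	for (int i = 0; i < cnt; i++)
		printf("iov[%d]: base=%p len=%zu\n",
		       i, iovs[i].iov_base, iovs[i].iov_len);
	return 0;
}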
@@ -584,7 +626,7 @@
 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
 	struct tcmu_cmd *tcmu_cmd;
 
-	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
+	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
 	if (!tcmu_cmd)
 		return NULL;
 
@@ -592,10 +634,9 @@
 	tcmu_cmd->se_cmd = se_cmd;
 	tcmu_cmd->tcmu_dev = udev;
 
-	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
-	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
+	tcmu_cmd_set_block_cnts(tcmu_cmd);
 	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
-				GFP_KERNEL);
+				GFP_NOIO);
 	if (!tcmu_cmd->dbi) {
 		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
 		return NULL;
@@ -643,46 +684,22 @@
 	return size - head;
 }
 
-static inline void new_iov(struct iovec **iov, int *iov_cnt)
-{
-	struct iovec *iovec;
-
-	if (*iov_cnt != 0)
-		(*iov)++;
-	(*iov_cnt)++;
-
-	iovec = *iov;
-	memset(iovec, 0, sizeof(struct iovec));
-}
-
 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
 
-/* offset is relative to mb_addr */
-static inline size_t get_block_offset_user(struct tcmu_dev *dev,
-		int dbi, int remaining)
+static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
+			      struct iovec **iov)
 {
-	return dev->data_off + dbi * DATA_BLOCK_SIZE +
-		DATA_BLOCK_SIZE - remaining;
-}
-
-static inline size_t iov_tail(struct iovec *iov)
-{
-	return (size_t)iov->iov_base + iov->iov_len;
-}
-
-static void scatter_data_area(struct tcmu_dev *udev,
-	struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
-	unsigned int data_nents, struct iovec **iov,
-	int *iov_cnt, bool copy_data)
-{
-	int i, dbi;
+	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+	/* start value of dbi + 1 must not be a valid dbi */
+	int i, dbi = -2;
 	int block_remaining = 0;
+	int data_len = se_cmd->data_length;
 	void *from, *to = NULL;
-	size_t copy_bytes, to_offset, offset;
+	size_t copy_bytes, offset;
 	struct scatterlist *sg;
 	struct page *page = NULL;
 
-	for_each_sg(data_sg, sg, data_nents, i) {
+	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
 		int sg_remaining = sg->length;
 		from = kmap_atomic(sg_page(sg)) + sg->offset;
 		while (sg_remaining > 0) {
@@ -692,50 +709,19 @@
 				kunmap_atomic(to);
 			}
 
-				block_remaining = DATA_BLOCK_SIZE;
-				dbi = tcmu_cmd_get_dbi(tcmu_cmd);
+				/* get next dbi and add to IOVs */
+				dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
						       &data_len);
 				page = tcmu_get_block_page(udev, dbi);
 				to = kmap_atomic(page);
+				block_remaining = DATA_BLOCK_SIZE;
 			}
 
-			/*
-			 * Covert to virtual offset of the ring data area.
-			 */
-			to_offset = get_block_offset_user(udev, dbi,
-					block_remaining);
-
-			/*
-			 * The following code will gather and map the blocks
-			 * to the same iovec when the blocks are all next to
-			 * each other.
-			 */
 			copy_bytes = min_t(size_t, sg_remaining,
					   block_remaining);
-			if (*iov_cnt != 0 &&
-			    to_offset == iov_tail(*iov)) {
-				/*
-				 * Will append to the current iovec, because
-				 * the current block page is next to the
-				 * previous one.
-				 */
-				(*iov)->iov_len += copy_bytes;
-			} else {
-				/*
-				 * Will allocate a new iovec because we are
-				 * first time here or the current block page
-				 * is not next to the previous one.
-				 */
-				new_iov(iov, iov_cnt);
-				(*iov)->iov_base = (void __user *)to_offset;
-				(*iov)->iov_len = copy_bytes;
-			}
-
-			if (copy_data) {
-				offset = DATA_BLOCK_SIZE - block_remaining;
-				memcpy(to + offset,
-				       from + sg->length - sg_remaining,
-				       copy_bytes);
-			}
+			offset = DATA_BLOCK_SIZE - block_remaining;
+			memcpy(to + offset, from + sg->length - sg_remaining,
+			       copy_bytes);
 
 			sg_remaining -= copy_bytes;
 			block_remaining -= copy_bytes;
@@ -766,13 +752,12 @@
 		data_sg = se_cmd->t_data_sg;
 		data_nents = se_cmd->t_data_nents;
 	} else {
-
 		/*
 		 * For bidi case, the first count blocks are for Data-Out
 		 * buffer blocks, and before gathering the Data-In buffer
-		 * the Data-Out buffer blocks should be discarded.
+		 * the Data-Out buffer blocks should be skipped.
 		 */
-		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+		count = cmd->dbi_cnt - cmd->dbi_bidi_cnt;
 
 		data_sg = se_cmd->t_bidi_data_sg;
 		data_nents = se_cmd->t_bidi_data_nents;
@@ -820,17 +805,13 @@
 }
 
 /*
- * We can't queue a command until we have space available on the cmd ring *and*
- * space available on the data area.
+ * We can't queue a command until we have space available on the cmd ring.
  *
  * Called with ring lock held.
 */
-static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
-				size_t cmd_size, size_t data_needed)
+static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
 {
 	struct tcmu_mailbox *mb = udev->mb_addr;
-	uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
-				/ DATA_BLOCK_SIZE;
 	size_t space, cmd_needed;
 	u32 cmd_head;
 
@@ -853,26 +834,54 @@
 		       udev->cmdr_last_cleaned, udev->cmdr_size);
 		return false;
 	}
+	return true;
+}
+
+/*
+ * We have to allocate data buffers before we can queue a command.
+ * Returns -1 on error (not enough space) or number of needed iovs on success
+ *
+ * Called with ring lock held.
+ */
+static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+				 int *iov_bidi_cnt)
+{
+	int space, iov_cnt = 0, ret = 0;
+
+	if (!cmd->dbi_cnt)
+		goto wr_iov_cnts;
 
 	/* try to check and get the data blocks as needed */
 	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
-	if ((space * DATA_BLOCK_SIZE) < data_needed) {
+	if (space < cmd->dbi_cnt) {
 		unsigned long blocks_left =
 				(udev->max_blocks - udev->dbi_thresh) + space;
 
-		if (blocks_left < blocks_needed) {
-			pr_debug("no data space: only %lu available, but ask for %zu\n",
+		if (blocks_left < cmd->dbi_cnt) {
+			pr_debug("no data space: only %lu available, but ask for %lu\n",
				 blocks_left * DATA_BLOCK_SIZE,
-				 data_needed);
-			return false;
+				 cmd->dbi_cnt * DATA_BLOCK_SIZE);
+			return -1;
		}
 
-		udev->dbi_thresh += blocks_needed;
+		udev->dbi_thresh += cmd->dbi_cnt;
		if (udev->dbi_thresh > udev->max_blocks)
			udev->dbi_thresh = udev->max_blocks;
	}
 
-	return tcmu_get_empty_blocks(udev, cmd);
+	iov_cnt = tcmu_get_empty_blocks(udev, cmd,
+					cmd->dbi_cnt - cmd->dbi_bidi_cnt);
+	if (iov_cnt < 0)
+		return -1;
+
+	if (cmd->dbi_bidi_cnt) {
+		ret = tcmu_get_empty_blocks(udev, cmd, cmd->dbi_bidi_cnt);
+		if (ret < 0)
+			return -1;
+	}
+wr_iov_cnts:
+	*iov_bidi_cnt = ret;
+	return iov_cnt + ret;
 }
 
 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
@@ -936,6 +945,34 @@
 	return 0;
 }
 
+static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
+{
+	struct tcmu_cmd_entry_hdr *hdr;
+	struct tcmu_mailbox *mb = udev->mb_addr;
+	uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+
+	/* Insert a PAD if end-of-ring space is too small */
+	if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
+		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
+
+		hdr = (void *) mb + CMDR_OFF + cmd_head;
+		tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
+		tcmu_hdr_set_len(&hdr->len_op, pad_size);
+		hdr->cmd_id = 0; /* not used for PAD */
+		hdr->kflags = 0;
+		hdr->uflags = 0;
+		tcmu_flush_dcache_range(hdr, sizeof(*hdr));
+
+		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
+		tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+		WARN_ON(cmd_head != 0);
+	}
+
+	return cmd_head;
+}
+
 /**
  * queue_cmd_ring - queue cmd to ring or internally
  * @tcmu_cmd: cmd to queue
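
ring_insert_padding() above factors out the end-of-ring handling: when the next entry would not fit contiguously, the remaining tail is consumed by a PAD record the consumer skips, and the head wraps to offset 0. A toy standalone model of that wrap rule (sizes and header layout are simplified stand-ins, not the TCMU UAPI):

#include <stdio.h>
#include <string.h>

#define RING_SIZE 64
#define OP_PAD 0

struct hdr { unsigned char op; unsigned char len; };	/* toy header only */

static unsigned char ring[RING_SIZE];

static unsigned head_to_end(unsigned head, unsigned size)
{
	return size - head;	/* same helper shape the driver uses */
}

/* Toy version of ring_insert_padding(): if the next entry would cross the
 * end of the ring, fill the tail with a PAD record and restart at offset 0. */
static unsigned insert_padding(unsigned head, unsigned cmd_size)
{
	if (head_to_end(head, RING_SIZE) < cmd_size) {
		struct hdr pad = {
			.op  = OP_PAD,
			.len = (unsigned char)head_to_end(head, RING_SIZE),
		};
		memcpy(&ring[head], &pad, sizeof(pad));
		head = 0;	/* consumer skips the PAD and wraps with us */
	}
	return head;
}

int main(void)
{
	unsigned head = 56;			/* only 8 bytes left at the end */
	head = insert_padding(head, 16);	/* a 16-byte entry doesn't fit */
	printf("new head = %u\n", head);	/* prints 0 */
	return 0;
}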
@@ -951,14 +988,14 @@
 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
 	size_t base_command_size, command_size;
-	struct tcmu_mailbox *mb;
+	struct tcmu_mailbox *mb = udev->mb_addr;
 	struct tcmu_cmd_entry *entry;
 	struct iovec *iov;
-	int iov_cnt, cmd_id;
+	int iov_cnt, iov_bidi_cnt, cmd_id;
 	uint32_t cmd_head;
 	uint64_t cdb_off;
-	bool copy_to_data_area;
-	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+	/* size of data buffer needed */
+	size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE;
 
 	*scsi_err = TCM_NO_SENSE;
 
@@ -972,88 +1009,41 @@
 		return -1;
 	}
 
-	/*
-	 * Must be a certain minimum size for response sense info, but
-	 * also may be larger if the iov array is large.
-	 *
-	 * We prepare as many iovs as possbile for potential uses here,
-	 * because it's expensive to tell how many regions are freed in
-	 * the bitmap & global data pool, as the size calculated here
-	 * will only be used to do the checks.
-	 *
-	 * The size will be recalculated later as actually needed to save
-	 * cmd area memories.
-	 */
-	base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
-	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
-
 	if (!list_empty(&udev->qfull_queue))
 		goto queue;
 
-	mb = udev->mb_addr;
-	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
-	if ((command_size > (udev->cmdr_size / 2)) ||
-	    data_length > udev->data_size) {
-		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
-			"cmd ring/data area\n", command_size, data_length,
-			udev->cmdr_size, udev->data_size);
+	if (data_length > udev->data_size) {
+		pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
+			data_length, udev->data_size);
 		*scsi_err = TCM_INVALID_CDB_FIELD;
 		return -1;
 	}
 
-	if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
+	iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
+	if (iov_cnt < 0)
+		goto free_and_queue;
+
+	/*
+	 * Must be a certain minimum size for response sense info, but
+	 * also may be larger if the iov array is large.
+	 */
+	base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
+	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
+
+	if (command_size > (udev->cmdr_size / 2)) {
+		pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
+			command_size, udev->cmdr_size);
+		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
+		*scsi_err = TCM_INVALID_CDB_FIELD;
+		return -1;
+	}
+
+	if (!is_ring_space_avail(udev, command_size))
 		/*
 		 * Don't leave commands partially setup because the unmap
 		 * thread might need the blocks to make forward progress.
 		 */
-		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
-		tcmu_cmd_reset_dbi_cur(tcmu_cmd);
-		goto queue;
-	}
-
-	/* Insert a PAD if end-of-ring space is too small */
-	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
-		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
-
-		entry = (void *) mb + CMDR_OFF + cmd_head;
-		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
-		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
-		entry->hdr.cmd_id = 0; /* not used for PAD */
-		entry->hdr.kflags = 0;
-		entry->hdr.uflags = 0;
-		tcmu_flush_dcache_range(entry, sizeof(entry->hdr));
-
-		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
-		tcmu_flush_dcache_range(mb, sizeof(*mb));
-
-		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
-		WARN_ON(cmd_head != 0);
-	}
-
-	entry = (void *) mb + CMDR_OFF + cmd_head;
-	memset(entry, 0, command_size);
-	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
-
-	/* Handle allocating space from the data area */
-	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
-	iov = &entry->req.iov[0];
-	iov_cnt = 0;
-	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
-		|| se_cmd->se_cmd_flags & SCF_BIDI);
-	scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
-			  se_cmd->t_data_nents, &iov, &iov_cnt,
-			  copy_to_data_area);
-	entry->req.iov_cnt = iov_cnt;
-
-	/* Handle BIDI commands */
-	iov_cnt = 0;
-	if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		iov++;
-		scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
-				  se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
-				  false);
-	}
-	entry->req.iov_bidi_cnt = iov_cnt;
+		goto free_and_queue;
 
 	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
 	if (cmd_id < 0) {
@@ -1068,17 +1058,34 @@
 	pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
 		 tcmu_cmd, udev->name);
 
+	cmd_head = ring_insert_padding(udev, command_size);
+
+	entry = (void *) mb + CMDR_OFF + cmd_head;
+	memset(entry, 0, command_size);
+	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
+
+	/* prepare iov list and copy data to data area if necessary */
+	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
+	iov = &entry->req.iov[0];
+
+	if (se_cmd->data_direction == DMA_TO_DEVICE ||
+	    se_cmd->se_cmd_flags & SCF_BIDI)
+		scatter_data_area(udev, tcmu_cmd, &iov);
+	else
+		tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);
+
+	entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;
+
+	/* Handle BIDI commands */
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		iov++;
+		tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
+		entry->req.iov_bidi_cnt = iov_bidi_cnt;
+	}
+
 	tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
 
 	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
-
-	/*
-	 * Recalaulate the command's base size and size according
-	 * to the actual needs
-	 */
-	base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
-						       entry->req.iov_bidi_cnt);
-	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
 
 	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
 
@@ -1092,12 +1099,15 @@
 	tcmu_flush_dcache_range(mb, sizeof(*mb));
 
 	list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
-	set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
 
 	/* TODO: only if FLUSH and FUA? */
 	uio_event_notify(&udev->uio_info);
 
 	return 0;
+
+free_and_queue:
+	tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
+	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
 
 queue:
 	if (add_to_qfull_queue(tcmu_cmd)) {
@@ -1108,25 +1118,177 @@
 	return 1;
 }
 
+/**
+ * queue_tmr_ring - queue tmr info to ring or internally
+ * @udev: related tcmu_dev
+ * @tmr: tcmu_tmr containing tmr info to queue
+ *
+ * Returns:
+ *  0 success
+ *  1 internally queued to wait for ring memory to free.
+ */
+static int
+queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
+{
+	struct tcmu_tmr_entry *entry;
+	int cmd_size;
+	int id_list_sz;
+	struct tcmu_mailbox *mb = udev->mb_addr;
+	uint32_t cmd_head;
+
+	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
+		goto out_free;
+
+	id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt;
+	cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);
+
+	if (!list_empty(&udev->tmr_queue) ||
+	    !is_ring_space_avail(udev, cmd_size)) {
+		list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
+		pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
+			 tmr, udev->name);
+		return 1;
+	}
+
+	cmd_head = ring_insert_padding(udev, cmd_size);
+
+	entry = (void *)mb + CMDR_OFF + cmd_head;
+	memset(entry, 0, cmd_size);
+	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
+	tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
+	entry->tmr_type = tmr->tmr_type;
+	entry->cmd_cnt = tmr->tmr_cmd_cnt;
+	memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz);
+	tcmu_flush_dcache_range(entry, cmd_size);
+
+	UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size);
+	tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+	uio_event_notify(&udev->uio_info);
+
+out_free:
+	kfree(tmr);
+
+	return 0;
+}
+
 static sense_reason_t
 tcmu_queue_cmd(struct se_cmd *se_cmd)
 {
 	struct se_device *se_dev = se_cmd->se_dev;
 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
 	struct tcmu_cmd *tcmu_cmd;
-	sense_reason_t scsi_ret;
-	int ret;
+	sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
+	int ret = -1;
 
 	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
 	if (!tcmu_cmd)
 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
 	mutex_lock(&udev->cmdr_lock);
-	ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
-	mutex_unlock(&udev->cmdr_lock);
+	if (!(se_cmd->transport_state & CMD_T_ABORTED))
+		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
 	if (ret < 0)
 		tcmu_free_cmd(tcmu_cmd);
+	else
+		se_cmd->priv = tcmu_cmd;
+	mutex_unlock(&udev->cmdr_lock);
 	return scsi_ret;
+}
+
+static void tcmu_set_next_deadline(struct list_head *queue,
+				   struct timer_list *timer)
+{
+	struct tcmu_cmd *cmd;
+
+	if (!list_empty(queue)) {
+		cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry);
+		mod_timer(timer, cmd->deadline);
+	} else
+		del_timer(timer);
+}
+
+static int
+tcmu_tmr_type(enum tcm_tmreq_table tmf)
+{
+	switch (tmf) {
+	case TMR_ABORT_TASK:		return TCMU_TMR_ABORT_TASK;
+	case TMR_ABORT_TASK_SET:	return TCMU_TMR_ABORT_TASK_SET;
+	case TMR_CLEAR_ACA:		return TCMU_TMR_CLEAR_ACA;
+	case TMR_CLEAR_TASK_SET:	return TCMU_TMR_CLEAR_TASK_SET;
+	case TMR_LUN_RESET:		return TCMU_TMR_LUN_RESET;
+	case TMR_TARGET_WARM_RESET:	return TCMU_TMR_TARGET_WARM_RESET;
+	case TMR_TARGET_COLD_RESET:	return TCMU_TMR_TARGET_COLD_RESET;
+	case TMR_LUN_RESET_PRO:		return TCMU_TMR_LUN_RESET_PRO;
+	default:			return TCMU_TMR_UNKNOWN;
+	}
+}
+
+static void
+tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
+		struct list_head *cmd_list)
+{
+	int i = 0, cmd_cnt = 0;
+	bool unqueued = false;
+	uint16_t *cmd_ids = NULL;
+	struct tcmu_cmd *cmd;
+	struct se_cmd *se_cmd;
+	struct tcmu_tmr *tmr;
+	struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+	mutex_lock(&udev->cmdr_lock);
+
+	/* First we check for aborted commands in qfull_queue */
+	list_for_each_entry(se_cmd, cmd_list, state_list) {
+		i++;
+		if (!se_cmd->priv)
+			continue;
+		cmd = se_cmd->priv;
+		/* Commands on qfull queue have no id yet */
+		if (cmd->cmd_id) {
+			cmd_cnt++;
+			continue;
+		}
+		pr_debug("Removing aborted command %p from queue on dev %s.\n",
+			 cmd, udev->name);
+
+		list_del_init(&cmd->queue_entry);
+		tcmu_free_cmd(cmd);
+		se_cmd->priv = NULL;
+		target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
+		unqueued = true;
+	}
+	if (unqueued)
+		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
+
+	if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags))
+		goto unlock;
+
+	pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
+		 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);
+
+	tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_KERNEL);
+	if (!tmr)
+		goto unlock;
+
+	tmr->tmr_type = tcmu_tmr_type(tmf);
+	tmr->tmr_cmd_cnt = cmd_cnt;
+
+	if (cmd_cnt != 0) {
+		cmd_cnt = 0;
+		list_for_each_entry(se_cmd, cmd_list, state_list) {
+			if (!se_cmd->priv)
+				continue;
+			cmd = se_cmd->priv;
+			if (cmd->cmd_id)
+				tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id;
+		}
	}
+
+	queue_tmr_ring(udev, tmr);
+
+unlock:
+	mutex_unlock(&udev->cmdr_lock);
 }
 
 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
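
In queue_tmr_ring() above, the ring entry size is round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE): the fixed header plus the flexible id list, padded to the ring's alignment. A standalone illustration of that sizing (the struct layout and the 64-byte alignment are stand-ins, not the real UAPI values):

#include <stdio.h>
#include <stdint.h>

#define OP_ALIGN_SIZE 64	/* assumption: stand-in for TCMU_OP_ALIGN_SIZE */

/* classic round_up for a power-of-two alignment */
#define round_up(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct tmr_entry {		/* toy stand-in for struct tcmu_tmr_entry */
	uint32_t len_op;
	uint8_t  tmr_type;
	uint32_t cmd_cnt;
	int16_t  cmd_ids[];
};

int main(void)
{
	for (uint32_t cnt = 0; cnt <= 40; cnt += 20) {
		size_t id_list_sz = sizeof(int16_t) * cnt;
		size_t cmd_size = round_up(sizeof(struct tmr_entry) + id_list_sz,
					   OP_ALIGN_SIZE);

		printf("%u ids -> ring entry of %zu bytes\n",
		       (unsigned)cnt, cmd_size);
	}
	return 0;
}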
@@ -1184,6 +1346,7 @@
 	}
 
 done:
+	se_cmd->priv = NULL;
 	if (read_len_valid) {
 		pr_debug("read_len = %d\n", read_len);
 		target_complete_cmd_with_length(cmd->se_cmd,
@@ -1192,35 +1355,47 @@
 		target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
 
 out:
-	cmd->se_cmd = NULL;
 	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
 	tcmu_free_cmd(cmd);
 }
 
-static void tcmu_set_next_deadline(struct list_head *queue,
-				   struct timer_list *timer)
+static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
 {
-	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
-	unsigned long deadline = 0;
+	struct tcmu_tmr *tmr, *tmp;
+	LIST_HEAD(tmrs);
 
-	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
-		if (!time_after(jiffies, tcmu_cmd->deadline)) {
-			deadline = tcmu_cmd->deadline;
-			break;
+	if (list_empty(&udev->tmr_queue))
+		return 1;
+
+	pr_debug("running %s's tmr queue\n", udev->name);
+
+	list_splice_init(&udev->tmr_queue, &tmrs);
+
+	list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) {
+		list_del_init(&tmr->queue_entry);
+
+		pr_debug("removing tmr %p on dev %s from queue\n",
+			 tmr, udev->name);
+
+		if (queue_tmr_ring(udev, tmr)) {
+			pr_debug("ran out of space during tmr queue run\n");
+			/*
+			 * tmr was requeued, so just put all tmrs back in
+			 * the queue
+			 */
+			list_splice_tail(&tmrs, &udev->tmr_queue);
+			return 0;
 		}
 	}
 
-	if (deadline)
-		mod_timer(timer, deadline);
-	else
-		del_timer(timer);
+	return 1;
 }
 
 static bool tcmu_handle_completions(struct tcmu_dev *udev)
 {
 	struct tcmu_mailbox *mb;
 	struct tcmu_cmd *cmd;
-	int handled = 0;
+	bool free_space = false;
 
 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
 		pr_err("ring broken, not handling completions\n");
@@ -1243,7 +1418,10 @@
 		tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
 					ring_left : sizeof(*entry));
 
-		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
+		free_space = true;
+
+		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD ||
+		    tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) {
 			UPDATE_HEAD(udev->cmdr_last_cleaned,
 				    tcmu_hdr_get_len(entry->hdr.len_op),
 				    udev->cmdr_size);
@@ -1264,40 +1442,35 @@
 		UPDATE_HEAD(udev->cmdr_last_cleaned,
 			    tcmu_hdr_get_len(entry->hdr.len_op),
 			    udev->cmdr_size);
-
-		handled++;
 	}
+	if (free_space)
+		free_space = tcmu_run_tmr_queue(udev);
 
-	if (mb->cmd_tail == mb->cmd_head) {
-		/* no more pending commands */
-		del_timer(&udev->cmd_timer);
-
-		if (list_empty(&udev->qfull_queue)) {
-			/*
-			 * no more pending or waiting commands so try to
-			 * reclaim blocks if needed.
-			 */
-			if (atomic_read(&global_db_count) >
-			    tcmu_global_max_blocks)
-				schedule_delayed_work(&tcmu_unmap_work, 0);
-		}
-	} else if (udev->cmd_time_out) {
+	if (atomic_read(&global_db_count) > tcmu_global_max_blocks &&
+	    idr_is_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
+		/*
+		 * Allocated blocks exceeded global block limit, currently no
+		 * more pending or waiting commands so try to reclaim blocks.
+		 */
+		schedule_delayed_work(&tcmu_unmap_work, 0);
+	}
+	if (udev->cmd_time_out)
 		tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
-	}
 
-	return handled;
+	return free_space;
 }
 
 static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
 {
 	struct se_cmd *se_cmd;
 
-	if (!time_after(jiffies, cmd->deadline))
+	if (!time_after_eq(jiffies, cmd->deadline))
 		return;
 
 	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
 	list_del_init(&cmd->queue_entry);
 	se_cmd = cmd->se_cmd;
+	se_cmd->priv = NULL;
 	cmd->se_cmd = NULL;
 
 	pr_debug("Timing out inflight cmd %u on dev %s.\n",
@@ -1310,7 +1483,7 @@
 {
 	struct se_cmd *se_cmd;
 
-	if (!time_after(jiffies, cmd->deadline))
+	if (!time_after_eq(jiffies, cmd->deadline))
 		return;
 
 	pr_debug("Timing out queued cmd %p on dev %s.\n",
@@ -1320,6 +1493,7 @@
 	se_cmd = cmd->se_cmd;
 	tcmu_free_cmd(cmd);
 
+	se_cmd->priv = NULL;
 	target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
 }
 
@@ -1394,6 +1568,7 @@
 	INIT_LIST_HEAD(&udev->node);
 	INIT_LIST_HEAD(&udev->timedout_entry);
 	INIT_LIST_HEAD(&udev->qfull_queue);
+	INIT_LIST_HEAD(&udev->tmr_queue);
 	INIT_LIST_HEAD(&udev->inflight_queue);
 	idr_init(&udev->commands);
 
@@ -1433,6 +1608,7 @@
 			 * removed then LIO core will do the right thing and
 			 * fail the retry.
 			 */
+			tcmu_cmd->se_cmd->priv = NULL;
 			target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
 			tcmu_free_cmd(tcmu_cmd);
 			continue;
@@ -1446,6 +1622,7 @@
 			 * Ignore scsi_ret for now. target_complete_cmd
 			 * drops it.
 			 */
+			tcmu_cmd->se_cmd->priv = NULL;
 			target_complete_cmd(tcmu_cmd->se_cmd,
 					    SAM_STAT_CHECK_CONDITION);
 			tcmu_free_cmd(tcmu_cmd);
@@ -1468,8 +1645,8 @@
 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
 
 	mutex_lock(&udev->cmdr_lock);
-	tcmu_handle_completions(udev);
-	run_qfull_queue(udev, false);
+	if (tcmu_handle_completions(udev))
+		run_qfull_queue(udev, false);
 	mutex_unlock(&udev->cmdr_lock);
 
 	return 0;
@@ -1499,6 +1676,7 @@
 	mutex_lock(&udev->cmdr_lock);
 	page = tcmu_get_block_page(udev, dbi);
 	if (likely(page)) {
+		get_page(page);
 		mutex_unlock(&udev->cmdr_lock);
 		return page;
 	}
@@ -1537,6 +1715,7 @@
 		/* For the vmalloc()ed cmd area pages */
 		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
 		page = vmalloc_to_page(addr);
+		get_page(page);
 	} else {
 		uint32_t dbi;
 
@@ -1547,7 +1726,6 @@
 			return VM_FAULT_SIGBUS;
 	}
 
-	get_page(page);
 	vmf->page = page;
 	return 0;
 }
@@ -1622,6 +1800,16 @@
 	}
 }
 
+static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
+{
+	struct tcmu_tmr *tmr, *tmp;
+
+	list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
+		list_del_init(&tmr->queue_entry);
+		kfree(tmr);
+	}
+}
+
 static void tcmu_dev_kref_release(struct kref *kref)
 {
 	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
@@ -1644,13 +1832,15 @@
 		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
 			all_expired = false;
 	}
+	/* There can be left over TMR cmds. Remove them. */
+	tcmu_remove_all_queued_tmr(udev);
 	if (!list_empty(&udev->qfull_queue))
 		all_expired = false;
 	idr_destroy(&udev->commands);
 	WARN_ON(!all_expired);
 
 	tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
-	kfree(udev->data_bitmap);
+	bitmap_free(udev->data_bitmap);
 	mutex_unlock(&udev->cmdr_lock);
 
 	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
@@ -1704,6 +1894,24 @@
 
 	mutex_unlock(&tcmu_nl_cmd_mutex);
 	return 0;
+}
+
+static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
+{
+	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
+
+	if (!tcmu_kern_cmd_reply_supported)
+		return;
+
+	if (udev->nl_reply_supported <= 0)
+		return;
+
+	mutex_lock(&tcmu_nl_cmd_mutex);
+
+	list_del(&nl_cmd->nl_list);
+	memset(nl_cmd, 0, sizeof(*nl_cmd));
+
+	mutex_unlock(&tcmu_nl_cmd_mutex);
 }
 
 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
@@ -1781,11 +1989,14 @@
 
 	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
 				      TCMU_MCGRP_CONFIG, GFP_KERNEL);
-	/* We don't care if no one is listening */
-	if (ret == -ESRCH)
-		ret = 0;
-	if (!ret)
-		ret = tcmu_wait_genl_cmd_reply(udev);
+
+	/* Wait during an add as the listener may not be up yet */
+	if (ret == 0 ||
+	   (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
+		return tcmu_wait_genl_cmd_reply(udev);
+	else
+		tcmu_destroy_genl_cmd_reply(udev);
+
 	return ret;
 }
 
@@ -1821,20 +2032,18 @@
 {
 	struct tcmu_hba *hba = udev->hba->hba_ptr;
 	struct uio_info *info;
-	size_t size, used;
 	char *str;
 
 	info = &udev->uio_info;
-	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
-			udev->dev_config);
-	size += 1; /* for \0 */
-	str = kmalloc(size, GFP_KERNEL);
+
+	if (udev->dev_config[0])
+		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
+				udev->name, udev->dev_config);
+	else
+		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
+				udev->name);
 	if (!str)
 		return -ENOMEM;
-
-	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
-	if (udev->dev_config[0])
-		snprintf(str + used, size - used, "/%s", udev->dev_config);
 
 	/* If the old string exists, free it */
 	kfree(info->name);
@@ -1857,9 +2066,7 @@
 	info = &udev->uio_info;
 
 	mutex_lock(&udev->cmdr_lock);
-	udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks),
-				    sizeof(unsigned long),
-				    GFP_KERNEL);
+	udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
 	mutex_unlock(&udev->cmdr_lock);
 	if (!udev->data_bitmap) {
 		ret = -ENOMEM;
@@ -1881,7 +2088,9 @@
 	/* Initialise the mailbox of the ring buffer */
 	mb = udev->mb_addr;
 	mb->version = TCMU_MAILBOX_VERSION;
-	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
+	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
+		    TCMU_MAILBOX_FLAG_CAP_READ_LEN |
+		    TCMU_MAILBOX_FLAG_CAP_TMR;
 	mb->cmdr_off = CMDR_OFF;
 	mb->cmdr_size = udev->cmdr_size;
 
@@ -1946,7 +2155,7 @@
 	vfree(udev->mb_addr);
 	udev->mb_addr = NULL;
 err_vzalloc:
-	kfree(udev->data_bitmap);
+	bitmap_free(udev->data_bitmap);
 	udev->data_bitmap = NULL;
 err_bitmap_alloc:
 	kfree(info->name);
@@ -2022,6 +2231,7 @@
 		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
 			WARN_ON(!cmd->se_cmd);
 			list_del_init(&cmd->queue_entry);
+			cmd->se_cmd->priv = NULL;
 			if (err_level == 1) {
 				/*
 				 * Userspace was not able to start the
@@ -2050,6 +2260,15 @@
 	clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
 
 	del_timer(&udev->cmd_timer);
+
+	/*
+	 * ring is empty and qfull queue never contains aborted commands.
+	 * So TMRs in tmr queue do not contain relevant cmd_ids.
+	 * After a ring reset userspace should do a fresh start, so
+	 * even LUN RESET message is no longer relevant.
+	 * Therefore remove all TMRs from qfull queue
+	 */
+	tcmu_remove_all_queued_tmr(udev);
 
 	run_qfull_queue(udev, false);
 
@@ -2497,6 +2716,39 @@
 }
 CONFIGFS_ATTR(tcmu_, emulate_write_cache);
 
+static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page)
+{
+	struct se_dev_attrib *da = container_of(to_config_group(item),
+					struct se_dev_attrib, da_group);
+	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+	return snprintf(page, PAGE_SIZE, "%i\n",
+			test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags));
+}
+
+static ssize_t tcmu_tmr_notification_store(struct config_item *item,
+					   const char *page, size_t count)
+{
+	struct se_dev_attrib *da = container_of(to_config_group(item),
+					struct se_dev_attrib, da_group);
+	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+	u8 val;
+	int ret;
+
+	ret = kstrtou8(page, 0, &val);
+	if (ret < 0)
+		return ret;
+	if (val > 1)
+		return -EINVAL;
+
+	if (val)
+		set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
+	else
+		clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
+	return count;
+}
+CONFIGFS_ATTR(tcmu_, tmr_notification);
+
 static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
 {
 	struct se_device *se_dev = container_of(to_config_group(item),
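
tcmu_tmr_notification_store() above accepts only 0 or 1, via kstrtou8() plus an explicit range check. A userspace analogue of that parse-and-validate step (parse_flag is a hypothetical helper, shown only to illustrate which inputs the attribute accepts):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* accept exactly "0" or "1" (with an optional trailing newline) */
static int parse_flag(const char *page, unsigned char *val)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(page, &end, 0);
	if (errno || end == page || (*end && *end != '\n'))
		return -EINVAL;
	if (v > 1)
		return -EINVAL;
	*val = (unsigned char)v;
	return 0;
}

int main(void)
{
	unsigned char flag;
	const char *inputs[] = { "1", "0", "2", "on" };

	for (unsigned i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++)
		printf("\"%s\" -> %s\n", inputs[i],
		       parse_flag(inputs[i], &flag) ? "rejected" : "accepted");
	return 0;
}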
@@ -2578,6 +2830,7 @@
 	&tcmu_attr_dev_config,
 	&tcmu_attr_dev_size,
 	&tcmu_attr_emulate_write_cache,
+	&tcmu_attr_tmr_notification,
 	&tcmu_attr_nl_reply_supported,
 	NULL,
 };
@@ -2593,7 +2846,9 @@
 static struct target_backend_ops tcmu_ops = {
 	.name = "user",
 	.owner = THIS_MODULE,
-	.transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
+	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
+	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
+				      TRANSPORT_FLAG_PASSTHROUGH_ALUA,
 	.attach_hba = tcmu_attach_hba,
 	.detach_hba = tcmu_detach_hba,
 	.alloc_device = tcmu_alloc_device,
@@ -2601,6 +2856,7 @@
 	.destroy_device = tcmu_destroy_device,
 	.free_device = tcmu_free_device,
 	.parse_cdb = tcmu_parse_cdb,
+	.tmr_notify = tcmu_tmr_notify,
 	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
 	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
 	.get_device_type = sbc_get_device_type,
@@ -2627,7 +2883,8 @@
 	}
 
 	/* Try to complete the finished commands first */
-	tcmu_handle_completions(udev);
+	if (tcmu_handle_completions(udev))
+		run_qfull_queue(udev, false);
 
 	/* Skip the udevs in idle */
 	if (!udev->dbi_thresh) {
@@ -2745,12 +3002,12 @@
 		goto out_unreg_device;
 	}
 
-	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
 		len += sizeof(struct configfs_attribute *);
-	}
-	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
+	for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
 		len += sizeof(struct configfs_attribute *);
-	}
+	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
+		len += sizeof(struct configfs_attribute *);
 	len += sizeof(struct configfs_attribute *);
 
 	tcmu_attrs = kzalloc(len, GFP_KERNEL);
@@ -2759,13 +3016,12 @@
 		goto out_unreg_genl;
 	}
 
-	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
 		tcmu_attrs[i] = passthrough_attrib_attrs[i];
-	}
-	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
-		tcmu_attrs[i] = tcmu_attrib_attrs[k];
-		i++;
-	}
+	for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
+		tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
+	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
+		tcmu_attrs[i++] = tcmu_attrib_attrs[k];
 	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
 
 	ret = transport_backend_register(&tcmu_ops);