2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -485,7 +485,7 @@
 /**
  * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
  *
- * @port: port structure pointer
+ * @hba: adapter structure pointer
  * @tgt: bnx2fc_rport structure pointer
  */
 int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
@@ -633,10 +633,8 @@
 	u16 xid;
 	u32 frame_len, len;
 	struct bnx2fc_cmd *io_req = NULL;
-	struct fcoe_task_ctx_entry *task, *task_page;
 	struct bnx2fc_interface *interface = tgt->port->priv;
 	struct bnx2fc_hba *hba = interface->hba;
-	int task_idx, index;
 	int rc = 0;
 	u64 err_warn_bit_map;
 	u8 err_warn = 0xff;
@@ -702,18 +700,12 @@
 	BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
 		       err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
 
-
 	if (xid > hba->max_xid) {
 		BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
 			       xid);
 		goto ret_err_rqe;
 	}
 
-	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
-	index = xid % BNX2FC_TASKS_PER_PAGE;
-	task_page = (struct fcoe_task_ctx_entry *)
-		     hba->task_ctx[task_idx];
-	task = &(task_page[index]);
 
 	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 	if (!io_req)
@@ -837,11 +829,6 @@
 	}
 	BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
 
-	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
-	index = xid % BNX2FC_TASKS_PER_PAGE;
-	task_page = (struct fcoe_task_ctx_entry *)
-		     interface->hba->task_ctx[task_idx];
-	task = &(task_page[index]);
 	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 	if (!io_req)
 		goto ret_warn_rqe;
@@ -870,36 +857,22 @@
 	}
 }
 
-void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
+			     unsigned char *rq_data, u8 num_rq,
+			     struct fcoe_task_ctx_entry *task)
 {
-	struct fcoe_task_ctx_entry *task;
-	struct fcoe_task_ctx_entry *task_page;
 	struct fcoe_port *port = tgt->port;
 	struct bnx2fc_interface *interface = port->priv;
 	struct bnx2fc_hba *hba = interface->hba;
 	struct bnx2fc_cmd *io_req;
-	int task_idx, index;
+
 	u16 xid;
 	u8 cmd_type;
 	u8 rx_state = 0;
-	u8 num_rq;
 
 	spin_lock_bh(&tgt->tgt_lock);
+
 	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
-	if (xid >= hba->max_tasks) {
-		printk(KERN_ERR PFX "ERROR:xid out of range\n");
-		spin_unlock_bh(&tgt->tgt_lock);
-		return;
-	}
-	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
-	index = xid % BNX2FC_TASKS_PER_PAGE;
-	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
-	task = &(task_page[index]);
-
-	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
-		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
-		  FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
-
 	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 
 	if (io_req == NULL) {
@@ -919,7 +892,8 @@
 	switch (cmd_type) {
 	case BNX2FC_SCSI_CMD:
 		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
-			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
+						      rq_data);
 			spin_unlock_bh(&tgt->tgt_lock);
 			return;
 		}
@@ -936,7 +910,7 @@
 
 	case BNX2FC_TASK_MGMT_CMD:
 		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
-		bnx2fc_process_tm_compl(io_req, task, num_rq);
+		bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
 		break;
 
 	case BNX2FC_ABTS:
@@ -991,11 +965,12 @@
 			FCOE_CQE_TOGGLE_BIT_SHIFT);
 	msg = *((u32 *)rx_db);
 	writel(cpu_to_le32(msg), tgt->ctx_base);
-	mmiowb();
 
 }
 
-static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
+					     unsigned char *rq_data, u8 num_rq,
+					     struct fcoe_task_ctx_entry *task)
 {
 	struct bnx2fc_work *work;
 	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
@@ -1005,29 +980,86 @@
 	INIT_LIST_HEAD(&work->list);
 	work->tgt = tgt;
 	work->wqe = wqe;
+	work->num_rq = num_rq;
+	work->task = task;
+	if (rq_data)
+		memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);
+
 	return work;
 }
 
 /* Pending work request completion */
-static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
 {
 	unsigned int cpu = wqe % num_possible_cpus();
 	struct bnx2fc_percpu_s *fps;
 	struct bnx2fc_work *work;
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	struct fcoe_port *port = tgt->port;
+	struct bnx2fc_interface *interface = port->priv;
+	struct bnx2fc_hba *hba = interface->hba;
+	unsigned char *rq_data = NULL;
+	unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
+	int task_idx, index;
+	u16 xid;
+	u8 num_rq;
+	int i;
+
+	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+	if (xid >= hba->max_tasks) {
+		pr_err(PFX "ERROR:xid out of range\n");
+		return false;
+	}
+
+	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+	task = &task_page[index];
+
+	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
+		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
+		  FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+
+	memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);
+
+	if (!num_rq)
+		goto num_rq_zero;
+
+	rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+	if (num_rq > 1) {
+		/* We do not need extra sense data */
+		for (i = 1; i < num_rq; i++)
+			bnx2fc_get_next_rqe(tgt, 1);
+	}
+
+	if (rq_data)
+		memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);
+
+	/* return RQ entries */
+	for (i = 0; i < num_rq; i++)
+		bnx2fc_return_rqe(tgt, 1);
+
+num_rq_zero:
 
 	fps = &per_cpu(bnx2fc_percpu, cpu);
 	spin_lock_bh(&fps->fp_work_lock);
 	if (fps->iothread) {
-		work = bnx2fc_alloc_work(tgt, wqe);
+		work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
+					 num_rq, task);
 		if (work) {
 			list_add_tail(&work->list, &fps->work_list);
 			wake_up_process(fps->iothread);
			spin_unlock_bh(&fps->fp_work_lock);
-			return;
+			return true;
 		}
 	}
 	spin_unlock_bh(&fps->fp_work_lock);
-	bnx2fc_process_cq_compl(tgt, wqe);
+	bnx2fc_process_cq_compl(tgt, wqe,
+				rq_data_buff, num_rq, task);
+
+	return true;
 }
 
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
@@ -1064,8 +1096,8 @@
 			/* Unsolicited event notification */
 			bnx2fc_process_unsol_compl(tgt, wqe);
 		} else {
-			bnx2fc_pending_work(tgt, wqe);
-			num_free_sqes++;
+			if (bnx2fc_pending_work(tgt, wqe))
+				num_free_sqes++;
 		}
 		cqe++;
 		tgt->cq_cons_idx++;
@@ -1123,7 +1155,6 @@
 					struct fcoe_kcqe *ofld_kcqe)
 {
 	struct bnx2fc_rport *tgt;
-	struct fcoe_port *port;
 	struct bnx2fc_interface *interface;
 	u32 conn_id;
 	u32 context_id;
@@ -1137,7 +1168,6 @@
 	}
 	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
 		       ofld_kcqe->fcoe_conn_context_id);
-	port = tgt->port;
 	interface = tgt->port->priv;
 	if (hba != interface->hba) {
 		printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
@@ -1304,8 +1334,8 @@
 /**
  * bnx2fc_indicae_kcqe - process KCQE
  *
- * @hba: adapter structure pointer
- * @kcqe: kcqe pointer
+ * @context: adapter structure pointer
+ * @kcq: kcqe pointer
  * @num_cqe: Number of completion queue elements
 *
 * Generic KCQ event handler
@@ -1374,7 +1404,6 @@
 		break;
 
 	case FCOE_KCQE_OPCODE_FCOE_ERROR:
-		/* fall thru */
 	default:
 		printk(KERN_ERR PFX "unknown opcode 0x%x\n",
 		       kcqe->op_code);
@@ -1409,7 +1438,6 @@
 		(tgt->sq_curr_toggle_bit << 15);
 	msg = *((u32 *)sq_db);
 	writel(cpu_to_le32(msg), tgt->ctx_base);
-	mmiowb();
 
 }
 
@@ -1425,7 +1453,7 @@
 	reg_base = pci_resource_start(hba->pcidev,
 				      BNX2X_DOORBELL_PCI_BAR);
 	reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
-	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+	tgt->ctx_base = ioremap(reg_base + reg_off, 4);
 	if (!tgt->ctx_base)
 		return -ENOMEM;
 	return 0;
@@ -1465,10 +1493,7 @@
 {
 	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
 	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
-	struct bnx2fc_interface *interface = tgt->port->priv;
 	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
-	struct fcoe_task_ctx_entry *orig_task;
-	struct fcoe_task_ctx_entry *task_page;
 	struct fcoe_ext_mul_sges_ctx *sgl;
 	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
 	u8 orig_task_type;
@@ -1477,7 +1502,6 @@
 	u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
 	u32 orig_offset = offset;
 	int bd_count;
-	int orig_task_idx, index;
 	int i;
 
 	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
@@ -1527,12 +1551,6 @@
 				offset; /* adjusted offset */
 		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
 	} else {
-		orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
-		index = orig_xid % BNX2FC_TASKS_PER_PAGE;
-
-		task_page = (struct fcoe_task_ctx_entry *)
-			     interface->hba->task_ctx[orig_task_idx];
-		orig_task = &(task_page[index]);
 
 		/* Multiple SGEs were used for this IO */
 		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
@@ -1857,10 +1875,10 @@
 	 * entries. Hence the limit with one page is 8192 task context
 	 * entries.
 	 */
-	hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
-						   PAGE_SIZE,
-						   &hba->task_ctx_bd_dma,
-						   GFP_KERNEL);
+	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+						  PAGE_SIZE,
+						  &hba->task_ctx_bd_dma,
+						  GFP_KERNEL);
 	if (!hba->task_ctx_bd_tbl) {
 		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
 		rc = -1;
@@ -1894,10 +1912,10 @@
 	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
 	for (i = 0; i < task_ctx_arr_sz; i++) {
 
-		hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
-						       PAGE_SIZE,
-						       &hba->task_ctx_dma[i],
-						       GFP_KERNEL);
+		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+						      PAGE_SIZE,
+						      &hba->task_ctx_dma[i],
+						      GFP_KERNEL);
 		if (!hba->task_ctx[i]) {
 			printk(KERN_ERR PFX "unable to alloc task context\n");
 			rc = -1;
@@ -2031,19 +2049,19 @@
 	}
 
 	for (i = 0; i < segment_count; ++i) {
-		hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
-								BNX2FC_HASH_TBL_CHUNK_SIZE,
-								&dma_segment_array[i],
-								GFP_KERNEL);
+		hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
+							       BNX2FC_HASH_TBL_CHUNK_SIZE,
+							       &dma_segment_array[i],
+							       GFP_KERNEL);
 		if (!hba->hash_tbl_segments[i]) {
 			printk(KERN_ERR PFX "hash segment alloc failed\n");
 			goto cleanup_dma;
 		}
 	}
 
-	hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
-						&hba->hash_tbl_pbl_dma,
-						GFP_KERNEL);
+	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					       &hba->hash_tbl_pbl_dma,
+					       GFP_KERNEL);
 	if (!hba->hash_tbl_pbl) {
 		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
 		goto cleanup_dma;
@@ -2060,11 +2078,7 @@
 	pbl = hba->hash_tbl_pbl;
 	i = 0;
 	while (*pbl && *(pbl + 1)) {
-		u32 lo;
-		u32 hi;
-		lo = *pbl;
 		++pbl;
-		hi = *pbl;
 		++pbl;
 		++i;
 	}
@@ -2104,10 +2118,9 @@
 		return -ENOMEM;
 
 	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
-	hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
-						   mem_size,
-						   &hba->t2_hash_tbl_ptr_dma,
-						   GFP_KERNEL);
+	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+						  &hba->t2_hash_tbl_ptr_dma,
+						  GFP_KERNEL);
 	if (!hba->t2_hash_tbl_ptr) {
 		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
 		bnx2fc_free_fw_resc(hba);
@@ -2116,9 +2129,9 @@
 
 	mem_size = BNX2FC_NUM_MAX_SESS *
 		   sizeof(struct fcoe_t2_hash_table_entry);
-	hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
-					       &hba->t2_hash_tbl_dma,
-					       GFP_KERNEL);
+	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+					      &hba->t2_hash_tbl_dma,
+					      GFP_KERNEL);
 	if (!hba->t2_hash_tbl) {
 		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
 		bnx2fc_free_fw_resc(hba);
@@ -2140,9 +2153,9 @@
 		return -ENOMEM;
 	}
 
-	hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
-						&hba->stats_buf_dma,
-						GFP_KERNEL);
+	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					       &hba->stats_buf_dma,
+					       GFP_KERNEL);
 	if (!hba->stats_buffer) {
 		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
 		bnx2fc_free_fw_resc(hba);