2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/scsi/qla2xxx/qla_iocb.c
....@@ -1,8 +1,7 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * QLogic Fibre Channel HBA Driver
34 * Copyright (c) 2003-2014 QLogic Corporation
4
- *
5
- * See LICENSE.qla2xxx for copyright and licensing details.
65 */
76 #include "qla_def.h"
87 #include "qla_target.h"
....@@ -44,7 +43,7 @@
4443 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
4544 * Continuation Type 0 IOCBs to allocate.
4645 *
47
- * @dsds: number of data segment decriptors needed
46
+ * @dsds: number of data segment descriptors needed
4847 *
4948 * Returns the number of IOCB entries needed to store @dsds.
5049 */
....@@ -66,7 +65,7 @@
6665 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
6766 * Continuation Type 1 IOCBs to allocate.
6867 *
69
- * @dsds: number of data segment decriptors needed
68
+ * @dsds: number of data segment descriptors needed
7069 *
7170 * Returns the number of IOCB entries needed to store @dsds.
7271 */
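Note: the arithmetic behind these two helpers follows from the capacities visible in the hunks below: a Command Type 2 IOCB carries 3 DSDs with room for 7 more in each Continuation Type 0 IOCB, and a Command Type 3 IOCB carries 2 DSDs with 5 more per Continuation Type 1 IOCB. A minimal sketch of the 32-bit variant (a reconstruction, not part of this diff; DIV_ROUND_UP is the standard kernel helper):

    static uint16_t calc_iocbs_32_sketch(uint16_t dsds)
    {
            uint16_t iocbs = 1;                     /* the command IOCB itself */

            if (dsds > 3)                           /* 3 DSDs fit in the command IOCB */
                    iocbs += DIV_ROUND_UP(dsds - 3, 7); /* 7 per Continuation Type 0 */
            return iocbs;                           /* e.g. dsds = 15 -> 1 + 2 = 3 */
    }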
....@@ -107,7 +106,7 @@
107106 cont_pkt = (cont_entry_t *)req->ring_ptr;
108107
109108 /* Load packet defaults. */
110
- *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
109
+ put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
111110
112111 return (cont_pkt);
113112 }
....@@ -136,9 +135,8 @@
136135 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
137136
138137 /* Load packet defaults. */
139
- *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
140
- cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
141
- cpu_to_le32(CONTINUE_A64_TYPE);
138
+ put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
139
+ CONTINUE_A64_TYPE, &cont_pkt->entry_type);
142140
143141 return (cont_pkt);
144142 }
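Note: entry_type is the first of four consecutive byte-wide header fields (entry_type, entry_count, sys_define, entry_status), so the old code initialized the whole header with a single 32-bit store through a cast. put_unaligned_le32() keeps the single store but makes the potentially unaligned, fixed-endian access explicit and sparse-clean; since the type constants fit in one byte, the upper bytes also zero the remaining header fields. Illustration, assuming that field layout:

    put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
    /* byte 0:    entry_type = CONTINUE_TYPE
     * bytes 1-3: entry_count = sys_define = entry_status = 0 */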
....@@ -193,7 +191,7 @@
193191 uint16_t tot_dsds)
194192 {
195193 uint16_t avail_dsds;
196
- uint32_t *cur_dsd;
194
+ struct dsd32 *cur_dsd;
197195 scsi_qla_host_t *vha;
198196 struct scsi_cmnd *cmd;
199197 struct scatterlist *sg;
....@@ -202,8 +200,7 @@
202200 cmd = GET_CMD_SP(sp);
203201
204202 /* Update entry type to indicate Command Type 2 IOCB */
205
- *((uint32_t *)(&cmd_pkt->entry_type)) =
206
- cpu_to_le32(COMMAND_TYPE);
203
+ put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
207204
208205 /* No data transfer */
209206 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
....@@ -215,8 +212,8 @@
215212 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
216213
217214 /* Three DSDs are available in the Command Type 2 IOCB */
218
- avail_dsds = 3;
219
- cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
215
+ avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
216
+ cur_dsd = cmd_pkt->dsd32;
220217
221218 /* Load data segments */
222219 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
....@@ -229,12 +226,11 @@
229226 * Type 0 IOCB.
230227 */
231228 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
232
- cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
233
- avail_dsds = 7;
229
+ cur_dsd = cont_pkt->dsd;
230
+ avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
234231 }
235232
236
- *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
237
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
233
+ append_dsd32(&cur_dsd, sg);
238234 avail_dsds--;
239235 }
240236 }
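Note: struct dsd32 and append_dsd32() come from elsewhere in this series (qla_def.h); presumably they look roughly like the sketch below, matching the pair of cpu_to_le32() stores they replace:

    struct dsd32 {
            __le32 address;
            __le32 length;
    };

    static inline void append_dsd32(struct dsd32 **dsd, struct scatterlist *sg)
    {
            put_unaligned_le32(sg_dma_address(sg), &(*dsd)->address);
            put_unaligned_le32(sg_dma_len(sg), &(*dsd)->length);
            (*dsd)++;
    }

The 64-bit counterpart used in the following hunks (struct dsd64 with append_dsd64()) is analogous: a 64-bit address written with put_unaligned_le64(), plus a __le32 length.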
....@@ -251,7 +247,7 @@
251247 uint16_t tot_dsds)
252248 {
253249 uint16_t avail_dsds;
254
- uint32_t *cur_dsd;
250
+ struct dsd64 *cur_dsd;
255251 scsi_qla_host_t *vha;
256252 struct scsi_cmnd *cmd;
257253 struct scatterlist *sg;
....@@ -260,7 +256,7 @@
260256 cmd = GET_CMD_SP(sp);
261257
262258 /* Update entry type to indicate Command Type 3 IOCB */
263
- *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
259
+ put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
264260
265261 /* No data transfer */
266262 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
....@@ -272,12 +268,11 @@
272268 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
273269
274270 /* Two DSDs are available in the Command Type 3 IOCB */
275
- avail_dsds = 2;
276
- cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
271
+ avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
272
+ cur_dsd = cmd_pkt->dsd64;
277273
278274 /* Load data segments */
279275 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
280
- dma_addr_t sle_dma;
281276 cont_a64_entry_t *cont_pkt;
282277
283278 /* Allocate additional continuation packets? */
....@@ -287,16 +282,33 @@
287282 * Type 1 IOCB.
288283 */
289284 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
290
- cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
291
- avail_dsds = 5;
285
+ cur_dsd = cont_pkt->dsd;
286
+ avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
292287 }
293288
294
- sle_dma = sg_dma_address(sg);
295
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
296
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
297
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
289
+ append_dsd64(&cur_dsd, sg);
298290 avail_dsds--;
299291 }
292
+}
293
+
294
+/*
295
+ * Find the first handle that is not in use, starting from
296
+ * req->current_outstanding_cmd + 1. The caller must hold the lock that is
297
+ * associated with @req.
298
+ */
299
+uint32_t qla2xxx_get_next_handle(struct req_que *req)
300
+{
301
+ uint32_t index, handle = req->current_outstanding_cmd;
302
+
303
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
304
+ handle++;
305
+ if (handle == req->num_outstanding_cmds)
306
+ handle = 1;
307
+ if (!req->outstanding_cmds[handle])
308
+ return handle;
309
+ }
310
+
311
+ return 0;
300312 }
301313
302314 /**
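Note: handle 0 is reserved as "no handle", which is why the scan in qla2xxx_get_next_handle() wraps from num_outstanding_cmds back to 1 and why a return value of 0 can signal a full table. Every call site converted below follows the same pattern (condensed; the two bookkeeping lines are a hedged reconstruction of the existing submit paths):

    handle = qla2xxx_get_next_handle(req);
    if (handle == 0)
            goto queuing_error;             /* no free slot */
    ...
    req->current_outstanding_cmd = handle;  /* next search starts after this */
    req->outstanding_cmds[handle] = sp;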
....@@ -313,7 +325,6 @@
313325 scsi_qla_host_t *vha;
314326 struct scsi_cmnd *cmd;
315327 uint32_t *clr_ptr;
316
- uint32_t index;
317328 uint32_t handle;
318329 cmd_entry_t *cmd_pkt;
319330 uint16_t cnt;
....@@ -336,7 +347,7 @@
336347
337348 /* Send marker if required */
338349 if (vha->marker_needed != 0) {
339
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
350
+ if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
340351 QLA_SUCCESS) {
341352 return (QLA_FUNCTION_FAILED);
342353 }
....@@ -346,16 +357,8 @@
346357 /* Acquire ring specific lock */
347358 spin_lock_irqsave(&ha->hardware_lock, flags);
348359
349
- /* Check for room in outstanding command list. */
350
- handle = req->current_outstanding_cmd;
351
- for (index = 1; index < req->num_outstanding_cmds; index++) {
352
- handle++;
353
- if (handle == req->num_outstanding_cmds)
354
- handle = 1;
355
- if (!req->outstanding_cmds[handle])
356
- break;
357
- }
358
- if (index == req->num_outstanding_cmds)
360
+ handle = qla2xxx_get_next_handle(req);
361
+ if (handle == 0)
359362 goto queuing_error;
360363
361364 /* Map the sg table so we have an accurate count of sg entries needed */
....@@ -372,7 +375,7 @@
372375 /* Calculate the number of request entries needed. */
373376 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
374377 if (req->cnt < (req_cnt + 2)) {
375
- cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
378
+ cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
376379 if (req->ring_index < cnt)
377380 req->cnt = cnt - req->ring_index;
378381 else
....@@ -424,8 +427,8 @@
424427 sp->flags |= SRB_DMA_VALID;
425428
426429 /* Set chip new ring index. */
427
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
428
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
430
+ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
431
+ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
429432
430433 /* Manage unprocessed RIO/ZIO commands in response queue. */
431434 if (vha->flags.process_response_queue &&
....@@ -467,22 +470,22 @@
467470 req->ring_ptr++;
468471
469472 /* Set chip new ring index. */
470
- if (ha->mqenable || IS_QLA27XX(ha)) {
471
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
473
+ if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
474
+ wrt_reg_dword(req->req_q_in, req->ring_index);
472475 } else if (IS_QLA83XX(ha)) {
473
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
474
- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
476
+ wrt_reg_dword(req->req_q_in, req->ring_index);
477
+ rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
475478 } else if (IS_QLAFX00(ha)) {
476
- WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
477
- RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
479
+ wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
480
+ rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
478481 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
479482 } else if (IS_FWI2_CAPABLE(ha)) {
480
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
481
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
483
+ wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
484
+ rd_reg_dword_relaxed(&reg->isp24.req_q_in);
482485 } else {
483
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
486
+ wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
484487 req->ring_index);
485
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
488
+ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
486489 }
487490 }
488491 }
....@@ -490,8 +493,7 @@
490493 /**
491494 * qla2x00_marker() - Send a marker IOCB to the firmware.
492495 * @vha: HA context
493
- * @req: request queue
494
- * @rsp: response queue
496
+ * @qpair: queue pair pointer
495497 * @loop_id: loop ID
496498 * @lun: LUN
497499 * @type: marker modifier
....@@ -501,18 +503,16 @@
501503 * Returns non-zero if a failure occurred, else zero.
502504 */
503505 static int
504
-__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
505
- struct rsp_que *rsp, uint16_t loop_id,
506
- uint64_t lun, uint8_t type)
506
+__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
507
+ uint16_t loop_id, uint64_t lun, uint8_t type)
507508 {
508509 mrk_entry_t *mrk;
509510 struct mrk_entry_24xx *mrk24 = NULL;
510
-
511
+ struct req_que *req = qpair->req;
511512 struct qla_hw_data *ha = vha->hw;
512513 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
513514
514
- req = ha->req_q_map[0];
515
- mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
515
+ mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
516516 if (mrk == NULL) {
517517 ql_log(ql_log_warn, base_vha, 0x3026,
518518 "Failed to allocate Marker IOCB.\n");
....@@ -529,7 +529,7 @@
529529 int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
530530 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
531531 mrk24->vp_index = vha->vp_idx;
532
- mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
532
+ mrk24->handle = make_handle(req->id, mrk24->handle);
533533 } else {
534534 SET_TARGET_ID(ha, mrk->target, loop_id);
535535 mrk->lun = cpu_to_le16((uint16_t)lun);
....@@ -543,16 +543,15 @@
543543 }
544544
545545 int
546
-qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
547
- struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
548
- uint8_t type)
546
+qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
547
+ uint16_t loop_id, uint64_t lun, uint8_t type)
549548 {
550549 int ret;
551550 unsigned long flags = 0;
552551
553
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
554
- ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
555
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
552
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
553
+ ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
554
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
556555
557556 return (ret);
558557 }
....@@ -567,11 +566,11 @@
567566 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
568567 {
569568 if (ha_locked) {
570
- if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
569
+ if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
571570 MK_SYNC_ALL) != QLA_SUCCESS)
572571 return QLA_FUNCTION_FAILED;
573572 } else {
574
- if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
573
+ if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
575574 MK_SYNC_ALL) != QLA_SUCCESS)
576575 return QLA_FUNCTION_FAILED;
577576 }
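Note: with the queue-pair conversion, __qla2x00_marker() is the lock-free worker for callers that already hold the queue-pair lock (the ha_locked path above), while qla2x00_marker() wraps it in qpair->qp_lock_ptr. The replaced code always took the global hardware_lock, regardless of which queue the marker was destined for.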
....@@ -584,23 +583,22 @@
584583 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
585584 uint16_t tot_dsds)
586585 {
587
- uint32_t *cur_dsd = NULL;
586
+ struct dsd64 *cur_dsd = NULL, *next_dsd;
588587 scsi_qla_host_t *vha;
589588 struct qla_hw_data *ha;
590589 struct scsi_cmnd *cmd;
591590 struct scatterlist *cur_seg;
592
- uint32_t *dsd_seg;
593
- void *next_dsd;
594591 uint8_t avail_dsds;
595592 uint8_t first_iocb = 1;
596593 uint32_t dsd_list_len;
597594 struct dsd_dma *dsd_ptr;
598595 struct ct6_dsd *ctx;
596
+ struct qla_qpair *qpair = sp->qpair;
599597
600598 cmd = GET_CMD_SP(sp);
601599
602600 /* Update entry type to indicate Command Type 3 IOCB */
603
- *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
601
+ put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
604602
605603 /* No data transfer */
606604 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
....@@ -614,16 +612,16 @@
614612 /* Set transfer direction */
615613 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
616614 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
617
- vha->qla_stats.output_bytes += scsi_bufflen(cmd);
618
- vha->qla_stats.output_requests++;
615
+ qpair->counters.output_bytes += scsi_bufflen(cmd);
616
+ qpair->counters.output_requests++;
619617 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
620618 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
621
- vha->qla_stats.input_bytes += scsi_bufflen(cmd);
622
- vha->qla_stats.input_requests++;
619
+ qpair->counters.input_bytes += scsi_bufflen(cmd);
620
+ qpair->counters.input_requests++;
623621 }
624622
625623 cur_seg = scsi_sglist(cmd);
626
- ctx = GET_CMD_CTX_SP(sp);
624
+ ctx = sp->u.scmd.ct6_ctx;
627625
628626 while (tot_dsds) {
629627 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
....@@ -642,33 +640,28 @@
642640
643641 if (first_iocb) {
644642 first_iocb = 0;
645
- dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
646
- *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
647
- *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
648
- cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
643
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
644
+ &cmd_pkt->fcp_dsd.address);
645
+ cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
649646 } else {
650
- *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
651
- *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
652
- *cur_dsd++ = cpu_to_le32(dsd_list_len);
647
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
648
+ &cur_dsd->address);
649
+ cur_dsd->length = cpu_to_le32(dsd_list_len);
650
+ cur_dsd++;
653651 }
654
- cur_dsd = (uint32_t *)next_dsd;
652
+ cur_dsd = next_dsd;
655653 while (avail_dsds) {
656
- dma_addr_t sle_dma;
657
-
658
- sle_dma = sg_dma_address(cur_seg);
659
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
660
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
661
- *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
654
+ append_dsd64(&cur_dsd, cur_seg);
662655 cur_seg = sg_next(cur_seg);
663656 avail_dsds--;
664657 }
665658 }
666659
667660 /* Null termination */
668
- *cur_dsd++ = 0;
669
- *cur_dsd++ = 0;
670
- *cur_dsd++ = 0;
671
- cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
661
+ cur_dsd->address = 0;
662
+ cur_dsd->length = 0;
663
+ cur_dsd++;
664
+ cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
672665 return 0;
673666 }
674667
....@@ -676,7 +669,7 @@
676669 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
677670 * for Command Type 6.
678671 *
679
- * @dsds: number of data segment decriptors needed
672
+ * @dsds: number of data segment descriptors needed
680673 *
681674 * Returns the number of dsd list needed to store @dsds.
682675 */
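Note: one DSD list holds QLA_DSDS_PER_IOCB descriptors, and each 64-bit descriptor occupies 12 bytes on the wire (8-byte address plus 4-byte length); that is where the recurring dsd_list_len = (avail_dsds + 1) * 12 in the builders comes from, the extra entry presumably covering the chain/terminator slot. The list count itself is a plain ceiling division; a sketch consistent with this doc comment:

    static uint16_t calc_dsd_lists_sketch(uint16_t dsds)
    {
            return DIV_ROUND_UP(dsds, QLA_DSDS_PER_IOCB);
    }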
....@@ -706,16 +699,17 @@
706699 uint16_t tot_dsds, struct req_que *req)
707700 {
708701 uint16_t avail_dsds;
709
- uint32_t *cur_dsd;
702
+ struct dsd64 *cur_dsd;
710703 scsi_qla_host_t *vha;
711704 struct scsi_cmnd *cmd;
712705 struct scatterlist *sg;
713706 int i;
707
+ struct qla_qpair *qpair = sp->qpair;
714708
715709 cmd = GET_CMD_SP(sp);
716710
717711 /* Update entry type to indicate Command Type 3 IOCB */
718
- *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
712
+ put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
719713
720714 /* No data transfer */
721715 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
....@@ -728,22 +722,21 @@
728722 /* Set transfer direction */
729723 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
730724 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
731
- vha->qla_stats.output_bytes += scsi_bufflen(cmd);
732
- vha->qla_stats.output_requests++;
725
+ qpair->counters.output_bytes += scsi_bufflen(cmd);
726
+ qpair->counters.output_requests++;
733727 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
734728 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
735
- vha->qla_stats.input_bytes += scsi_bufflen(cmd);
736
- vha->qla_stats.input_requests++;
729
+ qpair->counters.input_bytes += scsi_bufflen(cmd);
730
+ qpair->counters.input_requests++;
737731 }
738732
739733 /* One DSD is available in the Command Type 3 IOCB */
740734 avail_dsds = 1;
741
- cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
735
+ cur_dsd = &cmd_pkt->dsd;
742736
743737 /* Load data segments */
744738
745739 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
746
- dma_addr_t sle_dma;
747740 cont_a64_entry_t *cont_pkt;
748741
749742 /* Allocate additional continuation packets? */
....@@ -753,21 +746,18 @@
753746 * Type 1 IOCB.
754747 */
755748 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
756
- cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
757
- avail_dsds = 5;
749
+ cur_dsd = cont_pkt->dsd;
750
+ avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
758751 }
759752
760
- sle_dma = sg_dma_address(sg);
761
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
762
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
763
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
753
+ append_dsd64(&cur_dsd, sg);
764754 avail_dsds--;
765755 }
766756 }
767757
768758 struct fw_dif_context {
769
- uint32_t ref_tag;
770
- uint16_t app_tag;
759
+ __le32 ref_tag;
760
+ __le16 app_tag;
771761 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
772762 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
773763 };
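Note: the switch to __le32/__le16 records that struct fw_dif_context is a little-endian wire structure, so sparse can flag any store that skips the byte-order conversion. Illustration with made-up values:

    dif_ctx->ref_tag = cpu_to_le32(lba & 0xffffffff);   /* sparse-clean */
    dif_ctx->ref_tag = lba & 0xffffffff;                /* now a sparse warning */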
....@@ -896,14 +886,14 @@
896886
897887 int
898888 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
899
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
889
+ struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
900890 {
901891 void *next_dsd;
902892 uint8_t avail_dsds = 0;
903893 uint32_t dsd_list_len;
904894 struct dsd_dma *dsd_ptr;
905895 struct scatterlist *sg_prot;
906
- uint32_t *cur_dsd = dsd;
896
+ struct dsd64 *cur_dsd = dsd;
907897 uint16_t used_dsds = tot_dsds;
908898 uint32_t prot_int; /* protection interval */
909899 uint32_t partial;
....@@ -965,8 +955,7 @@
965955
966956 if (sp) {
967957 list_add_tail(&dsd_ptr->list,
968
- &((struct crc_context *)
969
- sp->u.scmd.ctx)->dsd_list);
958
+ &sp->u.scmd.crc_ctx->dsd_list);
970959
971960 sp->flags |= SRB_CRC_CTX_DSD_VALID;
972961 } else {
....@@ -977,14 +966,14 @@
977966
978967
979968 /* add new list to cmd iocb or last list */
980
- *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
981
- *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
982
- *cur_dsd++ = dsd_list_len;
983
- cur_dsd = (uint32_t *)next_dsd;
969
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
970
+ &cur_dsd->address);
971
+ cur_dsd->length = cpu_to_le32(dsd_list_len);
972
+ cur_dsd = next_dsd;
984973 }
985
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
986
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
987
- *cur_dsd++ = cpu_to_le32(sle_dma_len);
974
+ put_unaligned_le64(sle_dma, &cur_dsd->address);
975
+ cur_dsd->length = cpu_to_le32(sle_dma_len);
976
+ cur_dsd++;
988977 avail_dsds--;
989978
990979 if (partial == 0) {
....@@ -1003,22 +992,22 @@
1003992 }
1004993 }
1005994 /* Null termination */
1006
- *cur_dsd++ = 0;
1007
- *cur_dsd++ = 0;
1008
- *cur_dsd++ = 0;
995
+ cur_dsd->address = 0;
996
+ cur_dsd->length = 0;
997
+ cur_dsd++;
1009998 return 0;
1010999 }
10111000
10121001 int
1013
-qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1014
- uint16_t tot_dsds, struct qla_tc_param *tc)
1002
+qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
1003
+ struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
10151004 {
10161005 void *next_dsd;
10171006 uint8_t avail_dsds = 0;
10181007 uint32_t dsd_list_len;
10191008 struct dsd_dma *dsd_ptr;
10201009 struct scatterlist *sg, *sgl;
1021
- uint32_t *cur_dsd = dsd;
1010
+ struct dsd64 *cur_dsd = dsd;
10221011 int i;
10231012 uint16_t used_dsds = tot_dsds;
10241013 struct scsi_cmnd *cmd;
....@@ -1035,8 +1024,6 @@
10351024
10361025
10371026 for_each_sg(sgl, sg, tot_dsds, i) {
1038
- dma_addr_t sle_dma;
1039
-
10401027 /* Allocate additional continuation packets? */
10411028 if (avail_dsds == 0) {
10421029 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
....@@ -1065,8 +1052,7 @@
10651052
10661053 if (sp) {
10671054 list_add_tail(&dsd_ptr->list,
1068
- &((struct crc_context *)
1069
- sp->u.scmd.ctx)->dsd_list);
1055
+ &sp->u.scmd.crc_ctx->dsd_list);
10701056
10711057 sp->flags |= SRB_CRC_CTX_DSD_VALID;
10721058 } else {
....@@ -1076,115 +1062,316 @@
10761062 }
10771063
10781064 /* add new list to cmd iocb or last list */
1079
- *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1080
- *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1081
- *cur_dsd++ = dsd_list_len;
1082
- cur_dsd = (uint32_t *)next_dsd;
1065
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
1066
+ &cur_dsd->address);
1067
+ cur_dsd->length = cpu_to_le32(dsd_list_len);
1068
+ cur_dsd = next_dsd;
10831069 }
1084
- sle_dma = sg_dma_address(sg);
1085
-
1086
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1087
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1088
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1070
+ append_dsd64(&cur_dsd, sg);
10891071 avail_dsds--;
10901072
10911073 }
10921074 /* Null termination */
1093
- *cur_dsd++ = 0;
1094
- *cur_dsd++ = 0;
1095
- *cur_dsd++ = 0;
1075
+ cur_dsd->address = 0;
1076
+ cur_dsd->length = 0;
1077
+ cur_dsd++;
10961078 return 0;
10971079 }
10981080
10991081 int
11001082 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1101
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
1083
+ struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
11021084 {
1103
- void *next_dsd;
1104
- uint8_t avail_dsds = 0;
1105
- uint32_t dsd_list_len;
1106
- struct dsd_dma *dsd_ptr;
1085
+ struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
11071086 struct scatterlist *sg, *sgl;
1108
- int i;
1109
- struct scsi_cmnd *cmd;
1110
- uint32_t *cur_dsd = dsd;
1111
- uint16_t used_dsds = tot_dsds;
1087
+ struct crc_context *difctx = NULL;
11121088 struct scsi_qla_host *vha;
1089
+ uint dsd_list_len;
1090
+ uint avail_dsds = 0;
1091
+ uint used_dsds = tot_dsds;
1092
+ bool dif_local_dma_alloc = false;
1093
+ bool direction_to_device = false;
1094
+ int i;
11131095
11141096 if (sp) {
1115
- cmd = GET_CMD_SP(sp);
1097
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1098
+
11161099 sgl = scsi_prot_sglist(cmd);
11171100 vha = sp->vha;
1101
+ difctx = sp->u.scmd.crc_ctx;
1102
+ direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
1103
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1104
+ "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
1105
+ __func__, cmd, difctx, sp);
11181106 } else if (tc) {
11191107 vha = tc->vha;
11201108 sgl = tc->prot_sg;
1109
+ difctx = tc->ctx;
1110
+ direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
11211111 } else {
11221112 BUG();
11231113 return 1;
11241114 }
11251115
1126
- ql_dbg(ql_dbg_tgt, vha, 0xe021,
1127
- "%s: enter\n", __func__);
1116
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1117
+ "%s: enter (write=%u)\n", __func__, direction_to_device);
11281118
1129
- for_each_sg(sgl, sg, tot_dsds, i) {
1130
- dma_addr_t sle_dma;
1119
+ /* if initiator doing write or target doing read */
1120
+ if (direction_to_device) {
1121
+ for_each_sg(sgl, sg, tot_dsds, i) {
1122
+ u64 sle_phys = sg_phys(sg);
11311123
1132
- /* Allocate additional continuation packets? */
1133
- if (avail_dsds == 0) {
1134
- avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1135
- QLA_DSDS_PER_IOCB : used_dsds;
1136
- dsd_list_len = (avail_dsds + 1) * 12;
1137
- used_dsds -= avail_dsds;
1124
+ /* If SGE addr + len flips bits in upper 32-bits */
1125
+ if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
1126
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
1127
+ "%s: page boundary crossing (phys=%llx len=%x)\n",
1128
+ __func__, sle_phys, sg->length);
11381129
1139
- /* allocate tracking DS */
1140
- dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1141
- if (!dsd_ptr)
1142
- return 1;
1143
-
1144
- /* allocate new list */
1145
- dsd_ptr->dsd_addr = next_dsd =
1146
- dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1147
- &dsd_ptr->dsd_list_dma);
1148
-
1149
- if (!next_dsd) {
1150
- /*
1151
- * Need to cleanup only this dsd_ptr, rest
1152
- * will be done by sp_free_dma()
1153
- */
1154
- kfree(dsd_ptr);
1155
- return 1;
1130
+ if (difctx) {
1131
+ ha->dif_bundle_crossed_pages++;
1132
+ dif_local_dma_alloc = true;
1133
+ } else {
1134
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1135
+ vha, 0xe022,
1136
+ "%s: difctx pointer is NULL\n",
1137
+ __func__);
1138
+ }
1139
+ break;
11561140 }
1157
-
1158
- if (sp) {
1159
- list_add_tail(&dsd_ptr->list,
1160
- &((struct crc_context *)
1161
- sp->u.scmd.ctx)->dsd_list);
1162
-
1163
- sp->flags |= SRB_CRC_CTX_DSD_VALID;
1164
- } else {
1165
- list_add_tail(&dsd_ptr->list,
1166
- &(tc->ctx->dsd_list));
1167
- *tc->ctx_dsd_alloced = 1;
1168
- }
1169
-
1170
- /* add new list to cmd iocb or last list */
1171
- *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1172
- *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1173
- *cur_dsd++ = dsd_list_len;
1174
- cur_dsd = (uint32_t *)next_dsd;
11751141 }
1176
- sle_dma = sg_dma_address(sg);
1142
+ ha->dif_bundle_writes++;
1143
+ } else {
1144
+ ha->dif_bundle_reads++;
1145
+ }
11771146
1178
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1179
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1180
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1147
+ if (ql2xdifbundlinginternalbuffers)
1148
+ dif_local_dma_alloc = direction_to_device;
11811149
1182
- avail_dsds--;
1150
+ if (dif_local_dma_alloc) {
1151
+ u32 track_difbundl_buf = 0;
1152
+ u32 ldma_sg_len = 0;
1153
+ u8 ldma_needed = 1;
1154
+
1155
+ difctx->no_dif_bundl = 0;
1156
+ difctx->dif_bundl_len = 0;
1157
+
1158
+ /* Track DSD buffers */
1159
+ INIT_LIST_HEAD(&difctx->ldif_dsd_list);
1160
+ /* Track local DMA buffers */
1161
+ INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
1162
+
1163
+ for_each_sg(sgl, sg, tot_dsds, i) {
1164
+ u32 sglen = sg_dma_len(sg);
1165
+
1166
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1167
+ "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1168
+ __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
1169
+ difctx->dif_bundl_len, ldma_needed);
1170
+
1171
+ while (sglen) {
1172
+ u32 xfrlen = 0;
1173
+
1174
+ if (ldma_needed) {
1175
+ /*
1176
+ * Allocate list item to store
1177
+ * the DMA buffers
1178
+ */
1179
+ dsd_ptr = kzalloc(sizeof(*dsd_ptr),
1180
+ GFP_ATOMIC);
1181
+ if (!dsd_ptr) {
1182
+ ql_dbg(ql_dbg_tgt, vha, 0xe024,
1183
+ "%s: failed alloc dsd_ptr\n",
1184
+ __func__);
1185
+ return 1;
1186
+ }
1187
+ ha->dif_bundle_kallocs++;
1188
+
1189
+ /* allocate dma buffer */
1190
+ dsd_ptr->dsd_addr = dma_pool_alloc
1191
+ (ha->dif_bundl_pool, GFP_ATOMIC,
1192
+ &dsd_ptr->dsd_list_dma);
1193
+ if (!dsd_ptr->dsd_addr) {
1194
+ ql_dbg(ql_dbg_tgt, vha, 0xe024,
1195
+ "%s: failed alloc ->dsd_ptr\n",
1196
+ __func__);
1197
+ /*
1198
+ * need to cleanup only this
1199
+ * dsd_ptr rest will be done
1200
+ * by sp_free_dma()
1201
+ */
1202
+ kfree(dsd_ptr);
1203
+ ha->dif_bundle_kallocs--;
1204
+ return 1;
1205
+ }
1206
+ ha->dif_bundle_dma_allocs++;
1207
+ ldma_needed = 0;
1208
+ difctx->no_dif_bundl++;
1209
+ list_add_tail(&dsd_ptr->list,
1210
+ &difctx->ldif_dma_hndl_list);
1211
+ }
1212
+
1213
+ /* xfrlen is min of dma pool size and sglen */
1214
+ xfrlen = (sglen >
1215
+ (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
1216
+ DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
1217
+ sglen;
1218
+
1219
+ /* replace with local allocated dma buffer */
1220
+ sg_pcopy_to_buffer(sgl, sg_nents(sgl),
1221
+ dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
1222
+ difctx->dif_bundl_len);
1223
+ difctx->dif_bundl_len += xfrlen;
1224
+ sglen -= xfrlen;
1225
+ ldma_sg_len += xfrlen;
1226
+ if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
1227
+ sg_is_last(sg)) {
1228
+ ldma_needed = 1;
1229
+ ldma_sg_len = 0;
1230
+ }
1231
+ }
1232
+ }
1233
+
1234
+ track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
1235
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
1236
+ "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
1237
+ difctx->dif_bundl_len, difctx->no_dif_bundl,
1238
+ track_difbundl_buf);
1239
+
1240
+ if (sp)
1241
+ sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
1242
+ else
1243
+ tc->prot_flags = DIF_BUNDL_DMA_VALID;
1244
+
1245
+ list_for_each_entry_safe(dif_dsd, nxt_dsd,
1246
+ &difctx->ldif_dma_hndl_list, list) {
1247
+ u32 sglen = (difctx->dif_bundl_len >
1248
+ DIF_BUNDLING_DMA_POOL_SIZE) ?
1249
+ DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
1250
+
1251
+ BUG_ON(track_difbundl_buf == 0);
1252
+
1253
+ /* Allocate additional continuation packets? */
1254
+ if (avail_dsds == 0) {
1255
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
1256
+ 0xe024,
1257
+ "%s: adding continuation iocb's\n",
1258
+ __func__);
1259
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1260
+ QLA_DSDS_PER_IOCB : used_dsds;
1261
+ dsd_list_len = (avail_dsds + 1) * 12;
1262
+ used_dsds -= avail_dsds;
1263
+
1264
+ /* allocate tracking DS */
1265
+ dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1266
+ if (!dsd_ptr) {
1267
+ ql_dbg(ql_dbg_tgt, vha, 0xe026,
1268
+ "%s: failed alloc dsd_ptr\n",
1269
+ __func__);
1270
+ return 1;
1271
+ }
1272
+ ha->dif_bundle_kallocs++;
1273
+
1274
+ difctx->no_ldif_dsd++;
1275
+ /* allocate new list */
1276
+ dsd_ptr->dsd_addr =
1277
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1278
+ &dsd_ptr->dsd_list_dma);
1279
+ if (!dsd_ptr->dsd_addr) {
1280
+ ql_dbg(ql_dbg_tgt, vha, 0xe026,
1281
+ "%s: failed alloc ->dsd_addr\n",
1282
+ __func__);
1283
+ /*
1284
+ * need to cleanup only this dsd_ptr
1285
+ * rest will be done by sp_free_dma()
1286
+ */
1287
+ kfree(dsd_ptr);
1288
+ ha->dif_bundle_kallocs--;
1289
+ return 1;
1290
+ }
1291
+ ha->dif_bundle_dma_allocs++;
1292
+
1293
+ if (sp) {
1294
+ list_add_tail(&dsd_ptr->list,
1295
+ &difctx->ldif_dsd_list);
1296
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
1297
+ } else {
1298
+ list_add_tail(&dsd_ptr->list,
1299
+ &difctx->ldif_dsd_list);
1300
+ tc->ctx_dsd_alloced = 1;
1301
+ }
1302
+
1303
+ /* add new list to cmd iocb or last list */
1304
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
1305
+ &cur_dsd->address);
1306
+ cur_dsd->length = cpu_to_le32(dsd_list_len);
1307
+ cur_dsd = dsd_ptr->dsd_addr;
1308
+ }
1309
+ put_unaligned_le64(dif_dsd->dsd_list_dma,
1310
+ &cur_dsd->address);
1311
+ cur_dsd->length = cpu_to_le32(sglen);
1312
+ cur_dsd++;
1313
+ avail_dsds--;
1314
+ difctx->dif_bundl_len -= sglen;
1315
+ track_difbundl_buf--;
1316
+ }
1317
+
1318
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
1319
+ "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
1320
+ difctx->no_ldif_dsd, difctx->no_dif_bundl);
1321
+ } else {
1322
+ for_each_sg(sgl, sg, tot_dsds, i) {
1323
+ /* Allocate additional continuation packets? */
1324
+ if (avail_dsds == 0) {
1325
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1326
+ QLA_DSDS_PER_IOCB : used_dsds;
1327
+ dsd_list_len = (avail_dsds + 1) * 12;
1328
+ used_dsds -= avail_dsds;
1329
+
1330
+ /* allocate tracking DS */
1331
+ dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1332
+ if (!dsd_ptr) {
1333
+ ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1334
+ vha, 0xe027,
1335
+ "%s: failed alloc dsd_dma...\n",
1336
+ __func__);
1337
+ return 1;
1338
+ }
1339
+
1340
+ /* allocate new list */
1341
+ dsd_ptr->dsd_addr =
1342
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1343
+ &dsd_ptr->dsd_list_dma);
1344
+ if (!dsd_ptr->dsd_addr) {
1345
+ /* need to cleanup only this dsd_ptr */
1346
+ /* rest will be done by sp_free_dma() */
1347
+ kfree(dsd_ptr);
1348
+ return 1;
1349
+ }
1350
+
1351
+ if (sp) {
1352
+ list_add_tail(&dsd_ptr->list,
1353
+ &difctx->dsd_list);
1354
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
1355
+ } else {
1356
+ list_add_tail(&dsd_ptr->list,
1357
+ &difctx->dsd_list);
1358
+ tc->ctx_dsd_alloced = 1;
1359
+ }
1360
+
1361
+ /* add new list to cmd iocb or last list */
1362
+ put_unaligned_le64(dsd_ptr->dsd_list_dma,
1363
+ &cur_dsd->address);
1364
+ cur_dsd->length = cpu_to_le32(dsd_list_len);
1365
+ cur_dsd = dsd_ptr->dsd_addr;
1366
+ }
1367
+ append_dsd64(&cur_dsd, sg);
1368
+ avail_dsds--;
1369
+ }
11831370 }
11841371 /* Null termination */
1185
- *cur_dsd++ = 0;
1186
- *cur_dsd++ = 0;
1187
- *cur_dsd++ = 0;
1372
+ cur_dsd->address = 0;
1373
+ cur_dsd->length = 0;
1374
+ cur_dsd++;
11881375 return 0;
11891376 }
11901377
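Note on the rewritten protection-SG walk above: for transfers toward the device, each protection SGE is first checked with MSD(sle_phys + sg->length) ^ MSD(sle_phys); a non-zero result means the start and end addresses differ in their upper 32 bits, i.e. the element crosses a 4 GB boundary, and the protection data is then bounced ("bundled") through driver-owned DMA buffers of DIF_BUNDLING_DMA_POOL_SIZE bytes each, with the new counters (dif_bundle_crossed_pages, dif_bundle_kallocs, dif_bundle_dma_allocs) tracking the extra work. A worked instance of the crossing test, with illustrative numbers:

    /* sle_phys = 0x0_FFFFF000, sg->length = 0x2000
     * end      = 0x1_00001000
     * MSD(end) ^ MSD(sle_phys) = 0x1 ^ 0x0 != 0  ->  boundary crossed,
     * so copy this element into a local DIF bundling buffer */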
....@@ -1195,14 +1382,15 @@
11951382 * @sp: SRB command to process
11961383 * @cmd_pkt: Command type 3 IOCB
11971384 * @tot_dsds: Total number of segments to transfer
1198
- * @tot_prot_dsds:
1199
- * @fw_prot_opts:
1385
+ * @tot_prot_dsds: Total number of segments with protection information
1386
+ * @fw_prot_opts: Protection options to be passed to firmware
12001387 */
1201
-inline int
1388
+static inline int
12021389 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
12031390 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
12041391 {
1205
- uint32_t *cur_dsd, *fcp_dl;
1392
+ struct dsd64 *cur_dsd;
1393
+ __be32 *fcp_dl;
12061394 scsi_qla_host_t *vha;
12071395 struct scsi_cmnd *cmd;
12081396 uint32_t total_bytes = 0;
....@@ -1220,7 +1408,7 @@
12201408 cmd = GET_CMD_SP(sp);
12211409
12221410 /* Update entry type to indicate Command Type CRC_2 IOCB */
1223
- *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
1411
+ put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
12241412
12251413 vha = sp->vha;
12261414 ha = vha->hw;
....@@ -1250,7 +1438,7 @@
12501438 bundling = 0;
12511439
12521440 /* Allocate CRC context from global pool */
1253
- crc_ctx_pkt = sp->u.scmd.ctx =
1441
+ crc_ctx_pkt = sp->u.scmd.crc_ctx =
12541442 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
12551443
12561444 if (!crc_ctx_pkt)
....@@ -1268,9 +1456,8 @@
12681456 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
12691457 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
12701458
1271
- cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1272
- cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1273
- cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1459
+ put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1460
+ cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
12741461
12751462 /* Determine SCSI command length -- align to 4 byte boundary */
12761463 if (cmd->cmd_len > 16) {
....@@ -1296,10 +1483,8 @@
12961483 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
12971484 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
12981485 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1299
- cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1300
- LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1301
- cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1302
- MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1486
+ put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1487
+ &cmd_pkt->fcp_cmnd_dseg_address);
13031488 fcp_cmnd->task_management = 0;
13041489 fcp_cmnd->task_attribute = TSK_SIMPLE;
13051490
....@@ -1313,18 +1498,18 @@
13131498 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
13141499 case SCSI_PROT_READ_INSERT:
13151500 case SCSI_PROT_WRITE_STRIP:
1316
- total_bytes = data_bytes;
1317
- data_bytes += dif_bytes;
1318
- break;
1501
+ total_bytes = data_bytes;
1502
+ data_bytes += dif_bytes;
1503
+ break;
13191504
13201505 case SCSI_PROT_READ_STRIP:
13211506 case SCSI_PROT_WRITE_INSERT:
13221507 case SCSI_PROT_READ_PASS:
13231508 case SCSI_PROT_WRITE_PASS:
1324
- total_bytes = data_bytes + dif_bytes;
1325
- break;
1509
+ total_bytes = data_bytes + dif_bytes;
1510
+ break;
13261511 default:
1327
- BUG();
1512
+ BUG();
13281513 }
13291514
13301515 if (!qla2x00_hba_err_chk_enabled(sp))
....@@ -1341,7 +1526,7 @@
13411526 }
13421527
13431528 if (!bundling) {
1344
- cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1529
+ cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
13451530 } else {
13461531 /*
13471532 * Configure Bundling if we need to fetch interlaving
....@@ -1351,7 +1536,7 @@
13511536 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
13521537 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
13531538 tot_prot_dsds);
1354
- cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1539
+ cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
13551540 }
13561541
13571542 /* Finish the common fields of CRC pkt */
....@@ -1361,7 +1546,7 @@
13611546 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
13621547 /* Fibre channel byte count */
13631548 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1364
- fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1549
+ fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
13651550 additional_fcpcdb_len);
13661551 *fcp_dl = htonl(total_bytes);
13671552
....@@ -1384,7 +1569,7 @@
13841569 if (bundling && tot_prot_dsds) {
13851570 /* Walks dif segments */
13861571 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1387
- cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1572
+ cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
13881573 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
13891574 tot_prot_dsds, NULL))
13901575 goto crc_queuing_error;
....@@ -1409,28 +1594,25 @@
14091594 int nseg;
14101595 unsigned long flags;
14111596 uint32_t *clr_ptr;
1412
- uint32_t index;
14131597 uint32_t handle;
14141598 struct cmd_type_7 *cmd_pkt;
14151599 uint16_t cnt;
14161600 uint16_t req_cnt;
14171601 uint16_t tot_dsds;
14181602 struct req_que *req = NULL;
1419
- struct rsp_que *rsp = NULL;
14201603 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
14211604 struct scsi_qla_host *vha = sp->vha;
14221605 struct qla_hw_data *ha = vha->hw;
14231606
14241607 /* Setup device pointers. */
14251608 req = vha->req;
1426
- rsp = req->rsp;
14271609
14281610 /* So we know we haven't pci_map'ed anything yet */
14291611 tot_dsds = 0;
14301612
14311613 /* Send marker if required */
14321614 if (vha->marker_needed != 0) {
1433
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1615
+ if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
14341616 QLA_SUCCESS)
14351617 return QLA_FUNCTION_FAILED;
14361618 vha->marker_needed = 0;
....@@ -1439,16 +1621,8 @@
14391621 /* Acquire ring specific lock */
14401622 spin_lock_irqsave(&ha->hardware_lock, flags);
14411623
1442
- /* Check for room in outstanding command list. */
1443
- handle = req->current_outstanding_cmd;
1444
- for (index = 1; index < req->num_outstanding_cmds; index++) {
1445
- handle++;
1446
- if (handle == req->num_outstanding_cmds)
1447
- handle = 1;
1448
- if (!req->outstanding_cmds[handle])
1449
- break;
1450
- }
1451
- if (index == req->num_outstanding_cmds)
1624
+ handle = qla2xxx_get_next_handle(req);
1625
+ if (handle == 0)
14521626 goto queuing_error;
14531627
14541628 /* Map the sg table so we have an accurate count of sg entries needed */
....@@ -1462,9 +1636,15 @@
14621636
14631637 tot_dsds = nseg;
14641638 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1639
+
1640
+ sp->iores.res_type = RESOURCE_INI;
1641
+ sp->iores.iocb_cnt = req_cnt;
1642
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
1643
+ goto queuing_error;
1644
+
14651645 if (req->cnt < (req_cnt + 2)) {
14661646 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1467
- RD_REG_DWORD_RELAXED(req->req_q_out);
1647
+ rd_reg_dword_relaxed(req->req_q_out);
14681648 if (req->ring_index < cnt)
14691649 req->cnt = cnt - req->ring_index;
14701650 else
....@@ -1482,7 +1662,7 @@
14821662 req->cnt -= req_cnt;
14831663
14841664 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1485
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1665
+ cmd_pkt->handle = make_handle(req->id, handle);
14861666
14871667 /* Zero out remaining portion of packet. */
14881668 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
....@@ -1525,7 +1705,7 @@
15251705 sp->flags |= SRB_DMA_VALID;
15261706
15271707 /* Set chip new ring index. */
1528
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
1708
+ wrt_reg_dword(req->req_q_in, req->ring_index);
15291709
15301710 spin_unlock_irqrestore(&ha->hardware_lock, flags);
15311711 return QLA_SUCCESS;
....@@ -1534,6 +1714,7 @@
15341714 if (tot_dsds)
15351715 scsi_dma_unmap(cmd);
15361716
1717
+ qla_put_iocbs(sp->qpair, &sp->iores);
15371718 spin_unlock_irqrestore(&ha->hardware_lock, flags);
15381719
15391720 return QLA_FUNCTION_FAILED;
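Note: the qla_get_iocbs()/qla_put_iocbs() pairs added throughout this diff reserve the computed IOCB count against the queue pair before the ring-space check and release it on the failure paths. Condensed from the hunks above:

    sp->iores.res_type = RESOURCE_INI;
    sp->iores.iocb_cnt = req_cnt;
    if (qla_get_iocbs(sp->qpair, &sp->iores))
            goto queuing_error;             /* qpair IOCB budget exhausted */
    ...
    queuing_error:
            qla_put_iocbs(sp->qpair, &sp->iores);   /* hand the reservation back */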
....@@ -1551,7 +1732,6 @@
15511732 int nseg;
15521733 unsigned long flags;
15531734 uint32_t *clr_ptr;
1554
- uint32_t index;
15551735 uint32_t handle;
15561736 uint16_t cnt;
15571737 uint16_t req_cnt = 0;
....@@ -1583,7 +1763,7 @@
15831763
15841764 /* Send marker if required */
15851765 if (vha->marker_needed != 0) {
1586
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1766
+ if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
15871767 QLA_SUCCESS)
15881768 return QLA_FUNCTION_FAILED;
15891769 vha->marker_needed = 0;
....@@ -1592,17 +1772,8 @@
15921772 /* Acquire ring specific lock */
15931773 spin_lock_irqsave(&ha->hardware_lock, flags);
15941774
1595
- /* Check for room in outstanding command list. */
1596
- handle = req->current_outstanding_cmd;
1597
- for (index = 1; index < req->num_outstanding_cmds; index++) {
1598
- handle++;
1599
- if (handle == req->num_outstanding_cmds)
1600
- handle = 1;
1601
- if (!req->outstanding_cmds[handle])
1602
- break;
1603
- }
1604
-
1605
- if (index == req->num_outstanding_cmds)
1775
+ handle = qla2xxx_get_next_handle(req);
1776
+ if (handle == 0)
16061777 goto queuing_error;
16071778
16081779 /* Compute number of required data segments */
....@@ -1657,9 +1828,15 @@
16571828 /* Total Data and protection sg segment(s) */
16581829 tot_prot_dsds = nseg;
16591830 tot_dsds += nseg;
1831
+
1832
+ sp->iores.res_type = RESOURCE_INI;
1833
+ sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1834
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
1835
+ goto queuing_error;
1836
+
16601837 if (req->cnt < (req_cnt + 2)) {
16611838 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1662
- RD_REG_DWORD_RELAXED(req->req_q_out);
1839
+ rd_reg_dword_relaxed(req->req_q_out);
16631840 if (req->ring_index < cnt)
16641841 req->cnt = cnt - req->ring_index;
16651842 else
....@@ -1680,7 +1857,7 @@
16801857
16811858 /* Fill-in common area */
16821859 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1683
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1860
+ cmd_pkt->handle = make_handle(req->id, handle);
16841861
16851862 clr_ptr = (uint32_t *)cmd_pkt + 2;
16861863 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
....@@ -1718,7 +1895,7 @@
17181895 req->ring_ptr++;
17191896
17201897 /* Set chip new ring index. */
1721
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
1898
+ wrt_reg_dword(req->req_q_in, req->ring_index);
17221899
17231900 spin_unlock_irqrestore(&ha->hardware_lock, flags);
17241901
....@@ -1731,6 +1908,7 @@
17311908 }
17321909 /* Cleanup will be performed by the caller (queuecommand) */
17331910
1911
+ qla_put_iocbs(sp->qpair, &sp->iores);
17341912 spin_unlock_irqrestore(&ha->hardware_lock, flags);
17351913 return QLA_FUNCTION_FAILED;
17361914 }
....@@ -1747,14 +1925,12 @@
17471925 int nseg;
17481926 unsigned long flags;
17491927 uint32_t *clr_ptr;
1750
- uint32_t index;
17511928 uint32_t handle;
17521929 struct cmd_type_7 *cmd_pkt;
17531930 uint16_t cnt;
17541931 uint16_t req_cnt;
17551932 uint16_t tot_dsds;
17561933 struct req_que *req = NULL;
1757
- struct rsp_que *rsp = NULL;
17581934 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
17591935 struct scsi_qla_host *vha = sp->fcport->vha;
17601936 struct qla_hw_data *ha = vha->hw;
....@@ -1764,7 +1940,6 @@
17641940 spin_lock_irqsave(&qpair->qp_lock, flags);
17651941
17661942 /* Setup qpair pointers */
1767
- rsp = qpair->rsp;
17681943 req = qpair->req;
17691944
17701945 /* So we know we haven't pci_map'ed anything yet */
....@@ -1772,7 +1947,7 @@
17721947
17731948 /* Send marker if required */
17741949 if (vha->marker_needed != 0) {
1775
- if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1950
+ if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
17761951 QLA_SUCCESS) {
17771952 spin_unlock_irqrestore(&qpair->qp_lock, flags);
17781953 return QLA_FUNCTION_FAILED;
....@@ -1780,16 +1955,8 @@
17801955 vha->marker_needed = 0;
17811956 }
17821957
1783
- /* Check for room in outstanding command list. */
1784
- handle = req->current_outstanding_cmd;
1785
- for (index = 1; index < req->num_outstanding_cmds; index++) {
1786
- handle++;
1787
- if (handle == req->num_outstanding_cmds)
1788
- handle = 1;
1789
- if (!req->outstanding_cmds[handle])
1790
- break;
1791
- }
1792
- if (index == req->num_outstanding_cmds)
1958
+ handle = qla2xxx_get_next_handle(req);
1959
+ if (handle == 0)
17931960 goto queuing_error;
17941961
17951962 /* Map the sg table so we have an accurate count of sg entries needed */
....@@ -1803,9 +1970,15 @@
18031970
18041971 tot_dsds = nseg;
18051972 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1973
+
1974
+ sp->iores.res_type = RESOURCE_INI;
1975
+ sp->iores.iocb_cnt = req_cnt;
1976
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
1977
+ goto queuing_error;
1978
+
18061979 if (req->cnt < (req_cnt + 2)) {
18071980 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1808
- RD_REG_DWORD_RELAXED(req->req_q_out);
1981
+ rd_reg_dword_relaxed(req->req_q_out);
18091982 if (req->ring_index < cnt)
18101983 req->cnt = cnt - req->ring_index;
18111984 else
....@@ -1823,7 +1996,7 @@
18231996 req->cnt -= req_cnt;
18241997
18251998 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1826
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1999
+ cmd_pkt->handle = make_handle(req->id, handle);
18272000
18282001 /* Zero out remaining portion of packet. */
18292002 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
....@@ -1866,7 +2039,7 @@
18662039 sp->flags |= SRB_DMA_VALID;
18672040
18682041 /* Set chip new ring index. */
1869
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
2042
+ wrt_reg_dword(req->req_q_in, req->ring_index);
18702043
18712044 spin_unlock_irqrestore(&qpair->qp_lock, flags);
18722045 return QLA_SUCCESS;
....@@ -1875,6 +2048,7 @@
18752048 if (tot_dsds)
18762049 scsi_dma_unmap(cmd);
18772050
2051
+ qla_put_iocbs(sp->qpair, &sp->iores);
18782052 spin_unlock_irqrestore(&qpair->qp_lock, flags);
18792053
18802054 return QLA_FUNCTION_FAILED;
....@@ -1893,7 +2067,6 @@
18932067 int nseg;
18942068 unsigned long flags;
18952069 uint32_t *clr_ptr;
1896
- uint32_t index;
18972070 uint32_t handle;
18982071 uint16_t cnt;
18992072 uint16_t req_cnt = 0;
....@@ -1940,7 +2113,7 @@
19402113
19412114 /* Send marker if required */
19422115 if (vha->marker_needed != 0) {
1943
- if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
2116
+ if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
19442117 QLA_SUCCESS) {
19452118 spin_unlock_irqrestore(&qpair->qp_lock, flags);
19462119 return QLA_FUNCTION_FAILED;
....@@ -1948,17 +2121,8 @@
19482121 vha->marker_needed = 0;
19492122 }
19502123
1951
- /* Check for room in outstanding command list. */
1952
- handle = req->current_outstanding_cmd;
1953
- for (index = 1; index < req->num_outstanding_cmds; index++) {
1954
- handle++;
1955
- if (handle == req->num_outstanding_cmds)
1956
- handle = 1;
1957
- if (!req->outstanding_cmds[handle])
1958
- break;
1959
- }
1960
-
1961
- if (index == req->num_outstanding_cmds)
2124
+ handle = qla2xxx_get_next_handle(req);
2125
+ if (handle == 0)
19622126 goto queuing_error;
19632127
19642128 /* Compute number of required data segments */
....@@ -2013,9 +2177,15 @@
20132177 /* Total Data and protection sg segment(s) */
20142178 tot_prot_dsds = nseg;
20152179 tot_dsds += nseg;
2180
+
2181
+ sp->iores.res_type = RESOURCE_INI;
2182
+ sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2183
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
2184
+ goto queuing_error;
2185
+
20162186 if (req->cnt < (req_cnt + 2)) {
20172187 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2018
- RD_REG_DWORD_RELAXED(req->req_q_out);
2188
+ rd_reg_dword_relaxed(req->req_q_out);
20192189 if (req->ring_index < cnt)
20202190 req->cnt = cnt - req->ring_index;
20212191 else
....@@ -2036,7 +2206,7 @@
20362206
20372207 /* Fill-in common area */
20382208 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2039
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2209
+ cmd_pkt->handle = make_handle(req->id, handle);
20402210
20412211 clr_ptr = (uint32_t *)cmd_pkt + 2;
20422212 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
....@@ -2072,7 +2242,7 @@
20722242 req->ring_ptr++;
20732243
20742244 /* Set chip new ring index. */
2075
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
2245
+ wrt_reg_dword(req->req_q_in, req->ring_index);
20762246
20772247 /* Manage unprocessed RIO/ZIO commands in response queue. */
20782248 if (vha->flags.process_response_queue &&
....@@ -2090,6 +2260,7 @@
20902260 }
20912261 /* Cleanup will be performed by the caller (queuecommand) */
20922262
2263
+ qla_put_iocbs(sp->qpair, &sp->iores);
20932264 spin_unlock_irqrestore(&qpair->qp_lock, flags);
20942265 return QLA_FUNCTION_FAILED;
20952266 }
....@@ -2105,7 +2276,7 @@
21052276 struct qla_hw_data *ha = vha->hw;
21062277 struct req_que *req = qpair->req;
21072278 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2108
- uint32_t index, handle;
2279
+ uint32_t handle;
21092280 request_t *pkt;
21102281 uint16_t cnt, req_cnt;
21112282
....@@ -2122,14 +2293,15 @@
21222293 if (req->cnt < req_cnt + 2) {
21232294 if (qpair->use_shadow_reg)
21242295 cnt = *req->out_ptr;
2125
- else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2126
- cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2296
+ else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2297
+ IS_QLA28XX(ha))
2298
+ cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
21272299 else if (IS_P3P_TYPE(ha))
2128
- cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2300
+ cnt = rd_reg_dword(reg->isp82.req_q_out);
21292301 else if (IS_FWI2_CAPABLE(ha))
2130
- cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2302
+ cnt = rd_reg_dword(&reg->isp24.req_q_out);
21312303 else if (IS_QLAFX00(ha))
2132
- cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2304
+ cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
21332305 else
21342306 cnt = qla2x00_debounce_register(
21352307 ISP_REQ_Q_OUT(ha, &reg->isp));
....@@ -2144,16 +2316,8 @@
21442316 goto queuing_error;
21452317
21462318 if (sp) {
2147
- /* Check for room in outstanding command list. */
2148
- handle = req->current_outstanding_cmd;
2149
- for (index = 1; index < req->num_outstanding_cmds; index++) {
2150
- handle++;
2151
- if (handle == req->num_outstanding_cmds)
2152
- handle = 1;
2153
- if (!req->outstanding_cmds[handle])
2154
- break;
2155
- }
2156
- if (index == req->num_outstanding_cmds) {
2319
+ handle = qla2xxx_get_next_handle(req);
2320
+ if (handle == 0) {
21572321 ql_log(ql_log_warn, vha, 0x700b,
21582322 "No room on outstanding cmd array.\n");
21592323 goto queuing_error;
....@@ -2170,8 +2334,8 @@
21702334 pkt = req->ring_ptr;
21712335 memset(pkt, 0, REQUEST_ENTRY_SIZE);
21722336 if (IS_QLAFX00(ha)) {
2173
- WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2174
- WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2337
+ wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
2338
+ wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
21752339 } else {
21762340 pkt->entry_count = req_cnt;
21772341 pkt->handle = handle;
....@@ -2208,8 +2372,20 @@
22082372
22092373 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
22102374 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2211
- if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2212
- logio->control_flags |= LCF_NVME_PRLI;
2375
+ if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2376
+ logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2377
+ if (sp->vha->flags.nvme_first_burst)
2378
+ logio->io_parameter[0] =
2379
+ cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2380
+ if (sp->vha->flags.nvme2_enabled) {
2381
+ /* Set service parameter BIT_8 for SLER support */
2382
+ logio->io_parameter[0] |=
2383
+ cpu_to_le32(NVME_PRLI_SP_SLER);
2384
+ /* Set service parameter BIT_9 for PI control support */
2385
+ logio->io_parameter[0] |=
2386
+ cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
2387
+ }
2388
+ }
22132389
22142390 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
22152391 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
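Note: per the comments above, io_parameter[0] of an NVMe PRLI carries service-parameter bits: NVME_PRLI_SP_FIRST_BURST when the port supports first burst, plus BIT_8 (NVME_PRLI_SP_SLER, sequence-level error recovery) and BIT_9 (NVME_PRLI_SP_PI_CTRL, protection information control) when nvme2_enabled is set.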
....@@ -2224,6 +2400,8 @@
22242400 struct srb_iocb *lio = &sp->u.iocb_cmd;
22252401
22262402 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2403
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2404
+
22272405 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
22282406 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
22292407 } else {
....@@ -2267,12 +2445,19 @@
22672445 static void
22682446 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
22692447 {
2448
+ u16 control_flags = LCF_COMMAND_LOGO;
22702449 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2271
- logio->control_flags =
2272
- cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2273
- if (!sp->fcport->se_sess ||
2274
- !sp->fcport->keep_nport_handle)
2275
- logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2450
+
2451
+ if (sp->fcport->explicit_logout) {
2452
+ control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2453
+ } else {
2454
+ control_flags |= LCF_IMPL_LOGO;
2455
+
2456
+ if (!sp->fcport->keep_nport_handle)
2457
+ control_flags |= LCF_FREE_NPORT;
2458
+ }
2459
+
2460
+ logio->control_flags = cpu_to_le16(control_flags);
22762461 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
22772462 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
22782463 logio->port_id[1] = sp->fcport->d_id.b.area;
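Note: the logout IOCB now distinguishes an explicit logout (LCF_EXPL_LOGO, always freeing the N_Port handle) from the implicit logout that was previously the only case (LCF_IMPL_LOGO, freeing the handle only when keep_nport_handle is clear); the old se_sess check is gone, and the flags are assembled in a local u16 in CPU byte order and converted with cpu_to_le16() once at the end.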
....@@ -2289,7 +2474,7 @@
22892474 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
22902475 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
22912476 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2292
- cpu_to_le16(sp->fcport->loop_id):
2477
+ cpu_to_le16(sp->fcport->loop_id) :
22932478 cpu_to_le16(sp->fcport->loop_id << 8);
22942479 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
22952480 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
....@@ -2344,7 +2529,7 @@
23442529
23452530 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
23462531 tsk->entry_count = 1;
2347
- tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2532
+ tsk->handle = make_handle(req->id, tsk->handle);
23482533 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
23492534 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
23502535 tsk->control_flags = cpu_to_le32(flags);
....@@ -2360,10 +2545,18 @@
23602545 }
23612546 }
23622547
2363
-static void
2364
-qla2x00_els_dcmd_sp_free(void *data)
2548
+void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
23652549 {
2366
- srb_t *sp = data;
2550
+ timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2551
+ sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2552
+ sp->free = qla2x00_sp_free;
2553
+ if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2554
+ init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2555
+ sp->start_timer = 1;
2556
+}
2557
+
2558
+static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2559
+{
23672560 struct srb_iocb *elsio = &sp->u.iocb_cmd;
23682561
23692562 kfree(sp->fcport);
....@@ -2384,19 +2577,36 @@
23842577 fc_port_t *fcport = sp->fcport;
23852578 struct scsi_qla_host *vha = sp->vha;
23862579 struct srb_iocb *lio = &sp->u.iocb_cmd;
2580
+ unsigned long flags = 0;
2581
+ int res, h;
23872582
23882583 ql_dbg(ql_dbg_io, vha, 0x3069,
23892584 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
23902585 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
23912586 fcport->d_id.b.al_pa);
23922587
2393
- complete(&lio->u.els_logo.comp);
2588
+ /* Abort the exchange */
2589
+ res = qla24xx_async_abort_cmd(sp, false);
2590
+ if (res) {
2591
+ ql_dbg(ql_dbg_io, vha, 0x3070,
2592
+ "mbx abort_command failed.\n");
2593
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2594
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2595
+ if (sp->qpair->req->outstanding_cmds[h] == sp) {
2596
+ sp->qpair->req->outstanding_cmds[h] = NULL;
2597
+ break;
2598
+ }
2599
+ }
2600
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2601
+ complete(&lio->u.els_logo.comp);
2602
+ } else {
2603
+ ql_dbg(ql_dbg_io, vha, 0x3071,
2604
+ "mbx abort_command success.\n");
2605
+ }
23942606 }
23952607
2396
-static void
2397
-qla2x00_els_dcmd_sp_done(void *ptr, int res)
2608
+static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
23982609 {
2399
- srb_t *sp = ptr;
24002610 fc_port_t *fcport = sp->fcport;
24012611 struct srb_iocb *lio = &sp->u.iocb_cmd;
24022612 struct scsi_qla_host *vha = sp->vha;
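[Editor's note] On ELS timeout the handler now attempts a firmware abort via qla24xx_async_abort_cmd() first; only when that fails does it walk the queue pair's outstanding_cmds[] table under qp_lock_ptr, detach the SRB from its slot, and complete the waiter directly. A minimal sketch of the slot-scrub step, with hypothetical types and the locking elided:

#include <stddef.h>
#include <stdio.h>

#define NUM_OUTSTANDING 8   /* illustrative size */

struct sketch_srb { int id; };

/* Find and clear this SRB's slot so the normal completion path
 * cannot run for it a second time (take the qpair lock around
 * this in real code). */
static void scrub_outstanding(struct sketch_srb **cmds, size_t n,
                              struct sketch_srb *sp)
{
    for (size_t h = 1; h < n; h++) {   /* slot 0 is reserved, as above */
        if (cmds[h] == sp) {
            cmds[h] = NULL;
            break;
        }
    }
}

int main(void)
{
    struct sketch_srb sp = { .id = 42 };
    struct sketch_srb *outstanding[NUM_OUTSTANDING] = { [3] = &sp };

    scrub_outstanding(outstanding, NUM_OUTSTANDING, &sp);
    printf("slot 3 now %s\n", outstanding[3] ? "set" : "NULL");
    return 0;
}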
....@@ -2474,6 +2684,10 @@
24742684
24752685 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
24762686 sizeof(struct els_logo_payload));
2687
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2688
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2689
+ elsio->u.els_logo.els_logo_pyld,
2690
+ sizeof(*elsio->u.els_logo.els_logo_pyld));
24772691
24782692 rval = qla2x00_start_sp(sp);
24792693 if (rval != QLA_SUCCESS) {
....@@ -2504,35 +2718,31 @@
25042718 els_iocb->entry_status = 0;
25052719 els_iocb->handle = sp->handle;
25062720 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2507
- els_iocb->tx_dsd_count = 1;
2721
+ els_iocb->tx_dsd_count = cpu_to_le16(1);
25082722 els_iocb->vp_index = vha->vp_idx;
25092723 els_iocb->sof_type = EST_SOFI3;
25102724 els_iocb->rx_dsd_count = 0;
25112725 els_iocb->opcode = elsio->u.els_logo.els_cmd;
25122726
2513
- els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2514
- els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2515
- els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2516
- els_iocb->s_id[0] = vha->d_id.b.al_pa;
2517
- els_iocb->s_id[1] = vha->d_id.b.area;
2518
- els_iocb->s_id[2] = vha->d_id.b.domain;
2519
- els_iocb->control_flags = 0;
2727
+ els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2728
+ els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2729
+ els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
2730
+ /* For SID the byte order is different than DID */
2731
+ els_iocb->s_id[1] = vha->d_id.b.al_pa;
2732
+ els_iocb->s_id[2] = vha->d_id.b.area;
2733
+ els_iocb->s_id[0] = vha->d_id.b.domain;
25202734
25212735 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2736
+ els_iocb->control_flags = 0;
25222737 els_iocb->tx_byte_count = els_iocb->tx_len =
2523
- sizeof(struct els_plogi_payload);
2524
- els_iocb->tx_address[0] =
2525
- cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2526
- els_iocb->tx_address[1] =
2527
- cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2528
-
2529
- els_iocb->rx_dsd_count = 1;
2738
+ cpu_to_le32(sizeof(struct els_plogi_payload));
2739
+ put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2740
+ &els_iocb->tx_address);
2741
+ els_iocb->rx_dsd_count = cpu_to_le16(1);
25302742 els_iocb->rx_byte_count = els_iocb->rx_len =
2531
- sizeof(struct els_plogi_payload);
2532
- els_iocb->rx_address[0] =
2533
- cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2534
- els_iocb->rx_address[1] =
2535
- cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
2743
+ cpu_to_le32(sizeof(struct els_plogi_payload));
2744
+ put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2745
+ &els_iocb->rx_address);
25362746
25372747 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
25382748 "PLOGI ELS IOCB:\n");
....@@ -2540,17 +2750,21 @@
25402750 (uint8_t *)els_iocb,
25412751 sizeof(*els_iocb));
25422752 } else {
2543
- els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2544
- els_iocb->tx_address[0] =
2545
- cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2546
- els_iocb->tx_address[1] =
2547
- cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2753
+ els_iocb->control_flags = cpu_to_le16(1 << 13);
2754
+ els_iocb->tx_byte_count =
2755
+ cpu_to_le32(sizeof(struct els_logo_payload));
2756
+ put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2757
+ &els_iocb->tx_address);
25482758 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
25492759
25502760 els_iocb->rx_byte_count = 0;
2551
- els_iocb->rx_address[0] = 0;
2552
- els_iocb->rx_address[1] = 0;
2761
+ els_iocb->rx_address = 0;
25532762 els_iocb->rx_len = 0;
2763
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2764
+ "LOGO ELS IOCB:");
2765
+ ql_dump_buffer(ql_log_info, vha, 0x010b,
2766
+ els_iocb,
2767
+ sizeof(*els_iocb));
25542768 }
25552769
25562770 sp->vha->qla_stats.control_requests++;
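[Editor's note] Across the ELS IOCB builders, every LSD()/MSD() pair of 32-bit stores becomes one put_unaligned_le64() into a single 64-bit tx_address/rx_address field. The portable core of that accessor is a byte-wise little-endian store, which is safe at any alignment; a self-contained sketch:

#include <stdint.h>
#include <stdio.h>

/* Byte-wise LE store, the portable core of put_unaligned_le64(). */
static void put_unaligned_le64_sketch(uint64_t val, void *p)
{
    uint8_t *b = p;
    for (int i = 0; i < 8; i++)
        b[i] = (uint8_t)(val >> (8 * i));
}

int main(void)
{
    uint8_t iocb[16] = {0};          /* pretend wire buffer */
    uint64_t dma = 0x1122334455667788ULL;

    /* One call replaces the old LSD()/MSD() pair of 32-bit stores. */
    put_unaligned_le64_sketch(dma, &iocb[3]);  /* works at any offset */

    for (int i = 3; i < 11; i++)
        printf("%02x ", (unsigned int)iocb[i]); /* 88 77 66 55 44 33 22 11 */
    putchar('\n');
    return 0;
}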
....@@ -2562,34 +2776,57 @@
25622776 srb_t *sp = data;
25632777 fc_port_t *fcport = sp->fcport;
25642778 struct scsi_qla_host *vha = sp->vha;
2565
- struct qla_hw_data *ha = vha->hw;
25662779 unsigned long flags = 0;
2567
- int res;
2780
+ int res, h;
25682781
25692782 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
25702783 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
25712784 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
25722785
25732786 /* Abort the exchange */
2574
- spin_lock_irqsave(&ha->hardware_lock, flags);
2575
- res = ha->isp_ops->abort_command(sp);
2787
+ res = qla24xx_async_abort_cmd(sp, false);
25762788 ql_dbg(ql_dbg_io, vha, 0x3070,
25772789 "mbx abort_command %s\n",
25782790 (res == QLA_SUCCESS) ? "successful" : "failed");
2579
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
2580
-
2581
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
2791
+ if (res) {
2792
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2793
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2794
+ if (sp->qpair->req->outstanding_cmds[h] == sp) {
2795
+ sp->qpair->req->outstanding_cmds[h] = NULL;
2796
+ break;
2797
+ }
2798
+ }
2799
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2800
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
2801
+ }
25822802 }
25832803
2584
-static void
2585
-qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2804
+void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
25862805 {
2587
- srb_t *sp = ptr;
2806
+ if (els_plogi->els_plogi_pyld)
2807
+ dma_free_coherent(&vha->hw->pdev->dev,
2808
+ els_plogi->tx_size,
2809
+ els_plogi->els_plogi_pyld,
2810
+ els_plogi->els_plogi_pyld_dma);
2811
+
2812
+ if (els_plogi->els_resp_pyld)
2813
+ dma_free_coherent(&vha->hw->pdev->dev,
2814
+ els_plogi->rx_size,
2815
+ els_plogi->els_resp_pyld,
2816
+ els_plogi->els_resp_pyld_dma);
2817
+}
2818
+
2819
+static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2820
+{
25882821 fc_port_t *fcport = sp->fcport;
25892822 struct srb_iocb *lio = &sp->u.iocb_cmd;
25902823 struct scsi_qla_host *vha = sp->vha;
25912824 struct event_arg ea;
25922825 struct qla_work_evt *e;
2826
+ struct fc_port *conflict_fcport;
2827
+ port_id_t cid; /* conflict Nport id */
2828
+ const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2829
+ u16 lid;
25932830
25942831 ql_dbg(ql_dbg_disc, vha, 0x3072,
25952832 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
....@@ -2601,31 +2838,109 @@
26012838 if (sp->flags & SRB_WAKEUP_ON_COMP)
26022839 complete(&lio->u.els_plogi.comp);
26032840 else {
2604
- if (res) {
2605
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2606
- } else {
2841
+ switch (le32_to_cpu(fw_status[0])) {
2842
+ case CS_DATA_UNDERRUN:
2843
+ case CS_COMPLETE:
26072844 memset(&ea, 0, sizeof(ea));
26082845 ea.fcport = fcport;
26092846 ea.rc = res;
2610
- ea.event = FCME_ELS_PLOGI_DONE;
2611
- qla2x00_fcport_event_handler(vha, &ea);
2847
+ qla_handle_els_plogi_done(vha, &ea);
2848
+ break;
2849
+
2850
+ case CS_IOCB_ERROR:
2851
+ switch (le32_to_cpu(fw_status[1])) {
2852
+ case LSC_SCODE_PORTID_USED:
2853
+ lid = le32_to_cpu(fw_status[2]) & 0xffff;
2854
+ qlt_find_sess_invalidate_other(vha,
2855
+ wwn_to_u64(fcport->port_name),
2856
+ fcport->d_id, lid, &conflict_fcport);
2857
+ if (conflict_fcport) {
2858
+ /*
2859
+ * Another fcport shares the same
2860
+ * loop_id & nport id; conflict
2861
+ * fcport needs to finish cleanup
2862
+ * before this fcport can proceed
2863
+ * to login.
2864
+ */
2865
+ conflict_fcport->conflict = fcport;
2866
+ fcport->login_pause = 1;
2867
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
2868
+ "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2869
+ __func__, __LINE__,
2870
+ fcport->port_name,
2871
+ fcport->d_id.b24, lid);
2872
+ } else {
2873
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
2874
+ "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2875
+ __func__, __LINE__,
2876
+ fcport->port_name,
2877
+ fcport->d_id.b24, lid);
2878
+ qla2x00_clear_loop_id(fcport);
2879
+ set_bit(lid, vha->hw->loop_id_map);
2880
+ fcport->loop_id = lid;
2881
+ fcport->keep_nport_handle = 0;
2882
+ qlt_schedule_sess_for_deletion(fcport);
2883
+ }
2884
+ break;
2885
+
2886
+ case LSC_SCODE_NPORT_USED:
2887
+ cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
2888
+ & 0xff;
2889
+ cid.b.area = (le32_to_cpu(fw_status[2]) >> 8)
2890
+ & 0xff;
2891
+ cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
2892
+ cid.b.rsvd_1 = 0;
2893
+
2894
+ ql_dbg(ql_dbg_disc, vha, 0x20ec,
2895
+ "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2896
+ __func__, __LINE__, fcport->port_name,
2897
+ fcport->loop_id, cid.b24);
2898
+ set_bit(fcport->loop_id,
2899
+ vha->hw->loop_id_map);
2900
+ fcport->loop_id = FC_NO_LOOP_ID;
2901
+ qla24xx_post_gnl_work(vha, fcport);
2902
+ break;
2903
+
2904
+ case LSC_SCODE_NOXCB:
2905
+ vha->hw->exch_starvation++;
2906
+ if (vha->hw->exch_starvation > 5) {
2907
+ ql_log(ql_log_warn, vha, 0xd046,
2908
+ "Exchange starvation. Resetting RISC\n");
2909
+ vha->hw->exch_starvation = 0;
2910
+ set_bit(ISP_ABORT_NEEDED,
2911
+ &vha->dpc_flags);
2912
+ qla2xxx_wake_dpc(vha);
2913
+ break;
2914
+ }
2915
+ fallthrough;
2916
+ default:
2917
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
2918
+ "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2919
+ __func__, sp->fcport->port_name,
2920
+ fw_status[0], fw_status[1], fw_status[2]);
2921
+
2922
+ fcport->flags &= ~FCF_ASYNC_SENT;
2923
+ qlt_schedule_sess_for_deletion(fcport);
2924
+ break;
2925
+ }
2926
+ break;
2927
+
2928
+ default:
2929
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
2930
+ "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2931
+ __func__, sp->fcport->port_name,
2932
+ fw_status[0], fw_status[1], fw_status[2]);
2933
+
2934
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
2935
+ qlt_schedule_sess_for_deletion(fcport);
2936
+ break;
26122937 }
26132938
26142939 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
26152940 if (!e) {
26162941 struct srb_iocb *elsio = &sp->u.iocb_cmd;
26172942
2618
- if (elsio->u.els_plogi.els_plogi_pyld)
2619
- dma_free_coherent(&sp->vha->hw->pdev->dev,
2620
- elsio->u.els_plogi.tx_size,
2621
- elsio->u.els_plogi.els_plogi_pyld,
2622
- elsio->u.els_plogi.els_plogi_pyld_dma);
2623
-
2624
- if (elsio->u.els_plogi.els_resp_pyld)
2625
- dma_free_coherent(&sp->vha->hw->pdev->dev,
2626
- elsio->u.els_plogi.rx_size,
2627
- elsio->u.els_plogi.els_resp_pyld,
2628
- elsio->u.els_plogi.els_resp_pyld_dma);
2943
+ qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
26292944 sp->free(sp);
26302945 return;
26312946 }
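[Editor's note] The completion handler above now dispatches on the firmware status words instead of a bare res check; for LSC_SCODE_NPORT_USED it unpacks the conflicting 24-bit N_Port ID from fw_status[2] with shifts and masks. A sketch of that decode (field layout as used above, values illustrative):

#include <stdint.h>
#include <stdio.h>

struct port_id { uint8_t domain, area, al_pa; };

/* fw_status[2] carries the conflicting N_Port ID as 0x00DDAAPP
 * (domain, area, al_pa), already converted from LE by the caller. */
static struct port_id decode_cid(uint32_t fw2)
{
    struct port_id cid = {
        .domain = (fw2 >> 16) & 0xff,
        .area   = (fw2 >>  8) & 0xff,
        .al_pa  =  fw2        & 0xff,
    };
    return cid;
}

int main(void)
{
    struct port_id cid = decode_cid(0x00010203);

    printf("%02x%02x%02x\n", cid.domain, cid.area, cid.al_pa); /* 010203 */
    return 0;
}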
....@@ -2643,27 +2958,27 @@
26432958 struct qla_hw_data *ha = vha->hw;
26442959 int rval = QLA_SUCCESS;
26452960 void *ptr, *resp_ptr;
2646
- dma_addr_t ptr_dma;
26472961
26482962 /* Alloc SRB structure */
26492963 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
26502964 if (!sp) {
26512965 ql_log(ql_log_info, vha, 0x70e6,
26522966 "SRB allocation failed\n");
2967
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
26532968 return -ENOMEM;
26542969 }
26552970
2971
+ fcport->flags |= FCF_ASYNC_SENT;
2972
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
26562973 elsio = &sp->u.iocb_cmd;
26572974 ql_dbg(ql_dbg_io, vha, 0x3073,
26582975 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
26592976
2660
- fcport->flags |= FCF_ASYNC_SENT;
26612977 sp->type = SRB_ELS_DCMD;
26622978 sp->name = "ELS_DCMD";
26632979 sp->fcport = fcport;
26642980
26652981 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2666
- init_completion(&elsio->u.els_plogi.comp);
26672982 if (wait)
26682983 sp->flags = SRB_WAKEUP_ON_COMP;
26692984
....@@ -2673,9 +2988,8 @@
26732988 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
26742989
26752990 ptr = elsio->u.els_plogi.els_plogi_pyld =
2676
- dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2991
+ dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
26772992 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2678
- ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
26792993
26802994 if (!elsio->u.els_plogi.els_plogi_pyld) {
26812995 rval = QLA_FUNCTION_FAILED;
....@@ -2683,7 +2997,7 @@
26832997 }
26842998
26852999 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2686
- dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
3000
+ dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
26873001 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
26883002
26893003 if (!elsio->u.els_plogi.els_resp_pyld) {
....@@ -2696,8 +3010,7 @@
26963010 memset(ptr, 0, sizeof(struct els_plogi_payload));
26973011 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
26983012 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2699
- &ha->plogi_els_payld.data,
2700
- sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
3013
+ &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
27013014
27023015 elsio->u.els_plogi.els_cmd = els_opcode;
27033016 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
....@@ -2707,6 +3020,7 @@
27073020 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
27083021 sizeof(*elsio->u.els_plogi.els_plogi_pyld));
27093022
3023
+ init_completion(&elsio->u.els_plogi.comp);
27103024 rval = qla2x00_start_sp(sp);
27113025 if (rval != QLA_SUCCESS) {
27123026 rval = QLA_FUNCTION_FAILED;
....@@ -2727,19 +3041,8 @@
27273041 }
27283042
27293043 out:
2730
- fcport->flags &= ~(FCF_ASYNC_SENT);
2731
- if (elsio->u.els_plogi.els_plogi_pyld)
2732
- dma_free_coherent(&sp->vha->hw->pdev->dev,
2733
- elsio->u.els_plogi.tx_size,
2734
- elsio->u.els_plogi.els_plogi_pyld,
2735
- elsio->u.els_plogi.els_plogi_pyld_dma);
2736
-
2737
- if (elsio->u.els_plogi.els_resp_pyld)
2738
- dma_free_coherent(&sp->vha->hw->pdev->dev,
2739
- elsio->u.els_plogi.rx_size,
2740
- elsio->u.els_plogi.els_resp_pyld,
2741
- elsio->u.els_plogi.els_resp_pyld_dma);
2742
-
3044
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3045
+ qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
27433046 sp->free(sp);
27443047 done:
27453048 return rval;
....@@ -2756,7 +3059,7 @@
27563059 els_iocb->sys_define = 0;
27573060 els_iocb->entry_status = 0;
27583061 els_iocb->handle = sp->handle;
2759
- els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3062
+ els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
27603063 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
27613064 els_iocb->vp_index = sp->vha->vp_idx;
27623065 els_iocb->sof_type = EST_SOFI3;
....@@ -2766,26 +3069,22 @@
27663069 sp->type == SRB_ELS_CMD_RPT ?
27673070 bsg_request->rqst_data.r_els.els_code :
27683071 bsg_request->rqst_data.h_els.command_code;
2769
- els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2770
- els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2771
- els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3072
+ els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3073
+ els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3074
+ els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
27723075 els_iocb->control_flags = 0;
27733076 els_iocb->rx_byte_count =
27743077 cpu_to_le32(bsg_job->reply_payload.payload_len);
27753078 els_iocb->tx_byte_count =
27763079 cpu_to_le32(bsg_job->request_payload.payload_len);
27773080
2778
- els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2779
- (bsg_job->request_payload.sg_list)));
2780
- els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2781
- (bsg_job->request_payload.sg_list)));
3081
+ put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3082
+ &els_iocb->tx_address);
27823083 els_iocb->tx_len = cpu_to_le32(sg_dma_len
27833084 (bsg_job->request_payload.sg_list));
27843085
2785
- els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2786
- (bsg_job->reply_payload.sg_list)));
2787
- els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2788
- (bsg_job->reply_payload.sg_list)));
3086
+ put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3087
+ &els_iocb->rx_address);
27893088 els_iocb->rx_len = cpu_to_le32(sg_dma_len
27903089 (bsg_job->reply_payload.sg_list));
27913090
....@@ -2796,14 +3095,13 @@
27963095 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
27973096 {
27983097 uint16_t avail_dsds;
2799
- uint32_t *cur_dsd;
3098
+ struct dsd64 *cur_dsd;
28003099 struct scatterlist *sg;
28013100 int index;
28023101 uint16_t tot_dsds;
28033102 scsi_qla_host_t *vha = sp->vha;
28043103 struct qla_hw_data *ha = vha->hw;
28053104 struct bsg_job *bsg_job = sp->u.bsg_job;
2806
- int loop_iterartion = 0;
28073105 int entry_count = 1;
28083106
28093107 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
....@@ -2823,25 +3121,20 @@
28233121 ct_iocb->rsp_bytecount =
28243122 cpu_to_le32(bsg_job->reply_payload.payload_len);
28253123
2826
- ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2827
- (bsg_job->request_payload.sg_list)));
2828
- ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2829
- (bsg_job->request_payload.sg_list)));
2830
- ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
3124
+ put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3125
+ &ct_iocb->req_dsd.address);
3126
+ ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
28313127
2832
- ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2833
- (bsg_job->reply_payload.sg_list)));
2834
- ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2835
- (bsg_job->reply_payload.sg_list)));
2836
- ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
3128
+ put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3129
+ &ct_iocb->rsp_dsd.address);
3130
+ ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
28373131
28383132 avail_dsds = 1;
2839
- cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
3133
+ cur_dsd = &ct_iocb->rsp_dsd;
28403134 index = 0;
28413135 tot_dsds = bsg_job->reply_payload.sg_cnt;
28423136
28433137 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2844
- dma_addr_t sle_dma;
28453138 cont_a64_entry_t *cont_pkt;
28463139
28473140 /* Allocate additional continuation packets? */
....@@ -2852,16 +3145,12 @@
28523145 */
28533146 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
28543147 vha->hw->req_q_map[0]);
2855
- cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3148
+ cur_dsd = cont_pkt->dsd;
28563149 avail_dsds = 5;
28573150 entry_count++;
28583151 }
28593152
2860
- sle_dma = sg_dma_address(sg);
2861
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2862
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2863
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2864
- loop_iterartion++;
3153
+ append_dsd64(&cur_dsd, sg);
28653154 avail_dsds--;
28663155 }
28673156 ct_iocb->entry_count = entry_count;
....@@ -2873,7 +3162,7 @@
28733162 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
28743163 {
28753164 uint16_t avail_dsds;
2876
- uint32_t *cur_dsd;
3165
+ struct dsd64 *cur_dsd;
28773166 struct scatterlist *sg;
28783167 int index;
28793168 uint16_t cmd_dsds, rsp_dsds;
....@@ -2902,12 +3191,10 @@
29023191 cpu_to_le32(bsg_job->request_payload.payload_len);
29033192
29043193 avail_dsds = 2;
2905
- cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
3194
+ cur_dsd = ct_iocb->dsd;
29063195 index = 0;
29073196
29083197 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
2909
- dma_addr_t sle_dma;
2910
-
29113198 /* Allocate additional continuation packets? */
29123199 if (avail_dsds == 0) {
29133200 /*
....@@ -2916,23 +3203,18 @@
29163203 */
29173204 cont_pkt = qla2x00_prep_cont_type1_iocb(
29183205 vha, ha->req_q_map[0]);
2919
- cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3206
+ cur_dsd = cont_pkt->dsd;
29203207 avail_dsds = 5;
29213208 entry_count++;
29223209 }
29233210
2924
- sle_dma = sg_dma_address(sg);
2925
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2926
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2927
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3211
+ append_dsd64(&cur_dsd, sg);
29283212 avail_dsds--;
29293213 }
29303214
29313215 index = 0;
29323216
29333217 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
2934
- dma_addr_t sle_dma;
2935
-
29363218 /* Allocate additional continuation packets? */
29373219 if (avail_dsds == 0) {
29383220 /*
....@@ -2941,15 +3223,12 @@
29413223 */
29423224 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
29433225 ha->req_q_map[0]);
2944
- cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3226
+ cur_dsd = cont_pkt->dsd;
29453227 avail_dsds = 5;
29463228 entry_count++;
29473229 }
29483230
2949
- sle_dma = sg_dma_address(sg);
2950
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2951
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2952
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3231
+ append_dsd64(&cur_dsd, sg);
29533232 avail_dsds--;
29543233 }
29553234 ct_iocb->entry_count = entry_count;
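[Editor's note] The recurring three-word sequence -- address low, address high, length -- that used to be open-coded at each scatterlist element is collapsed into append_dsd64(), which fills a struct dsd64 and advances the cursor in one call. A self-contained sketch of such a helper; the struct layout is an assumption for illustration, not copied from the driver headers:

#include <stdint.h>
#include <stdio.h>

struct dsd64 {            /* assumed layout: LE64 address + LE32 length */
    uint64_t address;
    uint32_t length;
} __attribute__((packed));

struct sg { uint64_t dma; uint32_t len; };

/* Write one descriptor and advance the cursor, replacing the old
 * *cur_dsd++ = LSD(..); *cur_dsd++ = MSD(..); *cur_dsd++ = len; triple. */
static void append_dsd64_sketch(struct dsd64 **cur, const struct sg *s)
{
    (*cur)->address = s->dma;   /* cpu_to_le64() on big-endian hosts */
    (*cur)->length  = s->len;   /* cpu_to_le32() likewise            */
    (*cur)++;
}

int main(void)
{
    struct dsd64 ring[4] = {0}, *cur = ring;
    struct sg s = { .dma = 0xdeadbeef00ULL, .len = 4096 };

    append_dsd64_sketch(&cur, &s);
    printf("used=%td len=%u\n", cur - ring, (unsigned int)ring[0].length);
    return 0;
}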
....@@ -2968,14 +3247,13 @@
29683247 unsigned long flags;
29693248 struct scsi_cmnd *cmd;
29703249 uint32_t *clr_ptr;
2971
- uint32_t index;
29723250 uint32_t handle;
29733251 uint16_t cnt;
29743252 uint16_t req_cnt;
29753253 uint16_t tot_dsds;
29763254 struct device_reg_82xx __iomem *reg;
29773255 uint32_t dbval;
2978
- uint32_t *fcp_dl;
3256
+ __be32 *fcp_dl;
29793257 uint8_t additional_cdb_len;
29803258 struct ct6_dsd *ctx;
29813259 struct scsi_qla_host *vha = sp->vha;
....@@ -2996,8 +3274,8 @@
29963274
29973275 /* Send marker if required */
29983276 if (vha->marker_needed != 0) {
2999
- if (qla2x00_marker(vha, req,
3000
- rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3277
+ if (qla2x00_marker(vha, ha->base_qpair,
3278
+ 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
30013279 ql_log(ql_log_warn, vha, 0x300c,
30023280 "qla2x00_marker failed for cmd=%p.\n", cmd);
30033281 return QLA_FUNCTION_FAILED;
....@@ -3008,16 +3286,8 @@
30083286 /* Acquire ring specific lock */
30093287 spin_lock_irqsave(&ha->hardware_lock, flags);
30103288
3011
- /* Check for room in outstanding command list. */
3012
- handle = req->current_outstanding_cmd;
3013
- for (index = 1; index < req->num_outstanding_cmds; index++) {
3014
- handle++;
3015
- if (handle == req->num_outstanding_cmds)
3016
- handle = 1;
3017
- if (!req->outstanding_cmds[handle])
3018
- break;
3019
- }
3020
- if (index == req->num_outstanding_cmds)
3289
+ handle = qla2xxx_get_next_handle(req);
3290
+ if (handle == 0)
30213291 goto queuing_error;
30223292
30233293 /* Map the sg table so we have an accurate count of sg entries needed */
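[Editor's note] The open-coded circular scan for a free outstanding-command slot is replaced by qla2xxx_get_next_handle(), with 0 meaning the table is full (slot 0 is reserved). A standalone sketch of the same search, names hypothetical:

#include <stdint.h>
#include <stdio.h>

#define NUM_CMDS 8

struct req_sketch {
    uint32_t current_outstanding_cmd;
    void *outstanding_cmds[NUM_CMDS];
};

/* Start just past the last handle issued, wrap at the table end,
 * skip slot 0, and return 0 if every slot is busy. */
static uint32_t get_next_handle_sketch(struct req_sketch *req)
{
    uint32_t handle = req->current_outstanding_cmd;

    for (uint32_t index = 1; index < NUM_CMDS; index++) {
        handle++;
        if (handle == NUM_CMDS)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            return handle;
    }
    return 0;
}

int main(void)
{
    struct req_sketch req = { .current_outstanding_cmd = 6 };

    req.outstanding_cmds[7] = &req;    /* slot 7 busy */
    printf("next handle = %u\n", get_next_handle_sketch(&req)); /* 1 */
    return 0;
}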
....@@ -3077,7 +3347,7 @@
30773347 req_cnt = 1;
30783348
30793349 if (req->cnt < (req_cnt + 2)) {
3080
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3350
+ cnt = (uint16_t)rd_reg_dword_relaxed(
30813351 &reg->req_q_out[0]);
30823352 if (req->ring_index < cnt)
30833353 req->cnt = cnt - req->ring_index;
....@@ -3088,7 +3358,7 @@
30883358 goto queuing_error;
30893359 }
30903360
3091
- ctx = sp->u.scmd.ctx =
3361
+ ctx = sp->u.scmd.ct6_ctx =
30923362 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
30933363 if (!ctx) {
30943364 ql_log(ql_log_fatal, vha, 0x3010,
....@@ -3127,7 +3397,7 @@
31273397 }
31283398
31293399 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3130
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3400
+ cmd_pkt->handle = make_handle(req->id, handle);
31313401
31323402 /* Zero out remaining portion of packet. */
31333403 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
....@@ -3165,15 +3435,13 @@
31653435
31663436 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
31673437
3168
- fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3438
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
31693439 additional_cdb_len);
31703440 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
31713441
31723442 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3173
- cmd_pkt->fcp_cmnd_dseg_address[0] =
3174
- cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3175
- cmd_pkt->fcp_cmnd_dseg_address[1] =
3176
- cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3443
+ put_unaligned_le64(ctx->fcp_cmnd_dma,
3444
+ &cmd_pkt->fcp_cmnd_dseg_address);
31773445
31783446 sp->flags |= SRB_FCP_CMND_DMA_VALID;
31793447 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
....@@ -3185,9 +3453,10 @@
31853453 cmd_pkt->entry_status = (uint8_t) rsp->id;
31863454 } else {
31873455 struct cmd_type_7 *cmd_pkt;
3456
+
31883457 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
31893458 if (req->cnt < (req_cnt + 2)) {
3190
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3459
+ cnt = (uint16_t)rd_reg_dword_relaxed(
31913460 &reg->req_q_out[0]);
31923461 if (req->ring_index < cnt)
31933462 req->cnt = cnt - req->ring_index;
....@@ -3199,7 +3468,7 @@
31993468 goto queuing_error;
32003469
32013470 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3202
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3471
+ cmd_pkt->handle = make_handle(req->id, handle);
32033472
32043473 /* Zero out remaining portion of packet. */
32053474 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
....@@ -3263,10 +3532,10 @@
32633532 if (ql2xdbwr)
32643533 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
32653534 else {
3266
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3535
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
32673536 wmb();
3268
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3269
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3537
+ while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3538
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
32703539 wmb();
32713540 }
32723541 }
....@@ -3285,9 +3554,9 @@
32853554 if (tot_dsds)
32863555 scsi_dma_unmap(cmd);
32873556
3288
- if (sp->u.scmd.ctx) {
3289
- mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3290
- sp->u.scmd.ctx = NULL;
3557
+ if (sp->u.scmd.crc_ctx) {
3558
+ mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3559
+ sp->u.scmd.crc_ctx = NULL;
32913560 }
32923561 spin_unlock_irqrestore(&ha->hardware_lock, flags);
32933562
....@@ -3304,7 +3573,7 @@
33043573 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
33053574 abt_iocb->entry_type = ABORT_IOCB_TYPE;
33063575 abt_iocb->entry_count = 1;
3307
- abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3576
+ abt_iocb->handle = make_handle(req->id, sp->handle);
33083577 if (sp->fcport) {
33093578 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
33103579 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
....@@ -3312,10 +3581,10 @@
33123581 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
33133582 }
33143583 abt_iocb->handle_to_abort =
3315
- cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3316
- aio->u.abt.cmd_hndl));
3584
+ make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3585
+ aio->u.abt.cmd_hndl);
33173586 abt_iocb->vp_index = vha->vp_idx;
3318
- abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3587
+ abt_iocb->req_que_no = aio->u.abt.req_que_no;
33193588 /* Send the command to the firmware */
33203589 wmb();
33213590 }
....@@ -3330,7 +3599,7 @@
33303599 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
33313600
33323601 for (i = 0; i < sz; i++)
3333
- mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3602
+ mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
33343603 }
33353604
33363605 static void
....@@ -3354,7 +3623,7 @@
33543623 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
33553624 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
33563625 nack->u.isp24.flags = ntfy->u.isp24.flags &
3357
- cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3626
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
33583627 }
33593628 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
33603629 nack->u.isp24.status = ntfy->u.isp24.status;
....@@ -3372,34 +3641,29 @@
33723641 /*
33733642 * Build NVME LS request
33743643 */
3375
-static int
3644
+static void
33763645 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
33773646 {
33783647 struct srb_iocb *nvme;
3379
- int rval = QLA_SUCCESS;
33803648
33813649 nvme = &sp->u.iocb_cmd;
33823650 cmd_pkt->entry_type = PT_LS4_REQUEST;
33833651 cmd_pkt->entry_count = 1;
3384
- cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3652
+ cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
33853653
33863654 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
33873655 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
33883656 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
33893657
3390
- cmd_pkt->tx_dseg_count = 1;
3391
- cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3392
- cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3393
- cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3394
- cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3658
+ cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3659
+ cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
3660
+ cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
3661
+ put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
33953662
3396
- cmd_pkt->rx_dseg_count = 1;
3397
- cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3398
- cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
3399
- cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3400
- cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3401
-
3402
- return rval;
3663
+ cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3664
+ cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
3665
+ cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
3666
+ put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
34033667 }
34043668
34053669 static void
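[Editor's note] qla_nvme_ls() drops its return value (it could only ever report QLA_SUCCESS) and fills the command and response descriptors through the dsd[0]/dsd[1] array with explicit cpu_to_le16()/cpu_to_le32() conversions. A sketch of populating such a fixed two-descriptor request; the packet shape is assumed for illustration, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct dsd_sketch { uint64_t address; uint32_t length; };

struct ls4_sketch {              /* assumed shape, not the driver's */
    uint16_t tx_dseg_count, rx_dseg_count;
    uint32_t tx_byte_count, rx_byte_count;
    struct dsd_sketch dsd[2];    /* [0] = command, [1] = response */
};

/* Endian conversions omitted; the driver applies cpu_to_le16/32
 * and put_unaligned_le64 at each store. */
static void fill_ls4(struct ls4_sketch *pkt,
                     uint64_t cmd_dma, uint32_t cmd_len,
                     uint64_t rsp_dma, uint32_t rsp_len)
{
    pkt->tx_dseg_count = 1;
    pkt->tx_byte_count = cmd_len;
    pkt->dsd[0] = (struct dsd_sketch){ cmd_dma, cmd_len };

    pkt->rx_dseg_count = 1;
    pkt->rx_byte_count = rsp_len;
    pkt->dsd[1] = (struct dsd_sketch){ rsp_dma, rsp_len };
}

int main(void)
{
    struct ls4_sketch pkt = {0};

    fill_ls4(&pkt, 0x1000, 128, 0x2000, 256);
    printf("tx=%u rx=%u\n", (unsigned int)pkt.tx_byte_count,
           (unsigned int)pkt.rx_byte_count);
    return 0;
}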
....@@ -3439,22 +3703,22 @@
34393703 int
34403704 qla2x00_start_sp(srb_t *sp)
34413705 {
3442
- int rval;
3706
+ int rval = QLA_SUCCESS;
34433707 scsi_qla_host_t *vha = sp->vha;
34443708 struct qla_hw_data *ha = vha->hw;
3709
+ struct qla_qpair *qp = sp->qpair;
34453710 void *pkt;
34463711 unsigned long flags;
34473712
3448
- rval = QLA_FUNCTION_FAILED;
3449
- spin_lock_irqsave(&ha->hardware_lock, flags);
3450
- pkt = qla2x00_alloc_iocbs(vha, sp);
3713
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
3714
+ pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
34513715 if (!pkt) {
3716
+ rval = EAGAIN;
34523717 ql_log(ql_log_warn, vha, 0x700c,
34533718 "qla2x00_alloc_iocbs failed.\n");
34543719 goto done;
34553720 }
34563721
3457
- rval = QLA_SUCCESS;
34583722 switch (sp->type) {
34593723 case SRB_LOGIN_CMD:
34603724 IS_FWI2_CAPABLE(ha) ?
....@@ -3524,10 +3788,13 @@
35243788 break;
35253789 }
35263790
3791
+ if (sp->start_timer)
3792
+ add_timer(&sp->u.iocb_cmd.timer);
3793
+
35273794 wmb();
3528
- qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3795
+ qla2x00_start_iocbs(vha, qp->req);
35293796 done:
3530
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
3797
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
35313798 return rval;
35323799 }
35333800
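[Editor's note] qla2x00_start_sp() now serializes on the SRB's own queue pair -- __qla2x00_alloc_iocbs(sp->qpair, sp) under qp_lock_ptr, then qla2x00_start_iocbs(vha, qp->req) -- instead of the adapter-wide hardware_lock, and it arms the deferred SRB timer only once the IOCB is built. A condensed sketch of the lock scope, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

struct qpair_sketch { pthread_mutex_t lock; int ring_slots; };

/* Everything from slot allocation to the doorbell happens under the
 * per-queue-pair lock, so contention is per qpair, not adapter-wide. */
static int start_sp_sketch(struct qpair_sketch *qp)
{
    int rval = 0;

    pthread_mutex_lock(&qp->lock);
    if (qp->ring_slots == 0) {       /* __qla2x00_alloc_iocbs() failed */
        rval = -1;                   /* EAGAIN in the driver           */
        goto done;
    }
    qp->ring_slots--;                /* build IOCB, arm timer, ring    */
done:
    pthread_mutex_unlock(&qp->lock);
    return rval;
}

int main(void)
{
    struct qpair_sketch qp = { PTHREAD_MUTEX_INITIALIZER, 1 };

    printf("first=%d second=%d\n",
           start_sp_sketch(&qp), start_sp_sketch(&qp));
    return 0;
}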
....@@ -3536,7 +3803,7 @@
35363803 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
35373804 {
35383805 uint16_t avail_dsds;
3539
- uint32_t *cur_dsd;
3806
+ struct dsd64 *cur_dsd;
35403807 uint32_t req_data_len = 0;
35413808 uint32_t rsp_data_len = 0;
35423809 struct scatterlist *sg;
....@@ -3545,8 +3812,7 @@
35453812 struct bsg_job *bsg_job = sp->u.bsg_job;
35463813
35473814 /*Update entry type to indicate bidir command */
3548
- *((uint32_t *)(&cmd_pkt->entry_type)) =
3549
- cpu_to_le32(COMMAND_BIDIRECTIONAL);
3815
+ put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
35503816
35513817 /* Set the transfer direction, in this set both flags
35523818 * Also set the BD_WRAP_BACK flag, firmware will take care
....@@ -3572,13 +3838,12 @@
35723838 * are bundled in continuation iocb
35733839 */
35743840 avail_dsds = 1;
3575
- cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3841
+ cur_dsd = &cmd_pkt->fcp_dsd;
35763842
35773843 index = 0;
35783844
35793845 for_each_sg(bsg_job->request_payload.sg_list, sg,
35803846 bsg_job->request_payload.sg_cnt, index) {
3581
- dma_addr_t sle_dma;
35823847 cont_a64_entry_t *cont_pkt;
35833848
35843849 /* Allocate additional continuation packets */
....@@ -3587,14 +3852,11 @@
35873852 * 5 DSDS
35883853 */
35893854 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3590
- cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3855
+ cur_dsd = cont_pkt->dsd;
35913856 avail_dsds = 5;
35923857 entry_count++;
35933858 }
3594
- sle_dma = sg_dma_address(sg);
3595
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3596
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3597
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3859
+ append_dsd64(&cur_dsd, sg);
35983860 avail_dsds--;
35993861 }
36003862 /* For read request DSD will always goes to continuation IOCB
....@@ -3604,7 +3866,6 @@
36043866 */
36053867 for_each_sg(bsg_job->reply_payload.sg_list, sg,
36063868 bsg_job->reply_payload.sg_cnt, index) {
3607
- dma_addr_t sle_dma;
36083869 cont_a64_entry_t *cont_pkt;
36093870
36103871 /* Allocate additional continuation packets */
....@@ -3613,14 +3874,11 @@
36133874 * 5 DSDS
36143875 */
36153876 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3616
- cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3877
+ cur_dsd = cont_pkt->dsd;
36173878 avail_dsds = 5;
36183879 entry_count++;
36193880 }
3620
- sle_dma = sg_dma_address(sg);
3621
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3622
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3623
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3881
+ append_dsd64(&cur_dsd, sg);
36243882 avail_dsds--;
36253883 }
36263884 /* This value should be same as number of IOCB required for this cmd */
....@@ -3634,7 +3892,6 @@
36343892 struct qla_hw_data *ha = vha->hw;
36353893 unsigned long flags;
36363894 uint32_t handle;
3637
- uint32_t index;
36383895 uint16_t req_cnt;
36393896 uint16_t cnt;
36403897 uint32_t *clr_ptr;
....@@ -3650,8 +3907,8 @@
36503907
36513908 /* Send marker if required */
36523909 if (vha->marker_needed != 0) {
3653
- if (qla2x00_marker(vha, req,
3654
- rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3910
+ if (qla2x00_marker(vha, ha->base_qpair,
3911
+ 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
36553912 return EXT_STATUS_MAILBOX;
36563913 vha->marker_needed = 0;
36573914 }
....@@ -3659,17 +3916,8 @@
36593916 /* Acquire ring specific lock */
36603917 spin_lock_irqsave(&ha->hardware_lock, flags);
36613918
3662
- /* Check for room in outstanding command list. */
3663
- handle = req->current_outstanding_cmd;
3664
- for (index = 1; index < req->num_outstanding_cmds; index++) {
3665
- handle++;
3666
- if (handle == req->num_outstanding_cmds)
3667
- handle = 1;
3668
- if (!req->outstanding_cmds[handle])
3669
- break;
3670
- }
3671
-
3672
- if (index == req->num_outstanding_cmds) {
3919
+ handle = qla2xxx_get_next_handle(req);
3920
+ if (handle == 0) {
36733921 rval = EXT_STATUS_BUSY;
36743922 goto queuing_error;
36753923 }
....@@ -3680,7 +3928,7 @@
36803928 /* Check for room on request queue. */
36813929 if (req->cnt < req_cnt + 2) {
36823930 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3683
- RD_REG_DWORD_RELAXED(req->req_q_out);
3931
+ rd_reg_dword_relaxed(req->req_q_out);
36843932 if (req->ring_index < cnt)
36853933 req->cnt = cnt - req->ring_index;
36863934 else
....@@ -3693,7 +3941,7 @@
36933941 }
36943942
36953943 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3696
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3944
+ cmd_pkt->handle = make_handle(req->id, handle);
36973945
36983946 /* Zero out remaining portion of packet. */
36993947 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/