| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * QLogic Fibre Channel HBA Driver |
|---|
| 3 | 4 | * Copyright (c) 2003-2014 QLogic Corporation |
|---|
| 4 | | - * |
|---|
| 5 | | - * See LICENSE.qla2xxx for copyright and licensing details. |
|---|
| 6 | 5 | */ |
|---|
| 7 | 6 | #include "qla_def.h" |
|---|
| 8 | 7 | #include "qla_target.h" |
|---|
| .. | .. |
|---|
| 44 | 43 | * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and |
|---|
| 45 | 44 | * Continuation Type 0 IOCBs to allocate. |
|---|
| 46 | 45 | * |
|---|
| 47 | | - * @dsds: number of data segment decriptors needed |
|---|
| 46 | + * @dsds: number of data segment descriptors needed |
|---|
| 48 | 47 | * |
|---|
| 49 | 48 | * Returns the number of IOCB entries needed to store @dsds. |
|---|
| 50 | 49 | */ |
|---|
| .. | .. |
|---|
| 66 | 65 | * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and |
|---|
| 67 | 66 | * Continuation Type 1 IOCBs to allocate. |
|---|
| 68 | 67 | * |
|---|
| 69 | | - * @dsds: number of data segment decriptors needed |
|---|
| 68 | + * @dsds: number of data segment descriptors needed |
|---|
| 70 | 69 | * |
|---|
| 71 | 70 | * Returns the number of IOCB entries needed to store @dsds. |
|---|
| 72 | 71 | */ |
|---|
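
The two helpers documented above reduce to simple capacity arithmetic: the Command Type 2 IOCB carries three data segment descriptors and each Continuation Type 0 IOCB carries seven more, while the 64-bit Command Type 3 / Continuation Type 1 pair carries two and five (these are the `avail_dsds` values the later hunks replace with `ARRAY_SIZE()`). A minimal sketch of that calculation, written only for illustration and not copied from the driver:

```c
/*
 * Illustrative sketch, assuming the 3-per-command / 7-per-continuation
 * capacities visible in the hunks below (32-bit variant).
 */
static uint16_t example_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs = 1;	/* the command IOCB itself */

	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return iocbs;
}
```
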
| .. | .. |
|---|
| 107 | 106 | cont_pkt = (cont_entry_t *)req->ring_ptr; |
|---|
| 108 | 107 | |
|---|
| 109 | 108 | /* Load packet defaults. */ |
|---|
| 110 | | - *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE); |
|---|
| 109 | + put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type); |
|---|
| 111 | 110 | |
|---|
| 112 | 111 | return (cont_pkt); |
|---|
| 113 | 112 | } |
|---|
| .. | .. |
|---|
| 136 | 135 | cont_pkt = (cont_a64_entry_t *)req->ring_ptr; |
|---|
| 137 | 136 | |
|---|
| 138 | 137 | /* Load packet defaults. */ |
|---|
| 139 | | - *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ? |
|---|
| 140 | | - cpu_to_le32(CONTINUE_A64_TYPE_FX00) : |
|---|
| 141 | | - cpu_to_le32(CONTINUE_A64_TYPE); |
|---|
| 138 | + put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 : |
|---|
| 139 | + CONTINUE_A64_TYPE, &cont_pkt->entry_type); |
|---|
| 142 | 140 | |
|---|
| 143 | 141 | return (cont_pkt); |
|---|
| 144 | 142 | } |
|---|
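
The hunks above replace the old `*((uint32_t *)(&pkt->entry_type)) = cpu_to_le32(...)` idiom with `put_unaligned_le32()`. A small, self-contained sketch of what that helper buys; the struct here is hypothetical and exists only to illustrate the change:

```c
#include <asm/unaligned.h>	/* put_unaligned_le32() */

/* Hypothetical IOCB header, for illustration only. */
struct demo_entry {
	uint8_t entry_type;
	uint8_t entry_count;
	uint8_t sys_define;
	uint8_t entry_status;
};

static void demo_set_type(struct demo_entry *e, uint32_t type)
{
	/*
	 * Stores the 32-bit value byte by byte in little-endian order,
	 * so the write is well-defined even though &e->entry_type is a
	 * byte pointer with no 4-byte alignment guarantee -- unlike the
	 * cast-and-assign pattern it replaces.
	 */
	put_unaligned_le32(type, &e->entry_type);
}
```
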
| .. | .. |
|---|
| 193 | 191 | uint16_t tot_dsds) |
|---|
| 194 | 192 | { |
|---|
| 195 | 193 | uint16_t avail_dsds; |
|---|
| 196 | | - uint32_t *cur_dsd; |
|---|
| 194 | + struct dsd32 *cur_dsd; |
|---|
| 197 | 195 | scsi_qla_host_t *vha; |
|---|
| 198 | 196 | struct scsi_cmnd *cmd; |
|---|
| 199 | 197 | struct scatterlist *sg; |
|---|
| .. | .. |
|---|
| 202 | 200 | cmd = GET_CMD_SP(sp); |
|---|
| 203 | 201 | |
|---|
| 204 | 202 | /* Update entry type to indicate Command Type 2 IOCB */ |
|---|
| 205 | | - *((uint32_t *)(&cmd_pkt->entry_type)) = |
|---|
| 206 | | - cpu_to_le32(COMMAND_TYPE); |
|---|
| 203 | + put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type); |
|---|
| 207 | 204 | |
|---|
| 208 | 205 | /* No data transfer */ |
|---|
| 209 | 206 | if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { |
|---|
| .. | .. |
|---|
| 215 | 212 | cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); |
|---|
| 216 | 213 | |
|---|
| 217 | 214 | /* Three DSDs are available in the Command Type 2 IOCB */ |
|---|
| 218 | | - avail_dsds = 3; |
|---|
| 219 | | - cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; |
|---|
| 215 | + avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32); |
|---|
| 216 | + cur_dsd = cmd_pkt->dsd32; |
|---|
| 220 | 217 | |
|---|
| 221 | 218 | /* Load data segments */ |
|---|
| 222 | 219 | scsi_for_each_sg(cmd, sg, tot_dsds, i) { |
|---|
| .. | .. |
|---|
| 229 | 226 | * Type 0 IOCB. |
|---|
| 230 | 227 | */ |
|---|
| 231 | 228 | cont_pkt = qla2x00_prep_cont_type0_iocb(vha); |
|---|
| 232 | | - cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; |
|---|
| 233 | | - avail_dsds = 7; |
|---|
| 229 | + cur_dsd = cont_pkt->dsd; |
|---|
| 230 | + avail_dsds = ARRAY_SIZE(cont_pkt->dsd); |
|---|
| 234 | 231 | } |
|---|
| 235 | 232 | |
|---|
| 236 | | - *cur_dsd++ = cpu_to_le32(sg_dma_address(sg)); |
|---|
| 237 | | - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
|---|
| 233 | + append_dsd32(&cur_dsd, sg); |
|---|
| 238 | 234 | avail_dsds--; |
|---|
| 239 | 235 | } |
|---|
| 240 | 236 | } |
|---|
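
The scatter-gather loops above and below now call `append_dsd32()`/`append_dsd64()` instead of open-coding the address and length stores. Based on the open-coded sequences being removed, the helpers are expected to look roughly like the following sketch (the real definitions live in the driver headers and may differ in detail):

```c
/* Illustrative reconstruction of the DSD append helpers. */
static inline void example_append_dsd32(struct dsd32 **dsd,
					struct scatterlist *sg)
{
	put_unaligned_le32(sg_dma_address(sg), &(*dsd)->address);
	(*dsd)->length = cpu_to_le32(sg_dma_len(sg));
	(*dsd)++;
}

static inline void example_append_dsd64(struct dsd64 **dsd,
					struct scatterlist *sg)
{
	put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
	(*dsd)->length = cpu_to_le32(sg_dma_len(sg));
	(*dsd)++;
}
```
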
| .. | .. |
|---|
| 251 | 247 | uint16_t tot_dsds) |
|---|
| 252 | 248 | { |
|---|
| 253 | 249 | uint16_t avail_dsds; |
|---|
| 254 | | - uint32_t *cur_dsd; |
|---|
| 250 | + struct dsd64 *cur_dsd; |
|---|
| 255 | 251 | scsi_qla_host_t *vha; |
|---|
| 256 | 252 | struct scsi_cmnd *cmd; |
|---|
| 257 | 253 | struct scatterlist *sg; |
|---|
| .. | .. |
|---|
| 260 | 256 | cmd = GET_CMD_SP(sp); |
|---|
| 261 | 257 | |
|---|
| 262 | 258 | /* Update entry type to indicate Command Type 3 IOCB */ |
|---|
| 263 | | - *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE); |
|---|
| 259 | + put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type); |
|---|
| 264 | 260 | |
|---|
| 265 | 261 | /* No data transfer */ |
|---|
| 266 | 262 | if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { |
|---|
| .. | .. |
|---|
| 272 | 268 | cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); |
|---|
| 273 | 269 | |
|---|
| 274 | 270 | /* Two DSDs are available in the Command Type 3 IOCB */ |
|---|
| 275 | | - avail_dsds = 2; |
|---|
| 276 | | - cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; |
|---|
| 271 | + avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64); |
|---|
| 272 | + cur_dsd = cmd_pkt->dsd64; |
|---|
| 277 | 273 | |
|---|
| 278 | 274 | /* Load data segments */ |
|---|
| 279 | 275 | scsi_for_each_sg(cmd, sg, tot_dsds, i) { |
|---|
| 280 | | - dma_addr_t sle_dma; |
|---|
| 281 | 276 | cont_a64_entry_t *cont_pkt; |
|---|
| 282 | 277 | |
|---|
| 283 | 278 | /* Allocate additional continuation packets? */ |
|---|
| .. | .. |
|---|
| 287 | 282 | * Type 1 IOCB. |
|---|
| 288 | 283 | */ |
|---|
| 289 | 284 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); |
|---|
| 290 | | - cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; |
|---|
| 291 | | - avail_dsds = 5; |
|---|
| 285 | + cur_dsd = cont_pkt->dsd; |
|---|
| 286 | + avail_dsds = ARRAY_SIZE(cont_pkt->dsd); |
|---|
| 292 | 287 | } |
|---|
| 293 | 288 | |
|---|
| 294 | | - sle_dma = sg_dma_address(sg); |
|---|
| 295 | | - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
|---|
| 296 | | - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
|---|
| 297 | | - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
|---|
| 289 | + append_dsd64(&cur_dsd, sg); |
|---|
| 298 | 290 | avail_dsds--; |
|---|
| 299 | 291 | } |
|---|
| 292 | +} |
|---|
| 293 | + |
|---|
| 294 | +/* |
|---|
| 295 | + * Find the first handle that is not in use, starting from |
|---|
| 296 | + * req->current_outstanding_cmd + 1. The caller must hold the lock that is |
|---|
| 297 | + * associated with @req. |
|---|
| 298 | + */ |
|---|
| 299 | +uint32_t qla2xxx_get_next_handle(struct req_que *req) |
|---|
| 300 | +{ |
|---|
| 301 | + uint32_t index, handle = req->current_outstanding_cmd; |
|---|
| 302 | + |
|---|
| 303 | + for (index = 1; index < req->num_outstanding_cmds; index++) { |
|---|
| 304 | + handle++; |
|---|
| 305 | + if (handle == req->num_outstanding_cmds) |
|---|
| 306 | + handle = 1; |
|---|
| 307 | + if (!req->outstanding_cmds[handle]) |
|---|
| 308 | + return handle; |
|---|
| 309 | + } |
|---|
| 310 | + |
|---|
| 311 | + return 0; |
|---|
| 300 | 312 | } |
|---|
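
The new `qla2xxx_get_next_handle()` above centralises the outstanding-command handle search that the start-IO routines previously open-coded; the later hunks in this patch convert those callers. A sketch of the expected calling convention under the request queue's lock (the bookkeeping after the call reflects the driver's usual pattern and is an assumption, not part of the hunk above):

```c
/* Sketch only: claim a free slot in req->outstanding_cmds[]. */
static int demo_claim_handle(struct req_que *req, srb_t *sp, uint32_t *out)
{
	uint32_t handle;

	/* Caller holds the lock associated with @req. */
	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		return -EBUSY;		/* ring of handles is full */

	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	*out = handle;
	return 0;
}
```
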
| 301 | 313 | |
|---|
| 302 | 314 | /** |
|---|
| .. | .. |
|---|
| 313 | 325 | scsi_qla_host_t *vha; |
|---|
| 314 | 326 | struct scsi_cmnd *cmd; |
|---|
| 315 | 327 | uint32_t *clr_ptr; |
|---|
| 316 | | - uint32_t index; |
|---|
| 317 | 328 | uint32_t handle; |
|---|
| 318 | 329 | cmd_entry_t *cmd_pkt; |
|---|
| 319 | 330 | uint16_t cnt; |
|---|
| .. | .. |
|---|
| 336 | 347 | |
|---|
| 337 | 348 | /* Send marker if required */ |
|---|
| 338 | 349 | if (vha->marker_needed != 0) { |
|---|
| 339 | | - if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != |
|---|
| 350 | + if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != |
|---|
| 340 | 351 | QLA_SUCCESS) { |
|---|
| 341 | 352 | return (QLA_FUNCTION_FAILED); |
|---|
| 342 | 353 | } |
|---|
| .. | .. |
|---|
| 346 | 357 | /* Acquire ring specific lock */ |
|---|
| 347 | 358 | spin_lock_irqsave(&ha->hardware_lock, flags); |
|---|
| 348 | 359 | |
|---|
| 349 | | - /* Check for room in outstanding command list. */ |
|---|
| 350 | | - handle = req->current_outstanding_cmd; |
|---|
| 351 | | - for (index = 1; index < req->num_outstanding_cmds; index++) { |
|---|
| 352 | | - handle++; |
|---|
| 353 | | - if (handle == req->num_outstanding_cmds) |
|---|
| 354 | | - handle = 1; |
|---|
| 355 | | - if (!req->outstanding_cmds[handle]) |
|---|
| 356 | | - break; |
|---|
| 357 | | - } |
|---|
| 358 | | - if (index == req->num_outstanding_cmds) |
|---|
| 360 | + handle = qla2xxx_get_next_handle(req); |
|---|
| 361 | + if (handle == 0) |
|---|
| 359 | 362 | goto queuing_error; |
|---|
| 360 | 363 | |
|---|
| 361 | 364 | /* Map the sg table so we have an accurate count of sg entries needed */ |
|---|
| .. | .. |
|---|
| 372 | 375 | /* Calculate the number of request entries needed. */ |
|---|
| 373 | 376 | req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); |
|---|
| 374 | 377 | if (req->cnt < (req_cnt + 2)) { |
|---|
| 375 | | - cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); |
|---|
| 378 | + cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg)); |
|---|
| 376 | 379 | if (req->ring_index < cnt) |
|---|
| 377 | 380 | req->cnt = cnt - req->ring_index; |
|---|
| 378 | 381 | else |
|---|
| .. | .. |
|---|
| 424 | 427 | sp->flags |= SRB_DMA_VALID; |
|---|
| 425 | 428 | |
|---|
| 426 | 429 | /* Set chip new ring index. */ |
|---|
| 427 | | - WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index); |
|---|
| 428 | | - RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ |
|---|
| 430 | + wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index); |
|---|
| 431 | + rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ |
|---|
| 429 | 432 | |
|---|
| 430 | 433 | /* Manage unprocessed RIO/ZIO commands in response queue. */ |
|---|
| 431 | 434 | if (vha->flags.process_response_queue && |
|---|
| .. | .. |
|---|
| 467 | 470 | req->ring_ptr++; |
|---|
| 468 | 471 | |
|---|
| 469 | 472 | /* Set chip new ring index. */ |
|---|
| 470 | | - if (ha->mqenable || IS_QLA27XX(ha)) { |
|---|
| 471 | | - WRT_REG_DWORD(req->req_q_in, req->ring_index); |
|---|
| 473 | + if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { |
|---|
| 474 | + wrt_reg_dword(req->req_q_in, req->ring_index); |
|---|
| 472 | 475 | } else if (IS_QLA83XX(ha)) { |
|---|
| 473 | | - WRT_REG_DWORD(req->req_q_in, req->ring_index); |
|---|
| 474 | | - RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); |
|---|
| 476 | + wrt_reg_dword(req->req_q_in, req->ring_index); |
|---|
| 477 | + rd_reg_dword_relaxed(&ha->iobase->isp24.hccr); |
|---|
| 475 | 478 | } else if (IS_QLAFX00(ha)) { |
|---|
| 476 | | - WRT_REG_DWORD(®->ispfx00.req_q_in, req->ring_index); |
|---|
| 477 | | - RD_REG_DWORD_RELAXED(®->ispfx00.req_q_in); |
|---|
| 479 | + wrt_reg_dword(®->ispfx00.req_q_in, req->ring_index); |
|---|
| 480 | + rd_reg_dword_relaxed(®->ispfx00.req_q_in); |
|---|
| 478 | 481 | QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); |
|---|
| 479 | 482 | } else if (IS_FWI2_CAPABLE(ha)) { |
|---|
| 480 | | - WRT_REG_DWORD(®->isp24.req_q_in, req->ring_index); |
|---|
| 481 | | - RD_REG_DWORD_RELAXED(®->isp24.req_q_in); |
|---|
| 483 | + wrt_reg_dword(®->isp24.req_q_in, req->ring_index); |
|---|
| 484 | + rd_reg_dword_relaxed(®->isp24.req_q_in); |
|---|
| 482 | 485 | } else { |
|---|
| 483 | | - WRT_REG_WORD(ISP_REQ_Q_IN(ha, ®->isp), |
|---|
| 486 | + wrt_reg_word(ISP_REQ_Q_IN(ha, ®->isp), |
|---|
| 484 | 487 | req->ring_index); |
|---|
| 485 | | - RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, ®->isp)); |
|---|
| 488 | + rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, ®->isp)); |
|---|
| 486 | 489 | } |
|---|
| 487 | 490 | } |
|---|
| 488 | 491 | } |
|---|
| .. | .. |
|---|
| 490 | 493 | /** |
|---|
| 491 | 494 | * qla2x00_marker() - Send a marker IOCB to the firmware. |
|---|
| 492 | 495 | * @vha: HA context |
|---|
| 493 | | - * @req: request queue |
|---|
| 494 | | - * @rsp: response queue |
|---|
| 496 | + * @qpair: queue pair pointer |
|---|
| 495 | 497 | * @loop_id: loop ID |
|---|
| 496 | 498 | * @lun: LUN |
|---|
| 497 | 499 | * @type: marker modifier |
|---|
| .. | .. |
|---|
| 501 | 503 | * Returns non-zero if a failure occurred, else zero. |
|---|
| 502 | 504 | */ |
|---|
| 503 | 505 | static int |
|---|
| 504 | | -__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, |
|---|
| 505 | | - struct rsp_que *rsp, uint16_t loop_id, |
|---|
| 506 | | - uint64_t lun, uint8_t type) |
|---|
| 506 | +__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, |
|---|
| 507 | + uint16_t loop_id, uint64_t lun, uint8_t type) |
|---|
| 507 | 508 | { |
|---|
| 508 | 509 | mrk_entry_t *mrk; |
|---|
| 509 | 510 | struct mrk_entry_24xx *mrk24 = NULL; |
|---|
| 510 | | - |
|---|
| 511 | + struct req_que *req = qpair->req; |
|---|
| 511 | 512 | struct qla_hw_data *ha = vha->hw; |
|---|
| 512 | 513 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); |
|---|
| 513 | 514 | |
|---|
| 514 | | - req = ha->req_q_map[0]; |
|---|
| 515 | | - mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL); |
|---|
| 515 | + mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL); |
|---|
| 516 | 516 | if (mrk == NULL) { |
|---|
| 517 | 517 | ql_log(ql_log_warn, base_vha, 0x3026, |
|---|
| 518 | 518 | "Failed to allocate Marker IOCB.\n"); |
|---|
| .. | .. |
|---|
| 529 | 529 | int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun); |
|---|
| 530 | 530 | host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); |
|---|
| 531 | 531 | mrk24->vp_index = vha->vp_idx; |
|---|
| 532 | | - mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle); |
|---|
| 532 | + mrk24->handle = make_handle(req->id, mrk24->handle); |
|---|
| 533 | 533 | } else { |
|---|
| 534 | 534 | SET_TARGET_ID(ha, mrk->target, loop_id); |
|---|
| 535 | 535 | mrk->lun = cpu_to_le16((uint16_t)lun); |
|---|
| .. | .. |
|---|
| 543 | 543 | } |
|---|
| 544 | 544 | |
|---|
| 545 | 545 | int |
|---|
| 546 | | -qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, |
|---|
| 547 | | - struct rsp_que *rsp, uint16_t loop_id, uint64_t lun, |
|---|
| 548 | | - uint8_t type) |
|---|
| 546 | +qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, |
|---|
| 547 | + uint16_t loop_id, uint64_t lun, uint8_t type) |
|---|
| 549 | 548 | { |
|---|
| 550 | 549 | int ret; |
|---|
| 551 | 550 | unsigned long flags = 0; |
|---|
| 552 | 551 | |
|---|
| 553 | | - spin_lock_irqsave(&vha->hw->hardware_lock, flags); |
|---|
| 554 | | - ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type); |
|---|
| 555 | | - spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); |
|---|
| 552 | + spin_lock_irqsave(qpair->qp_lock_ptr, flags); |
|---|
| 553 | + ret = __qla2x00_marker(vha, qpair, loop_id, lun, type); |
|---|
| 554 | + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); |
|---|
| 556 | 555 | |
|---|
| 557 | 556 | return (ret); |
|---|
| 558 | 557 | } |
|---|
| .. | .. |
|---|
| 567 | 566 | int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked) |
|---|
| 568 | 567 | { |
|---|
| 569 | 568 | if (ha_locked) { |
|---|
| 570 | | - if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0, |
|---|
| 569 | + if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, |
|---|
| 571 | 570 | MK_SYNC_ALL) != QLA_SUCCESS) |
|---|
| 572 | 571 | return QLA_FUNCTION_FAILED; |
|---|
| 573 | 572 | } else { |
|---|
| 574 | | - if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0, |
|---|
| 573 | + if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, |
|---|
| 575 | 574 | MK_SYNC_ALL) != QLA_SUCCESS) |
|---|
| 576 | 575 | return QLA_FUNCTION_FAILED; |
|---|
| 577 | 576 | } |
|---|
| .. | .. |
|---|
| 584 | 583 | qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, |
|---|
| 585 | 584 | uint16_t tot_dsds) |
|---|
| 586 | 585 | { |
|---|
| 587 | | - uint32_t *cur_dsd = NULL; |
|---|
| 586 | + struct dsd64 *cur_dsd = NULL, *next_dsd; |
|---|
| 588 | 587 | scsi_qla_host_t *vha; |
|---|
| 589 | 588 | struct qla_hw_data *ha; |
|---|
| 590 | 589 | struct scsi_cmnd *cmd; |
|---|
| 591 | 590 | struct scatterlist *cur_seg; |
|---|
| 592 | | - uint32_t *dsd_seg; |
|---|
| 593 | | - void *next_dsd; |
|---|
| 594 | 591 | uint8_t avail_dsds; |
|---|
| 595 | 592 | uint8_t first_iocb = 1; |
|---|
| 596 | 593 | uint32_t dsd_list_len; |
|---|
| 597 | 594 | struct dsd_dma *dsd_ptr; |
|---|
| 598 | 595 | struct ct6_dsd *ctx; |
|---|
| 596 | + struct qla_qpair *qpair = sp->qpair; |
|---|
| 599 | 597 | |
|---|
| 600 | 598 | cmd = GET_CMD_SP(sp); |
|---|
| 601 | 599 | |
|---|
| 602 | 600 | /* Update entry type to indicate Command Type 3 IOCB */ |
|---|
| 603 | | - *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6); |
|---|
| 601 | + put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type); |
|---|
| 604 | 602 | |
|---|
| 605 | 603 | /* No data transfer */ |
|---|
| 606 | | - if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { |
|---|
| 604 | + if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE || |
|---|
| 605 | + tot_dsds == 0) { |
|---|
| 607 | 606 | cmd_pkt->byte_count = cpu_to_le32(0); |
|---|
| 608 | 607 | return 0; |
|---|
| 609 | 608 | } |
|---|
| .. | .. |
|---|
| 614 | 613 | /* Set transfer direction */ |
|---|
| 615 | 614 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { |
|---|
| 616 | 615 | cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); |
|---|
| 617 | | - vha->qla_stats.output_bytes += scsi_bufflen(cmd); |
|---|
| 618 | | - vha->qla_stats.output_requests++; |
|---|
| 616 | + qpair->counters.output_bytes += scsi_bufflen(cmd); |
|---|
| 617 | + qpair->counters.output_requests++; |
|---|
| 619 | 618 | } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { |
|---|
| 620 | 619 | cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); |
|---|
| 621 | | - vha->qla_stats.input_bytes += scsi_bufflen(cmd); |
|---|
| 622 | | - vha->qla_stats.input_requests++; |
|---|
| 620 | + qpair->counters.input_bytes += scsi_bufflen(cmd); |
|---|
| 621 | + qpair->counters.input_requests++; |
|---|
| 623 | 622 | } |
|---|
| 624 | 623 | |
|---|
| 625 | 624 | cur_seg = scsi_sglist(cmd); |
|---|
| 626 | | - ctx = GET_CMD_CTX_SP(sp); |
|---|
| 625 | + ctx = sp->u.scmd.ct6_ctx; |
|---|
| 627 | 626 | |
|---|
| 628 | 627 | while (tot_dsds) { |
|---|
| 629 | 628 | avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? |
|---|
| .. | .. |
|---|
| 642 | 641 | |
|---|
| 643 | 642 | if (first_iocb) { |
|---|
| 644 | 643 | first_iocb = 0; |
|---|
| 645 | | - dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; |
|---|
| 646 | | - *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); |
|---|
| 647 | | - *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); |
|---|
| 648 | | - cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len); |
|---|
| 644 | + put_unaligned_le64(dsd_ptr->dsd_list_dma, |
|---|
| 645 | + &cmd_pkt->fcp_dsd.address); |
|---|
| 646 | + cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len); |
|---|
| 649 | 647 | } else { |
|---|
| 650 | | - *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); |
|---|
| 651 | | - *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); |
|---|
| 652 | | - *cur_dsd++ = cpu_to_le32(dsd_list_len); |
|---|
| 648 | + put_unaligned_le64(dsd_ptr->dsd_list_dma, |
|---|
| 649 | + &cur_dsd->address); |
|---|
| 650 | + cur_dsd->length = cpu_to_le32(dsd_list_len); |
|---|
| 651 | + cur_dsd++; |
|---|
| 653 | 652 | } |
|---|
| 654 | | - cur_dsd = (uint32_t *)next_dsd; |
|---|
| 653 | + cur_dsd = next_dsd; |
|---|
| 655 | 654 | while (avail_dsds) { |
|---|
| 656 | | - dma_addr_t sle_dma; |
|---|
| 657 | | - |
|---|
| 658 | | - sle_dma = sg_dma_address(cur_seg); |
|---|
| 659 | | - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
|---|
| 660 | | - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
|---|
| 661 | | - *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg)); |
|---|
| 655 | + append_dsd64(&cur_dsd, cur_seg); |
|---|
| 662 | 656 | cur_seg = sg_next(cur_seg); |
|---|
| 663 | 657 | avail_dsds--; |
|---|
| 664 | 658 | } |
|---|
| 665 | 659 | } |
|---|
| 666 | 660 | |
|---|
| 667 | 661 | /* Null termination */ |
|---|
| 668 | | - *cur_dsd++ = 0; |
|---|
| 669 | | - *cur_dsd++ = 0; |
|---|
| 670 | | - *cur_dsd++ = 0; |
|---|
| 671 | | - cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE; |
|---|
| 662 | + cur_dsd->address = 0; |
|---|
| 663 | + cur_dsd->length = 0; |
|---|
| 664 | + cur_dsd++; |
|---|
| 665 | + cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); |
|---|
| 672 | 666 | return 0; |
|---|
| 673 | 667 | } |
|---|
| 674 | 668 | |
|---|
| .. | .. |
|---|
| 676 | 670 | * qla24xx_calc_dsd_lists() - Determine number of DSD list required |
|---|
| 677 | 671 | * for Command Type 6. |
|---|
| 678 | 672 | * |
|---|
| 679 | | - * @dsds: number of data segment decriptors needed |
|---|
| 673 | + * @dsds: number of data segment descriptors needed |
|---|
| 680 | 674 | * |
|---|
| 681 | 675 | * Returns the number of dsd list needed to store @dsds. |
|---|
| 682 | 676 | */ |
|---|
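
For Command Type 6 the data segments are parcelled into external DSD lists of `QLA_DSDS_PER_IOCB` entries each, so the helper documented above is plain ceiling division. A sketch (the literal value of `QLA_DSDS_PER_IOCB` comes from the driver headers and is not shown in this hunk):

```c
/* Illustrative only: ceiling division of @dsds by the per-list capacity. */
static uint16_t example_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = dsds / QLA_DSDS_PER_IOCB;

	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
```
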
| .. | .. |
|---|
| 706 | 700 | uint16_t tot_dsds, struct req_que *req) |
|---|
| 707 | 701 | { |
|---|
| 708 | 702 | uint16_t avail_dsds; |
|---|
| 709 | | - uint32_t *cur_dsd; |
|---|
| 703 | + struct dsd64 *cur_dsd; |
|---|
| 710 | 704 | scsi_qla_host_t *vha; |
|---|
| 711 | 705 | struct scsi_cmnd *cmd; |
|---|
| 712 | 706 | struct scatterlist *sg; |
|---|
| 713 | 707 | int i; |
|---|
| 708 | + struct qla_qpair *qpair = sp->qpair; |
|---|
| 714 | 709 | |
|---|
| 715 | 710 | cmd = GET_CMD_SP(sp); |
|---|
| 716 | 711 | |
|---|
| 717 | 712 | /* Update entry type to indicate Command Type 3 IOCB */ |
|---|
| 718 | | - *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7); |
|---|
| 713 | + put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type); |
|---|
| 719 | 714 | |
|---|
| 720 | 715 | /* No data transfer */ |
|---|
| 721 | 716 | if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { |
|---|
| .. | .. |
|---|
| 728 | 723 | /* Set transfer direction */ |
|---|
| 729 | 724 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { |
|---|
| 730 | 725 | cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA); |
|---|
| 731 | | - vha->qla_stats.output_bytes += scsi_bufflen(cmd); |
|---|
| 732 | | - vha->qla_stats.output_requests++; |
|---|
| 726 | + qpair->counters.output_bytes += scsi_bufflen(cmd); |
|---|
| 727 | + qpair->counters.output_requests++; |
|---|
| 733 | 728 | } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { |
|---|
| 734 | 729 | cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA); |
|---|
| 735 | | - vha->qla_stats.input_bytes += scsi_bufflen(cmd); |
|---|
| 736 | | - vha->qla_stats.input_requests++; |
|---|
| 730 | + qpair->counters.input_bytes += scsi_bufflen(cmd); |
|---|
| 731 | + qpair->counters.input_requests++; |
|---|
| 737 | 732 | } |
|---|
| 738 | 733 | |
|---|
| 739 | 734 | /* One DSD is available in the Command Type 3 IOCB */ |
|---|
| 740 | 735 | avail_dsds = 1; |
|---|
| 741 | | - cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; |
|---|
| 736 | + cur_dsd = &cmd_pkt->dsd; |
|---|
| 742 | 737 | |
|---|
| 743 | 738 | /* Load data segments */ |
|---|
| 744 | 739 | |
|---|
| 745 | 740 | scsi_for_each_sg(cmd, sg, tot_dsds, i) { |
|---|
| 746 | | - dma_addr_t sle_dma; |
|---|
| 747 | 741 | cont_a64_entry_t *cont_pkt; |
|---|
| 748 | 742 | |
|---|
| 749 | 743 | /* Allocate additional continuation packets? */ |
|---|
| .. | .. |
|---|
| 753 | 747 | * Type 1 IOCB. |
|---|
| 754 | 748 | */ |
|---|
| 755 | 749 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req); |
|---|
| 756 | | - cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; |
|---|
| 757 | | - avail_dsds = 5; |
|---|
| 750 | + cur_dsd = cont_pkt->dsd; |
|---|
| 751 | + avail_dsds = ARRAY_SIZE(cont_pkt->dsd); |
|---|
| 758 | 752 | } |
|---|
| 759 | 753 | |
|---|
| 760 | | - sle_dma = sg_dma_address(sg); |
|---|
| 761 | | - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
|---|
| 762 | | - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
|---|
| 763 | | - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
|---|
| 754 | + append_dsd64(&cur_dsd, sg); |
|---|
| 764 | 755 | avail_dsds--; |
|---|
| 765 | 756 | } |
|---|
| 766 | 757 | } |
|---|
| 767 | 758 | |
|---|
| 768 | 759 | struct fw_dif_context { |
|---|
| 769 | | - uint32_t ref_tag; |
|---|
| 770 | | - uint16_t app_tag; |
|---|
| 760 | + __le32 ref_tag; |
|---|
| 761 | + __le16 app_tag; |
|---|
| 771 | 762 | uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ |
|---|
| 772 | 763 | uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ |
|---|
| 773 | 764 | }; |
|---|
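
Switching the tag fields of `struct fw_dif_context` from `uint32_t`/`uint16_t` to `__le32`/`__le16` changes nothing about the layout; it lets sparse warn whenever a value is stored without the matching `cpu_to_le*()` conversion. A minimal illustration with a hypothetical struct:

```c
struct demo_dif_ctx {
	__le32 ref_tag;
	__le16 app_tag;
};

static void demo_fill(struct demo_dif_ctx *ctx, u32 ref, u16 app)
{
	ctx->ref_tag = cpu_to_le32(ref);	/* correct on any endianness */
	ctx->app_tag = cpu_to_le16(app);
	/*
	 * ctx->ref_tag = ref; would still compile, but with the __le32
	 * annotation sparse now flags the missing byte-order conversion.
	 */
}
```
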
| .. | .. |
|---|
| 896 | 887 | |
|---|
| 897 | 888 | int |
|---|
| 898 | 889 | qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, |
|---|
| 899 | | - uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) |
|---|
| 890 | + struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) |
|---|
| 900 | 891 | { |
|---|
| 901 | 892 | void *next_dsd; |
|---|
| 902 | 893 | uint8_t avail_dsds = 0; |
|---|
| 903 | 894 | uint32_t dsd_list_len; |
|---|
| 904 | 895 | struct dsd_dma *dsd_ptr; |
|---|
| 905 | 896 | struct scatterlist *sg_prot; |
|---|
| 906 | | - uint32_t *cur_dsd = dsd; |
|---|
| 897 | + struct dsd64 *cur_dsd = dsd; |
|---|
| 907 | 898 | uint16_t used_dsds = tot_dsds; |
|---|
| 908 | 899 | uint32_t prot_int; /* protection interval */ |
|---|
| 909 | 900 | uint32_t partial; |
|---|
| .. | .. |
|---|
| 965 | 956 | |
|---|
| 966 | 957 | if (sp) { |
|---|
| 967 | 958 | list_add_tail(&dsd_ptr->list, |
|---|
| 968 | | - &((struct crc_context *) |
|---|
| 969 | | - sp->u.scmd.ctx)->dsd_list); |
|---|
| 959 | + &sp->u.scmd.crc_ctx->dsd_list); |
|---|
| 970 | 960 | |
|---|
| 971 | 961 | sp->flags |= SRB_CRC_CTX_DSD_VALID; |
|---|
| 972 | 962 | } else { |
|---|
| .. | .. |
|---|
| 977 | 967 | |
|---|
| 978 | 968 | |
|---|
| 979 | 969 | /* add new list to cmd iocb or last list */ |
|---|
| 980 | | - *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); |
|---|
| 981 | | - *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); |
|---|
| 982 | | - *cur_dsd++ = dsd_list_len; |
|---|
| 983 | | - cur_dsd = (uint32_t *)next_dsd; |
|---|
| 970 | + put_unaligned_le64(dsd_ptr->dsd_list_dma, |
|---|
| 971 | + &cur_dsd->address); |
|---|
| 972 | + cur_dsd->length = cpu_to_le32(dsd_list_len); |
|---|
| 973 | + cur_dsd = next_dsd; |
|---|
| 984 | 974 | } |
|---|
| 985 | | - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
|---|
| 986 | | - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
|---|
| 987 | | - *cur_dsd++ = cpu_to_le32(sle_dma_len); |
|---|
| 975 | + put_unaligned_le64(sle_dma, &cur_dsd->address); |
|---|
| 976 | + cur_dsd->length = cpu_to_le32(sle_dma_len); |
|---|
| 977 | + cur_dsd++; |
|---|
| 988 | 978 | avail_dsds--; |
|---|
| 989 | 979 | |
|---|
| 990 | 980 | if (partial == 0) { |
|---|
| .. | .. |
|---|
| 1003 | 993 | } |
|---|
| 1004 | 994 | } |
|---|
| 1005 | 995 | /* Null termination */ |
|---|
| 1006 | | - *cur_dsd++ = 0; |
|---|
| 1007 | | - *cur_dsd++ = 0; |
|---|
| 1008 | | - *cur_dsd++ = 0; |
|---|
| 996 | + cur_dsd->address = 0; |
|---|
| 997 | + cur_dsd->length = 0; |
|---|
| 998 | + cur_dsd++; |
|---|
| 1009 | 999 | return 0; |
|---|
| 1010 | 1000 | } |
|---|
| 1011 | 1001 | |
|---|
| 1012 | 1002 | int |
|---|
| 1013 | | -qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, |
|---|
| 1014 | | - uint16_t tot_dsds, struct qla_tc_param *tc) |
|---|
| 1003 | +qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, |
|---|
| 1004 | + struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) |
|---|
| 1015 | 1005 | { |
|---|
| 1016 | 1006 | void *next_dsd; |
|---|
| 1017 | 1007 | uint8_t avail_dsds = 0; |
|---|
| 1018 | 1008 | uint32_t dsd_list_len; |
|---|
| 1019 | 1009 | struct dsd_dma *dsd_ptr; |
|---|
| 1020 | 1010 | struct scatterlist *sg, *sgl; |
|---|
| 1021 | | - uint32_t *cur_dsd = dsd; |
|---|
| 1011 | + struct dsd64 *cur_dsd = dsd; |
|---|
| 1022 | 1012 | int i; |
|---|
| 1023 | 1013 | uint16_t used_dsds = tot_dsds; |
|---|
| 1024 | 1014 | struct scsi_cmnd *cmd; |
|---|
| .. | .. |
|---|
| 1035 | 1025 | |
|---|
| 1036 | 1026 | |
|---|
| 1037 | 1027 | for_each_sg(sgl, sg, tot_dsds, i) { |
|---|
| 1038 | | - dma_addr_t sle_dma; |
|---|
| 1039 | | - |
|---|
| 1040 | 1028 | /* Allocate additional continuation packets? */ |
|---|
| 1041 | 1029 | if (avail_dsds == 0) { |
|---|
| 1042 | 1030 | avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? |
|---|
| .. | .. |
|---|
| 1065 | 1053 | |
|---|
| 1066 | 1054 | if (sp) { |
|---|
| 1067 | 1055 | list_add_tail(&dsd_ptr->list, |
|---|
| 1068 | | - &((struct crc_context *) |
|---|
| 1069 | | - sp->u.scmd.ctx)->dsd_list); |
|---|
| 1056 | + &sp->u.scmd.crc_ctx->dsd_list); |
|---|
| 1070 | 1057 | |
|---|
| 1071 | 1058 | sp->flags |= SRB_CRC_CTX_DSD_VALID; |
|---|
| 1072 | 1059 | } else { |
|---|
| .. | .. |
|---|
| 1076 | 1063 | } |
|---|
| 1077 | 1064 | |
|---|
| 1078 | 1065 | /* add new list to cmd iocb or last list */ |
|---|
| 1079 | | - *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); |
|---|
| 1080 | | - *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); |
|---|
| 1081 | | - *cur_dsd++ = dsd_list_len; |
|---|
| 1082 | | - cur_dsd = (uint32_t *)next_dsd; |
|---|
| 1066 | + put_unaligned_le64(dsd_ptr->dsd_list_dma, |
|---|
| 1067 | + &cur_dsd->address); |
|---|
| 1068 | + cur_dsd->length = cpu_to_le32(dsd_list_len); |
|---|
| 1069 | + cur_dsd = next_dsd; |
|---|
| 1083 | 1070 | } |
|---|
| 1084 | | - sle_dma = sg_dma_address(sg); |
|---|
| 1085 | | - |
|---|
| 1086 | | - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
|---|
| 1087 | | - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
|---|
| 1088 | | - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
|---|
| 1071 | + append_dsd64(&cur_dsd, sg); |
|---|
| 1089 | 1072 | avail_dsds--; |
|---|
| 1090 | 1073 | |
|---|
| 1091 | 1074 | } |
|---|
| 1092 | 1075 | /* Null termination */ |
|---|
| 1093 | | - *cur_dsd++ = 0; |
|---|
| 1094 | | - *cur_dsd++ = 0; |
|---|
| 1095 | | - *cur_dsd++ = 0; |
|---|
| 1076 | + cur_dsd->address = 0; |
|---|
| 1077 | + cur_dsd->length = 0; |
|---|
| 1078 | + cur_dsd++; |
|---|
| 1096 | 1079 | return 0; |
|---|
| 1097 | 1080 | } |
|---|
| 1098 | 1081 | |
|---|
| 1099 | 1082 | int |
|---|
| 1100 | 1083 | qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, |
|---|
| 1101 | | - uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) |
|---|
| 1084 | + struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) |
|---|
| 1102 | 1085 | { |
|---|
| 1103 | | - void *next_dsd; |
|---|
| 1104 | | - uint8_t avail_dsds = 0; |
|---|
| 1105 | | - uint32_t dsd_list_len; |
|---|
| 1106 | | - struct dsd_dma *dsd_ptr; |
|---|
| 1086 | + struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd; |
|---|
| 1107 | 1087 | struct scatterlist *sg, *sgl; |
|---|
| 1108 | | - int i; |
|---|
| 1109 | | - struct scsi_cmnd *cmd; |
|---|
| 1110 | | - uint32_t *cur_dsd = dsd; |
|---|
| 1111 | | - uint16_t used_dsds = tot_dsds; |
|---|
| 1088 | + struct crc_context *difctx = NULL; |
|---|
| 1112 | 1089 | struct scsi_qla_host *vha; |
|---|
| 1090 | + uint dsd_list_len; |
|---|
| 1091 | + uint avail_dsds = 0; |
|---|
| 1092 | + uint used_dsds = tot_dsds; |
|---|
| 1093 | + bool dif_local_dma_alloc = false; |
|---|
| 1094 | + bool direction_to_device = false; |
|---|
| 1095 | + int i; |
|---|
| 1113 | 1096 | |
|---|
| 1114 | 1097 | if (sp) { |
|---|
| 1115 | | - cmd = GET_CMD_SP(sp); |
|---|
| 1098 | + struct scsi_cmnd *cmd = GET_CMD_SP(sp); |
|---|
| 1099 | + |
|---|
| 1116 | 1100 | sgl = scsi_prot_sglist(cmd); |
|---|
| 1117 | 1101 | vha = sp->vha; |
|---|
| 1102 | + difctx = sp->u.scmd.crc_ctx; |
|---|
| 1103 | + direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE; |
|---|
| 1104 | + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, |
|---|
| 1105 | + "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n", |
|---|
| 1106 | + __func__, cmd, difctx, sp); |
|---|
| 1118 | 1107 | } else if (tc) { |
|---|
| 1119 | 1108 | vha = tc->vha; |
|---|
| 1120 | 1109 | sgl = tc->prot_sg; |
|---|
| 1110 | + difctx = tc->ctx; |
|---|
| 1111 | + direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE; |
|---|
| 1121 | 1112 | } else { |
|---|
| 1122 | 1113 | BUG(); |
|---|
| 1123 | 1114 | return 1; |
|---|
| 1124 | 1115 | } |
|---|
| 1125 | 1116 | |
|---|
| 1126 | | - ql_dbg(ql_dbg_tgt, vha, 0xe021, |
|---|
| 1127 | | - "%s: enter\n", __func__); |
|---|
| 1117 | + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, |
|---|
| 1118 | + "%s: enter (write=%u)\n", __func__, direction_to_device); |
|---|
| 1128 | 1119 | |
|---|
| 1129 | | - for_each_sg(sgl, sg, tot_dsds, i) { |
|---|
| 1130 | | - dma_addr_t sle_dma; |
|---|
| 1120 | + /* if initiator doing write or target doing read */ |
|---|
| 1121 | + if (direction_to_device) { |
|---|
| 1122 | + for_each_sg(sgl, sg, tot_dsds, i) { |
|---|
| 1123 | + u64 sle_phys = sg_phys(sg); |
|---|
| 1131 | 1124 | |
|---|
| 1132 | | - /* Allocate additional continuation packets? */ |
|---|
| 1133 | | - if (avail_dsds == 0) { |
|---|
| 1134 | | - avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? |
|---|
| 1135 | | - QLA_DSDS_PER_IOCB : used_dsds; |
|---|
| 1136 | | - dsd_list_len = (avail_dsds + 1) * 12; |
|---|
| 1137 | | - used_dsds -= avail_dsds; |
|---|
| 1125 | + /* If SGE addr + len flips bits in upper 32-bits */ |
|---|
| 1126 | + if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) { |
|---|
| 1127 | + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022, |
|---|
| 1128 | + "%s: page boundary crossing (phys=%llx len=%x)\n", |
|---|
| 1129 | + __func__, sle_phys, sg->length); |
|---|
| 1138 | 1130 | |
|---|
| 1139 | | - /* allocate tracking DS */ |
|---|
| 1140 | | - dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); |
|---|
| 1141 | | - if (!dsd_ptr) |
|---|
| 1142 | | - return 1; |
|---|
| 1143 | | - |
|---|
| 1144 | | - /* allocate new list */ |
|---|
| 1145 | | - dsd_ptr->dsd_addr = next_dsd = |
|---|
| 1146 | | - dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, |
|---|
| 1147 | | - &dsd_ptr->dsd_list_dma); |
|---|
| 1148 | | - |
|---|
| 1149 | | - if (!next_dsd) { |
|---|
| 1150 | | - /* |
|---|
| 1151 | | - * Need to cleanup only this dsd_ptr, rest |
|---|
| 1152 | | - * will be done by sp_free_dma() |
|---|
| 1153 | | - */ |
|---|
| 1154 | | - kfree(dsd_ptr); |
|---|
| 1155 | | - return 1; |
|---|
| 1131 | + if (difctx) { |
|---|
| 1132 | + ha->dif_bundle_crossed_pages++; |
|---|
| 1133 | + dif_local_dma_alloc = true; |
|---|
| 1134 | + } else { |
|---|
| 1135 | + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, |
|---|
| 1136 | + vha, 0xe022, |
|---|
| 1137 | + "%s: difctx pointer is NULL\n", |
|---|
| 1138 | + __func__); |
|---|
| 1139 | + } |
|---|
| 1140 | + break; |
|---|
| 1156 | 1141 | } |
|---|
| 1157 | | - |
|---|
| 1158 | | - if (sp) { |
|---|
| 1159 | | - list_add_tail(&dsd_ptr->list, |
|---|
| 1160 | | - &((struct crc_context *) |
|---|
| 1161 | | - sp->u.scmd.ctx)->dsd_list); |
|---|
| 1162 | | - |
|---|
| 1163 | | - sp->flags |= SRB_CRC_CTX_DSD_VALID; |
|---|
| 1164 | | - } else { |
|---|
| 1165 | | - list_add_tail(&dsd_ptr->list, |
|---|
| 1166 | | - &(tc->ctx->dsd_list)); |
|---|
| 1167 | | - *tc->ctx_dsd_alloced = 1; |
|---|
| 1168 | | - } |
|---|
| 1169 | | - |
|---|
| 1170 | | - /* add new list to cmd iocb or last list */ |
|---|
| 1171 | | - *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); |
|---|
| 1172 | | - *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); |
|---|
| 1173 | | - *cur_dsd++ = dsd_list_len; |
|---|
| 1174 | | - cur_dsd = (uint32_t *)next_dsd; |
|---|
| 1175 | 1142 | } |
|---|
| 1176 | | - sle_dma = sg_dma_address(sg); |
|---|
| 1143 | + ha->dif_bundle_writes++; |
|---|
| 1144 | + } else { |
|---|
| 1145 | + ha->dif_bundle_reads++; |
|---|
| 1146 | + } |
|---|
| 1177 | 1147 | |
|---|
| 1178 | | - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
|---|
| 1179 | | - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
|---|
| 1180 | | - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
|---|
| 1148 | + if (ql2xdifbundlinginternalbuffers) |
|---|
| 1149 | + dif_local_dma_alloc = direction_to_device; |
|---|
| 1181 | 1150 | |
|---|
| 1182 | | - avail_dsds--; |
|---|
| 1151 | + if (dif_local_dma_alloc) { |
|---|
| 1152 | + u32 track_difbundl_buf = 0; |
|---|
| 1153 | + u32 ldma_sg_len = 0; |
|---|
| 1154 | + u8 ldma_needed = 1; |
|---|
| 1155 | + |
|---|
| 1156 | + difctx->no_dif_bundl = 0; |
|---|
| 1157 | + difctx->dif_bundl_len = 0; |
|---|
| 1158 | + |
|---|
| 1159 | + /* Track DSD buffers */ |
|---|
| 1160 | + INIT_LIST_HEAD(&difctx->ldif_dsd_list); |
|---|
| 1161 | + /* Track local DMA buffers */ |
|---|
| 1162 | + INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list); |
|---|
| 1163 | + |
|---|
| 1164 | + for_each_sg(sgl, sg, tot_dsds, i) { |
|---|
| 1165 | + u32 sglen = sg_dma_len(sg); |
|---|
| 1166 | + |
|---|
| 1167 | + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023, |
|---|
| 1168 | + "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n", |
|---|
| 1169 | + __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len, |
|---|
| 1170 | + difctx->dif_bundl_len, ldma_needed); |
|---|
| 1171 | + |
|---|
| 1172 | + while (sglen) { |
|---|
| 1173 | + u32 xfrlen = 0; |
|---|
| 1174 | + |
|---|
| 1175 | + if (ldma_needed) { |
|---|
| 1176 | + /* |
|---|
| 1177 | + * Allocate list item to store |
|---|
| 1178 | + * the DMA buffers |
|---|
| 1179 | + */ |
|---|
| 1180 | + dsd_ptr = kzalloc(sizeof(*dsd_ptr), |
|---|
| 1181 | + GFP_ATOMIC); |
|---|
| 1182 | + if (!dsd_ptr) { |
|---|
| 1183 | + ql_dbg(ql_dbg_tgt, vha, 0xe024, |
|---|
| 1184 | + "%s: failed alloc dsd_ptr\n", |
|---|
| 1185 | + __func__); |
|---|
| 1186 | + return 1; |
|---|
| 1187 | + } |
|---|
| 1188 | + ha->dif_bundle_kallocs++; |
|---|
| 1189 | + |
|---|
| 1190 | + /* allocate dma buffer */ |
|---|
| 1191 | + dsd_ptr->dsd_addr = dma_pool_alloc |
|---|
| 1192 | + (ha->dif_bundl_pool, GFP_ATOMIC, |
|---|
| 1193 | + &dsd_ptr->dsd_list_dma); |
|---|
| 1194 | + if (!dsd_ptr->dsd_addr) { |
|---|
| 1195 | + ql_dbg(ql_dbg_tgt, vha, 0xe024, |
|---|
| 1196 | + "%s: failed alloc ->dsd_ptr\n", |
|---|
| 1197 | + __func__); |
|---|
| 1198 | + /* |
|---|
| 1199 | + * need to cleanup only this |
|---|
| 1200 | + * dsd_ptr rest will be done |
|---|
| 1201 | + * by sp_free_dma() |
|---|
| 1202 | + */ |
|---|
| 1203 | + kfree(dsd_ptr); |
|---|
| 1204 | + ha->dif_bundle_kallocs--; |
|---|
| 1205 | + return 1; |
|---|
| 1206 | + } |
|---|
| 1207 | + ha->dif_bundle_dma_allocs++; |
|---|
| 1208 | + ldma_needed = 0; |
|---|
| 1209 | + difctx->no_dif_bundl++; |
|---|
| 1210 | + list_add_tail(&dsd_ptr->list, |
|---|
| 1211 | + &difctx->ldif_dma_hndl_list); |
|---|
| 1212 | + } |
|---|
| 1213 | + |
|---|
| 1214 | + /* xfrlen is min of dma pool size and sglen */ |
|---|
| 1215 | + xfrlen = (sglen > |
|---|
| 1216 | + (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ? |
|---|
| 1217 | + DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len : |
|---|
| 1218 | + sglen; |
|---|
| 1219 | + |
|---|
| 1220 | + /* replace with local allocated dma buffer */ |
|---|
| 1221 | + sg_pcopy_to_buffer(sgl, sg_nents(sgl), |
|---|
| 1222 | + dsd_ptr->dsd_addr + ldma_sg_len, xfrlen, |
|---|
| 1223 | + difctx->dif_bundl_len); |
|---|
| 1224 | + difctx->dif_bundl_len += xfrlen; |
|---|
| 1225 | + sglen -= xfrlen; |
|---|
| 1226 | + ldma_sg_len += xfrlen; |
|---|
| 1227 | + if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE || |
|---|
| 1228 | + sg_is_last(sg)) { |
|---|
| 1229 | + ldma_needed = 1; |
|---|
| 1230 | + ldma_sg_len = 0; |
|---|
| 1231 | + } |
|---|
| 1232 | + } |
|---|
| 1233 | + } |
|---|
| 1234 | + |
|---|
| 1235 | + track_difbundl_buf = used_dsds = difctx->no_dif_bundl; |
|---|
| 1236 | + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025, |
|---|
| 1237 | + "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n", |
|---|
| 1238 | + difctx->dif_bundl_len, difctx->no_dif_bundl, |
|---|
| 1239 | + track_difbundl_buf); |
|---|
| 1240 | + |
|---|
| 1241 | + if (sp) |
|---|
| 1242 | + sp->flags |= SRB_DIF_BUNDL_DMA_VALID; |
|---|
| 1243 | + else |
|---|
| 1244 | + tc->prot_flags = DIF_BUNDL_DMA_VALID; |
|---|
| 1245 | + |
|---|
| 1246 | + list_for_each_entry_safe(dif_dsd, nxt_dsd, |
|---|
| 1247 | + &difctx->ldif_dma_hndl_list, list) { |
|---|
| 1248 | + u32 sglen = (difctx->dif_bundl_len > |
|---|
| 1249 | + DIF_BUNDLING_DMA_POOL_SIZE) ? |
|---|
| 1250 | + DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len; |
|---|
| 1251 | + |
|---|
| 1252 | + BUG_ON(track_difbundl_buf == 0); |
|---|
| 1253 | + |
|---|
| 1254 | + /* Allocate additional continuation packets? */ |
|---|
| 1255 | + if (avail_dsds == 0) { |
|---|
| 1256 | + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, |
|---|
| 1257 | + 0xe024, |
|---|
| 1258 | + "%s: adding continuation iocb's\n", |
|---|
| 1259 | + __func__); |
|---|
| 1260 | + avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? |
|---|
| 1261 | + QLA_DSDS_PER_IOCB : used_dsds; |
|---|
| 1262 | + dsd_list_len = (avail_dsds + 1) * 12; |
|---|
| 1263 | + used_dsds -= avail_dsds; |
|---|
| 1264 | + |
|---|
| 1265 | + /* allocate tracking DS */ |
|---|
| 1266 | + dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); |
|---|
| 1267 | + if (!dsd_ptr) { |
|---|
| 1268 | + ql_dbg(ql_dbg_tgt, vha, 0xe026, |
|---|
| 1269 | + "%s: failed alloc dsd_ptr\n", |
|---|
| 1270 | + __func__); |
|---|
| 1271 | + return 1; |
|---|
| 1272 | + } |
|---|
| 1273 | + ha->dif_bundle_kallocs++; |
|---|
| 1274 | + |
|---|
| 1275 | + difctx->no_ldif_dsd++; |
|---|
| 1276 | + /* allocate new list */ |
|---|
| 1277 | + dsd_ptr->dsd_addr = |
|---|
| 1278 | + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, |
|---|
| 1279 | + &dsd_ptr->dsd_list_dma); |
|---|
| 1280 | + if (!dsd_ptr->dsd_addr) { |
|---|
| 1281 | + ql_dbg(ql_dbg_tgt, vha, 0xe026, |
|---|
| 1282 | + "%s: failed alloc ->dsd_addr\n", |
|---|
| 1283 | + __func__); |
|---|
| 1284 | + /* |
|---|
| 1285 | + * need to cleanup only this dsd_ptr |
|---|
| 1286 | + * rest will be done by sp_free_dma() |
|---|
| 1287 | + */ |
|---|
| 1288 | + kfree(dsd_ptr); |
|---|
| 1289 | + ha->dif_bundle_kallocs--; |
|---|
| 1290 | + return 1; |
|---|
| 1291 | + } |
|---|
| 1292 | + ha->dif_bundle_dma_allocs++; |
|---|
| 1293 | + |
|---|
| 1294 | + if (sp) { |
|---|
| 1295 | + list_add_tail(&dsd_ptr->list, |
|---|
| 1296 | + &difctx->ldif_dsd_list); |
|---|
| 1297 | + sp->flags |= SRB_CRC_CTX_DSD_VALID; |
|---|
| 1298 | + } else { |
|---|
| 1299 | + list_add_tail(&dsd_ptr->list, |
|---|
| 1300 | + &difctx->ldif_dsd_list); |
|---|
| 1301 | + tc->ctx_dsd_alloced = 1; |
|---|
| 1302 | + } |
|---|
| 1303 | + |
|---|
| 1304 | + /* add new list to cmd iocb or last list */ |
|---|
| 1305 | + put_unaligned_le64(dsd_ptr->dsd_list_dma, |
|---|
| 1306 | + &cur_dsd->address); |
|---|
| 1307 | + cur_dsd->length = cpu_to_le32(dsd_list_len); |
|---|
| 1308 | + cur_dsd = dsd_ptr->dsd_addr; |
|---|
| 1309 | + } |
|---|
| 1310 | + put_unaligned_le64(dif_dsd->dsd_list_dma, |
|---|
| 1311 | + &cur_dsd->address); |
|---|
| 1312 | + cur_dsd->length = cpu_to_le32(sglen); |
|---|
| 1313 | + cur_dsd++; |
|---|
| 1314 | + avail_dsds--; |
|---|
| 1315 | + difctx->dif_bundl_len -= sglen; |
|---|
| 1316 | + track_difbundl_buf--; |
|---|
| 1317 | + } |
|---|
| 1318 | + |
|---|
| 1319 | + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026, |
|---|
| 1320 | + "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__, |
|---|
| 1321 | + difctx->no_ldif_dsd, difctx->no_dif_bundl); |
|---|
| 1322 | + } else { |
|---|
| 1323 | + for_each_sg(sgl, sg, tot_dsds, i) { |
|---|
| 1324 | + /* Allocate additional continuation packets? */ |
|---|
| 1325 | + if (avail_dsds == 0) { |
|---|
| 1326 | + avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? |
|---|
| 1327 | + QLA_DSDS_PER_IOCB : used_dsds; |
|---|
| 1328 | + dsd_list_len = (avail_dsds + 1) * 12; |
|---|
| 1329 | + used_dsds -= avail_dsds; |
|---|
| 1330 | + |
|---|
| 1331 | + /* allocate tracking DS */ |
|---|
| 1332 | + dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); |
|---|
| 1333 | + if (!dsd_ptr) { |
|---|
| 1334 | + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, |
|---|
| 1335 | + vha, 0xe027, |
|---|
| 1336 | + "%s: failed alloc dsd_dma...\n", |
|---|
| 1337 | + __func__); |
|---|
| 1338 | + return 1; |
|---|
| 1339 | + } |
|---|
| 1340 | + |
|---|
| 1341 | + /* allocate new list */ |
|---|
| 1342 | + dsd_ptr->dsd_addr = |
|---|
| 1343 | + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, |
|---|
| 1344 | + &dsd_ptr->dsd_list_dma); |
|---|
| 1345 | + if (!dsd_ptr->dsd_addr) { |
|---|
| 1346 | + /* need to cleanup only this dsd_ptr */ |
|---|
| 1347 | + /* rest will be done by sp_free_dma() */ |
|---|
| 1348 | + kfree(dsd_ptr); |
|---|
| 1349 | + return 1; |
|---|
| 1350 | + } |
|---|
| 1351 | + |
|---|
| 1352 | + if (sp) { |
|---|
| 1353 | + list_add_tail(&dsd_ptr->list, |
|---|
| 1354 | + &difctx->dsd_list); |
|---|
| 1355 | + sp->flags |= SRB_CRC_CTX_DSD_VALID; |
|---|
| 1356 | + } else { |
|---|
| 1357 | + list_add_tail(&dsd_ptr->list, |
|---|
| 1358 | + &difctx->dsd_list); |
|---|
| 1359 | + tc->ctx_dsd_alloced = 1; |
|---|
| 1360 | + } |
|---|
| 1361 | + |
|---|
| 1362 | + /* add new list to cmd iocb or last list */ |
|---|
| 1363 | + put_unaligned_le64(dsd_ptr->dsd_list_dma, |
|---|
| 1364 | + &cur_dsd->address); |
|---|
| 1365 | + cur_dsd->length = cpu_to_le32(dsd_list_len); |
|---|
| 1366 | + cur_dsd = dsd_ptr->dsd_addr; |
|---|
| 1367 | + } |
|---|
| 1368 | + append_dsd64(&cur_dsd, sg); |
|---|
| 1369 | + avail_dsds--; |
|---|
| 1370 | + } |
|---|
| 1183 | 1371 | } |
|---|
| 1184 | 1372 | /* Null termination */ |
|---|
| 1185 | | - *cur_dsd++ = 0; |
|---|
| 1186 | | - *cur_dsd++ = 0; |
|---|
| 1187 | | - *cur_dsd++ = 0; |
|---|
| 1373 | + cur_dsd->address = 0; |
|---|
| 1374 | + cur_dsd->length = 0; |
|---|
| 1375 | + cur_dsd++; |
|---|
| 1188 | 1376 | return 0; |
|---|
| 1189 | 1377 | } |
|---|
| 1190 | 1378 | |
|---|
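
The rewritten protection-scatterlist walk above bounces protection data through driver-local DMA buffers of `DIF_BUNDLING_DMA_POOL_SIZE` bytes when the transfer is toward the device and a protection SGE would straddle a 4 GiB boundary, or unconditionally when `ql2xdifbundlinginternalbuffers` is set. A sketch of the boundary test it is built around; `upper_32_bits()` stands in here for the driver's `MSD()` macro:

```c
#include <linux/kernel.h>	/* upper_32_bits() */

/*
 * Illustration of the check in the hunk above: bundling kicks in when
 * adding the SGE length changes the upper 32 bits of the physical
 * address, i.e. the buffer crosses a 4 GiB boundary.
 */
static bool demo_sge_needs_bundling(u64 sle_phys, u32 len)
{
	return upper_32_bits(sle_phys + len) != upper_32_bits(sle_phys);
}
```
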
| .. | .. |
|---|
| 1195 | 1383 | * @sp: SRB command to process |
|---|
| 1196 | 1384 | * @cmd_pkt: Command type 3 IOCB |
|---|
| 1197 | 1385 | * @tot_dsds: Total number of segments to transfer |
|---|
| 1198 | | - * @tot_prot_dsds: |
|---|
| 1199 | | - * @fw_prot_opts: |
|---|
| 1386 | + * @tot_prot_dsds: Total number of segments with protection information |
|---|
| 1387 | + * @fw_prot_opts: Protection options to be passed to firmware |
|---|
| 1200 | 1388 | */ |
|---|
| 1201 | | -inline int |
|---|
| 1389 | +static inline int |
|---|
| 1202 | 1390 | qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, |
|---|
| 1203 | 1391 | uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) |
|---|
| 1204 | 1392 | { |
|---|
| 1205 | | - uint32_t *cur_dsd, *fcp_dl; |
|---|
| 1393 | + struct dsd64 *cur_dsd; |
|---|
| 1394 | + __be32 *fcp_dl; |
|---|
| 1206 | 1395 | scsi_qla_host_t *vha; |
|---|
| 1207 | 1396 | struct scsi_cmnd *cmd; |
|---|
| 1208 | 1397 | uint32_t total_bytes = 0; |
|---|
| .. | .. |
|---|
| 1220 | 1409 | cmd = GET_CMD_SP(sp); |
|---|
| 1221 | 1410 | |
|---|
| 1222 | 1411 | /* Update entry type to indicate Command Type CRC_2 IOCB */ |
|---|
| 1223 | | - *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2); |
|---|
| 1412 | + put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type); |
|---|
| 1224 | 1413 | |
|---|
| 1225 | 1414 | vha = sp->vha; |
|---|
| 1226 | 1415 | ha = vha->hw; |
|---|
| .. | .. |
|---|
| 1250 | 1439 | bundling = 0; |
|---|
| 1251 | 1440 | |
|---|
| 1252 | 1441 | /* Allocate CRC context from global pool */ |
|---|
| 1253 | | - crc_ctx_pkt = sp->u.scmd.ctx = |
|---|
| 1442 | + crc_ctx_pkt = sp->u.scmd.crc_ctx = |
|---|
| 1254 | 1443 | dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); |
|---|
| 1255 | 1444 | |
|---|
| 1256 | 1445 | if (!crc_ctx_pkt) |
|---|
| .. | .. |
|---|
| 1268 | 1457 | qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) |
|---|
| 1269 | 1458 | &crc_ctx_pkt->ref_tag, tot_prot_dsds); |
|---|
| 1270 | 1459 | |
|---|
| 1271 | | - cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); |
|---|
| 1272 | | - cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); |
|---|
| 1273 | | - cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW; |
|---|
| 1460 | + put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address); |
|---|
| 1461 | + cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); |
|---|
| 1274 | 1462 | |
|---|
| 1275 | 1463 | /* Determine SCSI command length -- align to 4 byte boundary */ |
|---|
| 1276 | 1464 | if (cmd->cmd_len > 16) { |
|---|
| .. | .. |
|---|
| 1296 | 1484 | int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun); |
|---|
| 1297 | 1485 | memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); |
|---|
| 1298 | 1486 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); |
|---|
| 1299 | | - cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( |
|---|
| 1300 | | - LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); |
|---|
| 1301 | | - cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32( |
|---|
| 1302 | | - MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); |
|---|
| 1487 | + put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF, |
|---|
| 1488 | + &cmd_pkt->fcp_cmnd_dseg_address); |
|---|
| 1303 | 1489 | fcp_cmnd->task_management = 0; |
|---|
| 1304 | 1490 | fcp_cmnd->task_attribute = TSK_SIMPLE; |
|---|
| 1305 | 1491 | |
|---|
| .. | .. |
|---|
| 1313 | 1499 | switch (scsi_get_prot_op(GET_CMD_SP(sp))) { |
|---|
| 1314 | 1500 | case SCSI_PROT_READ_INSERT: |
|---|
| 1315 | 1501 | case SCSI_PROT_WRITE_STRIP: |
|---|
| 1316 | | - total_bytes = data_bytes; |
|---|
| 1317 | | - data_bytes += dif_bytes; |
|---|
| 1318 | | - break; |
|---|
| 1502 | + total_bytes = data_bytes; |
|---|
| 1503 | + data_bytes += dif_bytes; |
|---|
| 1504 | + break; |
|---|
| 1319 | 1505 | |
|---|
| 1320 | 1506 | case SCSI_PROT_READ_STRIP: |
|---|
| 1321 | 1507 | case SCSI_PROT_WRITE_INSERT: |
|---|
| 1322 | 1508 | case SCSI_PROT_READ_PASS: |
|---|
| 1323 | 1509 | case SCSI_PROT_WRITE_PASS: |
|---|
| 1324 | | - total_bytes = data_bytes + dif_bytes; |
|---|
| 1325 | | - break; |
|---|
| 1510 | + total_bytes = data_bytes + dif_bytes; |
|---|
| 1511 | + break; |
|---|
| 1326 | 1512 | default: |
|---|
| 1327 | | - BUG(); |
|---|
| 1513 | + BUG(); |
|---|
| 1328 | 1514 | } |
|---|
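
As a worked example of the switch above, assume 512-byte sectors each carrying 8 bytes of protection information and a four-sector transfer, so `data_bytes` is 2048 and `dif_bytes` is 32. For `SCSI_PROT_READ_INSERT` and `SCSI_PROT_WRITE_STRIP` the HBA adds or removes the PI itself, so `total_bytes` stays at 2048; for the STRIP/INSERT/PASS cases in the second group the PI travels with the data and `total_bytes` becomes 2080. `total_bytes` is the value later written to `cmd_pkt->byte_count` and `fcp_dl` in this function.
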
| 1329 | 1515 | |
|---|
| 1330 | 1516 | if (!qla2x00_hba_err_chk_enabled(sp)) |
|---|
| .. | .. |
|---|
| 1341 | 1527 | } |
|---|
| 1342 | 1528 | |
|---|
| 1343 | 1529 | if (!bundling) { |
|---|
| 1344 | | - cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; |
|---|
| 1530 | + cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; |
|---|
| 1345 | 1531 | } else { |
|---|
| 1346 | 1532 | /* |
|---|
| 1347 | 1533 | * Configure Bundling if we need to fetch interlaving |
|---|
| .. | .. |
|---|
| 1351 | 1537 | crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); |
|---|
| 1352 | 1538 | crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds - |
|---|
| 1353 | 1539 | tot_prot_dsds); |
|---|
| 1354 | | - cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; |
|---|
| 1540 | + cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0]; |
|---|
| 1355 | 1541 | } |
|---|
| 1356 | 1542 | |
|---|
| 1357 | 1543 | /* Finish the common fields of CRC pkt */ |
|---|
| .. | .. |
|---|
| 1361 | 1547 | crc_ctx_pkt->guard_seed = cpu_to_le16(0); |
|---|
| 1362 | 1548 | /* Fibre channel byte count */ |
|---|
| 1363 | 1549 | cmd_pkt->byte_count = cpu_to_le32(total_bytes); |
|---|
| 1364 | | - fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + |
|---|
| 1550 | + fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + |
|---|
| 1365 | 1551 | additional_fcpcdb_len); |
|---|
| 1366 | 1552 | *fcp_dl = htonl(total_bytes); |
|---|
| 1367 | 1553 | |
|---|
| .. | .. |
|---|
| 1384 | 1570 | if (bundling && tot_prot_dsds) { |
|---|
| 1385 | 1571 | /* Walks dif segments */ |
|---|
| 1386 | 1572 | cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); |
|---|
| 1387 | | - cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; |
|---|
| 1573 | + cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd; |
|---|
| 1388 | 1574 | if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, |
|---|
| 1389 | 1575 | tot_prot_dsds, NULL)) |
|---|
| 1390 | 1576 | goto crc_queuing_error; |
|---|
| .. | .. |
|---|
| 1409 | 1595 | int nseg; |
|---|
| 1410 | 1596 | unsigned long flags; |
|---|
| 1411 | 1597 | uint32_t *clr_ptr; |
|---|
| 1412 | | - uint32_t index; |
|---|
| 1413 | 1598 | uint32_t handle; |
|---|
| 1414 | 1599 | struct cmd_type_7 *cmd_pkt; |
|---|
| 1415 | 1600 | uint16_t cnt; |
|---|
| 1416 | 1601 | uint16_t req_cnt; |
|---|
| 1417 | 1602 | uint16_t tot_dsds; |
|---|
| 1418 | 1603 | struct req_que *req = NULL; |
|---|
| 1419 | | - struct rsp_que *rsp = NULL; |
|---|
| 1420 | 1604 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); |
|---|
| 1421 | 1605 | struct scsi_qla_host *vha = sp->vha; |
|---|
| 1422 | 1606 | struct qla_hw_data *ha = vha->hw; |
|---|
| 1423 | 1607 | |
|---|
| 1424 | 1608 | /* Setup device pointers. */ |
|---|
| 1425 | 1609 | req = vha->req; |
|---|
| 1426 | | - rsp = req->rsp; |
|---|
| 1427 | 1610 | |
|---|
| 1428 | 1611 | /* So we know we haven't pci_map'ed anything yet */ |
|---|
| 1429 | 1612 | tot_dsds = 0; |
|---|
| 1430 | 1613 | |
|---|
| 1431 | 1614 | /* Send marker if required */ |
|---|
| 1432 | 1615 | if (vha->marker_needed != 0) { |
|---|
| 1433 | | - if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != |
|---|
| 1616 | + if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != |
|---|
| 1434 | 1617 | QLA_SUCCESS) |
|---|
| 1435 | 1618 | return QLA_FUNCTION_FAILED; |
|---|
| 1436 | 1619 | vha->marker_needed = 0; |
|---|
| .. | .. |
|---|
| 1439 | 1622 | /* Acquire ring specific lock */ |
|---|
| 1440 | 1623 | spin_lock_irqsave(&ha->hardware_lock, flags); |
|---|
| 1441 | 1624 | |
|---|
| 1442 | | - /* Check for room in outstanding command list. */ |
|---|
| 1443 | | - handle = req->current_outstanding_cmd; |
|---|
| 1444 | | - for (index = 1; index < req->num_outstanding_cmds; index++) { |
|---|
| 1445 | | - handle++; |
|---|
| 1446 | | - if (handle == req->num_outstanding_cmds) |
|---|
| 1447 | | - handle = 1; |
|---|
| 1448 | | - if (!req->outstanding_cmds[handle]) |
|---|
| 1449 | | - break; |
|---|
| 1450 | | - } |
|---|
| 1451 | | - if (index == req->num_outstanding_cmds) |
|---|
| 1625 | + handle = qla2xxx_get_next_handle(req); |
|---|
| 1626 | + if (handle == 0) |
|---|
| 1452 | 1627 | goto queuing_error; |
|---|
| 1453 | 1628 | |
|---|
| 1454 | 1629 | /* Map the sg table so we have an accurate count of sg entries needed */ |
|---|
| .. | .. |
|---|
| 1462 | 1637 | |
|---|
| 1463 | 1638 | tot_dsds = nseg; |
|---|
| 1464 | 1639 | req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); |
|---|
| 1640 | + |
|---|
| 1641 | + sp->iores.res_type = RESOURCE_INI; |
|---|
| 1642 | + sp->iores.iocb_cnt = req_cnt; |
|---|
| 1643 | + if (qla_get_iocbs(sp->qpair, &sp->iores)) |
|---|
| 1644 | + goto queuing_error; |
|---|
| 1645 | + |
|---|
| 1465 | 1646 | if (req->cnt < (req_cnt + 2)) { |
|---|
| 1466 | | - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : |
|---|
| 1467 | | - RD_REG_DWORD_RELAXED(req->req_q_out); |
|---|
| 1647 | + if (IS_SHADOW_REG_CAPABLE(ha)) { |
|---|
| 1648 | + cnt = *req->out_ptr; |
|---|
| 1649 | + } else { |
|---|
| 1650 | + cnt = rd_reg_dword_relaxed(req->req_q_out); |
|---|
| 1651 | + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) |
|---|
| 1652 | + goto queuing_error; |
|---|
| 1653 | + } |
|---|
| 1654 | + |
|---|
| 1468 | 1655 | if (req->ring_index < cnt) |
|---|
| 1469 | 1656 | req->cnt = cnt - req->ring_index; |
|---|
| 1470 | 1657 | else |
|---|
| .. | .. |
|---|
| 1482 | 1669 | req->cnt -= req_cnt; |
|---|
| 1483 | 1670 | |
|---|
| 1484 | 1671 | cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; |
|---|
| 1485 | | - cmd_pkt->handle = MAKE_HANDLE(req->id, handle); |
|---|
| 1672 | + cmd_pkt->handle = make_handle(req->id, handle); |
|---|
| 1486 | 1673 | |
|---|
| 1487 | 1674 | /* Zero out remaining portion of packet. */ |
|---|
| 1488 | 1675 | /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ |
|---|
| .. | .. |
|---|
| 1525 | 1712 | sp->flags |= SRB_DMA_VALID; |
|---|
| 1526 | 1713 | |
|---|
| 1527 | 1714 | /* Set chip new ring index. */ |
|---|
| 1528 | | - WRT_REG_DWORD(req->req_q_in, req->ring_index); |
|---|
| 1715 | + wrt_reg_dword(req->req_q_in, req->ring_index); |
|---|
| 1529 | 1716 | |
|---|
| 1530 | 1717 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
|---|
| 1531 | 1718 | return QLA_SUCCESS; |
|---|
| .. | .. |
|---|
| 1534 | 1721 | if (tot_dsds) |
|---|
| 1535 | 1722 | scsi_dma_unmap(cmd); |
|---|
| 1536 | 1723 | |
|---|
| 1724 | + qla_put_iocbs(sp->qpair, &sp->iores); |
|---|
| 1537 | 1725 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
|---|
| 1538 | 1726 | |
|---|
| 1539 | 1727 | return QLA_FUNCTION_FAILED; |
|---|
| .. | .. |
|---|
| 1551 | 1739 | int nseg; |
|---|
| 1552 | 1740 | unsigned long flags; |
|---|
| 1553 | 1741 | uint32_t *clr_ptr; |
|---|
| 1554 | | - uint32_t index; |
|---|
| 1555 | 1742 | uint32_t handle; |
|---|
| 1556 | 1743 | uint16_t cnt; |
|---|
| 1557 | 1744 | uint16_t req_cnt = 0; |
|---|
| .. | .. |
|---|
| 1583 | 1770 | |
|---|
| 1584 | 1771 | /* Send marker if required */ |
|---|
| 1585 | 1772 | if (vha->marker_needed != 0) { |
|---|
| 1586 | | - if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != |
|---|
| 1773 | + if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != |
|---|
| 1587 | 1774 | QLA_SUCCESS) |
|---|
| 1588 | 1775 | return QLA_FUNCTION_FAILED; |
|---|
| 1589 | 1776 | vha->marker_needed = 0; |
|---|
| .. | .. |
|---|
| 1592 | 1779 | /* Acquire ring specific lock */ |
|---|
| 1593 | 1780 | spin_lock_irqsave(&ha->hardware_lock, flags); |
|---|
| 1594 | 1781 | |
|---|
| 1595 | | - /* Check for room in outstanding command list. */ |
|---|
| 1596 | | - handle = req->current_outstanding_cmd; |
|---|
| 1597 | | - for (index = 1; index < req->num_outstanding_cmds; index++) { |
|---|
| 1598 | | - handle++; |
|---|
| 1599 | | - if (handle == req->num_outstanding_cmds) |
|---|
| 1600 | | - handle = 1; |
|---|
| 1601 | | - if (!req->outstanding_cmds[handle]) |
|---|
| 1602 | | - break; |
|---|
| 1603 | | - } |
|---|
| 1604 | | - |
|---|
| 1605 | | - if (index == req->num_outstanding_cmds) |
|---|
| 1782 | + handle = qla2xxx_get_next_handle(req); |
|---|
| 1783 | + if (handle == 0) |
|---|
| 1606 | 1784 | goto queuing_error; |
|---|
| 1607 | 1785 | |
|---|
| 1608 | 1786 | /* Compute number of required data segments */ |
|---|
| .. | .. |
|---|
| 1657 | 1835 | /* Total Data and protection sg segment(s) */ |
|---|
| 1658 | 1836 | tot_prot_dsds = nseg; |
|---|
| 1659 | 1837 | tot_dsds += nseg; |
|---|
| 1838 | + |
|---|
| 1839 | + sp->iores.res_type = RESOURCE_INI; |
|---|
| 1840 | + sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); |
|---|
| 1841 | + if (qla_get_iocbs(sp->qpair, &sp->iores)) |
|---|
| 1842 | + goto queuing_error; |
|---|
| 1843 | + |
|---|
| 1660 | 1844 | if (req->cnt < (req_cnt + 2)) { |
|---|
| 1661 | | - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : |
|---|
| 1662 | | - RD_REG_DWORD_RELAXED(req->req_q_out); |
|---|
| 1845 | + if (IS_SHADOW_REG_CAPABLE(ha)) { |
|---|
| 1846 | + cnt = *req->out_ptr; |
|---|
| 1847 | + } else { |
|---|
| 1848 | + cnt = rd_reg_dword_relaxed(req->req_q_out); |
|---|
| 1849 | + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) |
|---|
| 1850 | + goto queuing_error; |
|---|
| 1851 | + } |
|---|
| 1663 | 1852 | if (req->ring_index < cnt) |
|---|
| 1664 | 1853 | req->cnt = cnt - req->ring_index; |
|---|
| 1665 | 1854 | else |
|---|
| .. | .. |
|---|
| 1680 | 1869 | |
|---|
| 1681 | 1870 | /* Fill-in common area */ |
|---|
| 1682 | 1871 | cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; |
|---|
| 1683 | | - cmd_pkt->handle = MAKE_HANDLE(req->id, handle); |
|---|
| 1872 | + cmd_pkt->handle = make_handle(req->id, handle); |
|---|
| 1684 | 1873 | |
|---|
| 1685 | 1874 | clr_ptr = (uint32_t *)cmd_pkt + 2; |
|---|
| 1686 | 1875 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); |
|---|
| .. | .. |
|---|
| 1718 | 1907 | req->ring_ptr++; |
|---|
| 1719 | 1908 | |
|---|
| 1720 | 1909 | /* Set chip new ring index. */ |
|---|
| 1721 | | - WRT_REG_DWORD(req->req_q_in, req->ring_index); |
|---|
| 1910 | + wrt_reg_dword(req->req_q_in, req->ring_index); |
|---|
| 1722 | 1911 | |
|---|
| 1723 | 1912 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
|---|
| 1724 | 1913 | |
|---|
| .. | .. |
|---|
| 1731 | 1920 | } |
|---|
| 1732 | 1921 | /* Cleanup will be performed by the caller (queuecommand) */ |
|---|
| 1733 | 1922 | |
|---|
| 1923 | + qla_put_iocbs(sp->qpair, &sp->iores); |
|---|
| 1734 | 1924 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
|---|
| 1925 | + |
|---|
| 1735 | 1926 | return QLA_FUNCTION_FAILED; |
|---|
| 1736 | 1927 | } |
|---|
| 1737 | 1928 | |
|---|
| .. | .. |
|---|
| 1747 | 1938 | int nseg; |
|---|
| 1748 | 1939 | unsigned long flags; |
|---|
| 1749 | 1940 | uint32_t *clr_ptr; |
|---|
| 1750 | | - uint32_t index; |
|---|
| 1751 | 1941 | uint32_t handle; |
|---|
| 1752 | 1942 | struct cmd_type_7 *cmd_pkt; |
|---|
| 1753 | 1943 | uint16_t cnt; |
|---|
| 1754 | 1944 | uint16_t req_cnt; |
|---|
| 1755 | 1945 | uint16_t tot_dsds; |
|---|
| 1756 | 1946 | struct req_que *req = NULL; |
|---|
| 1757 | | - struct rsp_que *rsp = NULL; |
|---|
| 1758 | 1947 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); |
|---|
| 1759 | 1948 | struct scsi_qla_host *vha = sp->fcport->vha; |
|---|
| 1760 | 1949 | struct qla_hw_data *ha = vha->hw; |
|---|
| .. | .. |
|---|
| 1764 | 1953 | spin_lock_irqsave(&qpair->qp_lock, flags); |
|---|
| 1765 | 1954 | |
|---|
| 1766 | 1955 | /* Setup qpair pointers */ |
|---|
| 1767 | | - rsp = qpair->rsp; |
|---|
| 1768 | 1956 | req = qpair->req; |
|---|
| 1769 | 1957 | |
|---|
| 1770 | 1958 | /* So we know we haven't pci_map'ed anything yet */ |
|---|
| .. | .. |
|---|
| 1772 | 1960 | |
|---|
| 1773 | 1961 | /* Send marker if required */ |
|---|
| 1774 | 1962 | if (vha->marker_needed != 0) { |
|---|
| 1775 | | - if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != |
|---|
| 1963 | + if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != |
|---|
| 1776 | 1964 | QLA_SUCCESS) { |
|---|
| 1777 | 1965 | spin_unlock_irqrestore(&qpair->qp_lock, flags); |
|---|
| 1778 | 1966 | return QLA_FUNCTION_FAILED; |
|---|
| .. | .. |
|---|
| 1780 | 1968 | vha->marker_needed = 0; |
|---|
| 1781 | 1969 | } |
|---|
| 1782 | 1970 | |
|---|
| 1783 | | - /* Check for room in outstanding command list. */ |
|---|
| 1784 | | - handle = req->current_outstanding_cmd; |
|---|
| 1785 | | - for (index = 1; index < req->num_outstanding_cmds; index++) { |
|---|
| 1786 | | - handle++; |
|---|
| 1787 | | - if (handle == req->num_outstanding_cmds) |
|---|
| 1788 | | - handle = 1; |
|---|
| 1789 | | - if (!req->outstanding_cmds[handle]) |
|---|
| 1790 | | - break; |
|---|
| 1791 | | - } |
|---|
| 1792 | | - if (index == req->num_outstanding_cmds) |
|---|
| 1971 | + handle = qla2xxx_get_next_handle(req); |
|---|
| 1972 | + if (handle == 0) |
|---|
| 1793 | 1973 | goto queuing_error; |
|---|
| 1794 | 1974 | |
|---|
| 1795 | 1975 | /* Map the sg table so we have an accurate count of sg entries needed */ |
|---|
| .. | .. |
|---|
| 1803 | 1983 | |
|---|
| 1804 | 1984 | tot_dsds = nseg; |
|---|
| 1805 | 1985 | req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); |
|---|
| 1986 | + |
|---|
| 1987 | + sp->iores.res_type = RESOURCE_INI; |
|---|
| 1988 | + sp->iores.iocb_cnt = req_cnt; |
|---|
| 1989 | + if (qla_get_iocbs(sp->qpair, &sp->iores)) |
|---|
| 1990 | + goto queuing_error; |
|---|
| 1991 | + |
|---|
| 1806 | 1992 | if (req->cnt < (req_cnt + 2)) { |
|---|
| 1807 | | - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : |
|---|
| 1808 | | - RD_REG_DWORD_RELAXED(req->req_q_out); |
|---|
| 1993 | + if (IS_SHADOW_REG_CAPABLE(ha)) { |
|---|
| 1994 | + cnt = *req->out_ptr; |
|---|
| 1995 | + } else { |
|---|
| 1996 | + cnt = rd_reg_dword_relaxed(req->req_q_out); |
|---|
| 1997 | + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) |
|---|
| 1998 | + goto queuing_error; |
|---|
| 1999 | + } |
|---|
| 2000 | + |
|---|
| 1809 | 2001 | if (req->ring_index < cnt) |
|---|
| 1810 | 2002 | req->cnt = cnt - req->ring_index; |
|---|
| 1811 | 2003 | else |
|---|
| .. | .. |
|---|
| 1823 | 2015 | req->cnt -= req_cnt; |
|---|
| 1824 | 2016 | |
|---|
| 1825 | 2017 | cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; |
|---|
| 1826 | | - cmd_pkt->handle = MAKE_HANDLE(req->id, handle); |
|---|
| 2018 | + cmd_pkt->handle = make_handle(req->id, handle); |
|---|
| 1827 | 2019 | |
|---|
| 1828 | 2020 | /* Zero out remaining portion of packet. */ |
|---|
| 1829 | 2021 | /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ |
|---|
| .. | .. |
|---|
| 1866 | 2058 | sp->flags |= SRB_DMA_VALID; |
|---|
| 1867 | 2059 | |
|---|
| 1868 | 2060 | /* Set chip new ring index. */ |
|---|
| 1869 | | - WRT_REG_DWORD(req->req_q_in, req->ring_index); |
|---|
| 2061 | + wrt_reg_dword(req->req_q_in, req->ring_index); |
|---|
| 1870 | 2062 | |
|---|
| 1871 | 2063 | spin_unlock_irqrestore(&qpair->qp_lock, flags); |
|---|
| 1872 | 2064 | return QLA_SUCCESS; |
|---|
| .. | .. |
|---|
| 1875 | 2067 | if (tot_dsds) |
|---|
| 1876 | 2068 | scsi_dma_unmap(cmd); |
|---|
| 1877 | 2069 | |
|---|
| 2070 | + qla_put_iocbs(sp->qpair, &sp->iores); |
|---|
| 1878 | 2071 | spin_unlock_irqrestore(&qpair->qp_lock, flags); |
|---|
| 1879 | 2072 | |
|---|
| 1880 | 2073 | return QLA_FUNCTION_FAILED; |
|---|
| .. | .. |
|---|
| 1893 | 2086 | int nseg; |
|---|
| 1894 | 2087 | unsigned long flags; |
|---|
| 1895 | 2088 | uint32_t *clr_ptr; |
|---|
| 1896 | | - uint32_t index; |
|---|
| 1897 | 2089 | uint32_t handle; |
|---|
| 1898 | 2090 | uint16_t cnt; |
|---|
| 1899 | 2091 | uint16_t req_cnt = 0; |
|---|
| .. | .. |
|---|
| 1940 | 2132 | |
|---|
| 1941 | 2133 | /* Send marker if required */ |
|---|
| 1942 | 2134 | if (vha->marker_needed != 0) { |
|---|
| 1943 | | - if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != |
|---|
| 2135 | + if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != |
|---|
| 1944 | 2136 | QLA_SUCCESS) { |
|---|
| 1945 | 2137 | spin_unlock_irqrestore(&qpair->qp_lock, flags); |
|---|
| 1946 | 2138 | return QLA_FUNCTION_FAILED; |
|---|
| .. | .. |
|---|
| 1948 | 2140 | vha->marker_needed = 0; |
|---|
| 1949 | 2141 | } |
|---|
| 1950 | 2142 | |
|---|
| 1951 | | - /* Check for room in outstanding command list. */ |
|---|
| 1952 | | - handle = req->current_outstanding_cmd; |
|---|
| 1953 | | - for (index = 1; index < req->num_outstanding_cmds; index++) { |
|---|
| 1954 | | - handle++; |
|---|
| 1955 | | - if (handle == req->num_outstanding_cmds) |
|---|
| 1956 | | - handle = 1; |
|---|
| 1957 | | - if (!req->outstanding_cmds[handle]) |
|---|
| 1958 | | - break; |
|---|
| 1959 | | - } |
|---|
| 1960 | | - |
|---|
| 1961 | | - if (index == req->num_outstanding_cmds) |
|---|
| 2143 | + handle = qla2xxx_get_next_handle(req); |
|---|
| 2144 | + if (handle == 0) |
|---|
| 1962 | 2145 | goto queuing_error; |
|---|
| 1963 | 2146 | |
|---|
| 1964 | 2147 | /* Compute number of required data segments */ |
|---|
| .. | .. |
|---|
| 2013 | 2196 | /* Total Data and protection sg segment(s) */ |
|---|
| 2014 | 2197 | tot_prot_dsds = nseg; |
|---|
| 2015 | 2198 | tot_dsds += nseg; |
|---|
| 2199 | + |
|---|
| 2200 | + sp->iores.res_type = RESOURCE_INI; |
|---|
| 2201 | + sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); |
|---|
| 2202 | + if (qla_get_iocbs(sp->qpair, &sp->iores)) |
|---|
| 2203 | + goto queuing_error; |
|---|
| 2204 | + |
|---|
| 2016 | 2205 | if (req->cnt < (req_cnt + 2)) { |
|---|
| 2017 | | - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : |
|---|
| 2018 | | - RD_REG_DWORD_RELAXED(req->req_q_out); |
|---|
| 2206 | + if (IS_SHADOW_REG_CAPABLE(ha)) { |
|---|
| 2207 | + cnt = *req->out_ptr; |
|---|
| 2208 | + } else { |
|---|
| 2209 | + cnt = rd_reg_dword_relaxed(req->req_q_out); |
|---|
| 2210 | + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) |
|---|
| 2211 | + goto queuing_error; |
|---|
| 2212 | + } |
|---|
| 2213 | + |
|---|
| 2019 | 2214 | if (req->ring_index < cnt) |
|---|
| 2020 | 2215 | req->cnt = cnt - req->ring_index; |
|---|
| 2021 | 2216 | else |
|---|
| .. | .. |
|---|
| 2036 | 2231 | |
|---|
| 2037 | 2232 | /* Fill-in common area */ |
|---|
| 2038 | 2233 | cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; |
|---|
| 2039 | | - cmd_pkt->handle = MAKE_HANDLE(req->id, handle); |
|---|
| 2234 | + cmd_pkt->handle = make_handle(req->id, handle); |
|---|
| 2040 | 2235 | |
|---|
| 2041 | 2236 | clr_ptr = (uint32_t *)cmd_pkt + 2; |
|---|
| 2042 | 2237 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); |
|---|
| .. | .. |
|---|
| 2072 | 2267 | req->ring_ptr++; |
|---|
| 2073 | 2268 | |
|---|
| 2074 | 2269 | /* Set chip new ring index. */ |
|---|
| 2075 | | - WRT_REG_DWORD(req->req_q_in, req->ring_index); |
|---|
| 2270 | + wrt_reg_dword(req->req_q_in, req->ring_index); |
|---|
| 2076 | 2271 | |
|---|
| 2077 | 2272 | /* Manage unprocessed RIO/ZIO commands in response queue. */ |
|---|
| 2078 | 2273 | if (vha->flags.process_response_queue && |
|---|
| .. | .. |
|---|
| 2090 | 2285 | } |
|---|
| 2091 | 2286 | /* Cleanup will be performed by the caller (queuecommand) */ |
|---|
| 2092 | 2287 | |
|---|
| 2288 | + qla_put_iocbs(sp->qpair, &sp->iores); |
|---|
| 2093 | 2289 | spin_unlock_irqrestore(&qpair->qp_lock, flags); |
|---|
| 2290 | + |
|---|
| 2094 | 2291 | return QLA_FUNCTION_FAILED; |
|---|
| 2095 | 2292 | } |
|---|
| 2096 | 2293 | |
|---|
| .. | .. |
|---|
| 2105 | 2302 | struct qla_hw_data *ha = vha->hw; |
|---|
| 2106 | 2303 | struct req_que *req = qpair->req; |
|---|
| 2107 | 2304 | device_reg_t *reg = ISP_QUE_REG(ha, req->id); |
|---|
| 2108 | | - uint32_t index, handle; |
|---|
| 2305 | + uint32_t handle; |
|---|
| 2109 | 2306 | request_t *pkt; |
|---|
| 2110 | 2307 | uint16_t cnt, req_cnt; |
|---|
| 2111 | 2308 | |
|---|
| .. | .. |
|---|
| 2122 | 2319 | if (req->cnt < req_cnt + 2) { |
|---|
| 2123 | 2320 | if (qpair->use_shadow_reg) |
|---|
| 2124 | 2321 | cnt = *req->out_ptr; |
|---|
| 2125 | | - else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) |
|---|
| 2126 | | - cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out); |
|---|
| 2322 | + else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || |
|---|
| 2323 | + IS_QLA28XX(ha)) |
|---|
| 2324 | + cnt = rd_reg_dword(&reg->isp25mq.req_q_out); |
|---|
| 2127 | 2325 | else if (IS_P3P_TYPE(ha)) |
|---|
| 2128 | | - cnt = RD_REG_DWORD(&reg->isp82.req_q_out); |
|---|
| 2326 | + cnt = rd_reg_dword(&reg->isp82.req_q_out); |
|---|
| 2129 | 2327 | else if (IS_FWI2_CAPABLE(ha)) |
|---|
| 2130 | | - cnt = RD_REG_DWORD(&reg->isp24.req_q_out); |
|---|
| 2328 | + cnt = rd_reg_dword(&reg->isp24.req_q_out); |
|---|
| 2131 | 2329 | else if (IS_QLAFX00(ha)) |
|---|
| 2132 | | - cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out); |
|---|
| 2330 | + cnt = rd_reg_dword(&reg->ispfx00.req_q_out); |
|---|
| 2133 | 2331 | else |
|---|
| 2134 | 2332 | cnt = qla2x00_debounce_register( |
|---|
| 2135 | 2333 | ISP_REQ_Q_OUT(ha, &reg->isp)); |
|---|
| 2334 | + |
|---|
| 2335 | + if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) { |
|---|
| 2336 | + qla_schedule_eeh_work(vha); |
|---|
| 2337 | + return NULL; |
|---|
| 2338 | + } |
|---|
| 2136 | 2339 | |
|---|
| 2137 | 2340 | if (req->ring_index < cnt) |
|---|
| 2138 | 2341 | req->cnt = cnt - req->ring_index; |
|---|
| .. | .. |
|---|
| 2144 | 2347 | goto queuing_error; |
|---|
| 2145 | 2348 | |
|---|
| 2146 | 2349 | if (sp) { |
|---|
| 2147 | | - /* Check for room in outstanding command list. */ |
|---|
| 2148 | | - handle = req->current_outstanding_cmd; |
|---|
| 2149 | | - for (index = 1; index < req->num_outstanding_cmds; index++) { |
|---|
| 2150 | | - handle++; |
|---|
| 2151 | | - if (handle == req->num_outstanding_cmds) |
|---|
| 2152 | | - handle = 1; |
|---|
| 2153 | | - if (!req->outstanding_cmds[handle]) |
|---|
| 2154 | | - break; |
|---|
| 2155 | | - } |
|---|
| 2156 | | - if (index == req->num_outstanding_cmds) { |
|---|
| 2350 | + handle = qla2xxx_get_next_handle(req); |
|---|
| 2351 | + if (handle == 0) { |
|---|
| 2157 | 2352 | ql_log(ql_log_warn, vha, 0x700b, |
|---|
| 2158 | 2353 | "No room on outstanding cmd array.\n"); |
|---|
| 2159 | 2354 | goto queuing_error; |
|---|
| .. | .. |
|---|
| 2170 | 2365 | pkt = req->ring_ptr; |
|---|
| 2171 | 2366 | memset(pkt, 0, REQUEST_ENTRY_SIZE); |
|---|
| 2172 | 2367 | if (IS_QLAFX00(ha)) { |
|---|
| 2173 | | - WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt); |
|---|
| 2174 | | - WRT_REG_WORD((void __iomem *)&pkt->handle, handle); |
|---|
| 2368 | + wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt); |
|---|
| 2369 | + wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle); |
|---|
| 2175 | 2370 | } else { |
|---|
| 2176 | 2371 | pkt->entry_count = req_cnt; |
|---|
| 2177 | 2372 | pkt->handle = handle; |
|---|
| .. | .. |
|---|
| 2208 | 2403 | |
|---|
| 2209 | 2404 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; |
|---|
| 2210 | 2405 | logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); |
|---|
| 2211 | | - if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) |
|---|
| 2212 | | - logio->control_flags |= LCF_NVME_PRLI; |
|---|
| 2406 | + if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) { |
|---|
| 2407 | + logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI); |
|---|
| 2408 | + if (sp->vha->flags.nvme_first_burst) |
|---|
| 2409 | + logio->io_parameter[0] = |
|---|
| 2410 | + cpu_to_le32(NVME_PRLI_SP_FIRST_BURST); |
|---|
| 2411 | + if (sp->vha->flags.nvme2_enabled) { |
|---|
| 2412 | + /* Set service parameter BIT_8 for SLER support */ |
|---|
| 2413 | + logio->io_parameter[0] |= |
|---|
| 2414 | + cpu_to_le32(NVME_PRLI_SP_SLER); |
|---|
| 2415 | + /* Set service parameter BIT_9 for PI control support */ |
|---|
| 2416 | + logio->io_parameter[0] |= |
|---|
| 2417 | + cpu_to_le32(NVME_PRLI_SP_PI_CTRL); |
|---|
| 2418 | + } |
|---|
| 2419 | + } |
|---|
| 2213 | 2420 | |
|---|
| 2214 | 2421 | logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
|---|
| 2215 | 2422 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; |
|---|
| .. | .. |
|---|
| 2224 | 2431 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
|---|
| 2225 | 2432 | |
|---|
| 2226 | 2433 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; |
|---|
| 2434 | + logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); |
|---|
| 2435 | + |
|---|
| 2227 | 2436 | if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) { |
|---|
| 2228 | 2437 | logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); |
|---|
| 2229 | 2438 | } else { |
|---|
| .. | .. |
|---|
| 2267 | 2476 | static void |
|---|
| 2268 | 2477 | qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) |
|---|
| 2269 | 2478 | { |
|---|
| 2479 | + u16 control_flags = LCF_COMMAND_LOGO; |
|---|
| 2270 | 2480 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; |
|---|
| 2271 | | - logio->control_flags = |
|---|
| 2272 | | - cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); |
|---|
| 2273 | | - if (!sp->fcport->se_sess || |
|---|
| 2274 | | - !sp->fcport->keep_nport_handle) |
|---|
| 2275 | | - logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); |
|---|
| 2481 | + |
|---|
| 2482 | + if (sp->fcport->explicit_logout) { |
|---|
| 2483 | + control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; |
|---|
| 2484 | + } else { |
|---|
| 2485 | + control_flags |= LCF_IMPL_LOGO; |
|---|
| 2486 | + |
|---|
| 2487 | + if (!sp->fcport->keep_nport_handle) |
|---|
| 2488 | + control_flags |= LCF_FREE_NPORT; |
|---|
| 2489 | + } |
|---|
| 2490 | + |
|---|
| 2491 | + logio->control_flags = cpu_to_le16(control_flags); |
|---|
| 2276 | 2492 | logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
|---|
| 2277 | 2493 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; |
|---|
| 2278 | 2494 | logio->port_id[1] = sp->fcport->d_id.b.area; |
|---|
| .. | .. |
|---|
| 2289 | 2505 | SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); |
|---|
| 2290 | 2506 | mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); |
|---|
| 2291 | 2507 | mbx->mb1 = HAS_EXTENDED_IDS(ha) ? |
|---|
| 2292 | | - cpu_to_le16(sp->fcport->loop_id): |
|---|
| 2508 | + cpu_to_le16(sp->fcport->loop_id) : |
|---|
| 2293 | 2509 | cpu_to_le16(sp->fcport->loop_id << 8); |
|---|
| 2294 | 2510 | mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); |
|---|
| 2295 | 2511 | mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | |
|---|
| .. | .. |
|---|
| 2344 | 2560 | |
|---|
| 2345 | 2561 | tsk->entry_type = TSK_MGMT_IOCB_TYPE; |
|---|
| 2346 | 2562 | tsk->entry_count = 1; |
|---|
| 2347 | | - tsk->handle = MAKE_HANDLE(req->id, tsk->handle); |
|---|
| 2563 | + tsk->handle = make_handle(req->id, tsk->handle); |
|---|
| 2348 | 2564 | tsk->nport_handle = cpu_to_le16(fcport->loop_id); |
|---|
| 2349 | 2565 | tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
|---|
| 2350 | 2566 | tsk->control_flags = cpu_to_le32(flags); |
|---|
| .. | .. |
|---|
| 2360 | 2576 | } |
|---|
| 2361 | 2577 | } |
|---|
| 2362 | 2578 | |
|---|
| 2363 | | -static void |
|---|
| 2364 | | -qla2x00_els_dcmd_sp_free(void *data) |
|---|
| 2579 | +void qla2x00_init_timer(srb_t *sp, unsigned long tmo) |
|---|
| 2365 | 2580 | { |
|---|
| 2366 | | - srb_t *sp = data; |
|---|
| 2581 | + timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); |
|---|
| 2582 | + sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; |
|---|
| 2583 | + sp->free = qla2x00_sp_free; |
|---|
| 2584 | + if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) |
|---|
| 2585 | + init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); |
|---|
| 2586 | + sp->start_timer = 1; |
|---|
| 2587 | +} |
|---|
| 2588 | + |
|---|
| 2589 | +static void qla2x00_els_dcmd_sp_free(srb_t *sp) |
|---|
| 2590 | +{ |
|---|
| 2367 | 2591 | struct srb_iocb *elsio = &sp->u.iocb_cmd; |
|---|
| 2368 | 2592 | |
|---|
| 2369 | 2593 | kfree(sp->fcport); |
|---|
| .. | .. |
|---|
| 2384 | 2608 | fc_port_t *fcport = sp->fcport; |
|---|
| 2385 | 2609 | struct scsi_qla_host *vha = sp->vha; |
|---|
| 2386 | 2610 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
|---|
| 2611 | + unsigned long flags = 0; |
|---|
| 2612 | + int res, h; |
|---|
| 2387 | 2613 | |
|---|
| 2388 | 2614 | ql_dbg(ql_dbg_io, vha, 0x3069, |
|---|
| 2389 | 2615 | "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", |
|---|
| 2390 | 2616 | sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, |
|---|
| 2391 | 2617 | fcport->d_id.b.al_pa); |
|---|
| 2392 | 2618 | |
|---|
| 2393 | | - complete(&lio->u.els_logo.comp); |
|---|
| 2619 | + /* Abort the exchange */ |
|---|
| 2620 | + res = qla24xx_async_abort_cmd(sp, false); |
|---|
| 2621 | + if (res) { |
|---|
| 2622 | + ql_dbg(ql_dbg_io, vha, 0x3070, |
|---|
| 2623 | + "mbx abort_command failed.\n"); |
|---|
| 2624 | + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); |
|---|
| 2625 | + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { |
|---|
| 2626 | + if (sp->qpair->req->outstanding_cmds[h] == sp) { |
|---|
| 2627 | + sp->qpair->req->outstanding_cmds[h] = NULL; |
|---|
| 2628 | + break; |
|---|
| 2629 | + } |
|---|
| 2630 | + } |
|---|
| 2631 | + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); |
|---|
| 2632 | + complete(&lio->u.els_logo.comp); |
|---|
| 2633 | + } else { |
|---|
| 2634 | + ql_dbg(ql_dbg_io, vha, 0x3071, |
|---|
| 2635 | + "mbx abort_command success.\n"); |
|---|
| 2636 | + } |
|---|
| 2394 | 2637 | } |
|---|
| 2395 | 2638 | |
|---|
| 2396 | | -static void |
|---|
| 2397 | | -qla2x00_els_dcmd_sp_done(void *ptr, int res) |
|---|
| 2639 | +static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) |
|---|
| 2398 | 2640 | { |
|---|
| 2399 | | - srb_t *sp = ptr; |
|---|
| 2400 | 2641 | fc_port_t *fcport = sp->fcport; |
|---|
| 2401 | 2642 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
|---|
| 2402 | 2643 | struct scsi_qla_host *vha = sp->vha; |
|---|
| .. | .. |
|---|
| 2474 | 2715 | |
|---|
| 2475 | 2716 | memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, |
|---|
| 2476 | 2717 | sizeof(struct els_logo_payload)); |
|---|
| 2718 | + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:"); |
|---|
| 2719 | + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a, |
|---|
| 2720 | + elsio->u.els_logo.els_logo_pyld, |
|---|
| 2721 | + sizeof(*elsio->u.els_logo.els_logo_pyld)); |
|---|
| 2477 | 2722 | |
|---|
| 2478 | 2723 | rval = qla2x00_start_sp(sp); |
|---|
| 2479 | 2724 | if (rval != QLA_SUCCESS) { |
|---|
| .. | .. |
|---|
| 2504 | 2749 | els_iocb->entry_status = 0; |
|---|
| 2505 | 2750 | els_iocb->handle = sp->handle; |
|---|
| 2506 | 2751 | els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
|---|
| 2507 | | - els_iocb->tx_dsd_count = 1; |
|---|
| 2752 | + els_iocb->tx_dsd_count = cpu_to_le16(1); |
|---|
| 2508 | 2753 | els_iocb->vp_index = vha->vp_idx; |
|---|
| 2509 | 2754 | els_iocb->sof_type = EST_SOFI3; |
|---|
| 2510 | 2755 | els_iocb->rx_dsd_count = 0; |
|---|
| 2511 | 2756 | els_iocb->opcode = elsio->u.els_logo.els_cmd; |
|---|
| 2512 | 2757 | |
|---|
| 2513 | | - els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; |
|---|
| 2514 | | - els_iocb->port_id[1] = sp->fcport->d_id.b.area; |
|---|
| 2515 | | - els_iocb->port_id[2] = sp->fcport->d_id.b.domain; |
|---|
| 2516 | | - els_iocb->s_id[0] = vha->d_id.b.al_pa; |
|---|
| 2517 | | - els_iocb->s_id[1] = vha->d_id.b.area; |
|---|
| 2518 | | - els_iocb->s_id[2] = vha->d_id.b.domain; |
|---|
| 2519 | | - els_iocb->control_flags = 0; |
|---|
| 2758 | + els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; |
|---|
| 2759 | + els_iocb->d_id[1] = sp->fcport->d_id.b.area; |
|---|
| 2760 | + els_iocb->d_id[2] = sp->fcport->d_id.b.domain; |
|---|
| 2761 | + /* For SID the byte order is different than DID */ |
|---|
| 2762 | + els_iocb->s_id[1] = vha->d_id.b.al_pa; |
|---|
| 2763 | + els_iocb->s_id[2] = vha->d_id.b.area; |
|---|
| 2764 | + els_iocb->s_id[0] = vha->d_id.b.domain; |
|---|
| 2520 | 2765 | |
|---|
| 2521 | 2766 | if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { |
|---|
| 2767 | + els_iocb->control_flags = 0; |
|---|
| 2522 | 2768 | els_iocb->tx_byte_count = els_iocb->tx_len = |
|---|
| 2523 | | - sizeof(struct els_plogi_payload); |
|---|
| 2524 | | - els_iocb->tx_address[0] = |
|---|
| 2525 | | - cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma)); |
|---|
| 2526 | | - els_iocb->tx_address[1] = |
|---|
| 2527 | | - cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma)); |
|---|
| 2528 | | - |
|---|
| 2529 | | - els_iocb->rx_dsd_count = 1; |
|---|
| 2769 | + cpu_to_le32(sizeof(struct els_plogi_payload)); |
|---|
| 2770 | + put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, |
|---|
| 2771 | + &els_iocb->tx_address); |
|---|
| 2772 | + els_iocb->rx_dsd_count = cpu_to_le16(1); |
|---|
| 2530 | 2773 | els_iocb->rx_byte_count = els_iocb->rx_len = |
|---|
| 2531 | | - sizeof(struct els_plogi_payload); |
|---|
| 2532 | | - els_iocb->rx_address[0] = |
|---|
| 2533 | | - cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma)); |
|---|
| 2534 | | - els_iocb->rx_address[1] = |
|---|
| 2535 | | - cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma)); |
|---|
| 2774 | + cpu_to_le32(sizeof(struct els_plogi_payload)); |
|---|
| 2775 | + put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, |
|---|
| 2776 | + &els_iocb->rx_address); |
|---|
| 2536 | 2777 | |
|---|
| 2537 | 2778 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, |
|---|
| 2538 | 2779 | "PLOGI ELS IOCB:\n"); |
|---|
| .. | .. |
|---|
| 2540 | 2781 | (uint8_t *)els_iocb, |
|---|
| 2541 | 2782 | sizeof(*els_iocb)); |
|---|
| 2542 | 2783 | } else { |
|---|
| 2543 | | - els_iocb->tx_byte_count = sizeof(struct els_logo_payload); |
|---|
| 2544 | | - els_iocb->tx_address[0] = |
|---|
| 2545 | | - cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma)); |
|---|
| 2546 | | - els_iocb->tx_address[1] = |
|---|
| 2547 | | - cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma)); |
|---|
| 2784 | + els_iocb->control_flags = cpu_to_le16(1 << 13); |
|---|
| 2785 | + els_iocb->tx_byte_count = |
|---|
| 2786 | + cpu_to_le32(sizeof(struct els_logo_payload)); |
|---|
| 2787 | + put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, |
|---|
| 2788 | + &els_iocb->tx_address); |
|---|
| 2548 | 2789 | els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); |
|---|
| 2549 | 2790 | |
|---|
| 2550 | 2791 | els_iocb->rx_byte_count = 0; |
|---|
| 2551 | | - els_iocb->rx_address[0] = 0; |
|---|
| 2552 | | - els_iocb->rx_address[1] = 0; |
|---|
| 2792 | + els_iocb->rx_address = 0; |
|---|
| 2553 | 2793 | els_iocb->rx_len = 0; |
|---|
| 2794 | + ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076, |
|---|
| 2795 | + "LOGO ELS IOCB:"); |
|---|
| 2796 | + ql_dump_buffer(ql_log_info, vha, 0x010b, |
|---|
| 2797 | + els_iocb, |
|---|
| 2798 | + sizeof(*els_iocb)); |
|---|
| 2554 | 2799 | } |
|---|
| 2555 | 2800 | |
|---|
| 2556 | 2801 | sp->vha->qla_stats.control_requests++; |
|---|
| .. | .. |
|---|
| 2562 | 2807 | srb_t *sp = data; |
|---|
| 2563 | 2808 | fc_port_t *fcport = sp->fcport; |
|---|
| 2564 | 2809 | struct scsi_qla_host *vha = sp->vha; |
|---|
| 2565 | | - struct qla_hw_data *ha = vha->hw; |
|---|
| 2566 | 2810 | unsigned long flags = 0; |
|---|
| 2567 | | - int res; |
|---|
| 2811 | + int res, h; |
|---|
| 2568 | 2812 | |
|---|
| 2569 | 2813 | ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, |
|---|
| 2570 | 2814 | "%s hdl=%x ELS Timeout, %8phC portid=%06x\n", |
|---|
| 2571 | 2815 | sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); |
|---|
| 2572 | 2816 | |
|---|
| 2573 | 2817 | /* Abort the exchange */ |
|---|
| 2574 | | - spin_lock_irqsave(&ha->hardware_lock, flags); |
|---|
| 2575 | | - res = ha->isp_ops->abort_command(sp); |
|---|
| 2818 | + res = qla24xx_async_abort_cmd(sp, false); |
|---|
| 2576 | 2819 | ql_dbg(ql_dbg_io, vha, 0x3070, |
|---|
| 2577 | 2820 | "mbx abort_command %s\n", |
|---|
| 2578 | 2821 | (res == QLA_SUCCESS) ? "successful" : "failed"); |
|---|
| 2579 | | - spin_unlock_irqrestore(&ha->hardware_lock, flags); |
|---|
| 2580 | | - |
|---|
| 2581 | | - sp->done(sp, QLA_FUNCTION_TIMEOUT); |
|---|
| 2822 | + if (res) { |
|---|
| 2823 | + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); |
|---|
| 2824 | + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { |
|---|
| 2825 | + if (sp->qpair->req->outstanding_cmds[h] == sp) { |
|---|
| 2826 | + sp->qpair->req->outstanding_cmds[h] = NULL; |
|---|
| 2827 | + break; |
|---|
| 2828 | + } |
|---|
| 2829 | + } |
|---|
| 2830 | + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); |
|---|
| 2831 | + sp->done(sp, QLA_FUNCTION_TIMEOUT); |
|---|
| 2832 | + } |
|---|
| 2582 | 2833 | } |
|---|
| 2583 | 2834 | |
|---|
| 2584 | | -static void |
|---|
| 2585 | | -qla2x00_els_dcmd2_sp_done(void *ptr, int res) |
|---|
| 2835 | +void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) |
|---|
| 2586 | 2836 | { |
|---|
| 2587 | | - srb_t *sp = ptr; |
|---|
| 2837 | + if (els_plogi->els_plogi_pyld) |
|---|
| 2838 | + dma_free_coherent(&vha->hw->pdev->dev, |
|---|
| 2839 | + els_plogi->tx_size, |
|---|
| 2840 | + els_plogi->els_plogi_pyld, |
|---|
| 2841 | + els_plogi->els_plogi_pyld_dma); |
|---|
| 2842 | + |
|---|
| 2843 | + if (els_plogi->els_resp_pyld) |
|---|
| 2844 | + dma_free_coherent(&vha->hw->pdev->dev, |
|---|
| 2845 | + els_plogi->rx_size, |
|---|
| 2846 | + els_plogi->els_resp_pyld, |
|---|
| 2847 | + els_plogi->els_resp_pyld_dma); |
|---|
| 2848 | +} |
|---|
| 2849 | + |
|---|
| 2850 | +static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) |
|---|
| 2851 | +{ |
|---|
| 2588 | 2852 | fc_port_t *fcport = sp->fcport; |
|---|
| 2589 | 2853 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
|---|
| 2590 | 2854 | struct scsi_qla_host *vha = sp->vha; |
|---|
| 2591 | 2855 | struct event_arg ea; |
|---|
| 2592 | 2856 | struct qla_work_evt *e; |
|---|
| 2857 | + struct fc_port *conflict_fcport; |
|---|
| 2858 | + port_id_t cid; /* conflict Nport id */ |
|---|
| 2859 | + const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; |
|---|
| 2860 | + u16 lid; |
|---|
| 2593 | 2861 | |
|---|
| 2594 | 2862 | ql_dbg(ql_dbg_disc, vha, 0x3072, |
|---|
| 2595 | 2863 | "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", |
|---|
| .. | .. |
|---|
| 2601 | 2869 | if (sp->flags & SRB_WAKEUP_ON_COMP) |
|---|
| 2602 | 2870 | complete(&lio->u.els_plogi.comp); |
|---|
| 2603 | 2871 | else { |
|---|
| 2604 | | - if (res) { |
|---|
| 2605 | | - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); |
|---|
| 2606 | | - } else { |
|---|
| 2872 | + switch (le32_to_cpu(fw_status[0])) { |
|---|
| 2873 | + case CS_DATA_UNDERRUN: |
|---|
| 2874 | + case CS_COMPLETE: |
|---|
| 2607 | 2875 | memset(&ea, 0, sizeof(ea)); |
|---|
| 2608 | 2876 | ea.fcport = fcport; |
|---|
| 2609 | 2877 | ea.rc = res; |
|---|
| 2610 | | - ea.event = FCME_ELS_PLOGI_DONE; |
|---|
| 2611 | | - qla2x00_fcport_event_handler(vha, &ea); |
|---|
| 2878 | + qla_handle_els_plogi_done(vha, &ea); |
|---|
| 2879 | + break; |
|---|
| 2880 | + |
|---|
| 2881 | + case CS_IOCB_ERROR: |
|---|
| 2882 | + switch (le32_to_cpu(fw_status[1])) { |
|---|
| 2883 | + case LSC_SCODE_PORTID_USED: |
|---|
| 2884 | + lid = le32_to_cpu(fw_status[2]) & 0xffff; |
|---|
| 2885 | + qlt_find_sess_invalidate_other(vha, |
|---|
| 2886 | + wwn_to_u64(fcport->port_name), |
|---|
| 2887 | + fcport->d_id, lid, &conflict_fcport); |
|---|
| 2888 | + if (conflict_fcport) { |
|---|
| 2889 | + /* |
|---|
| 2890 | + * Another fcport shares the same |
|---|
| 2891 | + * loop_id & nport id; conflict |
|---|
| 2892 | + * fcport needs to finish cleanup |
|---|
| 2893 | + * before this fcport can proceed |
|---|
| 2894 | + * to login. |
|---|
| 2895 | + */ |
|---|
| 2896 | + conflict_fcport->conflict = fcport; |
|---|
| 2897 | + fcport->login_pause = 1; |
|---|
| 2898 | + ql_dbg(ql_dbg_disc, vha, 0x20ed, |
|---|
| 2899 | + "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n", |
|---|
| 2900 | + __func__, __LINE__, |
|---|
| 2901 | + fcport->port_name, |
|---|
| 2902 | + fcport->d_id.b24, lid); |
|---|
| 2903 | + } else { |
|---|
| 2904 | + ql_dbg(ql_dbg_disc, vha, 0x20ed, |
|---|
| 2905 | + "%s %d %8phC pid %06x inuse with lid %#x sched del\n", |
|---|
| 2906 | + __func__, __LINE__, |
|---|
| 2907 | + fcport->port_name, |
|---|
| 2908 | + fcport->d_id.b24, lid); |
|---|
| 2909 | + qla2x00_clear_loop_id(fcport); |
|---|
| 2910 | + set_bit(lid, vha->hw->loop_id_map); |
|---|
| 2911 | + fcport->loop_id = lid; |
|---|
| 2912 | + fcport->keep_nport_handle = 0; |
|---|
| 2913 | + qlt_schedule_sess_for_deletion(fcport); |
|---|
| 2914 | + } |
|---|
| 2915 | + break; |
|---|
| 2916 | + |
|---|
| 2917 | + case LSC_SCODE_NPORT_USED: |
|---|
| 2918 | + cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16) |
|---|
| 2919 | + & 0xff; |
|---|
| 2920 | + cid.b.area = (le32_to_cpu(fw_status[2]) >> 8) |
|---|
| 2921 | + & 0xff; |
|---|
| 2922 | + cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff; |
|---|
| 2923 | + cid.b.rsvd_1 = 0; |
|---|
| 2924 | + |
|---|
| 2925 | + ql_dbg(ql_dbg_disc, vha, 0x20ec, |
|---|
| 2926 | + "%s %d %8phC lid %#x in use with pid %06x post gnl\n", |
|---|
| 2927 | + __func__, __LINE__, fcport->port_name, |
|---|
| 2928 | + fcport->loop_id, cid.b24); |
|---|
| 2929 | + set_bit(fcport->loop_id, |
|---|
| 2930 | + vha->hw->loop_id_map); |
|---|
| 2931 | + fcport->loop_id = FC_NO_LOOP_ID; |
|---|
| 2932 | + qla24xx_post_gnl_work(vha, fcport); |
|---|
| 2933 | + break; |
|---|
| 2934 | + |
|---|
| 2935 | + case LSC_SCODE_NOXCB: |
|---|
| 2936 | + vha->hw->exch_starvation++; |
|---|
| 2937 | + if (vha->hw->exch_starvation > 5) { |
|---|
| 2938 | + ql_log(ql_log_warn, vha, 0xd046, |
|---|
| 2939 | + "Exchange starvation. Resetting RISC\n"); |
|---|
| 2940 | + vha->hw->exch_starvation = 0; |
|---|
| 2941 | + set_bit(ISP_ABORT_NEEDED, |
|---|
| 2942 | + &vha->dpc_flags); |
|---|
| 2943 | + qla2xxx_wake_dpc(vha); |
|---|
| 2944 | + break; |
|---|
| 2945 | + } |
|---|
| 2946 | + fallthrough; |
|---|
| 2947 | + default: |
|---|
| 2948 | + ql_dbg(ql_dbg_disc, vha, 0x20eb, |
|---|
| 2949 | + "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", |
|---|
| 2950 | + __func__, sp->fcport->port_name, |
|---|
| 2951 | + fw_status[0], fw_status[1], fw_status[2]); |
|---|
| 2952 | + |
|---|
| 2953 | + fcport->flags &= ~FCF_ASYNC_SENT; |
|---|
| 2954 | + qlt_schedule_sess_for_deletion(fcport); |
|---|
| 2955 | + break; |
|---|
| 2956 | + } |
|---|
| 2957 | + break; |
|---|
| 2958 | + |
|---|
| 2959 | + default: |
|---|
| 2960 | + ql_dbg(ql_dbg_disc, vha, 0x20eb, |
|---|
| 2961 | + "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n", |
|---|
| 2962 | + __func__, sp->fcport->port_name, |
|---|
| 2963 | + fw_status[0], fw_status[1], fw_status[2]); |
|---|
| 2964 | + |
|---|
| 2965 | + sp->fcport->flags &= ~FCF_ASYNC_SENT; |
|---|
| 2966 | + qlt_schedule_sess_for_deletion(fcport); |
|---|
| 2967 | + break; |
|---|
| 2612 | 2968 | } |
|---|
| 2613 | 2969 | |
|---|
| 2614 | 2970 | e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); |
|---|
| 2615 | 2971 | if (!e) { |
|---|
| 2616 | 2972 | struct srb_iocb *elsio = &sp->u.iocb_cmd; |
|---|
| 2617 | 2973 | |
|---|
| 2618 | | - if (elsio->u.els_plogi.els_plogi_pyld) |
|---|
| 2619 | | - dma_free_coherent(&sp->vha->hw->pdev->dev, |
|---|
| 2620 | | - elsio->u.els_plogi.tx_size, |
|---|
| 2621 | | - elsio->u.els_plogi.els_plogi_pyld, |
|---|
| 2622 | | - elsio->u.els_plogi.els_plogi_pyld_dma); |
|---|
| 2623 | | - |
|---|
| 2624 | | - if (elsio->u.els_plogi.els_resp_pyld) |
|---|
| 2625 | | - dma_free_coherent(&sp->vha->hw->pdev->dev, |
|---|
| 2626 | | - elsio->u.els_plogi.rx_size, |
|---|
| 2627 | | - elsio->u.els_plogi.els_resp_pyld, |
|---|
| 2628 | | - elsio->u.els_plogi.els_resp_pyld_dma); |
|---|
| 2974 | + qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); |
|---|
| 2629 | 2975 | sp->free(sp); |
|---|
| 2630 | 2976 | return; |
|---|
| 2631 | 2977 | } |
|---|
| .. | .. |
|---|
| 2643 | 2989 | struct qla_hw_data *ha = vha->hw; |
|---|
| 2644 | 2990 | int rval = QLA_SUCCESS; |
|---|
| 2645 | 2991 | void *ptr, *resp_ptr; |
|---|
| 2646 | | - dma_addr_t ptr_dma; |
|---|
| 2647 | 2992 | |
|---|
| 2648 | 2993 | /* Alloc SRB structure */ |
|---|
| 2649 | 2994 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); |
|---|
| 2650 | 2995 | if (!sp) { |
|---|
| 2651 | 2996 | ql_log(ql_log_info, vha, 0x70e6, |
|---|
| 2652 | 2997 | "SRB allocation failed\n"); |
|---|
| 2998 | + fcport->flags &= ~FCF_ASYNC_ACTIVE; |
|---|
| 2653 | 2999 | return -ENOMEM; |
|---|
| 2654 | 3000 | } |
|---|
| 2655 | 3001 | |
|---|
| 3002 | + fcport->flags |= FCF_ASYNC_SENT; |
|---|
| 3003 | + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); |
|---|
| 2656 | 3004 | elsio = &sp->u.iocb_cmd; |
|---|
| 2657 | 3005 | ql_dbg(ql_dbg_io, vha, 0x3073, |
|---|
| 2658 | 3006 | "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); |
|---|
| 2659 | 3007 | |
|---|
| 2660 | | - fcport->flags |= FCF_ASYNC_SENT; |
|---|
| 2661 | 3008 | sp->type = SRB_ELS_DCMD; |
|---|
| 2662 | 3009 | sp->name = "ELS_DCMD"; |
|---|
| 2663 | 3010 | sp->fcport = fcport; |
|---|
| 2664 | 3011 | |
|---|
| 2665 | 3012 | elsio->timeout = qla2x00_els_dcmd2_iocb_timeout; |
|---|
| 2666 | | - init_completion(&elsio->u.els_plogi.comp); |
|---|
| 2667 | 3013 | if (wait) |
|---|
| 2668 | 3014 | sp->flags = SRB_WAKEUP_ON_COMP; |
|---|
| 2669 | 3015 | |
|---|
| .. | .. |
|---|
| 2673 | 3019 | elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; |
|---|
| 2674 | 3020 | |
|---|
| 2675 | 3021 | ptr = elsio->u.els_plogi.els_plogi_pyld = |
|---|
| 2676 | | - dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE, |
|---|
| 3022 | + dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size, |
|---|
| 2677 | 3023 | &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL); |
|---|
| 2678 | | - ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma; |
|---|
| 2679 | 3024 | |
|---|
| 2680 | 3025 | if (!elsio->u.els_plogi.els_plogi_pyld) { |
|---|
| 2681 | 3026 | rval = QLA_FUNCTION_FAILED; |
|---|
| .. | .. |
|---|
| 2683 | 3028 | } |
|---|
| 2684 | 3029 | |
|---|
| 2685 | 3030 | resp_ptr = elsio->u.els_plogi.els_resp_pyld = |
|---|
| 2686 | | - dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE, |
|---|
| 3031 | + dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size, |
|---|
| 2687 | 3032 | &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL); |
|---|
| 2688 | 3033 | |
|---|
| 2689 | 3034 | if (!elsio->u.els_plogi.els_resp_pyld) { |
|---|
| .. | .. |
|---|
| 2696 | 3041 | memset(ptr, 0, sizeof(struct els_plogi_payload)); |
|---|
| 2697 | 3042 | memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); |
|---|
| 2698 | 3043 | memcpy(elsio->u.els_plogi.els_plogi_pyld->data, |
|---|
| 2699 | | - &ha->plogi_els_payld.data, |
|---|
| 2700 | | - sizeof(elsio->u.els_plogi.els_plogi_pyld->data)); |
|---|
| 3044 | + &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE); |
|---|
| 2701 | 3045 | |
|---|
| 2702 | 3046 | elsio->u.els_plogi.els_cmd = els_opcode; |
|---|
| 2703 | 3047 | elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; |
|---|
| .. | .. |
|---|
| 2707 | 3051 | (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, |
|---|
| 2708 | 3052 | sizeof(*elsio->u.els_plogi.els_plogi_pyld)); |
|---|
| 2709 | 3053 | |
|---|
| 3054 | + init_completion(&elsio->u.els_plogi.comp); |
|---|
| 2710 | 3055 | rval = qla2x00_start_sp(sp); |
|---|
| 2711 | 3056 | if (rval != QLA_SUCCESS) { |
|---|
| 2712 | 3057 | rval = QLA_FUNCTION_FAILED; |
|---|
| .. | .. |
|---|
| 2727 | 3072 | } |
|---|
| 2728 | 3073 | |
|---|
| 2729 | 3074 | out: |
|---|
| 2730 | | - fcport->flags &= ~(FCF_ASYNC_SENT); |
|---|
| 2731 | | - if (elsio->u.els_plogi.els_plogi_pyld) |
|---|
| 2732 | | - dma_free_coherent(&sp->vha->hw->pdev->dev, |
|---|
| 2733 | | - elsio->u.els_plogi.tx_size, |
|---|
| 2734 | | - elsio->u.els_plogi.els_plogi_pyld, |
|---|
| 2735 | | - elsio->u.els_plogi.els_plogi_pyld_dma); |
|---|
| 2736 | | - |
|---|
| 2737 | | - if (elsio->u.els_plogi.els_resp_pyld) |
|---|
| 2738 | | - dma_free_coherent(&sp->vha->hw->pdev->dev, |
|---|
| 2739 | | - elsio->u.els_plogi.rx_size, |
|---|
| 2740 | | - elsio->u.els_plogi.els_resp_pyld, |
|---|
| 2741 | | - elsio->u.els_plogi.els_resp_pyld_dma); |
|---|
| 2742 | | - |
|---|
| 3075 | + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
|---|
| 3076 | + qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); |
|---|
| 2743 | 3077 | sp->free(sp); |
|---|
| 2744 | 3078 | done: |
|---|
| 2745 | 3079 | return rval; |
|---|
| .. | .. |
|---|
| 2756 | 3090 | els_iocb->sys_define = 0; |
|---|
| 2757 | 3091 | els_iocb->entry_status = 0; |
|---|
| 2758 | 3092 | els_iocb->handle = sp->handle; |
|---|
| 2759 | | - els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
|---|
| 3093 | + els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
|---|
| 2760 | 3094 | els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); |
|---|
| 2761 | 3095 | els_iocb->vp_index = sp->vha->vp_idx; |
|---|
| 2762 | 3096 | els_iocb->sof_type = EST_SOFI3; |
|---|
| .. | .. |
|---|
| 2766 | 3100 | sp->type == SRB_ELS_CMD_RPT ? |
|---|
| 2767 | 3101 | bsg_request->rqst_data.r_els.els_code : |
|---|
| 2768 | 3102 | bsg_request->rqst_data.h_els.command_code; |
|---|
| 2769 | | - els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; |
|---|
| 2770 | | - els_iocb->port_id[1] = sp->fcport->d_id.b.area; |
|---|
| 2771 | | - els_iocb->port_id[2] = sp->fcport->d_id.b.domain; |
|---|
| 3103 | + els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; |
|---|
| 3104 | + els_iocb->d_id[1] = sp->fcport->d_id.b.area; |
|---|
| 3105 | + els_iocb->d_id[2] = sp->fcport->d_id.b.domain; |
|---|
| 2772 | 3106 | els_iocb->control_flags = 0; |
|---|
| 2773 | 3107 | els_iocb->rx_byte_count = |
|---|
| 2774 | 3108 | cpu_to_le32(bsg_job->reply_payload.payload_len); |
|---|
| 2775 | 3109 | els_iocb->tx_byte_count = |
|---|
| 2776 | 3110 | cpu_to_le32(bsg_job->request_payload.payload_len); |
|---|
| 2777 | 3111 | |
|---|
| 2778 | | - els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address |
|---|
| 2779 | | - (bsg_job->request_payload.sg_list))); |
|---|
| 2780 | | - els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address |
|---|
| 2781 | | - (bsg_job->request_payload.sg_list))); |
|---|
| 3112 | + put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), |
|---|
| 3113 | + &els_iocb->tx_address); |
|---|
| 2782 | 3114 | els_iocb->tx_len = cpu_to_le32(sg_dma_len |
|---|
| 2783 | 3115 | (bsg_job->request_payload.sg_list)); |
|---|
| 2784 | 3116 | |
|---|
| 2785 | | - els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address |
|---|
| 2786 | | - (bsg_job->reply_payload.sg_list))); |
|---|
| 2787 | | - els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address |
|---|
| 2788 | | - (bsg_job->reply_payload.sg_list))); |
|---|
| 3117 | + put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), |
|---|
| 3118 | + &els_iocb->rx_address); |
|---|
| 2789 | 3119 | els_iocb->rx_len = cpu_to_le32(sg_dma_len |
|---|
| 2790 | 3120 | (bsg_job->reply_payload.sg_list)); |
|---|
| 2791 | 3121 | |
|---|
| .. | .. |
|---|
| 2796 | 3126 | qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) |
|---|
| 2797 | 3127 | { |
|---|
| 2798 | 3128 | uint16_t avail_dsds; |
|---|
| 2799 | | - uint32_t *cur_dsd; |
|---|
| 3129 | + struct dsd64 *cur_dsd; |
|---|
| 2800 | 3130 | struct scatterlist *sg; |
|---|
| 2801 | 3131 | int index; |
|---|
| 2802 | 3132 | uint16_t tot_dsds; |
|---|
| 2803 | 3133 | scsi_qla_host_t *vha = sp->vha; |
|---|
| 2804 | 3134 | struct qla_hw_data *ha = vha->hw; |
|---|
| 2805 | 3135 | struct bsg_job *bsg_job = sp->u.bsg_job; |
|---|
| 2806 | | - int loop_iterartion = 0; |
|---|
| 2807 | 3136 | int entry_count = 1; |
|---|
| 2808 | 3137 | |
|---|
| 2809 | 3138 | memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); |
|---|
| .. | .. |
|---|
| 2823 | 3152 | ct_iocb->rsp_bytecount = |
|---|
| 2824 | 3153 | cpu_to_le32(bsg_job->reply_payload.payload_len); |
|---|
| 2825 | 3154 | |
|---|
| 2826 | | - ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address |
|---|
| 2827 | | - (bsg_job->request_payload.sg_list))); |
|---|
| 2828 | | - ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address |
|---|
| 2829 | | - (bsg_job->request_payload.sg_list))); |
|---|
| 2830 | | - ct_iocb->dseg_req_length = ct_iocb->req_bytecount; |
|---|
| 3155 | + put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), |
|---|
| 3156 | + &ct_iocb->req_dsd.address); |
|---|
| 3157 | + ct_iocb->req_dsd.length = ct_iocb->req_bytecount; |
|---|
| 2831 | 3158 | |
|---|
| 2832 | | - ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address |
|---|
| 2833 | | - (bsg_job->reply_payload.sg_list))); |
|---|
| 2834 | | - ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address |
|---|
| 2835 | | - (bsg_job->reply_payload.sg_list))); |
|---|
| 2836 | | - ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount; |
|---|
| 3159 | + put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), |
|---|
| 3160 | + &ct_iocb->rsp_dsd.address); |
|---|
| 3161 | + ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount; |
|---|
| 2837 | 3162 | |
|---|
| 2838 | 3163 | avail_dsds = 1; |
|---|
| 2839 | | - cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address; |
|---|
| 3164 | + cur_dsd = &ct_iocb->rsp_dsd; |
|---|
| 2840 | 3165 | index = 0; |
|---|
| 2841 | 3166 | tot_dsds = bsg_job->reply_payload.sg_cnt; |
|---|
| 2842 | 3167 | |
|---|
| 2843 | 3168 | for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { |
|---|
| 2844 | | - dma_addr_t sle_dma; |
|---|
| 2845 | 3169 | cont_a64_entry_t *cont_pkt; |
|---|
| 2846 | 3170 | |
|---|
| 2847 | 3171 | /* Allocate additional continuation packets? */ |
|---|
| .. | .. |
|---|
| 2852 | 3176 | */ |
|---|
| 2853 | 3177 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, |
|---|
| 2854 | 3178 | vha->hw->req_q_map[0]); |
|---|
| 2855 | | - cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; |
|---|
| 3179 | + cur_dsd = cont_pkt->dsd; |
|---|
| 2856 | 3180 | avail_dsds = 5; |
|---|
| 2857 | 3181 | entry_count++; |
|---|
| 2858 | 3182 | } |
|---|
| 2859 | 3183 | |
|---|
| 2860 | | - sle_dma = sg_dma_address(sg); |
|---|
| 2861 | | - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
|---|
| 2862 | | - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
|---|
| 2863 | | - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
|---|
| 2864 | | - loop_iterartion++; |
|---|
| 3184 | + append_dsd64(&cur_dsd, sg); |
|---|
| 2865 | 3185 | avail_dsds--; |
|---|
| 2866 | 3186 | } |
|---|
| 2867 | 3187 | ct_iocb->entry_count = entry_count; |
|---|
| .. | .. |
|---|
| 2873 | 3193 | qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) |
|---|
| 2874 | 3194 | { |
|---|
| 2875 | 3195 | uint16_t avail_dsds; |
|---|
| 2876 | | - uint32_t *cur_dsd; |
|---|
| 3196 | + struct dsd64 *cur_dsd; |
|---|
| 2877 | 3197 | struct scatterlist *sg; |
|---|
| 2878 | 3198 | int index; |
|---|
| 2879 | 3199 | uint16_t cmd_dsds, rsp_dsds; |
|---|
| .. | .. |
|---|
| 2902 | 3222 | cpu_to_le32(bsg_job->request_payload.payload_len); |
|---|
| 2903 | 3223 | |
|---|
| 2904 | 3224 | avail_dsds = 2; |
|---|
| 2905 | | - cur_dsd = (uint32_t *)ct_iocb->dseg_0_address; |
|---|
| 3225 | + cur_dsd = ct_iocb->dsd; |
|---|
| 2906 | 3226 | index = 0; |
|---|
| 2907 | 3227 | |
|---|
| 2908 | 3228 | for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) { |
|---|
| 2909 | | - dma_addr_t sle_dma; |
|---|
| 2910 | | - |
|---|
| 2911 | 3229 | /* Allocate additional continuation packets? */ |
|---|
| 2912 | 3230 | if (avail_dsds == 0) { |
|---|
| 2913 | 3231 | /* |
|---|
| .. | .. |
|---|
| 2916 | 3234 | */ |
|---|
| 2917 | 3235 | cont_pkt = qla2x00_prep_cont_type1_iocb( |
|---|
| 2918 | 3236 | vha, ha->req_q_map[0]); |
|---|
| 2919 | | - cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; |
|---|
| 3237 | + cur_dsd = cont_pkt->dsd; |
|---|
| 2920 | 3238 | avail_dsds = 5; |
|---|
| 2921 | 3239 | entry_count++; |
|---|
| 2922 | 3240 | } |
|---|
| 2923 | 3241 | |
|---|
| 2924 | | - sle_dma = sg_dma_address(sg); |
|---|
| 2925 | | - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
|---|
| 2926 | | - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
|---|
| 2927 | | - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
|---|
| 3242 | + append_dsd64(&cur_dsd, sg); |
|---|
| 2928 | 3243 | avail_dsds--; |
|---|
| 2929 | 3244 | } |
|---|
| 2930 | 3245 | |
|---|
| 2931 | 3246 | index = 0; |
|---|
| 2932 | 3247 | |
|---|
| 2933 | 3248 | for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) { |
|---|
| 2934 | | - dma_addr_t sle_dma; |
|---|
| 2935 | | - |
|---|
| 2936 | 3249 | /* Allocate additional continuation packets? */ |
|---|
| 2937 | 3250 | if (avail_dsds == 0) { |
|---|
| 2938 | 3251 | /* |
|---|
| .. | .. |
|---|
| 2941 | 3254 | */ |
|---|
| 2942 | 3255 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, |
|---|
| 2943 | 3256 | ha->req_q_map[0]); |
|---|
| 2944 | | - cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; |
|---|
| 3257 | + cur_dsd = cont_pkt->dsd; |
|---|
| 2945 | 3258 | avail_dsds = 5; |
|---|
| 2946 | 3259 | entry_count++; |
|---|
| 2947 | 3260 | } |
|---|
| 2948 | 3261 | |
|---|
| 2949 | | - sle_dma = sg_dma_address(sg); |
|---|
| 2950 | | - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
|---|
| 2951 | | - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
|---|
| 2952 | | - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
|---|
| 3262 | + append_dsd64(&cur_dsd, sg); |
|---|
| 2953 | 3263 | avail_dsds--; |
|---|
| 2954 | 3264 | } |
|---|
| 2955 | 3265 | ct_iocb->entry_count = entry_count; |
|---|
| .. | .. |
|---|
| 2968 | 3278 | unsigned long flags; |
|---|
| 2969 | 3279 | struct scsi_cmnd *cmd; |
|---|
| 2970 | 3280 | uint32_t *clr_ptr; |
|---|
| 2971 | | - uint32_t index; |
|---|
| 2972 | 3281 | uint32_t handle; |
|---|
| 2973 | 3282 | uint16_t cnt; |
|---|
| 2974 | 3283 | uint16_t req_cnt; |
|---|
| 2975 | 3284 | uint16_t tot_dsds; |
|---|
| 2976 | 3285 | struct device_reg_82xx __iomem *reg; |
|---|
| 2977 | 3286 | uint32_t dbval; |
|---|
| 2978 | | - uint32_t *fcp_dl; |
|---|
| 3287 | + __be32 *fcp_dl; |
|---|
| 2979 | 3288 | uint8_t additional_cdb_len; |
|---|
| 2980 | 3289 | struct ct6_dsd *ctx; |
|---|
| 2981 | 3290 | struct scsi_qla_host *vha = sp->vha; |
|---|
| .. | .. |
|---|
| 2996 | 3305 | |
|---|
| 2997 | 3306 | /* Send marker if required */ |
|---|
| 2998 | 3307 | if (vha->marker_needed != 0) { |
|---|
| 2999 | | - if (qla2x00_marker(vha, req, |
|---|
| 3000 | | - rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { |
|---|
| 3308 | + if (qla2x00_marker(vha, ha->base_qpair, |
|---|
| 3309 | + 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { |
|---|
| 3001 | 3310 | ql_log(ql_log_warn, vha, 0x300c, |
|---|
| 3002 | 3311 | "qla2x00_marker failed for cmd=%p.\n", cmd); |
|---|
| 3003 | 3312 | return QLA_FUNCTION_FAILED; |
|---|
| .. | .. |
|---|
| 3008 | 3317 | /* Acquire ring specific lock */ |
|---|
| 3009 | 3318 | spin_lock_irqsave(&ha->hardware_lock, flags); |
|---|
| 3010 | 3319 | |
|---|
| 3011 | | - /* Check for room in outstanding command list. */ |
|---|
| 3012 | | - handle = req->current_outstanding_cmd; |
|---|
| 3013 | | - for (index = 1; index < req->num_outstanding_cmds; index++) { |
|---|
| 3014 | | - handle++; |
|---|
| 3015 | | - if (handle == req->num_outstanding_cmds) |
|---|
| 3016 | | - handle = 1; |
|---|
| 3017 | | - if (!req->outstanding_cmds[handle]) |
|---|
| 3018 | | - break; |
|---|
| 3019 | | - } |
|---|
| 3020 | | - if (index == req->num_outstanding_cmds) |
|---|
| 3320 | + handle = qla2xxx_get_next_handle(req); |
|---|
| 3321 | + if (handle == 0) |
|---|
| 3021 | 3322 | goto queuing_error; |
|---|
| 3022 | 3323 | |
|---|
| 3023 | 3324 | /* Map the sg table so we have an accurate count of sg entries needed */ |
|---|
| .. | .. |
|---|
| 3077 | 3378 | req_cnt = 1; |
|---|
| 3078 | 3379 | |
|---|
| 3079 | 3380 | if (req->cnt < (req_cnt + 2)) { |
|---|
| 3080 | | - cnt = (uint16_t)RD_REG_DWORD_RELAXED( |
|---|
| 3381 | + cnt = (uint16_t)rd_reg_dword_relaxed( |
|---|
| 3081 | 3382 | &reg->req_q_out[0]); |
|---|
| 3082 | 3383 | if (req->ring_index < cnt) |
|---|
| 3083 | 3384 | req->cnt = cnt - req->ring_index; |
|---|
| .. | .. |
|---|
| 3088 | 3389 | goto queuing_error; |
|---|
| 3089 | 3390 | } |
|---|
| 3090 | 3391 | |
|---|
| 3091 | | - ctx = sp->u.scmd.ctx = |
|---|
| 3392 | + ctx = sp->u.scmd.ct6_ctx = |
|---|
| 3092 | 3393 | mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); |
|---|
| 3093 | 3394 | if (!ctx) { |
|---|
| 3094 | 3395 | ql_log(ql_log_fatal, vha, 0x3010, |
|---|
| .. | .. |
|---|
| 3127 | 3428 | } |
|---|
| 3128 | 3429 | |
|---|
| 3129 | 3430 | cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; |
|---|
| 3130 | | - cmd_pkt->handle = MAKE_HANDLE(req->id, handle); |
|---|
| 3431 | + cmd_pkt->handle = make_handle(req->id, handle); |
|---|
| 3131 | 3432 | |
|---|
| 3132 | 3433 | /* Zero out remaining portion of packet. */ |
|---|
| 3133 | 3434 | /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ |
|---|
| .. | .. |
|---|
| 3165 | 3466 | |
|---|
| 3166 | 3467 | memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); |
|---|
| 3167 | 3468 | |
|---|
| 3168 | | - fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + |
|---|
| 3469 | + fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + |
|---|
| 3169 | 3470 | additional_cdb_len); |
|---|
| 3170 | 3471 | *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); |
|---|
| 3171 | 3472 | |
|---|
| 3172 | 3473 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); |
|---|
| 3173 | | - cmd_pkt->fcp_cmnd_dseg_address[0] = |
|---|
| 3174 | | - cpu_to_le32(LSD(ctx->fcp_cmnd_dma)); |
|---|
| 3175 | | - cmd_pkt->fcp_cmnd_dseg_address[1] = |
|---|
| 3176 | | - cpu_to_le32(MSD(ctx->fcp_cmnd_dma)); |
|---|
| 3474 | + put_unaligned_le64(ctx->fcp_cmnd_dma, |
|---|
| 3475 | + &cmd_pkt->fcp_cmnd_dseg_address); |
|---|
| 3177 | 3476 | |
|---|
| 3178 | 3477 | sp->flags |= SRB_FCP_CMND_DMA_VALID; |
|---|
| 3179 | 3478 | cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); |
|---|
| .. | .. |
|---|
| 3185 | 3484 | cmd_pkt->entry_status = (uint8_t) rsp->id; |
|---|
| 3186 | 3485 | } else { |
|---|
| 3187 | 3486 | struct cmd_type_7 *cmd_pkt; |
|---|
| 3487 | + |
|---|
| 3188 | 3488 | req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); |
|---|
| 3189 | 3489 | if (req->cnt < (req_cnt + 2)) { |
|---|
| 3190 | | - cnt = (uint16_t)RD_REG_DWORD_RELAXED( |
|---|
| 3490 | + cnt = (uint16_t)rd_reg_dword_relaxed( |
|---|
| 3191 | 3491 | &reg->req_q_out[0]); |
|---|
| 3192 | 3492 | if (req->ring_index < cnt) |
|---|
| 3193 | 3493 | req->cnt = cnt - req->ring_index; |
|---|
| .. | .. |
|---|
| 3199 | 3499 | goto queuing_error; |
|---|
| 3200 | 3500 | |
|---|
| 3201 | 3501 | cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; |
|---|
| 3202 | | - cmd_pkt->handle = MAKE_HANDLE(req->id, handle); |
|---|
| 3502 | + cmd_pkt->handle = make_handle(req->id, handle); |
|---|
| 3203 | 3503 | |
|---|
| 3204 | 3504 | /* Zero out remaining portion of packet. */ |
|---|
| 3205 | 3505 | /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ |
|---|
| .. | .. |
|---|
| 3263 | 3563 | if (ql2xdbwr) |
|---|
| 3264 | 3564 | qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); |
|---|
| 3265 | 3565 | else { |
|---|
| 3266 | | - WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); |
|---|
| 3566 | + wrt_reg_dword(ha->nxdb_wr_ptr, dbval); |
|---|
| 3267 | 3567 | wmb(); |
|---|
| 3268 | | - while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { |
|---|
| 3269 | | - WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); |
|---|
| 3568 | + while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { |
|---|
| 3569 | + wrt_reg_dword(ha->nxdb_wr_ptr, dbval); |
|---|
| 3270 | 3570 | wmb(); |
|---|
| 3271 | 3571 | } |
|---|
| 3272 | 3572 | } |
|---|
| .. | .. |
|---|
| 3285 | 3585 | if (tot_dsds) |
|---|
| 3286 | 3586 | scsi_dma_unmap(cmd); |
|---|
| 3287 | 3587 | |
|---|
| 3288 | | - if (sp->u.scmd.ctx) { |
|---|
| 3289 | | - mempool_free(sp->u.scmd.ctx, ha->ctx_mempool); |
|---|
| 3290 | | - sp->u.scmd.ctx = NULL; |
|---|
| 3588 | + if (sp->u.scmd.crc_ctx) { |
|---|
| 3589 | + mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool); |
|---|
| 3590 | + sp->u.scmd.crc_ctx = NULL; |
|---|
| 3291 | 3591 | } |
|---|
| 3292 | 3592 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
|---|
| 3293 | 3593 | |
|---|
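
In the error path the former anonymous sp->u.scmd.ctx pointer is now named for what it actually holds (crc_ctx here, ct6_ctx in the Command Type 6 branch earlier), so each cleanup frees a typed object. The sketch below models the idea only; the struct contents are placeholders, not the driver's real layouts.

```c
/* Sketch of why the anonymous 'ctx' pointer was split into typed ct6_ctx /
 * crc_ctx members: each error path frees exactly the object it allocated,
 * and the type documents which one that is.  Struct bodies are placeholders.
 */
#include <stdlib.h>
#include <stdio.h>

struct ct6_dsd { int placeholder; };		/* Command Type 6 context */
struct crc_context { int placeholder; };	/* T10-PI / CRC context */

struct scmd_ctx {
	struct ct6_dsd *ct6_ctx;
	struct crc_context *crc_ctx;
};

int main(void)
{
	struct scmd_ctx scmd = { 0 };

	scmd.crc_ctx = calloc(1, sizeof(*scmd.crc_ctx));
	if (!scmd.crc_ctx)
		return 1;

	/* error path: free only what this path owns, then clear it */
	free(scmd.crc_ctx);
	scmd.crc_ctx = NULL;

	printf("crc_ctx released, ct6_ctx untouched (%p)\n",
	       (void *)scmd.ct6_ctx);
	return 0;
}
```
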
| .. | .. |
|---|
| 3304 | 3604 | memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); |
|---|
| 3305 | 3605 | abt_iocb->entry_type = ABORT_IOCB_TYPE; |
|---|
| 3306 | 3606 | abt_iocb->entry_count = 1; |
|---|
| 3307 | | - abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); |
|---|
| 3607 | + abt_iocb->handle = make_handle(req->id, sp->handle); |
|---|
| 3308 | 3608 | if (sp->fcport) { |
|---|
| 3309 | 3609 | abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
|---|
| 3310 | 3610 | abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; |
|---|
| .. | .. |
|---|
| 3312 | 3612 | abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; |
|---|
| 3313 | 3613 | } |
|---|
| 3314 | 3614 | abt_iocb->handle_to_abort = |
|---|
| 3315 | | - cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no, |
|---|
| 3316 | | - aio->u.abt.cmd_hndl)); |
|---|
| 3615 | + make_handle(le16_to_cpu(aio->u.abt.req_que_no), |
|---|
| 3616 | + aio->u.abt.cmd_hndl); |
|---|
| 3317 | 3617 | abt_iocb->vp_index = vha->vp_idx; |
|---|
| 3318 | | - abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no); |
|---|
| 3618 | + abt_iocb->req_que_no = aio->u.abt.req_que_no; |
|---|
| 3319 | 3619 | /* Send the command to the firmware */ |
|---|
| 3320 | 3620 | wmb(); |
|---|
| 3321 | 3621 | } |
|---|
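
The abort IOCB now keeps req_que_no in wire order (__le16) end to end: it is copied into the IOCB untouched and converted with le16_to_cpu() only where make_handle() needs a CPU-order number. A small host-endian-independent model of that round trip, with the byte-order helper open-coded:

```c
/* Model of keeping a field in little-endian wire order (__le16 in the
 * driver) and converting to CPU order only at the point of use.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t le16_to_cpu_model(uint16_t le)
{
	const uint8_t *b = (const uint8_t *)&le;

	return (uint16_t)(b[0] | (b[1] << 8));	/* interpret bytes as LE */
}

static uint32_t make_handle(uint16_t req_id, uint16_t handle)
{
	return ((uint32_t)req_id << 16) | handle;
}

int main(void)
{
	uint16_t req_que_no_le;			/* stays in wire order */
	uint8_t *b = (uint8_t *)&req_que_no_le;

	b[0] = 0x03;				/* little-endian 3 */
	b[1] = 0x00;

	/* Stored into the IOCB untouched; decoded only for make_handle(). */
	printf("handle_to_abort: 0x%08x\n",
	       make_handle(le16_to_cpu_model(req_que_no_le), 0x002a));
	return 0;
}
```
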
| .. | .. |
|---|
| 3330 | 3630 | sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); |
|---|
| 3331 | 3631 | |
|---|
| 3332 | 3632 | for (i = 0; i < sz; i++) |
|---|
| 3333 | | - mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]); |
|---|
| 3633 | + mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i]; |
|---|
| 3334 | 3634 | } |
|---|
| 3335 | 3635 | |
|---|
| 3336 | 3636 | static void |
|---|
| .. | .. |
|---|
| 3354 | 3654 | nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; |
|---|
| 3355 | 3655 | if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { |
|---|
| 3356 | 3656 | nack->u.isp24.flags = ntfy->u.isp24.flags & |
|---|
| 3357 | | - cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); |
|---|
| 3657 | + cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); |
|---|
| 3358 | 3658 | } |
|---|
| 3359 | 3659 | nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; |
|---|
| 3360 | 3660 | nack->u.isp24.status = ntfy->u.isp24.status; |
|---|
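
The notify-ack flags field is 16 bits wide, so masking it with a cpu_to_le32() constant was a width mismatch: harmless on little-endian hosts, but on big-endian the swapped constant truncates to zero inside the 16-bit field and would clear the flag. A tiny model of why the conversion width matters (FLAG is a placeholder value, not the driver constant):

```c
/* Why the conversion width matters for a 16-bit flags mask: on a big-endian
 * host the 32-bit byte swap pushes the flag above bit 15, so truncating the
 * result into a 16-bit field yields zero.  FLAG is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define FLAG	0x0008u			/* placeholder for the PUREX flag bit */

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

static uint32_t swab32(uint32_t v)
{
	return (v << 24) | ((v & 0xff00u) << 8) | ((v >> 8) & 0xff00u) | (v >> 24);
}

int main(void)
{
	/* Values a big-endian CPU would compute for each conversion. */
	uint16_t le16_mask = swab16(FLAG);
	uint16_t le32_mask_in_16bit_field = (uint16_t)swab32(FLAG);

	printf("cpu_to_le16(FLAG) on big-endian: 0x%04x\n", le16_mask);
	printf("cpu_to_le32(FLAG) truncated to the 16-bit field: 0x%04x\n",
	       le32_mask_in_16bit_field);
	return 0;
}
```
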
| .. | .. |
|---|
| 3372 | 3672 | /* |
|---|
| 3373 | 3673 | * Build NVME LS request |
|---|
| 3374 | 3674 | */ |
|---|
| 3375 | | -static int |
|---|
| 3675 | +static void |
|---|
| 3376 | 3676 | qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt) |
|---|
| 3377 | 3677 | { |
|---|
| 3378 | 3678 | struct srb_iocb *nvme; |
|---|
| 3379 | | - int rval = QLA_SUCCESS; |
|---|
| 3380 | 3679 | |
|---|
| 3381 | 3680 | nvme = &sp->u.iocb_cmd; |
|---|
| 3382 | 3681 | cmd_pkt->entry_type = PT_LS4_REQUEST; |
|---|
| 3383 | 3682 | cmd_pkt->entry_count = 1; |
|---|
| 3384 | | - cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT; |
|---|
| 3683 | + cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT); |
|---|
| 3385 | 3684 | |
|---|
| 3386 | 3685 | cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); |
|---|
| 3387 | 3686 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
|---|
| 3388 | 3687 | cmd_pkt->vp_index = sp->fcport->vha->vp_idx; |
|---|
| 3389 | 3688 | |
|---|
| 3390 | | - cmd_pkt->tx_dseg_count = 1; |
|---|
| 3391 | | - cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len; |
|---|
| 3392 | | - cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len; |
|---|
| 3393 | | - cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma)); |
|---|
| 3394 | | - cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma)); |
|---|
| 3689 | + cmd_pkt->tx_dseg_count = cpu_to_le16(1); |
|---|
| 3690 | + cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len); |
|---|
| 3691 | + cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len); |
|---|
| 3692 | + put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address); |
|---|
| 3395 | 3693 | |
|---|
| 3396 | | - cmd_pkt->rx_dseg_count = 1; |
|---|
| 3397 | | - cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len; |
|---|
| 3398 | | - cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len; |
|---|
| 3399 | | - cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma)); |
|---|
| 3400 | | - cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma)); |
|---|
| 3401 | | - |
|---|
| 3402 | | - return rval; |
|---|
| 3694 | + cmd_pkt->rx_dseg_count = cpu_to_le16(1); |
|---|
| 3695 | + cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len); |
|---|
| 3696 | + cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len); |
|---|
| 3697 | + put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); |
|---|
| 3403 | 3698 | } |
|---|
| 3404 | 3699 | |
|---|
| 3405 | 3700 | static void |
|---|
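
qla_nvme_ls() now fills two generic 64-bit data-segment descriptors, dsd[0] for the LS command buffer and dsd[1] for the response buffer, with explicit cpu_to_le16()/cpu_to_le32()/put_unaligned_le64() conversions throughout. The sketch below models the descriptor layout those stores imply (an 8-byte little-endian address followed by a 4-byte little-endian length); the layout is inferred from this diff, not copied from the driver headers.

```c
/* Model of a 64-bit data-segment descriptor as the stores above imply it:
 * an 8-byte little-endian bus address followed by a 4-byte little-endian
 * length.  Layout is an assumption drawn from this diff, not the driver's
 * struct dsd64 definition.
 */
#include <stdint.h>
#include <stdio.h>

struct dsd64_model {
	uint8_t address[8];	/* little-endian bus address */
	uint8_t length[4];	/* little-endian byte count */
};

static void fill_dsd64(struct dsd64_model *d, uint64_t dma, uint32_t len)
{
	int i;

	for (i = 0; i < 8; i++)
		d->address[i] = (uint8_t)(dma >> (8 * i));
	for (i = 0; i < 4; i++)
		d->length[i] = (uint8_t)(len >> (8 * i));
}

int main(void)
{
	struct dsd64_model dsd[2];

	fill_dsd64(&dsd[0], 0xdeadbeef000ULL, 128);	/* LS command buffer */
	fill_dsd64(&dsd[1], 0xfeedface000ULL, 256);	/* LS response buffer */

	printf("dsd[0]: addr le bytes %02x %02x ... len %u\n",
	       dsd[0].address[0], dsd[0].address[1],
	       (unsigned)(dsd[0].length[0] | (dsd[0].length[1] << 8)));
	return 0;
}
```
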
| .. | .. |
|---|
| 3439 | 3734 | int |
|---|
| 3440 | 3735 | qla2x00_start_sp(srb_t *sp) |
|---|
| 3441 | 3736 | { |
|---|
| 3442 | | - int rval; |
|---|
| 3737 | + int rval = QLA_SUCCESS; |
|---|
| 3443 | 3738 | scsi_qla_host_t *vha = sp->vha; |
|---|
| 3444 | 3739 | struct qla_hw_data *ha = vha->hw; |
|---|
| 3740 | + struct qla_qpair *qp = sp->qpair; |
|---|
| 3445 | 3741 | void *pkt; |
|---|
| 3446 | 3742 | unsigned long flags; |
|---|
| 3447 | 3743 | |
|---|
| 3448 | | - rval = QLA_FUNCTION_FAILED; |
|---|
| 3449 | | - spin_lock_irqsave(&ha->hardware_lock, flags); |
|---|
| 3450 | | - pkt = qla2x00_alloc_iocbs(vha, sp); |
|---|
| 3744 | + if (vha->hw->flags.eeh_busy) |
|---|
| 3745 | + return -EIO; |
|---|
| 3746 | + |
|---|
| 3747 | + spin_lock_irqsave(qp->qp_lock_ptr, flags); |
|---|
| 3748 | + pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); |
|---|
| 3451 | 3749 | if (!pkt) { |
|---|
| 3750 | + rval = -EAGAIN; |
|---|
| 3452 | 3751 | ql_log(ql_log_warn, vha, 0x700c, |
|---|
| 3453 | 3752 | "qla2x00_alloc_iocbs failed.\n"); |
|---|
| 3454 | 3753 | goto done; |
|---|
| 3455 | 3754 | } |
|---|
| 3456 | 3755 | |
|---|
| 3457 | | - rval = QLA_SUCCESS; |
|---|
| 3458 | 3756 | switch (sp->type) { |
|---|
| 3459 | 3757 | case SRB_LOGIN_CMD: |
|---|
| 3460 | 3758 | IS_FWI2_CAPABLE(ha) ? |
|---|
| .. | .. |
|---|
| 3524 | 3822 | break; |
|---|
| 3525 | 3823 | } |
|---|
| 3526 | 3824 | |
|---|
| 3825 | + if (sp->start_timer) |
|---|
| 3826 | + add_timer(&sp->u.iocb_cmd.timer); |
|---|
| 3827 | + |
|---|
| 3527 | 3828 | wmb(); |
|---|
| 3528 | | - qla2x00_start_iocbs(vha, ha->req_q_map[0]); |
|---|
| 3829 | + qla2x00_start_iocbs(vha, qp->req); |
|---|
| 3529 | 3830 | done: |
|---|
| 3530 | | - spin_unlock_irqrestore(&ha->hardware_lock, flags); |
|---|
| 3831 | + spin_unlock_irqrestore(qp->qp_lock_ptr, flags); |
|---|
| 3531 | 3832 | return rval; |
|---|
| 3532 | 3833 | } |
|---|
| 3533 | 3834 | |
|---|
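
qla2x00_start_sp() now operates on the srb's own queue pair: it bails out with -EIO while EEH recovery is in progress, takes the qpair lock, allocates the IOCB from that qpair, optionally arms the command timer, and rings that qpair's request queue, returning -EAGAIN rather than a driver-internal status when the ring is full. A condensed user-space skeleton of that control flow, with all hardware interaction stubbed:

```c
/* Condensed skeleton of the submission path after the change: check for
 * EEH, lock the queue pair, allocate ring space, build the IOCB, ring the
 * doorbell, unlock.  All hardware interaction is stubbed; only the control
 * flow mirrors the diff.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct qpair_model {
	pthread_mutex_t lock;		/* stands in for *qp_lock_ptr */
	int free_slots;
};

static void *alloc_iocb(struct qpair_model *qp)
{
	static char slot[64];

	return qp->free_slots-- > 0 ? slot : NULL;
}

static int start_sp(struct qpair_model *qp, bool eeh_busy, bool start_timer)
{
	int rval = 0;
	void *pkt;

	if (eeh_busy)
		return -EIO;		/* adapter is being recovered */

	pthread_mutex_lock(&qp->lock);
	pkt = alloc_iocb(qp);
	if (!pkt) {
		rval = -EAGAIN;		/* ring full: let the caller retry */
		goto done;
	}

	/* ... build the IOCB for this srb type (switch on sp->type) ... */

	if (start_timer) {
		/* add_timer(&sp->u.iocb_cmd.timer) in the driver */
	}

	/* wmb(); qla2x00_start_iocbs(vha, qp->req); */
done:
	pthread_mutex_unlock(&qp->lock);
	return rval;
}

int main(void)
{
	struct qpair_model qp = { PTHREAD_MUTEX_INITIALIZER, 1 };

	printf("first submit: %d\n", start_sp(&qp, false, false));	/* 0 */
	printf("ring full:    %d\n", start_sp(&qp, false, false));	/* -EAGAIN */
	printf("eeh busy:     %d\n", start_sp(&qp, true, false));	/* -EIO */
	return 0;
}
```
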
| .. | .. |
|---|
| 3536 | 3837 | struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) |
|---|
| 3537 | 3838 | { |
|---|
| 3538 | 3839 | uint16_t avail_dsds; |
|---|
| 3539 | | - uint32_t *cur_dsd; |
|---|
| 3840 | + struct dsd64 *cur_dsd; |
|---|
| 3540 | 3841 | uint32_t req_data_len = 0; |
|---|
| 3541 | 3842 | uint32_t rsp_data_len = 0; |
|---|
| 3542 | 3843 | struct scatterlist *sg; |
|---|
| .. | .. |
|---|
| 3545 | 3846 | struct bsg_job *bsg_job = sp->u.bsg_job; |
|---|
| 3546 | 3847 | |
|---|
| 3547 | 3848 | /*Update entry type to indicate bidir command */ |
|---|
| 3548 | | - *((uint32_t *)(&cmd_pkt->entry_type)) = |
|---|
| 3549 | | - cpu_to_le32(COMMAND_BIDIRECTIONAL); |
|---|
| 3849 | + put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type); |
|---|
| 3550 | 3850 | |
|---|
| 3551 | 3851 | /* Set the transfer direction, in this set both flags |
|---|
| 3552 | 3852 | * Also set the BD_WRAP_BACK flag, firmware will take care |
|---|
| .. | .. |
|---|
| 3572 | 3872 | * are bundled in continuation iocb |
|---|
| 3573 | 3873 | */ |
|---|
| 3574 | 3874 | avail_dsds = 1; |
|---|
| 3575 | | - cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; |
|---|
| 3875 | + cur_dsd = &cmd_pkt->fcp_dsd; |
|---|
| 3576 | 3876 | |
|---|
| 3577 | 3877 | index = 0; |
|---|
| 3578 | 3878 | |
|---|
| 3579 | 3879 | for_each_sg(bsg_job->request_payload.sg_list, sg, |
|---|
| 3580 | 3880 | bsg_job->request_payload.sg_cnt, index) { |
|---|
| 3581 | | - dma_addr_t sle_dma; |
|---|
| 3582 | 3881 | cont_a64_entry_t *cont_pkt; |
|---|
| 3583 | 3882 | |
|---|
| 3584 | 3883 | /* Allocate additional continuation packets */ |
|---|
| .. | .. |
|---|
| 3587 | 3886 | * 5 DSDS |
|---|
| 3588 | 3887 | */ |
|---|
| 3589 | 3888 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); |
|---|
| 3590 | | - cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; |
|---|
| 3889 | + cur_dsd = cont_pkt->dsd; |
|---|
| 3591 | 3890 | avail_dsds = 5; |
|---|
| 3592 | 3891 | entry_count++; |
|---|
| 3593 | 3892 | } |
|---|
| 3594 | | - sle_dma = sg_dma_address(sg); |
|---|
| 3595 | | - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
|---|
| 3596 | | - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
|---|
| 3597 | | - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
|---|
| 3893 | + append_dsd64(&cur_dsd, sg); |
|---|
| 3598 | 3894 | avail_dsds--; |
|---|
| 3599 | 3895 | } |
|---|
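
Each scatterlist element used to take three stores (address low word, address high word, length); append_dsd64() now writes one 64-bit descriptor and advances the cursor. The model below captures what the helper must do, judging from the surrounding conversions; the types are simplified stand-ins, not the driver's struct dsd64 or struct scatterlist.

```c
/* Model of append_dsd64(): write one descriptor from a scatter-gather
 * element and step the cursor, replacing the old LSD/MSD/length triple.
 * Types are simplified stand-ins for the driver's structures.
 */
#include <stdint.h>
#include <stdio.h>

struct dsd64_model {
	uint64_t address;	/* little-endian on the wire; kept native here */
	uint32_t length;
};

struct sg_model {
	uint64_t dma_address;
	uint32_t dma_len;
};

static void append_dsd64_model(struct dsd64_model **cur,
			       const struct sg_model *sg)
{
	(*cur)->address = sg->dma_address;	/* put_unaligned_le64() in the driver */
	(*cur)->length = sg->dma_len;		/* cpu_to_le32() in the driver */
	(*cur)++;				/* advance to the next descriptor */
}

int main(void)
{
	struct dsd64_model dsds[3] = { { 0, 0 } };
	struct dsd64_model *cur = dsds;
	struct sg_model sgl[] = {
		{ 0x100000, 4096 }, { 0x200000, 8192 }, { 0x300000, 512 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(sgl) / sizeof(sgl[0]); i++)
		append_dsd64_model(&cur, &sgl[i]);

	for (i = 0; i < 3; i++)
		printf("dsd[%u]: addr=0x%llx len=%u\n", i,
		       (unsigned long long)dsds[i].address, dsds[i].length);
	return 0;
}
```
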
| 3600 | 3896 | /* For read request DSD will always goes to continuation IOCB |
|---|
| .. | .. |
|---|
| 3604 | 3900 | */ |
|---|
| 3605 | 3901 | for_each_sg(bsg_job->reply_payload.sg_list, sg, |
|---|
| 3606 | 3902 | bsg_job->reply_payload.sg_cnt, index) { |
|---|
| 3607 | | - dma_addr_t sle_dma; |
|---|
| 3608 | 3903 | cont_a64_entry_t *cont_pkt; |
|---|
| 3609 | 3904 | |
|---|
| 3610 | 3905 | /* Allocate additional continuation packets */ |
|---|
| .. | .. |
|---|
| 3613 | 3908 | * 5 DSDS |
|---|
| 3614 | 3909 | */ |
|---|
| 3615 | 3910 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); |
|---|
| 3616 | | - cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; |
|---|
| 3911 | + cur_dsd = cont_pkt->dsd; |
|---|
| 3617 | 3912 | avail_dsds = 5; |
|---|
| 3618 | 3913 | entry_count++; |
|---|
| 3619 | 3914 | } |
|---|
| 3620 | | - sle_dma = sg_dma_address(sg); |
|---|
| 3621 | | - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
|---|
| 3622 | | - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
|---|
| 3623 | | - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); |
|---|
| 3915 | + append_dsd64(&cur_dsd, sg); |
|---|
| 3624 | 3916 | avail_dsds--; |
|---|
| 3625 | 3917 | } |
|---|
| 3626 | 3918 | /* This value should be same as number of IOCB required for this cmd */ |
|---|
| .. | .. |
|---|
| 3634 | 3926 | struct qla_hw_data *ha = vha->hw; |
|---|
| 3635 | 3927 | unsigned long flags; |
|---|
| 3636 | 3928 | uint32_t handle; |
|---|
| 3637 | | - uint32_t index; |
|---|
| 3638 | 3929 | uint16_t req_cnt; |
|---|
| 3639 | 3930 | uint16_t cnt; |
|---|
| 3640 | 3931 | uint32_t *clr_ptr; |
|---|
| .. | .. |
|---|
| 3650 | 3941 | |
|---|
| 3651 | 3942 | /* Send marker if required */ |
|---|
| 3652 | 3943 | if (vha->marker_needed != 0) { |
|---|
| 3653 | | - if (qla2x00_marker(vha, req, |
|---|
| 3654 | | - rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) |
|---|
| 3944 | + if (qla2x00_marker(vha, ha->base_qpair, |
|---|
| 3945 | + 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) |
|---|
| 3655 | 3946 | return EXT_STATUS_MAILBOX; |
|---|
| 3656 | 3947 | vha->marker_needed = 0; |
|---|
| 3657 | 3948 | } |
|---|
| .. | .. |
|---|
| 3659 | 3950 | /* Acquire ring specific lock */ |
|---|
| 3660 | 3951 | spin_lock_irqsave(&ha->hardware_lock, flags); |
|---|
| 3661 | 3952 | |
|---|
| 3662 | | - /* Check for room in outstanding command list. */ |
|---|
| 3663 | | - handle = req->current_outstanding_cmd; |
|---|
| 3664 | | - for (index = 1; index < req->num_outstanding_cmds; index++) { |
|---|
| 3665 | | - handle++; |
|---|
| 3666 | | - if (handle == req->num_outstanding_cmds) |
|---|
| 3667 | | - handle = 1; |
|---|
| 3668 | | - if (!req->outstanding_cmds[handle]) |
|---|
| 3669 | | - break; |
|---|
| 3670 | | - } |
|---|
| 3671 | | - |
|---|
| 3672 | | - if (index == req->num_outstanding_cmds) { |
|---|
| 3953 | + handle = qla2xxx_get_next_handle(req); |
|---|
| 3954 | + if (handle == 0) { |
|---|
| 3673 | 3955 | rval = EXT_STATUS_BUSY; |
|---|
| 3674 | 3956 | goto queuing_error; |
|---|
| 3675 | 3957 | } |
|---|
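
The open-coded scan for a free outstanding-command slot (removed above) becomes the shared qla2xxx_get_next_handle() helper. The user-space model below mirrors the removed loop: walk the table circularly starting just past the last handle issued, skip slot 0, and return 0 when the table is full so the caller can report busy.

```c
/* Model of the free-handle scan that qla2xxx_get_next_handle() replaces:
 * circular walk of the outstanding-command table starting just past the
 * last handle issued; slot 0 is reserved and a return of 0 means the table
 * is full.  Mirrors the removed open-coded loop.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_OUTSTANDING	8

struct req_model {
	uint32_t current_outstanding_cmd;
	void *outstanding_cmds[NUM_OUTSTANDING];
};

static uint32_t get_next_handle(struct req_model *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < NUM_OUTSTANDING; index++) {
		handle++;
		if (handle == NUM_OUTSTANDING)
			handle = 1;		/* wrap; slot 0 stays unused */
		if (!req->outstanding_cmds[handle])
			return handle;
	}
	return 0;				/* no free slot: caller busies out */
}

int main(void)
{
	struct req_model req = { 0 };
	static int dummy_cmd;
	uint32_t h;

	req.outstanding_cmds[1] = &dummy_cmd;	/* slot 1 already in use */

	h = get_next_handle(&req);
	printf("next handle: %u\n", h);			/* 2 */
	req.outstanding_cmds[h] = &dummy_cmd;		/* claim it */
	printf("next handle: %u\n", get_next_handle(&req));	/* 3 */
	return 0;
}
```
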
| .. | .. |
|---|
| 3679 | 3961 | |
|---|
| 3680 | 3962 | /* Check for room on request queue. */ |
|---|
| 3681 | 3963 | if (req->cnt < req_cnt + 2) { |
|---|
| 3682 | | - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : |
|---|
| 3683 | | - RD_REG_DWORD_RELAXED(req->req_q_out); |
|---|
| 3964 | + if (IS_SHADOW_REG_CAPABLE(ha)) { |
|---|
| 3965 | + cnt = *req->out_ptr; |
|---|
| 3966 | + } else { |
|---|
| 3967 | + cnt = rd_reg_dword_relaxed(req->req_q_out); |
|---|
| 3968 | + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) |
|---|
| 3969 | + goto queuing_error; |
|---|
| 3970 | + } |
|---|
| 3971 | + |
|---|
| 3684 | 3972 | if (req->ring_index < cnt) |
|---|
| 3685 | 3973 | req->cnt = cnt - req->ring_index; |
|---|
| 3686 | 3974 | else |
|---|
| .. | .. |
|---|
| 3693 | 3981 | } |
|---|
| 3694 | 3982 | |
|---|
| 3695 | 3983 | cmd_pkt = (struct cmd_bidir *)req->ring_ptr; |
|---|
| 3696 | | - cmd_pkt->handle = MAKE_HANDLE(req->id, handle); |
|---|
| 3984 | + cmd_pkt->handle = make_handle(req->id, handle); |
|---|
| 3697 | 3985 | |
|---|
| 3698 | 3986 | /* Zero out remaining portion of packet. */ |
|---|
| 3699 | 3987 | /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ |
|---|
| .. | .. |
|---|
| 3719 | 4007 | qla2x00_start_iocbs(vha, req); |
|---|
| 3720 | 4008 | queuing_error: |
|---|
| 3721 | 4009 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
|---|
| 4010 | + |
|---|
| 3722 | 4011 | return rval; |
|---|
| 3723 | 4012 | } |
|---|