| .. | .. |
|---|
| 1 | 1 | /******************************************************************* |
|---|
| 2 | 2 | * This file is part of the Emulex Linux Device Driver for * |
|---|
| 3 | 3 | * Fibre Channel Host Bus Adapters. * |
|---|
| 4 | | - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
|---|
| 4 | + * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * |
|---|
| 5 | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
|---|
| 6 | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
|---|
| 7 | 7 | * EMULEX and SLI are trademarks of Emulex. * |
|---|
| .. | .. |
|---|
| 35 | 35 | #include <scsi/scsi_transport_fc.h> |
|---|
| 36 | 36 | #include <scsi/fc/fc_fs.h> |
|---|
| 37 | 37 | #include <linux/aer.h> |
|---|
| 38 | +#include <linux/crash_dump.h> |
|---|
| 38 | 39 | #ifdef CONFIG_X86 |
|---|
| 39 | 40 | #include <asm/set_memory.h> |
|---|
| 40 | 41 | #endif |
|---|
| 41 | | - |
|---|
| 42 | | -#include <linux/nvme-fc-driver.h> |
|---|
| 43 | 42 | |
|---|
| 44 | 43 | #include "lpfc_hw4.h" |
|---|
| 45 | 44 | #include "lpfc_hw.h" |
|---|
| .. | .. |
|---|
| 50 | 49 | #include "lpfc.h" |
|---|
| 51 | 50 | #include "lpfc_scsi.h" |
|---|
| 52 | 51 | #include "lpfc_nvme.h" |
|---|
| 53 | | -#include "lpfc_nvmet.h" |
|---|
| 54 | 52 | #include "lpfc_crtn.h" |
|---|
| 55 | 53 | #include "lpfc_logmsg.h" |
|---|
| 56 | 54 | #include "lpfc_compat.h" |
|---|
| .. | .. |
|---|
| 78 | 76 | struct hbq_dmabuf *); |
|---|
| 79 | 77 | static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, |
|---|
| 80 | 78 | struct hbq_dmabuf *dmabuf); |
|---|
| 81 | | -static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *, |
|---|
| 82 | | - struct lpfc_cqe *); |
|---|
| 79 | +static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, |
|---|
| 80 | + struct lpfc_queue *cq, struct lpfc_cqe *cqe); |
|---|
| 83 | 81 | static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, |
|---|
| 84 | 82 | int); |
|---|
| 85 | 83 | static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, |
|---|
| 86 | | - struct lpfc_eqe *eqe, uint32_t qidx); |
|---|
| 84 | + struct lpfc_queue *eq, |
|---|
| 85 | + struct lpfc_eqe *eqe); |
|---|
| 87 | 86 | static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); |
|---|
| 88 | 87 | static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); |
|---|
| 89 | | -static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, |
|---|
| 90 | | - struct lpfc_sli_ring *pring, |
|---|
| 91 | | - struct lpfc_iocbq *cmdiocb); |
|---|
| 88 | +static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q); |
|---|
| 89 | +static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, |
|---|
| 90 | + struct lpfc_queue *cq, |
|---|
| 91 | + struct lpfc_cqe *cqe); |
|---|
| 92 | 92 | |
|---|
| 93 | 93 | static IOCB_t * |
|---|
| 94 | 94 | lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) |
|---|
| .. | .. |
|---|
| 110 | 110 | * endianness. This function can be called with or without |
|---|
| 111 | 111 | * lock. |
|---|
| 112 | 112 | **/ |
|---|
| 113 | | -void |
|---|
| 113 | +static void |
|---|
| 114 | 114 | lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) |
|---|
| 115 | 115 | { |
|---|
| 116 | 116 | uint64_t *src = srcp; |
|---|
| .. | .. |
|---|
| 150 | 150 | /* sanity check on queue memory */ |
|---|
| 151 | 151 | if (unlikely(!q)) |
|---|
| 152 | 152 | return -ENOMEM; |
|---|
| 153 | | - temp_wqe = q->qe[q->host_index].wqe; |
|---|
| 153 | + temp_wqe = lpfc_sli4_qe(q, q->host_index); |
|---|
| 154 | 154 | |
|---|
| 155 | 155 | /* If the host has not yet processed the next entry then we are done */ |
|---|
| 156 | 156 | idx = ((q->host_index + 1) % q->entry_count); |
|---|
| .. | .. |
|---|
| 160 | 160 | } |
|---|
| 161 | 161 | q->WQ_posted++; |
|---|
| 162 | 162 | /* set consumption flag every once in a while */ |
|---|
| 163 | | - if (!((q->host_index + 1) % q->entry_repost)) |
|---|
| 163 | + if (!((q->host_index + 1) % q->notify_interval)) |
|---|
| 164 | 164 | bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); |
|---|
| 165 | 165 | else |
|---|
| 166 | 166 | bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); |
|---|
| .. | .. |
|---|
| 228 | 228 | * This routine will update the HBA index of a queue to reflect consumption of |
|---|
| 229 | 229 | * Work Queue Entries by the HBA. When the HBA indicates that it has consumed |
|---|
| 230 | 230 | * an entry the host calls this function to update the queue's internal |
|---|
| 231 | | - * pointers. This routine returns the number of entries that were consumed by |
|---|
| 232 | | - * the HBA. |
|---|
| 231 | + * pointers. |
|---|
| 233 | 232 | **/ |
|---|
| 234 | | -static uint32_t |
|---|
| 233 | +static void |
|---|
| 235 | 234 | lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) |
|---|
| 236 | 235 | { |
|---|
| 237 | | - uint32_t released = 0; |
|---|
| 238 | | - |
|---|
| 239 | 236 | /* sanity check on queue memory */ |
|---|
| 240 | 237 | if (unlikely(!q)) |
|---|
| 241 | | - return 0; |
|---|
| 238 | + return; |
|---|
| 242 | 239 | |
|---|
| 243 | | - if (q->hba_index == index) |
|---|
| 244 | | - return 0; |
|---|
| 245 | | - do { |
|---|
| 246 | | - q->hba_index = ((q->hba_index + 1) % q->entry_count); |
|---|
| 247 | | - released++; |
|---|
| 248 | | - } while (q->hba_index != index); |
|---|
| 249 | | - return released; |
|---|
| 240 | + q->hba_index = index; |
|---|
| 250 | 241 | } |
|---|
| 251 | 242 | |
|---|
| 252 | 243 | /** |
|---|
| 253 | 244 | * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue |
|---|
| 254 | 245 | * @q: The Mailbox Queue to operate on. |
|---|
| 255 | | - * @wqe: The Mailbox Queue Entry to put on the Work queue. |
|---|
| 246 | + * @mqe: The Mailbox Queue Entry to put on the Work queue. |
|---|
| 256 | 247 | * |
|---|
| 257 | 248 | * This routine will copy the contents of @mqe to the next available entry on |
|---|
| 258 | 249 | * the @q. This function will then ring the Work Queue Doorbell to signal the |
|---|
| .. | .. |
|---|
| 270 | 261 | /* sanity check on queue memory */ |
|---|
| 271 | 262 | if (unlikely(!q)) |
|---|
| 272 | 263 | return -ENOMEM; |
|---|
| 273 | | - temp_mqe = q->qe[q->host_index].mqe; |
|---|
| 264 | + temp_mqe = lpfc_sli4_qe(q, q->host_index); |
|---|
| 274 | 265 | |
|---|
| 275 | 266 | /* If the host has not yet processed the next entry then we are done */ |
|---|
| 276 | 267 | if (((q->host_index + 1) % q->entry_count) == q->hba_index) |
|---|
| .. | .. |
|---|
| 325 | 316 | static struct lpfc_eqe * |
|---|
| 326 | 317 | lpfc_sli4_eq_get(struct lpfc_queue *q) |
|---|
| 327 | 318 | { |
|---|
| 328 | | - struct lpfc_hba *phba; |
|---|
| 329 | 319 | struct lpfc_eqe *eqe; |
|---|
| 330 | | - uint32_t idx; |
|---|
| 331 | 320 | |
|---|
| 332 | 321 | /* sanity check on queue memory */ |
|---|
| 333 | 322 | if (unlikely(!q)) |
|---|
| 334 | 323 | return NULL; |
|---|
| 335 | | - phba = q->phba; |
|---|
| 336 | | - eqe = q->qe[q->hba_index].eqe; |
|---|
| 324 | + eqe = lpfc_sli4_qe(q, q->host_index); |
|---|
| 337 | 325 | |
|---|
| 338 | 326 | /* If the next EQE is not valid then we are done */ |
|---|
| 339 | 327 | if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid) |
|---|
| 340 | 328 | return NULL; |
|---|
| 341 | | - /* If the host has not yet processed the next entry then we are done */ |
|---|
| 342 | | - idx = ((q->hba_index + 1) % q->entry_count); |
|---|
| 343 | | - if (idx == q->host_index) |
|---|
| 344 | | - return NULL; |
|---|
| 345 | | - |
|---|
| 346 | | - q->hba_index = idx; |
|---|
| 347 | | - /* if the index wrapped around, toggle the valid bit */ |
|---|
| 348 | | - if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index) |
|---|
| 349 | | - q->qe_valid = (q->qe_valid) ? 0 : 1; |
|---|
| 350 | | - |
|---|
| 351 | 329 | |
|---|
| 352 | 330 | /* |
|---|
| 353 | 331 | * insert barrier for instruction interlock : data from the hardware |
|---|
| .. | .. |
|---|
| 367 | 345 | * @q: The Event Queue to disable interrupts |
|---|
| 368 | 346 | * |
|---|
| 369 | 347 | **/ |
|---|
| 370 | | -inline void |
|---|
| 348 | +void |
|---|
| 371 | 349 | lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) |
|---|
| 372 | 350 | { |
|---|
| 373 | 351 | struct lpfc_register doorbell; |
|---|
| .. | .. |
|---|
| 386 | 364 | * @q: The Event Queue to disable interrupts |
|---|
| 387 | 365 | * |
|---|
| 388 | 366 | **/ |
|---|
| 389 | | -inline void |
|---|
| 367 | +void |
|---|
| 390 | 368 | lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q) |
|---|
| 391 | 369 | { |
|---|
| 392 | 370 | struct lpfc_register doorbell; |
|---|
| .. | .. |
|---|
| 397 | 375 | } |
|---|
| 398 | 376 | |
|---|
| 399 | 377 | /** |
|---|
| 400 | | - * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ |
|---|
| 378 | + * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state |
|---|
| 379 | + * @phba: adapter with EQ |
|---|
| 401 | 380 | * @q: The Event Queue that the host has completed processing for. |
|---|
| 381 | + * @count: Number of elements that have been consumed |
|---|
| 402 | 382 | * @arm: Indicates whether the host wants to arms this CQ. |
|---|
| 403 | 383 | * |
|---|
| 404 | | - * This routine will mark all Event Queue Entries on @q, from the last |
|---|
| 405 | | - * known completed entry to the last entry that was processed, as completed |
|---|
| 406 | | - * by clearing the valid bit for each completion queue entry. Then it will |
|---|
| 407 | | - * notify the HBA, by ringing the doorbell, that the EQEs have been processed. |
|---|
| 408 | | - * The internal host index in the @q will be updated by this routine to indicate |
|---|
| 409 | | - * that the host has finished processing the entries. The @arm parameter |
|---|
| 410 | | - * indicates that the queue should be rearmed when ringing the doorbell. |
|---|
| 411 | | - * |
|---|
| 412 | | - * This function will return the number of EQEs that were popped. |
|---|
| 384 | + * This routine will notify the HBA, by ringing the doorbell, that count |
|---|
| 385 | + * number of EQEs have been processed. The @arm parameter indicates whether |
|---|
| 386 | + * the queue should be rearmed when ringing the doorbell. |
|---|
| 413 | 387 | **/ |
|---|
| 414 | | -uint32_t |
|---|
| 415 | | -lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) |
|---|
| 388 | +void |
|---|
| 389 | +lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, |
|---|
| 390 | + uint32_t count, bool arm) |
|---|
| 416 | 391 | { |
|---|
| 417 | | - uint32_t released = 0; |
|---|
| 418 | | - struct lpfc_hba *phba; |
|---|
| 419 | | - struct lpfc_eqe *temp_eqe; |
|---|
| 420 | 392 | struct lpfc_register doorbell; |
|---|
| 421 | 393 | |
|---|
| 422 | 394 | /* sanity check on queue memory */ |
|---|
| 423 | | - if (unlikely(!q)) |
|---|
| 424 | | - return 0; |
|---|
| 425 | | - phba = q->phba; |
|---|
| 426 | | - |
|---|
| 427 | | - /* while there are valid entries */ |
|---|
| 428 | | - while (q->hba_index != q->host_index) { |
|---|
| 429 | | - if (!phba->sli4_hba.pc_sli4_params.eqav) { |
|---|
| 430 | | - temp_eqe = q->qe[q->host_index].eqe; |
|---|
| 431 | | - bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); |
|---|
| 432 | | - } |
|---|
| 433 | | - released++; |
|---|
| 434 | | - q->host_index = ((q->host_index + 1) % q->entry_count); |
|---|
| 435 | | - } |
|---|
| 436 | | - if (unlikely(released == 0 && !arm)) |
|---|
| 437 | | - return 0; |
|---|
| 395 | + if (unlikely(!q || (count == 0 && !arm))) |
|---|
| 396 | + return; |
|---|
| 438 | 397 | |
|---|
| 439 | 398 | /* ring doorbell for number popped */ |
|---|
| 440 | 399 | doorbell.word0 = 0; |
|---|
| .. | .. |
|---|
| 442 | 401 | bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); |
|---|
| 443 | 402 | bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); |
|---|
| 444 | 403 | } |
|---|
| 445 | | - bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); |
|---|
| 404 | + bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); |
|---|
| 446 | 405 | bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); |
|---|
| 447 | 406 | bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, |
|---|
| 448 | 407 | (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); |
|---|
| .. | .. |
|---|
| 451 | 410 | /* PCI read to flush PCI pipeline on re-arming for INTx mode */ |
|---|
| 452 | 411 | if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) |
|---|
| 453 | 412 | readl(q->phba->sli4_hba.EQDBregaddr); |
|---|
| 454 | | - return released; |
|---|
| 455 | 413 | } |
|---|
| 456 | 414 | |
|---|
| 457 | 415 | /** |
|---|
| 458 | | - * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ |
|---|
| 416 | + * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state |
|---|
| 417 | + * @phba: adapter with EQ |
|---|
| 459 | 418 | * @q: The Event Queue that the host has completed processing for. |
|---|
| 419 | + * @count: Number of elements that have been consumed |
|---|
| 460 | 420 | * @arm: Indicates whether the host wants to arms this CQ. |
|---|
| 461 | 421 | * |
|---|
| 462 | | - * This routine will mark all Event Queue Entries on @q, from the last |
|---|
| 463 | | - * known completed entry to the last entry that was processed, as completed |
|---|
| 464 | | - * by clearing the valid bit for each completion queue entry. Then it will |
|---|
| 465 | | - * notify the HBA, by ringing the doorbell, that the EQEs have been processed. |
|---|
| 466 | | - * The internal host index in the @q will be updated by this routine to indicate |
|---|
| 467 | | - * that the host has finished processing the entries. The @arm parameter |
|---|
| 468 | | - * indicates that the queue should be rearmed when ringing the doorbell. |
|---|
| 469 | | - * |
|---|
| 470 | | - * This function will return the number of EQEs that were popped. |
|---|
| 422 | + * This routine will notify the HBA, by ringing the doorbell, that count |
|---|
| 423 | + * number of EQEs have been processed. The @arm parameter indicates whether |
|---|
| 424 | + * the queue should be rearmed when ringing the doorbell. |
|---|
| 471 | 425 | **/ |
|---|
| 472 | | -uint32_t |
|---|
| 473 | | -lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm) |
|---|
| 426 | +void |
|---|
| 427 | +lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, |
|---|
| 428 | + uint32_t count, bool arm) |
|---|
| 474 | 429 | { |
|---|
| 475 | | - uint32_t released = 0; |
|---|
| 476 | | - struct lpfc_hba *phba; |
|---|
| 477 | | - struct lpfc_eqe *temp_eqe; |
|---|
| 478 | 430 | struct lpfc_register doorbell; |
|---|
| 479 | 431 | |
|---|
| 480 | 432 | /* sanity check on queue memory */ |
|---|
| 481 | | - if (unlikely(!q)) |
|---|
| 482 | | - return 0; |
|---|
| 483 | | - phba = q->phba; |
|---|
| 484 | | - |
|---|
| 485 | | - /* while there are valid entries */ |
|---|
| 486 | | - while (q->hba_index != q->host_index) { |
|---|
| 487 | | - if (!phba->sli4_hba.pc_sli4_params.eqav) { |
|---|
| 488 | | - temp_eqe = q->qe[q->host_index].eqe; |
|---|
| 489 | | - bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); |
|---|
| 490 | | - } |
|---|
| 491 | | - released++; |
|---|
| 492 | | - q->host_index = ((q->host_index + 1) % q->entry_count); |
|---|
| 493 | | - } |
|---|
| 494 | | - if (unlikely(released == 0 && !arm)) |
|---|
| 495 | | - return 0; |
|---|
| 433 | + if (unlikely(!q || (count == 0 && !arm))) |
|---|
| 434 | + return; |
|---|
| 496 | 435 | |
|---|
| 497 | 436 | /* ring doorbell for number popped */ |
|---|
| 498 | 437 | doorbell.word0 = 0; |
|---|
| 499 | 438 | if (arm) |
|---|
| 500 | 439 | bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1); |
|---|
| 501 | | - bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released); |
|---|
| 440 | + bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count); |
|---|
| 502 | 441 | bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); |
|---|
| 503 | 442 | writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); |
|---|
| 504 | 443 | /* PCI read to flush PCI pipeline on re-arming for INTx mode */ |
|---|
| 505 | 444 | if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) |
|---|
| 506 | 445 | readl(q->phba->sli4_hba.EQDBregaddr); |
|---|
| 507 | | - return released; |
|---|
| 446 | +} |
|---|
| 447 | + |
|---|
| 448 | +static void |
|---|
| 449 | +__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, |
|---|
| 450 | + struct lpfc_eqe *eqe) |
|---|
| 451 | +{ |
|---|
| 452 | + if (!phba->sli4_hba.pc_sli4_params.eqav) |
|---|
| 453 | + bf_set_le32(lpfc_eqe_valid, eqe, 0); |
|---|
| 454 | + |
|---|
| 455 | + eq->host_index = ((eq->host_index + 1) % eq->entry_count); |
|---|
| 456 | + |
|---|
| 457 | + /* if the index wrapped around, toggle the valid bit */ |
|---|
| 458 | + if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index) |
|---|
| 459 | + eq->qe_valid = (eq->qe_valid) ? 0 : 1; |
|---|
| 460 | +} |
|---|
| 461 | + |
|---|
| 462 | +static void |
|---|
| 463 | +lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) |
|---|
| 464 | +{ |
|---|
| 465 | + struct lpfc_eqe *eqe = NULL; |
|---|
| 466 | + u32 eq_count = 0, cq_count = 0; |
|---|
| 467 | + struct lpfc_cqe *cqe = NULL; |
|---|
| 468 | + struct lpfc_queue *cq = NULL, *childq = NULL; |
|---|
| 469 | + int cqid = 0; |
|---|
| 470 | + |
|---|
| 471 | + /* walk all the EQ entries and drop on the floor */ |
|---|
| 472 | + eqe = lpfc_sli4_eq_get(eq); |
|---|
| 473 | + while (eqe) { |
|---|
| 474 | + /* Get the reference to the corresponding CQ */ |
|---|
| 475 | + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); |
|---|
| 476 | + cq = NULL; |
|---|
| 477 | + |
|---|
| 478 | + list_for_each_entry(childq, &eq->child_list, list) { |
|---|
| 479 | + if (childq->queue_id == cqid) { |
|---|
| 480 | + cq = childq; |
|---|
| 481 | + break; |
|---|
| 482 | + } |
|---|
| 483 | + } |
|---|
| 484 | + /* If CQ is valid, iterate through it and drop all the CQEs */ |
|---|
| 485 | + if (cq) { |
|---|
| 486 | + cqe = lpfc_sli4_cq_get(cq); |
|---|
| 487 | + while (cqe) { |
|---|
| 488 | + __lpfc_sli4_consume_cqe(phba, cq, cqe); |
|---|
| 489 | + cq_count++; |
|---|
| 490 | + cqe = lpfc_sli4_cq_get(cq); |
|---|
| 491 | + } |
|---|
| 492 | + /* Clear and re-arm the CQ */ |
|---|
| 493 | + phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count, |
|---|
| 494 | + LPFC_QUEUE_REARM); |
|---|
| 495 | + cq_count = 0; |
|---|
| 496 | + } |
|---|
| 497 | + __lpfc_sli4_consume_eqe(phba, eq, eqe); |
|---|
| 498 | + eq_count++; |
|---|
| 499 | + eqe = lpfc_sli4_eq_get(eq); |
|---|
| 500 | + } |
|---|
| 501 | + |
|---|
| 502 | + /* Clear and re-arm the EQ */ |
|---|
| 503 | + phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM); |
|---|
| 504 | +} |
|---|
| 505 | + |
|---|
| 506 | +static int |
|---|
| 507 | +lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq, |
|---|
| 508 | + uint8_t rearm) |
|---|
| 509 | +{ |
|---|
| 510 | + struct lpfc_eqe *eqe; |
|---|
| 511 | + int count = 0, consumed = 0; |
|---|
| 512 | + |
|---|
| 513 | + if (cmpxchg(&eq->queue_claimed, 0, 1) != 0) |
|---|
| 514 | + goto rearm_and_exit; |
|---|
| 515 | + |
|---|
| 516 | + eqe = lpfc_sli4_eq_get(eq); |
|---|
| 517 | + while (eqe) { |
|---|
| 518 | + lpfc_sli4_hba_handle_eqe(phba, eq, eqe); |
|---|
| 519 | + __lpfc_sli4_consume_eqe(phba, eq, eqe); |
|---|
| 520 | + |
|---|
| 521 | + consumed++; |
|---|
| 522 | + if (!(++count % eq->max_proc_limit)) |
|---|
| 523 | + break; |
|---|
| 524 | + |
|---|
| 525 | + if (!(count % eq->notify_interval)) { |
|---|
| 526 | + phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, |
|---|
| 527 | + LPFC_QUEUE_NOARM); |
|---|
| 528 | + consumed = 0; |
|---|
| 529 | + } |
|---|
| 530 | + |
|---|
| 531 | + eqe = lpfc_sli4_eq_get(eq); |
|---|
| 532 | + } |
|---|
| 533 | + eq->EQ_processed += count; |
|---|
| 534 | + |
|---|
| 535 | + /* Track the max number of EQEs processed in 1 intr */ |
|---|
| 536 | + if (count > eq->EQ_max_eqe) |
|---|
| 537 | + eq->EQ_max_eqe = count; |
|---|
| 538 | + |
|---|
| 539 | + xchg(&eq->queue_claimed, 0); |
|---|
| 540 | + |
|---|
| 541 | +rearm_and_exit: |
|---|
| 542 | + /* Always clear the EQ. */ |
|---|
| 543 | + phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm); |
|---|
| 544 | + |
|---|
| 545 | + return count; |
|---|
| 508 | 546 | } |
|---|
| 509 | 547 | |
|---|
| 510 | 548 | /** |
|---|
| .. | .. |
|---|
| 519 | 557 | static struct lpfc_cqe * |
|---|
| 520 | 558 | lpfc_sli4_cq_get(struct lpfc_queue *q) |
|---|
| 521 | 559 | { |
|---|
| 522 | | - struct lpfc_hba *phba; |
|---|
| 523 | 560 | struct lpfc_cqe *cqe; |
|---|
| 524 | | - uint32_t idx; |
|---|
| 525 | 561 | |
|---|
| 526 | 562 | /* sanity check on queue memory */ |
|---|
| 527 | 563 | if (unlikely(!q)) |
|---|
| 528 | 564 | return NULL; |
|---|
| 529 | | - phba = q->phba; |
|---|
| 530 | | - cqe = q->qe[q->hba_index].cqe; |
|---|
| 565 | + cqe = lpfc_sli4_qe(q, q->host_index); |
|---|
| 531 | 566 | |
|---|
| 532 | 567 | /* If the next CQE is not valid then we are done */ |
|---|
| 533 | 568 | if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid) |
|---|
| 534 | 569 | return NULL; |
|---|
| 535 | | - /* If the host has not yet processed the next entry then we are done */ |
|---|
| 536 | | - idx = ((q->hba_index + 1) % q->entry_count); |
|---|
| 537 | | - if (idx == q->host_index) |
|---|
| 538 | | - return NULL; |
|---|
| 539 | | - |
|---|
| 540 | | - q->hba_index = idx; |
|---|
| 541 | | - /* if the index wrapped around, toggle the valid bit */ |
|---|
| 542 | | - if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index) |
|---|
| 543 | | - q->qe_valid = (q->qe_valid) ? 0 : 1; |
|---|
| 544 | 570 | |
|---|
| 545 | 571 | /* |
|---|
| 546 | 572 | * insert barrier for instruction interlock : data from the hardware |
|---|
| .. | .. |
|---|
| 554 | 580 | return cqe; |
|---|
| 555 | 581 | } |
|---|
| 556 | 582 | |
|---|
| 583 | +static void |
|---|
| 584 | +__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
|---|
| 585 | + struct lpfc_cqe *cqe) |
|---|
| 586 | +{ |
|---|
| 587 | + if (!phba->sli4_hba.pc_sli4_params.cqav) |
|---|
| 588 | + bf_set_le32(lpfc_cqe_valid, cqe, 0); |
|---|
| 589 | + |
|---|
| 590 | + cq->host_index = ((cq->host_index + 1) % cq->entry_count); |
|---|
| 591 | + |
|---|
| 592 | + /* if the index wrapped around, toggle the valid bit */ |
|---|
| 593 | + if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index) |
|---|
| 594 | + cq->qe_valid = (cq->qe_valid) ? 0 : 1; |
|---|
| 595 | +} |
|---|
| 596 | + |
|---|
| 557 | 597 | /** |
|---|
| 558 | | - * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ |
|---|
| 598 | + * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state. |
|---|
| 599 | + * @phba: the adapter with the CQ |
|---|
| 559 | 600 | * @q: The Completion Queue that the host has completed processing for. |
|---|
| 601 | + * @count: the number of elements that were consumed |
|---|
| 560 | 602 | * @arm: Indicates whether the host wants to arms this CQ. |
|---|
| 561 | 603 | * |
|---|
| 562 | | - * This routine will mark all Completion queue entries on @q, from the last |
|---|
| 563 | | - * known completed entry to the last entry that was processed, as completed |
|---|
| 564 | | - * by clearing the valid bit for each completion queue entry. Then it will |
|---|
| 565 | | - * notify the HBA, by ringing the doorbell, that the CQEs have been processed. |
|---|
| 566 | | - * The internal host index in the @q will be updated by this routine to indicate |
|---|
| 567 | | - * that the host has finished processing the entries. The @arm parameter |
|---|
| 568 | | - * indicates that the queue should be rearmed when ringing the doorbell. |
|---|
| 569 | | - * |
|---|
| 570 | | - * This function will return the number of CQEs that were released. |
|---|
| 604 | + * This routine will notify the HBA, by ringing the doorbell, that the |
|---|
| 605 | + * CQEs have been processed. The @arm parameter specifies whether the |
|---|
| 606 | + * queue should be rearmed when ringing the doorbell. |
|---|
| 571 | 607 | **/ |
|---|
| 572 | | -uint32_t |
|---|
| 573 | | -lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) |
|---|
| 608 | +void |
|---|
| 609 | +lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, |
|---|
| 610 | + uint32_t count, bool arm) |
|---|
| 574 | 611 | { |
|---|
| 575 | | - uint32_t released = 0; |
|---|
| 576 | | - struct lpfc_hba *phba; |
|---|
| 577 | | - struct lpfc_cqe *temp_qe; |
|---|
| 578 | 612 | struct lpfc_register doorbell; |
|---|
| 579 | 613 | |
|---|
| 580 | 614 | /* sanity check on queue memory */ |
|---|
| 581 | | - if (unlikely(!q)) |
|---|
| 582 | | - return 0; |
|---|
| 583 | | - phba = q->phba; |
|---|
| 584 | | - |
|---|
| 585 | | - /* while there are valid entries */ |
|---|
| 586 | | - while (q->hba_index != q->host_index) { |
|---|
| 587 | | - if (!phba->sli4_hba.pc_sli4_params.cqav) { |
|---|
| 588 | | - temp_qe = q->qe[q->host_index].cqe; |
|---|
| 589 | | - bf_set_le32(lpfc_cqe_valid, temp_qe, 0); |
|---|
| 590 | | - } |
|---|
| 591 | | - released++; |
|---|
| 592 | | - q->host_index = ((q->host_index + 1) % q->entry_count); |
|---|
| 593 | | - } |
|---|
| 594 | | - if (unlikely(released == 0 && !arm)) |
|---|
| 595 | | - return 0; |
|---|
| 615 | + if (unlikely(!q || (count == 0 && !arm))) |
|---|
| 616 | + return; |
|---|
| 596 | 617 | |
|---|
| 597 | 618 | /* ring doorbell for number popped */ |
|---|
| 598 | 619 | doorbell.word0 = 0; |
|---|
| 599 | 620 | if (arm) |
|---|
| 600 | 621 | bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); |
|---|
| 601 | | - bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); |
|---|
| 622 | + bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); |
|---|
| 602 | 623 | bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); |
|---|
| 603 | 624 | bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell, |
|---|
| 604 | 625 | (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT)); |
|---|
| 605 | 626 | bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id); |
|---|
| 606 | 627 | writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); |
|---|
| 607 | | - return released; |
|---|
| 608 | 628 | } |
|---|
| 609 | 629 | |
|---|
| 610 | 630 | /** |
|---|
| 611 | | - * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ |
|---|
| 631 | + * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state. |
|---|
| 632 | + * @phba: the adapter with the CQ |
|---|
| 612 | 633 | * @q: The Completion Queue that the host has completed processing for. |
|---|
| 634 | + * @count: the number of elements that were consumed |
|---|
| 613 | 635 | * @arm: Indicates whether the host wants to arms this CQ. |
|---|
| 614 | 636 | * |
|---|
| 615 | | - * This routine will mark all Completion queue entries on @q, from the last |
|---|
| 616 | | - * known completed entry to the last entry that was processed, as completed |
|---|
| 617 | | - * by clearing the valid bit for each completion queue entry. Then it will |
|---|
| 618 | | - * notify the HBA, by ringing the doorbell, that the CQEs have been processed. |
|---|
| 619 | | - * The internal host index in the @q will be updated by this routine to indicate |
|---|
| 620 | | - * that the host has finished processing the entries. The @arm parameter |
|---|
| 621 | | - * indicates that the queue should be rearmed when ringing the doorbell. |
|---|
| 622 | | - * |
|---|
| 623 | | - * This function will return the number of CQEs that were released. |
|---|
| 637 | + * This routine will notify the HBA, by ringing the doorbell, that the |
|---|
| 638 | + * CQEs have been processed. The @arm parameter specifies whether the |
|---|
| 639 | + * queue should be rearmed when ringing the doorbell. |
|---|
| 624 | 640 | **/ |
|---|
| 625 | | -uint32_t |
|---|
| 626 | | -lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm) |
|---|
| 641 | +void |
|---|
| 642 | +lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, |
|---|
| 643 | + uint32_t count, bool arm) |
|---|
| 627 | 644 | { |
|---|
| 628 | | - uint32_t released = 0; |
|---|
| 629 | | - struct lpfc_hba *phba; |
|---|
| 630 | | - struct lpfc_cqe *temp_qe; |
|---|
| 631 | 645 | struct lpfc_register doorbell; |
|---|
| 632 | 646 | |
|---|
| 633 | 647 | /* sanity check on queue memory */ |
|---|
| 634 | | - if (unlikely(!q)) |
|---|
| 635 | | - return 0; |
|---|
| 636 | | - phba = q->phba; |
|---|
| 637 | | - |
|---|
| 638 | | - /* while there are valid entries */ |
|---|
| 639 | | - while (q->hba_index != q->host_index) { |
|---|
| 640 | | - if (!phba->sli4_hba.pc_sli4_params.cqav) { |
|---|
| 641 | | - temp_qe = q->qe[q->host_index].cqe; |
|---|
| 642 | | - bf_set_le32(lpfc_cqe_valid, temp_qe, 0); |
|---|
| 643 | | - } |
|---|
| 644 | | - released++; |
|---|
| 645 | | - q->host_index = ((q->host_index + 1) % q->entry_count); |
|---|
| 646 | | - } |
|---|
| 647 | | - if (unlikely(released == 0 && !arm)) |
|---|
| 648 | | - return 0; |
|---|
| 648 | + if (unlikely(!q || (count == 0 && !arm))) |
|---|
| 649 | + return; |
|---|
| 649 | 650 | |
|---|
| 650 | 651 | /* ring doorbell for number popped */ |
|---|
| 651 | 652 | doorbell.word0 = 0; |
|---|
| 652 | 653 | if (arm) |
|---|
| 653 | 654 | bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1); |
|---|
| 654 | | - bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released); |
|---|
| 655 | + bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count); |
|---|
| 655 | 656 | bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id); |
|---|
| 656 | 657 | writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); |
|---|
| 657 | | - return released; |
|---|
| 658 | 658 | } |
|---|
| 659 | 659 | |
|---|
| 660 | | -/** |
|---|
| 660 | +/* |
|---|
| 661 | 661 | * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue |
|---|
| 662 | | - * @q: The Header Receive Queue to operate on. |
|---|
| 663 | | - * @wqe: The Receive Queue Entry to put on the Receive queue. |
|---|
| 664 | 662 | * |
|---|
| 665 | 663 | * This routine will copy the contents of @wqe to the next available entry on |
|---|
| 666 | 664 | * the @q. This function will then ring the Receive Queue Doorbell to signal the |
|---|
| .. | .. |
|---|
| 684 | 682 | return -ENOMEM; |
|---|
| 685 | 683 | hq_put_index = hq->host_index; |
|---|
| 686 | 684 | dq_put_index = dq->host_index; |
|---|
| 687 | | - temp_hrqe = hq->qe[hq_put_index].rqe; |
|---|
| 688 | | - temp_drqe = dq->qe[dq_put_index].rqe; |
|---|
| 685 | + temp_hrqe = lpfc_sli4_qe(hq, hq_put_index); |
|---|
| 686 | + temp_drqe = lpfc_sli4_qe(dq, dq_put_index); |
|---|
| 689 | 687 | |
|---|
| 690 | 688 | if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) |
|---|
| 691 | 689 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 703 | 701 | hq->RQ_buf_posted++; |
|---|
| 704 | 702 | |
|---|
| 705 | 703 | /* Ring The Header Receive Queue Doorbell */ |
|---|
| 706 | | - if (!(hq->host_index % hq->entry_repost)) { |
|---|
| 704 | + if (!(hq->host_index % hq->notify_interval)) { |
|---|
| 707 | 705 | doorbell.word0 = 0; |
|---|
| 708 | 706 | if (hq->db_format == LPFC_DB_RING_FORMAT) { |
|---|
| 709 | 707 | bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, |
|---|
| 710 | | - hq->entry_repost); |
|---|
| 708 | + hq->notify_interval); |
|---|
| 711 | 709 | bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); |
|---|
| 712 | 710 | } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { |
|---|
| 713 | 711 | bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, |
|---|
| 714 | | - hq->entry_repost); |
|---|
| 712 | + hq->notify_interval); |
|---|
| 715 | 713 | bf_set(lpfc_rq_db_list_fm_index, &doorbell, |
|---|
| 716 | 714 | hq->host_index); |
|---|
| 717 | 715 | bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); |
|---|
| .. | .. |
|---|
| 723 | 721 | return hq_put_index; |
|---|
| 724 | 722 | } |
|---|
| 725 | 723 | |
|---|
| 726 | | -/** |
|---|
| 724 | +/* |
|---|
| 727 | 725 | * lpfc_sli4_rq_release - Updates internal hba index for RQ |
|---|
| 728 | | - * @q: The Header Receive Queue to operate on. |
|---|
| 729 | 726 | * |
|---|
| 730 | 727 | * This routine will update the HBA index of a queue to reflect consumption of |
|---|
| 731 | 728 | * one Receive Queue Entry by the HBA. When the HBA indicates that it has |
|---|
| .. | .. |
|---|
| 924 | 921 | mod_timer(&phba->rrq_tmr, next_time); |
|---|
| 925 | 922 | list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { |
|---|
| 926 | 923 | list_del(&rrq->list); |
|---|
| 927 | | - if (!rrq->send_rrq) |
|---|
| 924 | + if (!rrq->send_rrq) { |
|---|
| 928 | 925 | /* this call will free the rrq */ |
|---|
| 929 | | - lpfc_clr_rrq_active(phba, rrq->xritag, rrq); |
|---|
| 930 | | - else if (lpfc_send_rrq(phba, rrq)) { |
|---|
| 926 | + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); |
|---|
| 927 | + } else if (lpfc_send_rrq(phba, rrq)) { |
|---|
| 931 | 928 | /* if we send the rrq then the completion handler |
|---|
| 932 | 929 | * will clear the bit in the xribitmap. |
|---|
| 933 | 930 | */ |
|---|
| .. | .. |
|---|
| 1009 | 1006 | * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. |
|---|
| 1010 | 1007 | * @phba: Pointer to HBA context object. |
|---|
| 1011 | 1008 | * @ndlp: Targets nodelist pointer for this exchange. |
|---|
| 1012 | | - * @xritag the xri in the bitmap to test. |
|---|
| 1009 | + * @xritag: the xri in the bitmap to test. |
|---|
| 1013 | 1010 | * |
|---|
| 1014 | | - * This function is called with hbalock held. This function |
|---|
| 1015 | | - * returns 0 = rrq not active for this xri |
|---|
| 1016 | | - * 1 = rrq is valid for this xri. |
|---|
| 1011 | + * This function returns: |
|---|
| 1012 | + * 0 = rrq not active for this xri |
|---|
| 1013 | + * 1 = rrq is valid for this xri. |
|---|
| 1017 | 1014 | **/ |
|---|
| 1018 | 1015 | int |
|---|
| 1019 | 1016 | lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, |
|---|
| 1020 | 1017 | uint16_t xritag) |
|---|
| 1021 | 1018 | { |
|---|
| 1022 | | - lockdep_assert_held(&phba->hbalock); |
|---|
| 1023 | 1019 | if (!ndlp) |
|---|
| 1024 | 1020 | return 0; |
|---|
| 1025 | 1021 | if (!ndlp->active_rrqs_xri_bitmap) |
|---|
| 1026 | 1022 | return 0; |
|---|
| 1027 | 1023 | if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap)) |
|---|
| 1028 | | - return 1; |
|---|
| 1024 | + return 1; |
|---|
| 1029 | 1025 | else |
|---|
| 1030 | 1026 | return 0; |
|---|
| 1031 | 1027 | } |
|---|
| .. | .. |
|---|
| 1081 | 1077 | goto out; |
|---|
| 1082 | 1078 | |
|---|
| 1083 | 1079 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 1084 | | - rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); |
|---|
| 1080 | + rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC); |
|---|
| 1085 | 1081 | if (!rrq) { |
|---|
| 1086 | 1082 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
|---|
| 1087 | 1083 | "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" |
|---|
| .. | .. |
|---|
| 1120 | 1116 | /** |
|---|
| 1121 | 1117 | * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool |
|---|
| 1122 | 1118 | * @phba: Pointer to HBA context object. |
|---|
| 1123 | | - * @piocb: Pointer to the iocbq. |
|---|
| 1119 | + * @piocbq: Pointer to the iocbq. |
|---|
| 1124 | 1120 | * |
|---|
| 1125 | | - * This function is called with the ring lock held. This function |
|---|
| 1126 | | - * gets a new driver sglq object from the sglq list. If the |
|---|
| 1127 | | - * list is not empty then it is successful, it returns pointer to the newly |
|---|
| 1128 | | - * allocated sglq object else it returns NULL. |
|---|
| 1121 | + * The driver calls this function with either the nvme ls ring lock |
|---|
| 1122 | + * or the fc els ring lock held depending on the iocb usage. This function |
|---|
| 1123 | + * gets a new driver sglq object from the sglq list. If the list is not empty |
|---|
| 1124 | + * then it is successful, it returns pointer to the newly allocated sglq |
|---|
| 1125 | + * object else it returns NULL. |
|---|
| 1129 | 1126 | **/ |
|---|
| 1130 | 1127 | static struct lpfc_sglq * |
|---|
| 1131 | 1128 | __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) |
|---|
| .. | .. |
|---|
| 1133 | 1130 | struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; |
|---|
| 1134 | 1131 | struct lpfc_sglq *sglq = NULL; |
|---|
| 1135 | 1132 | struct lpfc_sglq *start_sglq = NULL; |
|---|
| 1136 | | - struct lpfc_scsi_buf *lpfc_cmd; |
|---|
| 1133 | + struct lpfc_io_buf *lpfc_cmd; |
|---|
| 1137 | 1134 | struct lpfc_nodelist *ndlp; |
|---|
| 1135 | + struct lpfc_sli_ring *pring = NULL; |
|---|
| 1138 | 1136 | int found = 0; |
|---|
| 1139 | 1137 | |
|---|
| 1140 | | - lockdep_assert_held(&phba->hbalock); |
|---|
| 1138 | + if (piocbq->iocb_flag & LPFC_IO_NVME_LS) |
|---|
| 1139 | + pring = phba->sli4_hba.nvmels_wq->pring; |
|---|
| 1140 | + else |
|---|
| 1141 | + pring = lpfc_phba_elsring(phba); |
|---|
| 1142 | + |
|---|
| 1143 | + lockdep_assert_held(&pring->ring_lock); |
|---|
| 1141 | 1144 | |
|---|
| 1142 | 1145 | if (piocbq->iocb_flag & LPFC_IO_FCP) { |
|---|
| 1143 | | - lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; |
|---|
| 1146 | + lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1; |
|---|
| 1144 | 1147 | ndlp = lpfc_cmd->rdata->pnode; |
|---|
| 1145 | 1148 | } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && |
|---|
| 1146 | 1149 | !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) { |
|---|
| .. | .. |
|---|
| 1189 | 1192 | /** |
|---|
| 1190 | 1193 | * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool |
|---|
| 1191 | 1194 | * @phba: Pointer to HBA context object. |
|---|
| 1192 | | - * @piocb: Pointer to the iocbq. |
|---|
| 1195 | + * @piocbq: Pointer to the iocbq. |
|---|
| 1193 | 1196 | * |
|---|
| 1194 | 1197 | * This function is called with the sgl_list lock held. This function |
|---|
| 1195 | 1198 | * gets a new driver sglq object from the sglq list. If the |
|---|
| .. | .. |
|---|
| 1240 | 1243 | * @phba: Pointer to HBA context object. |
|---|
| 1241 | 1244 | * @iocbq: Pointer to driver iocb object. |
|---|
| 1242 | 1245 | * |
|---|
| 1243 | | - * This function is called with hbalock held to release driver |
|---|
| 1244 | | - * iocb object to the iocb pool. The iotag in the iocb object |
|---|
| 1246 | + * This function is called to release the driver iocb object |
|---|
| 1247 | + * to the iocb pool. The iotag in the iocb object |
|---|
| 1245 | 1248 | * does not change for each use of the iocb object. This function |
|---|
| 1246 | 1249 | * clears all other fields of the iocb object when it is freed. |
|---|
| 1247 | 1250 | * The sqlq structure that holds the xritag and phys and virtual |
|---|
| .. | .. |
|---|
| 1251 | 1254 | * this IO was aborted then the sglq entry it put on the |
|---|
| 1252 | 1255 | * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the |
|---|
| 1253 | 1256 | * IO has good status or fails for any other reason then the sglq |
|---|
| 1254 | | - * entry is added to the free list (lpfc_els_sgl_list). |
|---|
| 1257 | + * entry is added to the free list (lpfc_els_sgl_list). The hbalock is |
|---|
| 1258 | + * asserted held in the code path calling this routine. |
|---|
| 1255 | 1259 | **/ |
|---|
| 1256 | 1260 | static void |
|---|
| 1257 | 1261 | __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) |
|---|
| .. | .. |
|---|
| 1260 | 1264 | size_t start_clean = offsetof(struct lpfc_iocbq, iocb); |
|---|
| 1261 | 1265 | unsigned long iflag = 0; |
|---|
| 1262 | 1266 | struct lpfc_sli_ring *pring; |
|---|
| 1263 | | - |
|---|
| 1264 | | - lockdep_assert_held(&phba->hbalock); |
|---|
| 1265 | 1267 | |
|---|
| 1266 | 1268 | if (iocbq->sli4_xritag == NO_XRI) |
|---|
| 1267 | 1269 | sglq = NULL; |
|---|
| .. | .. |
|---|
| 1325 | 1327 | * @phba: Pointer to HBA context object. |
|---|
| 1326 | 1328 | * @iocbq: Pointer to driver iocb object. |
|---|
| 1327 | 1329 | * |
|---|
| 1328 | | - * This function is called with hbalock held to release driver |
|---|
| 1329 | | - * iocb object to the iocb pool. The iotag in the iocb object |
|---|
| 1330 | | - * does not change for each use of the iocb object. This function |
|---|
| 1331 | | - * clears all other fields of the iocb object when it is freed. |
|---|
| 1330 | + * This function is called to release the driver iocb object to the |
|---|
| 1331 | + * iocb pool. The iotag in the iocb object does not change for each |
|---|
| 1332 | + * use of the iocb object. This function clears all other fields of |
|---|
| 1333 | + * the iocb object when it is freed. The hbalock is asserted held in |
|---|
| 1334 | + * the code path calling this routine. |
|---|
| 1332 | 1335 | **/ |
|---|
| 1333 | 1336 | static void |
|---|
| 1334 | 1337 | __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) |
|---|
| 1335 | 1338 | { |
|---|
| 1336 | 1339 | size_t start_clean = offsetof(struct lpfc_iocbq, iocb); |
|---|
| 1337 | | - |
|---|
| 1338 | | - lockdep_assert_held(&phba->hbalock); |
|---|
| 1339 | 1340 | |
|---|
| 1340 | 1341 | /* |
|---|
| 1341 | 1342 | * Clean all volatile data fields, preserve iotag and node struct. |
|---|
| .. | .. |
|---|
| 1405 | 1406 | |
|---|
| 1406 | 1407 | while (!list_empty(iocblist)) { |
|---|
| 1407 | 1408 | list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); |
|---|
| 1408 | | - if (!piocb->iocb_cmpl) |
|---|
| 1409 | | - lpfc_sli_release_iocbq(phba, piocb); |
|---|
| 1410 | | - else { |
|---|
| 1409 | + if (!piocb->iocb_cmpl) { |
|---|
| 1410 | + if (piocb->iocb_flag & LPFC_IO_NVME) |
|---|
| 1411 | + lpfc_nvme_cancel_iocb(phba, piocb); |
|---|
| 1412 | + else |
|---|
| 1413 | + lpfc_sli_release_iocbq(phba, piocb); |
|---|
| 1414 | + } else { |
|---|
| 1411 | 1415 | piocb->iocb.ulpStatus = ulpstatus; |
|---|
| 1412 | 1416 | piocb->iocb.un.ulpWord[4] = ulpWord4; |
|---|
| 1413 | 1417 | (piocb->iocb_cmpl) (phba, piocb, piocb); |
|---|
| .. | .. |
|---|
| 1485 | 1489 | case DSSCMD_IWRITE64_CX: |
|---|
| 1486 | 1490 | case DSSCMD_IREAD64_CR: |
|---|
| 1487 | 1491 | case DSSCMD_IREAD64_CX: |
|---|
| 1492 | + case CMD_SEND_FRAME: |
|---|
| 1488 | 1493 | type = LPFC_SOL_IOCB; |
|---|
| 1489 | 1494 | break; |
|---|
| 1490 | 1495 | case CMD_ABORT_XRI_CN: |
|---|
| .. | .. |
|---|
| 1559 | 1564 | lpfc_config_ring(phba, i, pmb); |
|---|
| 1560 | 1565 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); |
|---|
| 1561 | 1566 | if (rc != MBX_SUCCESS) { |
|---|
| 1562 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 1567 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1563 | 1568 | "0446 Adapter failed to init (%d), " |
|---|
| 1564 | 1569 | "mbxCmd x%x CFG_RING, mbxStatus x%x, " |
|---|
| 1565 | 1570 | "ring %d\n", |
|---|
| .. | .. |
|---|
| 1580 | 1585 | * @pring: Pointer to driver SLI ring object. |
|---|
| 1581 | 1586 | * @piocb: Pointer to the driver iocb object. |
|---|
| 1582 | 1587 | * |
|---|
| 1583 | | - * This function is called with hbalock held. The function adds the |
|---|
| 1588 | + * The driver calls this function with the hbalock held for SLI3 ports or |
|---|
| 1589 | + * the ring lock held for SLI4 ports. The function adds the |
|---|
| 1584 | 1590 | * new iocb to txcmplq of the given ring. This function always returns |
|---|
| 1585 | 1591 | * 0. If this function is called for ELS ring, this function checks if |
|---|
| 1586 | 1592 | * there is a vport associated with the ELS command. This function also |
|---|
| .. | .. |
|---|
| 1590 | 1596 | lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
|---|
| 1591 | 1597 | struct lpfc_iocbq *piocb) |
|---|
| 1592 | 1598 | { |
|---|
| 1593 | | - lockdep_assert_held(&phba->hbalock); |
|---|
| 1599 | + if (phba->sli_rev == LPFC_SLI_REV4) |
|---|
| 1600 | + lockdep_assert_held(&pring->ring_lock); |
|---|
| 1601 | + else |
|---|
| 1602 | + lockdep_assert_held(&phba->hbalock); |
|---|
| 1594 | 1603 | |
|---|
| 1595 | 1604 | BUG_ON(!piocb); |
|---|
| 1596 | 1605 | |
|---|
| 1597 | 1606 | list_add_tail(&piocb->list, &pring->txcmplq); |
|---|
| 1598 | 1607 | piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; |
|---|
| 1608 | + pring->txcmplq_cnt++; |
|---|
| 1599 | 1609 | |
|---|
| 1600 | 1610 | if ((unlikely(pring->ringno == LPFC_ELS_RING)) && |
|---|
| 1601 | 1611 | (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && |
|---|
| .. | .. |
|---|
| 1663 | 1673 | pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); |
|---|
| 1664 | 1674 | |
|---|
| 1665 | 1675 | if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { |
|---|
| 1666 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 1676 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1667 | 1677 | "0315 Ring %d issue: portCmdGet %d " |
|---|
| 1668 | 1678 | "is bigger than cmd ring %d\n", |
|---|
| 1669 | 1679 | pring->ringno, |
|---|
| .. | .. |
|---|
| 1773 | 1783 | * @nextiocb: Pointer to driver iocb object which need to be |
|---|
| 1774 | 1784 | * posted to firmware. |
|---|
| 1775 | 1785 | * |
|---|
| 1776 | | - * This function is called with hbalock held to post a new iocb to |
|---|
| 1777 | | - * the firmware. This function copies the new iocb to ring iocb slot and |
|---|
| 1778 | | - * updates the ring pointers. It adds the new iocb to txcmplq if there is |
|---|
| 1786 | + * This function is called to post a new iocb to the firmware. This |
|---|
| 1787 | + * function copies the new iocb to ring iocb slot and updates the |
|---|
| 1788 | + * ring pointers. It adds the new iocb to txcmplq if there is |
|---|
| 1779 | 1789 | * a completion call back for this iocb else the function will free the |
|---|
| 1780 | | - * iocb object. |
|---|
| 1790 | + * iocb object. The hbalock is asserted held in the code path calling |
|---|
| 1791 | + * this routine. |
|---|
| 1781 | 1792 | **/ |
|---|
| 1782 | 1793 | static void |
|---|
| 1783 | 1794 | lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
|---|
| 1784 | 1795 | IOCB_t *iocb, struct lpfc_iocbq *nextiocb) |
|---|
| 1785 | 1796 | { |
|---|
| 1786 | | - lockdep_assert_held(&phba->hbalock); |
|---|
| 1787 | 1797 | /* |
|---|
| 1788 | 1798 | * Set up an iotag |
|---|
| 1789 | 1799 | */ |
|---|
| .. | .. |
|---|
| 1949 | 1959 | hbqp->local_hbqGetIdx = getidx; |
|---|
| 1950 | 1960 | |
|---|
| 1951 | 1961 | if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { |
|---|
| 1952 | | - lpfc_printf_log(phba, KERN_ERR, |
|---|
| 1953 | | - LOG_SLI | LOG_VPORT, |
|---|
| 1962 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 1954 | 1963 | "1802 HBQ %d: local_hbqGetIdx " |
|---|
| 1955 | 1964 | "%u is > than hbqp->entry_count %u\n", |
|---|
| 1956 | 1965 | hbqno, hbqp->local_hbqGetIdx, |
|---|
| .. | .. |
|---|
| 2218 | 2227 | lpfc_hbq_defs[qno]->init_count); |
|---|
| 2219 | 2228 | } |
|---|
| 2220 | 2229 | |
|---|
| 2221 | | -/** |
|---|
| 2230 | +/* |
|---|
| 2222 | 2231 | * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list |
|---|
| 2223 | | - * @phba: Pointer to HBA context object. |
|---|
| 2224 | | - * @hbqno: HBQ number. |
|---|
| 2225 | 2232 | * |
|---|
| 2226 | 2233 | * This function removes the first hbq buffer on an hbq list and returns a |
|---|
| 2227 | 2234 | * pointer to that buffer. If it finds no buffers on the list it returns NULL. |
|---|
| .. | .. |
|---|
| 2240 | 2247 | /** |
|---|
| 2241 | 2248 | * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list |
|---|
| 2242 | 2249 | * @phba: Pointer to HBA context object. |
|---|
| 2243 | | - * @hbqno: HBQ number. |
|---|
| 2250 | + * @hrq: HBQ number. |
|---|
| 2244 | 2251 | * |
|---|
| 2245 | 2252 | * This function removes the first RQ buffer on an RQ buffer list and returns a |
|---|
| 2246 | 2253 | * pointer to that buffer. If it finds no buffers on the list it returns NULL. |
|---|
| .. | .. |
|---|
| 2289 | 2296 | } |
|---|
| 2290 | 2297 | } |
|---|
| 2291 | 2298 | spin_unlock_irq(&phba->hbalock); |
|---|
| 2292 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, |
|---|
| 2299 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 2293 | 2300 | "1803 Bad hbq tag. Data: x%x x%x\n", |
|---|
| 2294 | 2301 | tag, phba->hbqs[tag >> 16].buffer_count); |
|---|
| 2295 | 2302 | return NULL; |
|---|
| .. | .. |
|---|
| 2435 | 2442 | return; |
|---|
| 2436 | 2443 | } |
|---|
| 2437 | 2444 | |
|---|
| 2445 | +static void |
|---|
| 2446 | +__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
|---|
| 2447 | +{ |
|---|
| 2448 | + unsigned long iflags; |
|---|
| 2449 | + |
|---|
| 2450 | + if (ndlp->nlp_flag & NLP_RELEASE_RPI) { |
|---|
| 2451 | + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); |
|---|
| 2452 | + spin_lock_irqsave(&vport->phba->ndlp_lock, iflags); |
|---|
| 2453 | + ndlp->nlp_flag &= ~NLP_RELEASE_RPI; |
|---|
| 2454 | + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; |
|---|
| 2455 | + spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags); |
|---|
| 2456 | + } |
|---|
| 2457 | + ndlp->nlp_flag &= ~NLP_UNREG_INP; |
|---|
| 2458 | +} |
|---|
| 2438 | 2459 | |
|---|
| 2439 | 2460 | /** |
|---|
| 2440 | 2461 | * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler |
|---|
| .. | .. |
|---|
| 2456 | 2477 | uint16_t rpi, vpi; |
|---|
| 2457 | 2478 | int rc; |
|---|
| 2458 | 2479 | |
|---|
| 2459 | | - mp = (struct lpfc_dmabuf *) (pmb->context1); |
|---|
| 2480 | + mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); |
|---|
| 2460 | 2481 | |
|---|
| 2461 | 2482 | if (mp) { |
|---|
| 2462 | 2483 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
|---|
| .. | .. |
|---|
| 2493 | 2514 | } |
|---|
| 2494 | 2515 | |
|---|
| 2495 | 2516 | if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { |
|---|
| 2496 | | - ndlp = (struct lpfc_nodelist *)pmb->context2; |
|---|
| 2517 | + ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; |
|---|
| 2497 | 2518 | lpfc_nlp_put(ndlp); |
|---|
| 2498 | | - pmb->context2 = NULL; |
|---|
| 2519 | + pmb->ctx_buf = NULL; |
|---|
| 2520 | + pmb->ctx_ndlp = NULL; |
|---|
| 2521 | + } |
|---|
| 2522 | + |
|---|
| 2523 | + if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { |
|---|
| 2524 | + ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; |
|---|
| 2525 | + |
|---|
| 2526 | + /* Check to see if there are any deferred events to process */ |
|---|
| 2527 | + if (ndlp) { |
|---|
| 2528 | + lpfc_printf_vlog( |
|---|
| 2529 | + vport, |
|---|
| 2530 | + KERN_INFO, LOG_MBOX | LOG_DISCOVERY, |
|---|
| 2531 | + "1438 UNREG cmpl deferred mbox x%x " |
|---|
| 2532 | + "on NPort x%x Data: x%x x%x %px\n", |
|---|
| 2533 | + ndlp->nlp_rpi, ndlp->nlp_DID, |
|---|
| 2534 | + ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp); |
|---|
| 2535 | + |
|---|
| 2536 | + if ((ndlp->nlp_flag & NLP_UNREG_INP) && |
|---|
| 2537 | + (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { |
|---|
| 2538 | + ndlp->nlp_flag &= ~NLP_UNREG_INP; |
|---|
| 2539 | + ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; |
|---|
| 2540 | + lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); |
|---|
| 2541 | + } else { |
|---|
| 2542 | + __lpfc_sli_rpi_release(vport, ndlp); |
|---|
| 2543 | + } |
|---|
| 2544 | + if (vport->load_flag & FC_UNLOADING) |
|---|
| 2545 | + lpfc_nlp_put(ndlp); |
|---|
| 2546 | + pmb->ctx_ndlp = NULL; |
|---|
| 2547 | + } |
|---|
| 2499 | 2548 | } |
|---|
| 2500 | 2549 | |
|---|
| 2501 | 2550 | /* Check security permission status on INIT_LINK mailbox command */ |
|---|
| 2502 | 2551 | if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && |
|---|
| 2503 | 2552 | (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) |
|---|
| 2504 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 2553 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 2505 | 2554 | "2860 SLI authentication is required " |
|---|
| 2506 | 2555 | "for INIT_LINK but has not done yet\n"); |
|---|
| 2507 | 2556 | |
|---|
| .. | .. |
|---|
| 2529 | 2578 | struct lpfc_vport *vport = pmb->vport; |
|---|
| 2530 | 2579 | struct lpfc_nodelist *ndlp; |
|---|
| 2531 | 2580 | |
|---|
| 2532 | | - ndlp = pmb->context1; |
|---|
| 2581 | + ndlp = pmb->ctx_ndlp; |
|---|
| 2533 | 2582 | if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { |
|---|
| 2534 | 2583 | if (phba->sli_rev == LPFC_SLI_REV4 && |
|---|
| 2535 | 2584 | (bf_get(lpfc_sli_intf_if_type, |
|---|
| 2536 | 2585 | &phba->sli4_hba.sli_intf) >= |
|---|
| 2537 | 2586 | LPFC_SLI_INTF_IF_TYPE_2)) { |
|---|
| 2538 | 2587 | if (ndlp) { |
|---|
| 2539 | | - lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, |
|---|
| 2540 | | - "0010 UNREG_LOGIN vpi:%x " |
|---|
| 2541 | | - "rpi:%x DID:%x map:%x %p\n", |
|---|
| 2542 | | - vport->vpi, ndlp->nlp_rpi, |
|---|
| 2543 | | - ndlp->nlp_DID, |
|---|
| 2544 | | - ndlp->nlp_usg_map, ndlp); |
|---|
| 2588 | + lpfc_printf_vlog( |
|---|
| 2589 | + vport, KERN_INFO, LOG_MBOX | LOG_SLI, |
|---|
| 2590 | + "0010 UNREG_LOGIN vpi:%x " |
|---|
| 2591 | + "rpi:%x DID:%x defer x%x flg x%x " |
|---|
| 2592 | + "map:%x %px\n", |
|---|
| 2593 | + vport->vpi, ndlp->nlp_rpi, |
|---|
| 2594 | + ndlp->nlp_DID, ndlp->nlp_defer_did, |
|---|
| 2595 | + ndlp->nlp_flag, |
|---|
| 2596 | + ndlp->nlp_usg_map, ndlp); |
|---|
| 2545 | 2597 | ndlp->nlp_flag &= ~NLP_LOGO_ACC; |
|---|
| 2546 | 2598 | lpfc_nlp_put(ndlp); |
|---|
| 2599 | + |
|---|
| 2600 | + /* Check to see if there are any deferred |
|---|
| 2601 | + * events to process |
|---|
| 2602 | + */ |
|---|
| 2603 | + if ((ndlp->nlp_flag & NLP_UNREG_INP) && |
|---|
| 2604 | + (ndlp->nlp_defer_did != |
|---|
| 2605 | + NLP_EVT_NOTHING_PENDING)) { |
|---|
| 2606 | + lpfc_printf_vlog( |
|---|
| 2607 | + vport, KERN_INFO, LOG_DISCOVERY, |
|---|
| 2608 | + "4111 UNREG cmpl deferred " |
|---|
| 2609 | + "clr x%x on " |
|---|
| 2610 | + "NPort x%x Data: x%x x%px\n", |
|---|
| 2611 | + ndlp->nlp_rpi, ndlp->nlp_DID, |
|---|
| 2612 | + ndlp->nlp_defer_did, ndlp); |
|---|
| 2613 | + ndlp->nlp_flag &= ~NLP_UNREG_INP; |
|---|
| 2614 | + ndlp->nlp_defer_did = |
|---|
| 2615 | + NLP_EVT_NOTHING_PENDING; |
|---|
| 2616 | + lpfc_issue_els_plogi( |
|---|
| 2617 | + vport, ndlp->nlp_DID, 0); |
|---|
| 2618 | + } else { |
|---|
| 2619 | + __lpfc_sli_rpi_release(vport, ndlp); |
|---|
| 2620 | + } |
|---|
| 2547 | 2621 | } |
|---|
| 2548 | 2622 | } |
|---|
| 2549 | 2623 | } |
|---|
| .. | .. |
|---|
| 2612 | 2686 | if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == |
|---|
| 2613 | 2687 | MBX_SHUTDOWN) { |
|---|
| 2614 | 2688 | /* Unknown mailbox command compl */ |
|---|
| 2615 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 2689 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 2616 | 2690 | "(%d):0323 Unknown Mailbox command " |
|---|
| 2617 | 2691 | "x%x (x%x/x%x) Cmpl\n", |
|---|
| 2618 | | - pmb->vport ? pmb->vport->vpi : 0, |
|---|
| 2692 | + pmb->vport ? pmb->vport->vpi : |
|---|
| 2693 | + LPFC_VPORT_UNKNOWN, |
|---|
| 2619 | 2694 | pmbox->mbxCommand, |
|---|
| 2620 | 2695 | lpfc_sli_config_mbox_subsys_get(phba, |
|---|
| 2621 | 2696 | pmb), |
|---|
| .. | .. |
|---|
| 2636 | 2711 | "(%d):0305 Mbox cmd cmpl " |
|---|
| 2637 | 2712 | "error - RETRYing Data: x%x " |
|---|
| 2638 | 2713 | "(x%x/x%x) x%x x%x x%x\n", |
|---|
| 2639 | | - pmb->vport ? pmb->vport->vpi : 0, |
|---|
| 2714 | + pmb->vport ? pmb->vport->vpi : |
|---|
| 2715 | + LPFC_VPORT_UNKNOWN, |
|---|
| 2640 | 2716 | pmbox->mbxCommand, |
|---|
| 2641 | 2717 | lpfc_sli_config_mbox_subsys_get(phba, |
|---|
| 2642 | 2718 | pmb), |
|---|
| .. | .. |
|---|
| 2644 | 2720 | pmb), |
|---|
| 2645 | 2721 | pmbox->mbxStatus, |
|---|
| 2646 | 2722 | pmbox->un.varWords[0], |
|---|
| 2647 | | - pmb->vport->port_state); |
|---|
| 2723 | + pmb->vport ? pmb->vport->port_state : |
|---|
| 2724 | + LPFC_VPORT_UNKNOWN); |
|---|
| 2648 | 2725 | pmbox->mbxStatus = 0; |
|---|
| 2649 | 2726 | pmbox->mbxOwner = OWN_HOST; |
|---|
| 2650 | 2727 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); |
|---|
| .. | .. |
|---|
| 2655 | 2732 | |
|---|
| 2656 | 2733 | /* Mailbox cmd <cmd> Cmpl <cmpl> */ |
|---|
| 2657 | 2734 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, |
|---|
| 2658 | | - "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " |
|---|
| 2735 | + "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps " |
|---|
| 2659 | 2736 | "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " |
|---|
| 2660 | 2737 | "x%x x%x x%x\n", |
|---|
| 2661 | 2738 | pmb->vport ? pmb->vport->vpi : 0, |
|---|
| .. | .. |
|---|
| 2710 | 2787 | } |
|---|
| 2711 | 2788 | |
|---|
| 2712 | 2789 | /** |
|---|
| 2790 | + * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer |
|---|
| 2791 | + * containing a NVME LS request. |
|---|
| 2792 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 2793 | + * @piocb: pointer to the iocbq struct representing the sequence starting |
|---|
| 2794 | + * frame. |
|---|
| 2795 | + * |
|---|
| 2796 | + * This routine initially validates the NVME LS, validates there is a login |
|---|
| 2797 | + * with the port that sent the LS, and then calls the appropriate nvme host |
|---|
| 2798 | + * or target LS request handler. |
|---|
| 2799 | + **/ |
|---|
| 2800 | +static void |
|---|
| 2801 | +lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) |
|---|
| 2802 | +{ |
|---|
| 2803 | + struct lpfc_nodelist *ndlp; |
|---|
| 2804 | + struct lpfc_dmabuf *d_buf; |
|---|
| 2805 | + struct hbq_dmabuf *nvmebuf; |
|---|
| 2806 | + struct fc_frame_header *fc_hdr; |
|---|
| 2807 | + struct lpfc_async_xchg_ctx *axchg = NULL; |
|---|
| 2808 | + char *failwhy = NULL; |
|---|
| 2809 | + uint32_t oxid, sid, did, fctl, size; |
|---|
| 2810 | + int ret = 1; |
|---|
| 2811 | + |
|---|
| 2812 | + d_buf = piocb->context2; |
|---|
| 2813 | + |
|---|
| 2814 | + nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); |
|---|
| 2815 | + fc_hdr = nvmebuf->hbuf.virt; |
|---|
| 2816 | + oxid = be16_to_cpu(fc_hdr->fh_ox_id); |
|---|
| 2817 | + sid = sli4_sid_from_fc_hdr(fc_hdr); |
|---|
| 2818 | + did = sli4_did_from_fc_hdr(fc_hdr); |
|---|
| 2819 | + fctl = (fc_hdr->fh_f_ctl[0] << 16 | |
|---|
| 2820 | + fc_hdr->fh_f_ctl[1] << 8 | |
|---|
| 2821 | + fc_hdr->fh_f_ctl[2]); |
|---|
| 2822 | + size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); |
|---|
| 2823 | + |
|---|
| 2824 | + lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n", |
|---|
| 2825 | + oxid, size, sid); |
|---|
| 2826 | + |
|---|
| 2827 | + if (phba->pport->load_flag & FC_UNLOADING) { |
|---|
| 2828 | + failwhy = "Driver Unloading"; |
|---|
| 2829 | + } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { |
|---|
| 2830 | + failwhy = "NVME FC4 Disabled"; |
|---|
| 2831 | + } else if (!phba->nvmet_support && !phba->pport->localport) { |
|---|
| 2832 | + failwhy = "No Localport"; |
|---|
| 2833 | + } else if (phba->nvmet_support && !phba->targetport) { |
|---|
| 2834 | + failwhy = "No Targetport"; |
|---|
| 2835 | + } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) { |
|---|
| 2836 | + failwhy = "Bad NVME LS R_CTL"; |
|---|
| 2837 | + } else if (unlikely((fctl & 0x00FF0000) != |
|---|
| 2838 | + (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) { |
|---|
| 2839 | + failwhy = "Bad NVME LS F_CTL"; |
|---|
| 2840 | + } else { |
|---|
| 2841 | + axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC); |
|---|
| 2842 | + if (!axchg) |
|---|
| 2843 | + failwhy = "No CTX memory"; |
|---|
| 2844 | + } |
|---|
| 2845 | + |
|---|
| 2846 | + if (unlikely(failwhy)) { |
|---|
| 2847 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 2848 | + "6154 Drop NVME LS: SID %06X OXID x%X: %s\n", |
|---|
| 2849 | + sid, oxid, failwhy); |
|---|
| 2850 | + goto out_fail; |
|---|
| 2851 | + } |
|---|
| 2852 | + |
|---|
| 2853 | + /* validate the source of the LS is logged in */ |
|---|
| 2854 | + ndlp = lpfc_findnode_did(phba->pport, sid); |
|---|
| 2855 | + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || |
|---|
| 2856 | + ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && |
|---|
| 2857 | + (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { |
|---|
| 2858 | + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, |
|---|
| 2859 | + "6216 NVME Unsol rcv: No ndlp: " |
|---|
| 2860 | + "NPort_ID x%x oxid x%x\n", |
|---|
| 2861 | + sid, oxid); |
|---|
| 2862 | + goto out_fail; |
|---|
| 2863 | + } |
|---|
| 2864 | + |
|---|
| 2865 | + axchg->phba = phba; |
|---|
| 2866 | + axchg->ndlp = ndlp; |
|---|
| 2867 | + axchg->size = size; |
|---|
| 2868 | + axchg->oxid = oxid; |
|---|
| 2869 | + axchg->sid = sid; |
|---|
| 2870 | + axchg->wqeq = NULL; |
|---|
| 2871 | + axchg->state = LPFC_NVME_STE_LS_RCV; |
|---|
| 2872 | + axchg->entry_cnt = 1; |
|---|
| 2873 | + axchg->rqb_buffer = (void *)nvmebuf; |
|---|
| 2874 | + axchg->hdwq = &phba->sli4_hba.hdwq[0]; |
|---|
| 2875 | + axchg->payload = nvmebuf->dbuf.virt; |
|---|
| 2876 | + INIT_LIST_HEAD(&axchg->list); |
|---|
| 2877 | + |
|---|
| 2878 | + if (phba->nvmet_support) |
|---|
| 2879 | + ret = lpfc_nvmet_handle_lsreq(phba, axchg); |
|---|
| 2880 | + else |
|---|
| 2881 | + ret = lpfc_nvme_handle_lsreq(phba, axchg); |
|---|
| 2882 | + |
|---|
| 2883 | + /* if zero, LS was successfully handled. If non-zero, LS not handled */ |
|---|
| 2884 | + if (!ret) |
|---|
| 2885 | + return; |
|---|
| 2886 | + |
|---|
| 2887 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 2888 | + "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X " |
|---|
| 2889 | + "NVMe%s handler failed %d\n", |
|---|
| 2890 | + did, sid, oxid, |
|---|
| 2891 | + (phba->nvmet_support) ? "T" : "I", ret); |
|---|
| 2892 | + |
|---|
| 2893 | +out_fail: |
|---|
| 2894 | + |
|---|
| 2895 | + /* recycle receive buffer */ |
|---|
| 2896 | + lpfc_in_buf_free(phba, &nvmebuf->dbuf); |
|---|
| 2897 | + |
|---|
| 2898 | + /* If start of new exchange, abort it */ |
|---|
| 2899 | + if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX))) |
|---|
| 2900 | + ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid); |
|---|
| 2901 | + |
|---|
| 2902 | + if (ret) |
|---|
| 2903 | + kfree(axchg); |
|---|
| 2904 | +} |
|---|
| 2905 | + |
|---|
| 2906 | +/** |
|---|
| 2713 | 2907 | * lpfc_complete_unsol_iocb - Complete an unsolicited sequence |
|---|
| 2714 | 2908 | * @phba: Pointer to HBA context object. |
|---|
| 2715 | 2909 | * @pring: Pointer to driver SLI ring object. |
|---|
| .. | .. |
|---|
| 2730 | 2924 | |
|---|
| 2731 | 2925 | switch (fch_type) { |
|---|
| 2732 | 2926 | case FC_TYPE_NVME: |
|---|
| 2733 | | - lpfc_nvmet_unsol_ls_event(phba, pring, saveq); |
|---|
| 2927 | + lpfc_nvme_unsol_ls_handler(phba, saveq); |
|---|
| 2734 | 2928 | return 1; |
|---|
| 2735 | 2929 | default: |
|---|
| 2736 | 2930 | break; |
|---|
| .. | .. |
|---|
| 2937 | 3131 | * |
|---|
| 2938 | 3132 | * This function looks up the iocb_lookup table to get the command iocb |
|---|
| 2939 | 3133 | * corresponding to the given response iocb using the iotag of the |
|---|
| 2940 | | - * response iocb. This function is called with the hbalock held |
|---|
| 2941 | | - * for sli3 devices or the ring_lock for sli4 devices. |
|---|
| 3134 | + * response iocb. The driver calls this function with the hbalock held |
|---|
| 3135 | + * for SLI3 ports or the ring lock held for SLI4 ports. |
|---|
| 2942 | 3136 | * This function returns the command iocb object if it finds the command |
|---|
| 2943 | 3137 | * iocb else returns NULL. |
|---|
| 2944 | 3138 | **/ |
|---|
| .. | .. |
|---|
| 2949 | 3143 | { |
|---|
| 2950 | 3144 | struct lpfc_iocbq *cmd_iocb = NULL; |
|---|
| 2951 | 3145 | uint16_t iotag; |
|---|
| 2952 | | - lockdep_assert_held(&phba->hbalock); |
|---|
| 3146 | + spinlock_t *temp_lock = NULL; |
|---|
| 3147 | + unsigned long iflag = 0; |
|---|
| 2953 | 3148 | |
|---|
| 3149 | + if (phba->sli_rev == LPFC_SLI_REV4) |
|---|
| 3150 | + temp_lock = &pring->ring_lock; |
|---|
| 3151 | + else |
|---|
| 3152 | + temp_lock = &phba->hbalock; |
|---|
| 3153 | + |
|---|
| 3154 | + spin_lock_irqsave(temp_lock, iflag); |
|---|
| 2954 | 3155 | iotag = prspiocb->iocb.ulpIoTag; |
|---|
| 2955 | 3156 | |
|---|
| 2956 | 3157 | if (iotag != 0 && iotag <= phba->sli.last_iotag) { |
|---|
| .. | .. |
|---|
| 2959 | 3160 | /* remove from txcmpl queue list */ |
|---|
| 2960 | 3161 | list_del_init(&cmd_iocb->list); |
|---|
| 2961 | 3162 | cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; |
|---|
| 3163 | + pring->txcmplq_cnt--; |
|---|
| 3164 | + spin_unlock_irqrestore(temp_lock, iflag); |
|---|
| 2962 | 3165 | return cmd_iocb; |
|---|
| 2963 | 3166 | } |
|---|
| 2964 | 3167 | } |
|---|
| 2965 | 3168 | |
|---|
| 2966 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 3169 | + spin_unlock_irqrestore(temp_lock, iflag); |
|---|
| 3170 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 2967 | 3171 | "0317 iotag x%x is out of " |
|---|
| 2968 | 3172 | "range: max iotag x%x wd0 x%x\n", |
|---|
| 2969 | 3173 | iotag, phba->sli.last_iotag, |
|---|
| .. | .. |
|---|
| 2978 | 3182 | * @iotag: IOCB tag. |
|---|
| 2979 | 3183 | * |
|---|
| 2980 | 3184 | * This function looks up the iocb_lookup table to get the command iocb |
|---|
| 2981 | | - * corresponding to the given iotag. This function is called with the |
|---|
| 2982 | | - * hbalock held. |
|---|
| 3185 | + * corresponding to the given iotag. The driver calls this function with |
|---|
| 3186 | + * the ring lock held because this function is an SLI4 port only helper. |
|---|
| 2983 | 3187 | * This function returns the command iocb object if it finds the command |
|---|
| 2984 | 3188 | * iocb else returns NULL. |
|---|
| 2985 | 3189 | **/ |
|---|
| .. | .. |
|---|
| 2988 | 3192 | struct lpfc_sli_ring *pring, uint16_t iotag) |
|---|
| 2989 | 3193 | { |
|---|
| 2990 | 3194 | struct lpfc_iocbq *cmd_iocb = NULL; |
|---|
| 3195 | + spinlock_t *temp_lock = NULL; |
|---|
| 3196 | + unsigned long iflag = 0; |
|---|
| 2991 | 3197 | |
|---|
| 2992 | | - lockdep_assert_held(&phba->hbalock); |
|---|
| 3198 | + if (phba->sli_rev == LPFC_SLI_REV4) |
|---|
| 3199 | + temp_lock = &pring->ring_lock; |
|---|
| 3200 | + else |
|---|
| 3201 | + temp_lock = &phba->hbalock; |
|---|
| 3202 | + |
|---|
| 3203 | + spin_lock_irqsave(temp_lock, iflag); |
|---|
| 2993 | 3204 | if (iotag != 0 && iotag <= phba->sli.last_iotag) { |
|---|
| 2994 | 3205 | cmd_iocb = phba->sli.iocbq_lookup[iotag]; |
|---|
| 2995 | 3206 | if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { |
|---|
| 2996 | 3207 | /* remove from txcmpl queue list */ |
|---|
| 2997 | 3208 | list_del_init(&cmd_iocb->list); |
|---|
| 2998 | 3209 | cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; |
|---|
| 3210 | + pring->txcmplq_cnt--; |
|---|
| 3211 | + spin_unlock_irqrestore(temp_lock, iflag); |
|---|
| 2999 | 3212 | return cmd_iocb; |
|---|
| 3000 | 3213 | } |
|---|
| 3001 | 3214 | } |
|---|
| 3002 | 3215 | |
|---|
| 3003 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 3216 | + spin_unlock_irqrestore(temp_lock, iflag); |
|---|
| 3217 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 3004 | 3218 | "0372 iotag x%x lookup error: max iotag (x%x) " |
|---|
| 3005 | 3219 | "iocb_flag x%x\n", |
|---|
| 3006 | 3220 | iotag, phba->sli.last_iotag, |
|---|
| .. | .. |
|---|
| 3033 | 3247 | int rc = 1; |
|---|
| 3034 | 3248 | unsigned long iflag; |
|---|
| 3035 | 3249 | |
|---|
| 3036 | | - /* Based on the iotag field, get the cmd IOCB from the txcmplq */ |
|---|
| 3037 | | - if (phba->sli_rev == LPFC_SLI_REV4) |
|---|
| 3038 | | - spin_lock_irqsave(&pring->ring_lock, iflag); |
|---|
| 3039 | | - else |
|---|
| 3040 | | - spin_lock_irqsave(&phba->hbalock, iflag); |
|---|
| 3041 | 3250 | cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); |
|---|
| 3042 | | - if (phba->sli_rev == LPFC_SLI_REV4) |
|---|
| 3043 | | - spin_unlock_irqrestore(&pring->ring_lock, iflag); |
|---|
| 3044 | | - else |
|---|
| 3045 | | - spin_unlock_irqrestore(&phba->hbalock, iflag); |
|---|
| 3046 | | - |
|---|
| 3047 | 3251 | if (cmdiocbp) { |
|---|
| 3048 | 3252 | if (cmdiocbp->iocb_cmpl) { |
|---|
| 3049 | 3253 | /* |
|---|
| .. | .. |
|---|
| 3186 | 3390 | * Ring <ringno> handler: portRspPut <portRspPut> is bigger than |
|---|
| 3187 | 3391 | * rsp ring <portRspMax> |
|---|
| 3188 | 3392 | */ |
|---|
| 3189 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 3393 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 3190 | 3394 | "0312 Ring %d handler: portRspPut %d " |
|---|
| 3191 | 3395 | "is bigger than rsp ring %d\n", |
|---|
| 3192 | 3396 | pring->ringno, le32_to_cpu(pgp->rspPutInx), |
|---|
| .. | .. |
|---|
| 3208 | 3412 | |
|---|
| 3209 | 3413 | /** |
|---|
| 3210 | 3414 | * lpfc_poll_eratt - Error attention polling timer timeout handler |
|---|
| 3211 | | - * @ptr: Pointer to address of HBA context object. |
|---|
| 3415 | + * @t: Context to fetch pointer to address of HBA context object from. |
|---|
| 3212 | 3416 | * |
|---|
| 3213 | 3417 | * This function is invoked by the Error Attention polling timer when the |
|---|
| 3214 | 3418 | * timer times out. It will check the SLI Error Attention register for |
|---|
| .. | .. |
|---|
| 3374 | 3578 | break; |
|---|
| 3375 | 3579 | } |
|---|
| 3376 | 3580 | |
|---|
| 3581 | + spin_unlock_irqrestore(&phba->hbalock, iflag); |
|---|
| 3377 | 3582 | cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, |
|---|
| 3378 | 3583 | &rspiocbq); |
|---|
| 3584 | + spin_lock_irqsave(&phba->hbalock, iflag); |
|---|
| 3379 | 3585 | if (unlikely(!cmdiocbq)) |
|---|
| 3380 | 3586 | break; |
|---|
| 3381 | 3587 | if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) |
|---|
| .. | .. |
|---|
| 3403 | 3609 | phba->brd_no, adaptermsg); |
|---|
| 3404 | 3610 | } else { |
|---|
| 3405 | 3611 | /* Unknown IOCB command */ |
|---|
| 3406 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 3612 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 3407 | 3613 | "0334 Unknown IOCB command " |
|---|
| 3408 | 3614 | "Data: x%x, x%x x%x x%x x%x\n", |
|---|
| 3409 | 3615 | type, irsp->ulpCommand, |
|---|
| .. | .. |
|---|
| 3569 | 3775 | |
|---|
| 3570 | 3776 | case LPFC_ABORT_IOCB: |
|---|
| 3571 | 3777 | cmdiocbp = NULL; |
|---|
| 3572 | | - if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) |
|---|
| 3778 | + if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) { |
|---|
| 3779 | + spin_unlock_irqrestore(&phba->hbalock, iflag); |
|---|
| 3573 | 3780 | cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, |
|---|
| 3574 | 3781 | saveq); |
|---|
| 3782 | + spin_lock_irqsave(&phba->hbalock, iflag); |
|---|
| 3783 | + } |
|---|
| 3575 | 3784 | if (cmdiocbp) { |
|---|
| 3576 | 3785 | /* Call the specified completion routine */ |
|---|
| 3577 | 3786 | if (cmdiocbp->iocb_cmpl) { |
|---|
| .. | .. |
|---|
| 3598 | 3807 | phba->brd_no, adaptermsg); |
|---|
| 3599 | 3808 | } else { |
|---|
| 3600 | 3809 | /* Unknown IOCB command */ |
|---|
| 3601 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 3810 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 3602 | 3811 | "0335 Unknown IOCB " |
|---|
| 3603 | 3812 | "command Data: x%x " |
|---|
| 3604 | 3813 | "x%x x%x x%x\n", |
|---|
| .. | .. |
|---|
| 3678 | 3887 | * Ring <ringno> handler: portRspPut <portRspPut> is bigger than |
|---|
| 3679 | 3888 | * rsp ring <portRspMax> |
|---|
| 3680 | 3889 | */ |
|---|
| 3681 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 3890 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 3682 | 3891 | "0303 Ring %d handler: portRspPut %d " |
|---|
| 3683 | 3892 | "is bigger than rsp ring %d\n", |
|---|
| 3684 | 3893 | pring->ringno, portRspPut, portRspMax); |
|---|
| .. | .. |
|---|
| 3887 | 4096 | } |
|---|
| 3888 | 4097 | |
|---|
| 3889 | 4098 | /** |
|---|
| 3890 | | - * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring |
|---|
| 3891 | | - * @phba: Pointer to HBA context object. |
|---|
| 3892 | | - * @pring: Pointer to driver SLI ring object. |
|---|
| 3893 | | - * |
|---|
| 3894 | | - * This function aborts all iocbs in the given ring and frees all the iocb |
|---|
| 3895 | | - * objects in txq. This function issues an abort iocb for all the iocb commands |
|---|
| 3896 | | - * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before |
|---|
| 3897 | | - * the return of this function. The caller is not required to hold any locks. |
|---|
| 3898 | | - **/ |
|---|
| 3899 | | -void |
|---|
| 3900 | | -lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) |
|---|
| 3901 | | -{ |
|---|
| 3902 | | - LIST_HEAD(completions); |
|---|
| 3903 | | - struct lpfc_iocbq *iocb, *next_iocb; |
|---|
| 3904 | | - |
|---|
| 3905 | | - if (pring->ringno == LPFC_ELS_RING) |
|---|
| 3906 | | - lpfc_fabric_abort_hba(phba); |
|---|
| 3907 | | - |
|---|
| 3908 | | - spin_lock_irq(&phba->hbalock); |
|---|
| 3909 | | - /* Next issue ABTS for everything on the txcmplq */ |
|---|
| 3910 | | - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) |
|---|
| 3911 | | - lpfc_sli4_abort_nvme_io(phba, pring, iocb); |
|---|
| 3912 | | - spin_unlock_irq(&phba->hbalock); |
|---|
| 3913 | | -} |
|---|
| 3914 | | - |
|---|
| 3915 | | - |
|---|
| 3916 | | -/** |
|---|
| 3917 | 4099 | * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings |
|---|
| 3918 | 4100 | * @phba: Pointer to HBA context object. |
|---|
| 3919 | | - * @pring: Pointer to driver SLI ring object. |
|---|
| 3920 | 4101 | * |
|---|
| 3921 | 4102 | * This function aborts all iocbs in FCP rings and frees all the iocb |
|---|
| 3922 | 4103 | * objects in txq. This function issues an abort iocb for all the iocb commands |
|---|
| .. | .. |
|---|
| 3932 | 4113 | |
|---|
| 3933 | 4114 | /* Look on all the FCP Rings for the iotag */ |
|---|
| 3934 | 4115 | if (phba->sli_rev >= LPFC_SLI_REV4) { |
|---|
| 3935 | | - for (i = 0; i < phba->cfg_fcp_io_channel; i++) { |
|---|
| 3936 | | - pring = phba->sli4_hba.fcp_wq[i]->pring; |
|---|
| 4116 | + for (i = 0; i < phba->cfg_hdw_queue; i++) { |
|---|
| 4117 | + pring = phba->sli4_hba.hdwq[i].io_wq->pring; |
|---|
| 3937 | 4118 | lpfc_sli_abort_iocb_ring(phba, pring); |
|---|
| 3938 | 4119 | } |
|---|
| 3939 | 4120 | } else { |
|---|
| .. | .. |
|---|
| 3943 | 4124 | } |
|---|
| 3944 | 4125 | |
|---|
| 3945 | 4126 | /** |
|---|
| 3946 | | - * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings |
|---|
| 4127 | + * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring |
|---|
| 3947 | 4128 | * @phba: Pointer to HBA context object. |
|---|
| 3948 | 4129 | * |
|---|
| 3949 | | - * This function aborts all wqes in NVME rings. This function issues an |
|---|
| 3950 | | - * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in |
|---|
| 3951 | | - * the txcmplq is not guaranteed to complete before the return of this |
|---|
| 3952 | | - * function. The caller is not required to hold any locks. |
|---|
| 3953 | | - **/ |
|---|
| 3954 | | -void |
|---|
| 3955 | | -lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba) |
|---|
| 3956 | | -{ |
|---|
| 3957 | | - struct lpfc_sli_ring *pring; |
|---|
| 3958 | | - uint32_t i; |
|---|
| 3959 | | - |
|---|
| 3960 | | - if (phba->sli_rev < LPFC_SLI_REV4) |
|---|
| 3961 | | - return; |
|---|
| 3962 | | - |
|---|
| 3963 | | - /* Abort all IO on each NVME ring. */ |
|---|
| 3964 | | - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { |
|---|
| 3965 | | - pring = phba->sli4_hba.nvme_wq[i]->pring; |
|---|
| 3966 | | - lpfc_sli_abort_wqe_ring(phba, pring); |
|---|
| 3967 | | - } |
|---|
| 3968 | | -} |
|---|
| 3969 | | - |
|---|
| 3970 | | - |
|---|
| 3971 | | -/** |
|---|
| 3972 | | - * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring |
|---|
| 3973 | | - * @phba: Pointer to HBA context object. |
|---|
| 3974 | | - * |
|---|
| 3975 | | - * This function flushes all iocbs in the fcp ring and frees all the iocb |
|---|
| 4130 | + * This function flushes all iocbs in the IO ring and frees all the iocb |
|---|
| 3976 | 4131 | * objects in txq and txcmplq. This function will not issue abort iocbs |
|---|
| 3977 | 4132 | * for all the iocb commands in txcmplq, they will just be returned with |
|---|
| 3978 | 4133 | * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI |
|---|
| 3979 | 4134 | * slot has been permanently disabled. |
|---|
| 3980 | 4135 | **/ |
|---|
| 3981 | 4136 | void |
|---|
| 3982 | | -lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) |
|---|
| 4137 | +lpfc_sli_flush_io_rings(struct lpfc_hba *phba) |
|---|
| 3983 | 4138 | { |
|---|
| 3984 | 4139 | LIST_HEAD(txq); |
|---|
| 3985 | 4140 | LIST_HEAD(txcmplq); |
|---|
| .. | .. |
|---|
| 3989 | 4144 | struct lpfc_iocbq *piocb, *next_iocb; |
|---|
| 3990 | 4145 | |
|---|
| 3991 | 4146 | spin_lock_irq(&phba->hbalock); |
|---|
| 4147 | + if (phba->hba_flag & HBA_IOQ_FLUSH || |
|---|
| 4148 | + !phba->sli4_hba.hdwq) { |
|---|
| 4149 | + spin_unlock_irq(&phba->hbalock); |
|---|
| 4150 | + return; |
|---|
| 4151 | + } |
|---|
| 3992 | 4152 | /* Indicate the I/O queues are flushed */ |
|---|
| 3993 | | - phba->hba_flag |= HBA_FCP_IOQ_FLUSH; |
|---|
| 4153 | + phba->hba_flag |= HBA_IOQ_FLUSH; |
|---|
| 3994 | 4154 | spin_unlock_irq(&phba->hbalock); |
|---|
| 3995 | 4155 | |
|---|
| 3996 | 4156 | /* Look on all the FCP Rings for the iotag */ |
|---|
| 3997 | 4157 | if (phba->sli_rev >= LPFC_SLI_REV4) { |
|---|
| 3998 | | - for (i = 0; i < phba->cfg_fcp_io_channel; i++) { |
|---|
| 3999 | | - pring = phba->sli4_hba.fcp_wq[i]->pring; |
|---|
| 4158 | + for (i = 0; i < phba->cfg_hdw_queue; i++) { |
|---|
| 4159 | + pring = phba->sli4_hba.hdwq[i].io_wq->pring; |
|---|
| 4000 | 4160 | |
|---|
| 4001 | 4161 | spin_lock_irq(&pring->ring_lock); |
|---|
| 4002 | 4162 | /* Retrieve everything on txq */ |
|---|
| .. | .. |
|---|
| 4039 | 4199 | IOERR_SLI_DOWN); |
|---|
| 4040 | 4200 | /* Flush the txcmpq */ |
|---|
| 4041 | 4201 | lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, |
|---|
| 4042 | | - IOERR_SLI_DOWN); |
|---|
| 4043 | | - } |
|---|
| 4044 | | -} |
|---|
| 4045 | | - |
|---|
| 4046 | | -/** |
|---|
| 4047 | | - * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings |
|---|
| 4048 | | - * @phba: Pointer to HBA context object. |
|---|
| 4049 | | - * |
|---|
| 4050 | | - * This function flushes all wqes in the nvme rings and frees all resources |
|---|
| 4051 | | - * in the txcmplq. This function does not issue abort wqes for the IO |
|---|
| 4052 | | - * commands in txcmplq, they will just be returned with |
|---|
| 4053 | | - * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI |
|---|
| 4054 | | - * slot has been permanently disabled. |
|---|
| 4055 | | - **/ |
|---|
| 4056 | | -void |
|---|
| 4057 | | -lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) |
|---|
| 4058 | | -{ |
|---|
| 4059 | | - LIST_HEAD(txcmplq); |
|---|
| 4060 | | - struct lpfc_sli_ring *pring; |
|---|
| 4061 | | - uint32_t i; |
|---|
| 4062 | | - struct lpfc_iocbq *piocb, *next_iocb; |
|---|
| 4063 | | - |
|---|
| 4064 | | - if (phba->sli_rev < LPFC_SLI_REV4) |
|---|
| 4065 | | - return; |
|---|
| 4066 | | - |
|---|
| 4067 | | - /* Hint to other driver operations that a flush is in progress. */ |
|---|
| 4068 | | - spin_lock_irq(&phba->hbalock); |
|---|
| 4069 | | - phba->hba_flag |= HBA_NVME_IOQ_FLUSH; |
|---|
| 4070 | | - spin_unlock_irq(&phba->hbalock); |
|---|
| 4071 | | - |
|---|
| 4072 | | - /* Cycle through all NVME rings and complete each IO with |
|---|
| 4073 | | - * a local driver reason code. This is a flush so no |
|---|
| 4074 | | - * abort exchange to FW. |
|---|
| 4075 | | - */ |
|---|
| 4076 | | - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { |
|---|
| 4077 | | - pring = phba->sli4_hba.nvme_wq[i]->pring; |
|---|
| 4078 | | - |
|---|
| 4079 | | - spin_lock_irq(&pring->ring_lock); |
|---|
| 4080 | | - list_for_each_entry_safe(piocb, next_iocb, |
|---|
| 4081 | | - &pring->txcmplq, list) |
|---|
| 4082 | | - piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; |
|---|
| 4083 | | - /* Retrieve everything on the txcmplq */ |
|---|
| 4084 | | - list_splice_init(&pring->txcmplq, &txcmplq); |
|---|
| 4085 | | - pring->txcmplq_cnt = 0; |
|---|
| 4086 | | - spin_unlock_irq(&pring->ring_lock); |
|---|
| 4087 | | - |
|---|
| 4088 | | - /* Flush the txcmpq &&&PAE */ |
|---|
| 4089 | | - lpfc_sli_cancel_iocbs(phba, &txcmplq, |
|---|
| 4090 | | - IOSTAT_LOCAL_REJECT, |
|---|
| 4091 | 4202 | IOERR_SLI_DOWN); |
|---|
| 4092 | 4203 | } |
|---|
| 4093 | 4204 | } |
|---|
| .. | .. |
|---|
| 4147 | 4258 | |
|---|
| 4148 | 4259 | /* Check to see if any errors occurred during init */ |
|---|
| 4149 | 4260 | if ((status & HS_FFERM) || (i >= 20)) { |
|---|
| 4150 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 4261 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 4151 | 4262 | "2751 Adapter failed to restart, " |
|---|
| 4152 | 4263 | "status reg x%x, FW Data: A8 x%x AC x%x\n", |
|---|
| 4153 | 4264 | status, |
|---|
| .. | .. |
|---|
| 4369 | 4480 | if (retval != MBX_SUCCESS) { |
|---|
| 4370 | 4481 | if (retval != MBX_BUSY) |
|---|
| 4371 | 4482 | mempool_free(pmb, phba->mbox_mem_pool); |
|---|
| 4372 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 4483 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 4373 | 4484 | "2752 KILL_BOARD command failed retval %d\n", |
|---|
| 4374 | 4485 | retval); |
|---|
| 4375 | 4486 | spin_lock_irq(&phba->hbalock); |
|---|
| .. | .. |
|---|
| 4450 | 4561 | } |
|---|
| 4451 | 4562 | |
|---|
| 4452 | 4563 | /* Turn off parity checking and serr during the physical reset */ |
|---|
| 4453 | | - pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); |
|---|
| 4564 | + if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) |
|---|
| 4565 | + return -EIO; |
|---|
| 4566 | + |
|---|
| 4454 | 4567 | pci_write_config_word(phba->pcidev, PCI_COMMAND, |
|---|
| 4455 | 4568 | (cfg_value & |
|---|
| 4456 | 4569 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); |
|---|
| .. | .. |
|---|
| 4490 | 4603 | * checking during resets the device. The caller is not required to hold |
|---|
| 4491 | 4604 | * any locks. |
|---|
| 4492 | 4605 | * |
|---|
| 4493 | | - * This function returns 0 always. |
|---|
| 4606 | + * This function returns 0 on success else returns negative error code. |
|---|
| 4494 | 4607 | **/ |
|---|
| 4495 | 4608 | int |
|---|
| 4496 | 4609 | lpfc_sli4_brdreset(struct lpfc_hba *phba) |
|---|
| .. | .. |
|---|
| 4516 | 4629 | phba->fcf.fcf_flag = 0; |
|---|
| 4517 | 4630 | spin_unlock_irq(&phba->hbalock); |
|---|
| 4518 | 4631 | |
|---|
| 4519 | | - /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ |
|---|
| 4520 | | - if (phba->hba_flag & HBA_FW_DUMP_OP) { |
|---|
| 4521 | | - phba->hba_flag &= ~HBA_FW_DUMP_OP; |
|---|
| 4522 | | - return rc; |
|---|
| 4523 | | - } |
|---|
| 4524 | | - |
|---|
| 4525 | 4632 | /* Now physically reset the device */ |
|---|
| 4526 | 4633 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
|---|
| 4527 | 4634 | "0389 Performing PCI function reset!\n"); |
|---|
| 4528 | 4635 | |
|---|
| 4529 | 4636 | /* Turn off parity checking and serr during the physical reset */ |
|---|
| 4530 | | - pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); |
|---|
| 4637 | + if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) { |
|---|
| 4638 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
|---|
| 4639 | + "3205 PCI read Config failed\n"); |
|---|
| 4640 | + return -EIO; |
|---|
| 4641 | + } |
|---|
| 4642 | + |
|---|
| 4531 | 4643 | pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & |
|---|
| 4532 | 4644 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); |
|---|
| 4533 | 4645 | |
|---|
| .. | .. |
|---|
| 4642 | 4754 | hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; |
|---|
| 4643 | 4755 | |
|---|
| 4644 | 4756 | rc = lpfc_sli4_brdreset(phba); |
|---|
| 4645 | | - if (rc) |
|---|
| 4646 | | - return rc; |
|---|
| 4757 | + if (rc) { |
|---|
| 4758 | + phba->link_state = LPFC_HBA_ERROR; |
|---|
| 4759 | + goto hba_down_queue; |
|---|
| 4760 | + } |
|---|
| 4647 | 4761 | |
|---|
| 4648 | 4762 | spin_lock_irq(&phba->hbalock); |
|---|
| 4649 | 4763 | phba->pport->stopped = 0; |
|---|
| .. | .. |
|---|
| 4658 | 4772 | if (hba_aer_enabled) |
|---|
| 4659 | 4773 | pci_disable_pcie_error_reporting(phba->pcidev); |
|---|
| 4660 | 4774 | |
|---|
| 4775 | +hba_down_queue: |
|---|
| 4661 | 4776 | lpfc_hba_down_post(phba); |
|---|
| 4662 | 4777 | lpfc_sli4_queue_destroy(phba); |
|---|
| 4663 | 4778 | |
|---|
| .. | .. |
|---|
| 4711 | 4826 | if (i++ >= 200) { |
|---|
| 4712 | 4827 | /* Adapter failed to init, timeout, status reg |
|---|
| 4713 | 4828 | <status> */ |
|---|
| 4714 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 4829 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 4715 | 4830 | "0436 Adapter failed to init, " |
|---|
| 4716 | 4831 | "timeout, status reg x%x, " |
|---|
| 4717 | 4832 | "FW Data: A8 x%x AC x%x\n", status, |
|---|
| .. | .. |
|---|
| 4726 | 4841 | /* ERROR: During chipset initialization */ |
|---|
| 4727 | 4842 | /* Adapter failed to init, chipset, status reg |
|---|
| 4728 | 4843 | <status> */ |
|---|
| 4729 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 4844 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 4730 | 4845 | "0437 Adapter failed to init, " |
|---|
| 4731 | 4846 | "chipset, status reg x%x, " |
|---|
| 4732 | 4847 | "FW Data: A8 x%x AC x%x\n", status, |
|---|
| .. | .. |
|---|
| 4757 | 4872 | if (status & HS_FFERM) { |
|---|
| 4758 | 4873 | /* ERROR: During chipset initialization */ |
|---|
| 4759 | 4874 | /* Adapter failed to init, chipset, status reg <status> */ |
|---|
| 4760 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 4875 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 4761 | 4876 | "0438 Adapter failed to init, chipset, " |
|---|
| 4762 | 4877 | "status reg x%x, " |
|---|
| 4763 | 4878 | "FW Data: A8 x%x AC x%x\n", status, |
|---|
| .. | .. |
|---|
| 4902 | 5017 | lpfc_sli4_rb_setup(struct lpfc_hba *phba) |
|---|
| 4903 | 5018 | { |
|---|
| 4904 | 5019 | phba->hbq_in_use = 1; |
|---|
| 4905 | | - phba->hbqs[LPFC_ELS_HBQ].entry_count = |
|---|
| 4906 | | - lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; |
|---|
| 5020 | + /** |
|---|
| 5021 | + * Specific case when the MDS diagnostics is enabled and supported. |
|---|
| 5022 | + * The receive buffer count is truncated to manage the incoming |
|---|
| 5023 | + * traffic. |
|---|
| 5024 | + **/ |
|---|
| 5025 | + if (phba->cfg_enable_mds_diags && phba->mds_diags_support) |
|---|
| 5026 | + phba->hbqs[LPFC_ELS_HBQ].entry_count = |
|---|
| 5027 | + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1; |
|---|
| 5028 | + else |
|---|
| 5029 | + phba->hbqs[LPFC_ELS_HBQ].entry_count = |
|---|
| 5030 | + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; |
|---|
| 4907 | 5031 | phba->hbq_count = 1; |
|---|
| 4908 | 5032 | lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); |
|---|
| 4909 | 5033 | /* Initially populate or replenish the HBQs */ |
|---|
| .. | .. |
|---|
| 4971 | 5095 | LPFC_SLI3_CRP_ENABLED | |
|---|
| 4972 | 5096 | LPFC_SLI3_DSS_ENABLED); |
|---|
| 4973 | 5097 | if (rc != MBX_SUCCESS) { |
|---|
| 4974 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 5098 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 4975 | 5099 | "0442 Adapter failed to init, mbxCmd x%x " |
|---|
| 4976 | 5100 | "CONFIG_PORT, mbxStatus x%x Data: x%x\n", |
|---|
| 4977 | 5101 | pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); |
|---|
| .. | .. |
|---|
| 5009 | 5133 | |
|---|
| 5010 | 5134 | } else |
|---|
| 5011 | 5135 | phba->max_vpi = 0; |
|---|
| 5012 | | - phba->fips_level = 0; |
|---|
| 5013 | | - phba->fips_spec_rev = 0; |
|---|
| 5014 | | - if (pmb->u.mb.un.varCfgPort.gdss) { |
|---|
| 5015 | | - phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; |
|---|
| 5016 | | - phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; |
|---|
| 5017 | | - phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; |
|---|
| 5018 | | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
|---|
| 5019 | | - "2850 Security Crypto Active. FIPS x%d " |
|---|
| 5020 | | - "(Spec Rev: x%d)", |
|---|
| 5021 | | - phba->fips_level, phba->fips_spec_rev); |
|---|
| 5022 | | - } |
|---|
| 5023 | | - if (pmb->u.mb.un.varCfgPort.sec_err) { |
|---|
| 5024 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 5025 | | - "2856 Config Port Security Crypto " |
|---|
| 5026 | | - "Error: x%x ", |
|---|
| 5027 | | - pmb->u.mb.un.varCfgPort.sec_err); |
|---|
| 5028 | | - } |
|---|
| 5029 | 5136 | if (pmb->u.mb.un.varCfgPort.gerbm) |
|---|
| 5030 | 5137 | phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; |
|---|
| 5031 | 5138 | if (pmb->u.mb.un.varCfgPort.gcrp) |
|---|
| .. | .. |
|---|
| 5038 | 5145 | if (pmb->u.mb.un.varCfgPort.gbg == 0) { |
|---|
| 5039 | 5146 | phba->cfg_enable_bg = 0; |
|---|
| 5040 | 5147 | phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; |
|---|
| 5041 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 5148 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 5042 | 5149 | "0443 Adapter did not grant " |
|---|
| 5043 | 5150 | "BlockGuard\n"); |
|---|
| 5044 | 5151 | } |
|---|
| .. | .. |
|---|
| 5077 | 5184 | switch (phba->cfg_sli_mode) { |
|---|
| 5078 | 5185 | case 2: |
|---|
| 5079 | 5186 | if (phba->cfg_enable_npiv) { |
|---|
| 5080 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, |
|---|
| 5187 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 5081 | 5188 | "1824 NPIV enabled: Override sli_mode " |
|---|
| 5082 | 5189 | "parameter (%d) to auto (0).\n", |
|---|
| 5083 | 5190 | phba->cfg_sli_mode); |
|---|
| .. | .. |
|---|
| 5089 | 5196 | case 3: |
|---|
| 5090 | 5197 | break; |
|---|
| 5091 | 5198 | default: |
|---|
| 5092 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, |
|---|
| 5199 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 5093 | 5200 | "1819 Unrecognized sli_mode parameter: %d.\n", |
|---|
| 5094 | 5201 | phba->cfg_sli_mode); |
|---|
| 5095 | 5202 | |
|---|
| .. | .. |
|---|
| 5100 | 5207 | rc = lpfc_sli_config_port(phba, mode); |
|---|
| 5101 | 5208 | |
|---|
| 5102 | 5209 | if (rc && phba->cfg_sli_mode == 3) |
|---|
| 5103 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, |
|---|
| 5210 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 5104 | 5211 | "1820 Unable to select SLI-3. " |
|---|
| 5105 | 5212 | "Not supported by adapter.\n"); |
|---|
| 5106 | 5213 | if (rc && mode != 2) |
|---|
| .. | .. |
|---|
| 5194 | 5301 | |
|---|
| 5195 | 5302 | lpfc_sli_hba_setup_error: |
|---|
| 5196 | 5303 | phba->link_state = LPFC_HBA_ERROR; |
|---|
| 5197 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 5304 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 5198 | 5305 | "0445 Firmware initialization failed\n"); |
|---|
| 5199 | 5306 | return rc; |
|---|
| 5200 | 5307 | } |
|---|
| .. | .. |
|---|
| 5202 | 5309 | /** |
|---|
| 5203 | 5310 | * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region |
|---|
| 5204 | 5311 | * @phba: Pointer to HBA context object. |
|---|
| 5205 | | - * @mboxq: mailbox pointer. |
|---|
| 5312 | + * |
|---|
| 5206 | 5313 | * This function issue a dump mailbox command to read config region |
|---|
| 5207 | 5314 | * 23 and parse the records in the region and populate driver |
|---|
| 5208 | 5315 | * data structure. |
|---|
| .. | .. |
|---|
| 5232 | 5339 | goto out_free_mboxq; |
|---|
| 5233 | 5340 | } |
|---|
| 5234 | 5341 | |
|---|
| 5235 | | - mp = (struct lpfc_dmabuf *) mboxq->context1; |
|---|
| 5342 | + mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; |
|---|
| 5236 | 5343 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
|---|
| 5237 | 5344 | |
|---|
| 5238 | 5345 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, |
|---|
| .. | .. |
|---|
| 5313 | 5420 | * mailbox command. |
|---|
| 5314 | 5421 | */ |
|---|
| 5315 | 5422 | dma_size = *vpd_size; |
|---|
| 5316 | | - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, |
|---|
| 5317 | | - &dmabuf->phys, GFP_KERNEL); |
|---|
| 5423 | + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, |
|---|
| 5424 | + &dmabuf->phys, GFP_KERNEL); |
|---|
| 5318 | 5425 | if (!dmabuf->virt) { |
|---|
| 5319 | 5426 | kfree(dmabuf); |
|---|
| 5320 | 5427 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 5358 | 5465 | } |
|---|
| 5359 | 5466 | |
|---|
| 5360 | 5467 | /** |
|---|
| 5361 | | - * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name |
|---|
| 5468 | + * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes |
|---|
| 5362 | 5469 | * @phba: pointer to lpfc hba data structure. |
|---|
| 5363 | 5470 | * |
|---|
| 5364 | 5471 | * This routine retrieves SLI4 device physical port name this PCI function |
|---|
| .. | .. |
|---|
| 5366 | 5473 | * |
|---|
| 5367 | 5474 | * Return codes |
|---|
| 5368 | 5475 | * 0 - successful |
|---|
| 5369 | | - * otherwise - failed to retrieve physical port name |
|---|
| 5476 | + * otherwise - failed to retrieve controller attributes |
|---|
| 5370 | 5477 | **/ |
|---|
| 5371 | 5478 | static int |
|---|
| 5372 | | -lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) |
|---|
| 5479 | +lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) |
|---|
| 5373 | 5480 | { |
|---|
| 5374 | 5481 | LPFC_MBOXQ_t *mboxq; |
|---|
| 5375 | 5482 | struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; |
|---|
| 5376 | 5483 | struct lpfc_controller_attribute *cntl_attr; |
|---|
| 5377 | | - struct lpfc_mbx_get_port_name *get_port_name; |
|---|
| 5378 | 5484 | void *virtaddr = NULL; |
|---|
| 5379 | 5485 | uint32_t alloclen, reqlen; |
|---|
| 5380 | 5486 | uint32_t shdr_status, shdr_add_status; |
|---|
| 5381 | 5487 | union lpfc_sli4_cfg_shdr *shdr; |
|---|
| 5382 | | - char cport_name = 0; |
|---|
| 5383 | 5488 | int rc; |
|---|
| 5384 | | - |
|---|
| 5385 | | - /* We assume nothing at this point */ |
|---|
| 5386 | | - phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; |
|---|
| 5387 | | - phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; |
|---|
| 5388 | 5489 | |
|---|
| 5389 | 5490 | mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 5390 | 5491 | if (!mboxq) |
|---|
| 5391 | 5492 | return -ENOMEM; |
|---|
| 5392 | | - /* obtain link type and link number via READ_CONFIG */ |
|---|
| 5393 | | - phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; |
|---|
| 5394 | | - lpfc_sli4_read_config(phba); |
|---|
| 5395 | | - if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) |
|---|
| 5396 | | - goto retrieve_ppname; |
|---|
| 5397 | 5493 | |
|---|
| 5398 | | - /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ |
|---|
| 5494 | + /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */ |
|---|
| 5399 | 5495 | reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); |
|---|
| 5400 | 5496 | alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, |
|---|
| 5401 | 5497 | LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, |
|---|
| 5402 | 5498 | LPFC_SLI4_MBX_NEMBED); |
|---|
| 5499 | + |
|---|
| 5403 | 5500 | if (alloclen < reqlen) { |
|---|
| 5404 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 5501 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 5405 | 5502 | "3084 Allocated DMA memory size (%d) is " |
|---|
| 5406 | 5503 | "less than the requested DMA memory size " |
|---|
| 5407 | 5504 | "(%d)\n", alloclen, reqlen); |
|---|
| .. | .. |
|---|
| 5425 | 5522 | rc = -ENXIO; |
|---|
| 5426 | 5523 | goto out_free_mboxq; |
|---|
| 5427 | 5524 | } |
|---|
| 5525 | + |
|---|
| 5428 | 5526 | cntl_attr = &mbx_cntl_attr->cntl_attr; |
|---|
| 5429 | 5527 | phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; |
|---|
| 5430 | 5528 | phba->sli4_hba.lnk_info.lnk_tp = |
|---|
| 5431 | 5529 | bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); |
|---|
| 5432 | 5530 | phba->sli4_hba.lnk_info.lnk_no = |
|---|
| 5433 | 5531 | bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); |
|---|
| 5532 | + |
|---|
| 5533 | + memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); |
|---|
| 5534 | + strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, |
|---|
| 5535 | + sizeof(phba->BIOSVersion)); |
|---|
| 5536 | + |
|---|
| 5434 | 5537 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
|---|
| 5435 | | - "3086 lnk_type:%d, lnk_numb:%d\n", |
|---|
| 5538 | + "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n", |
|---|
| 5436 | 5539 | phba->sli4_hba.lnk_info.lnk_tp, |
|---|
| 5437 | | - phba->sli4_hba.lnk_info.lnk_no); |
|---|
| 5540 | + phba->sli4_hba.lnk_info.lnk_no, |
|---|
| 5541 | + phba->BIOSVersion); |
|---|
| 5542 | +out_free_mboxq: |
|---|
| 5543 | + if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) |
|---|
| 5544 | + lpfc_sli4_mbox_cmd_free(phba, mboxq); |
|---|
| 5545 | + else |
|---|
| 5546 | + mempool_free(mboxq, phba->mbox_mem_pool); |
|---|
| 5547 | + return rc; |
|---|
| 5548 | +} |
|---|
| 5549 | + |
|---|
| 5550 | +/** |
|---|
| 5551 | + * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name |
|---|
| 5552 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 5553 | + * |
|---|
| 5554 | + * This routine retrieves SLI4 device physical port name this PCI function |
|---|
| 5555 | + * is attached to. |
|---|
| 5556 | + * |
|---|
| 5557 | + * Return codes |
|---|
| 5558 | + * 0 - successful |
|---|
| 5559 | + * otherwise - failed to retrieve physical port name |
|---|
| 5560 | + **/ |
|---|
| 5561 | +static int |
|---|
| 5562 | +lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) |
|---|
| 5563 | +{ |
|---|
| 5564 | + LPFC_MBOXQ_t *mboxq; |
|---|
| 5565 | + struct lpfc_mbx_get_port_name *get_port_name; |
|---|
| 5566 | + uint32_t shdr_status, shdr_add_status; |
|---|
| 5567 | + union lpfc_sli4_cfg_shdr *shdr; |
|---|
| 5568 | + char cport_name = 0; |
|---|
| 5569 | + int rc; |
|---|
| 5570 | + |
|---|
| 5571 | + /* We assume nothing at this point */ |
|---|
| 5572 | + phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; |
|---|
| 5573 | + phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; |
|---|
| 5574 | + |
|---|
| 5575 | + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 5576 | + if (!mboxq) |
|---|
| 5577 | + return -ENOMEM; |
|---|
| 5578 | + /* obtain link type and link number via READ_CONFIG */ |
|---|
| 5579 | + phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; |
|---|
| 5580 | + lpfc_sli4_read_config(phba); |
|---|
| 5581 | + if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) |
|---|
| 5582 | + goto retrieve_ppname; |
|---|
| 5583 | + |
|---|
| 5584 | + /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ |
|---|
| 5585 | + rc = lpfc_sli4_get_ctl_attr(phba); |
|---|
| 5586 | + if (rc) |
|---|
| 5587 | + goto out_free_mboxq; |
|---|
| 5438 | 5588 | |
|---|
| 5439 | 5589 | retrieve_ppname: |
|---|
| 5440 | 5590 | lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, |
|---|
| .. | .. |
|---|
| 5494 | 5644 | } |
|---|
| 5495 | 5645 | |
|---|
| 5496 | 5646 | out_free_mboxq: |
|---|
| 5497 | | - if (rc != MBX_TIMEOUT) { |
|---|
| 5498 | | - if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) |
|---|
| 5499 | | - lpfc_sli4_mbox_cmd_free(phba, mboxq); |
|---|
| 5500 | | - else |
|---|
| 5501 | | - mempool_free(mboxq, phba->mbox_mem_pool); |
|---|
| 5502 | | - } |
|---|
| 5647 | + if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) |
|---|
| 5648 | + lpfc_sli4_mbox_cmd_free(phba, mboxq); |
|---|
| 5649 | + else |
|---|
| 5650 | + mempool_free(mboxq, phba->mbox_mem_pool); |
|---|
| 5503 | 5651 | return rc; |
|---|
| 5504 | 5652 | } |
|---|
| 5505 | 5653 | |
|---|
| .. | .. |
|---|
| 5515 | 5663 | { |
|---|
| 5516 | 5664 | int qidx; |
|---|
| 5517 | 5665 | struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; |
|---|
| 5666 | + struct lpfc_sli4_hdw_queue *qp; |
|---|
| 5667 | + struct lpfc_queue *eq; |
|---|
| 5518 | 5668 | |
|---|
| 5519 | | - sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM); |
|---|
| 5520 | | - sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM); |
|---|
| 5669 | + sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM); |
|---|
| 5670 | + sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM); |
|---|
| 5521 | 5671 | if (sli4_hba->nvmels_cq) |
|---|
| 5522 | | - sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq, |
|---|
| 5672 | + sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0, |
|---|
| 5673 | + LPFC_QUEUE_REARM); |
|---|
| 5674 | + |
|---|
| 5675 | + if (sli4_hba->hdwq) { |
|---|
| 5676 | + /* Loop thru all Hardware Queues */ |
|---|
| 5677 | + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { |
|---|
| 5678 | + qp = &sli4_hba->hdwq[qidx]; |
|---|
| 5679 | + /* ARM the corresponding CQ */ |
|---|
| 5680 | + sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0, |
|---|
| 5523 | 5681 | LPFC_QUEUE_REARM); |
|---|
| 5682 | + } |
|---|
| 5524 | 5683 | |
|---|
| 5525 | | - if (sli4_hba->fcp_cq) |
|---|
| 5526 | | - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) |
|---|
| 5527 | | - sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx], |
|---|
| 5528 | | - LPFC_QUEUE_REARM); |
|---|
| 5529 | | - |
|---|
| 5530 | | - if (sli4_hba->nvme_cq) |
|---|
| 5531 | | - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) |
|---|
| 5532 | | - sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx], |
|---|
| 5533 | | - LPFC_QUEUE_REARM); |
|---|
| 5534 | | - |
|---|
| 5535 | | - if (phba->cfg_fof) |
|---|
| 5536 | | - sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM); |
|---|
| 5537 | | - |
|---|
| 5538 | | - if (sli4_hba->hba_eq) |
|---|
| 5539 | | - for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) |
|---|
| 5540 | | - sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx], |
|---|
| 5541 | | - LPFC_QUEUE_REARM); |
|---|
| 5542 | | - |
|---|
| 5543 | | - if (phba->nvmet_support) { |
|---|
| 5544 | | - for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { |
|---|
| 5545 | | - sli4_hba->sli4_cq_release( |
|---|
| 5546 | | - sli4_hba->nvmet_cqset[qidx], |
|---|
| 5547 | | - LPFC_QUEUE_REARM); |
|---|
| 5684 | + /* Loop thru all IRQ vectors */ |
|---|
| 5685 | + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { |
|---|
| 5686 | + eq = sli4_hba->hba_eq_hdl[qidx].eq; |
|---|
| 5687 | + /* ARM the corresponding EQ */ |
|---|
| 5688 | + sli4_hba->sli4_write_eq_db(phba, eq, |
|---|
| 5689 | + 0, LPFC_QUEUE_REARM); |
|---|
| 5548 | 5690 | } |
|---|
| 5549 | 5691 | } |
|---|
| 5550 | 5692 | |
|---|
| 5551 | | - if (phba->cfg_fof) |
|---|
| 5552 | | - sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM); |
|---|
| 5693 | + if (phba->nvmet_support) { |
|---|
| 5694 | + for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { |
|---|
| 5695 | + sli4_hba->sli4_write_cq_db(phba, |
|---|
| 5696 | + sli4_hba->nvmet_cqset[qidx], 0, |
|---|
| 5697 | + LPFC_QUEUE_REARM); |
|---|
| 5698 | + } |
|---|
| 5699 | + } |
|---|
| 5553 | 5700 | } |
|---|
| 5554 | 5701 | |
|---|
| 5555 | 5702 | /** |
|---|
| .. | .. |
|---|
| 5607 | 5754 | rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; |
|---|
| 5608 | 5755 | if (bf_get(lpfc_mbox_hdr_status, |
|---|
| 5609 | 5756 | &rsrc_info->header.cfg_shdr.response)) { |
|---|
| 5610 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, |
|---|
| 5757 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 5611 | 5758 | "2930 Failed to get resource extents " |
|---|
| 5612 | 5759 | "Status 0x%x Add'l Status 0x%x\n", |
|---|
| 5613 | 5760 | bf_get(lpfc_mbox_hdr_status, |
|---|
| .. | .. |
|---|
| 5696 | 5843 | /** |
|---|
| 5697 | 5844 | * lpfc_sli4_cfg_post_extnts - |
|---|
| 5698 | 5845 | * @phba: Pointer to HBA context object. |
|---|
| 5699 | | - * @extnt_cnt - number of available extents. |
|---|
| 5700 | | - * @type - the extent type (rpi, xri, vfi, vpi). |
|---|
| 5701 | | - * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. |
|---|
| 5702 | | - * @mbox - pointer to the caller's allocated mailbox structure. |
|---|
| 5846 | + * @extnt_cnt: number of available extents. |
|---|
| 5847 | + * @type: the extent type (rpi, xri, vfi, vpi). |
|---|
| 5848 | + * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation. |
|---|
| 5849 | + * @mbox: pointer to the caller's allocated mailbox structure. |
|---|
| 5703 | 5850 | * |
|---|
| 5704 | 5851 | * This function executes the extents allocation request. It also |
|---|
| 5705 | 5852 | * takes care of the amount of memory needed to allocate or get the |
|---|
| .. | .. |
|---|
| 5745 | 5892 | LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, |
|---|
| 5746 | 5893 | req_len, *emb); |
|---|
| 5747 | 5894 | if (alloc_len < req_len) { |
|---|
| 5748 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 5895 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 5749 | 5896 | "2982 Allocated DMA memory size (x%x) is " |
|---|
| 5750 | 5897 | "less than the requested DMA memory " |
|---|
| 5751 | 5898 | "size (x%x)\n", alloc_len, req_len); |
|---|
| .. | .. |
|---|
| 5801 | 5948 | return -EIO; |
|---|
| 5802 | 5949 | |
|---|
| 5803 | 5950 | if ((rsrc_cnt == 0) || (rsrc_size == 0)) { |
|---|
| 5804 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, |
|---|
| 5951 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 5805 | 5952 | "3009 No available Resource Extents " |
|---|
| 5806 | 5953 | "for resource type 0x%x: Count: 0x%x, " |
|---|
| 5807 | 5954 | "Size 0x%x\n", type, rsrc_cnt, |
|---|
| .. | .. |
|---|
| 5978 | 6125 | list_add_tail(&rsrc_blks->list, ext_blk_list); |
|---|
| 5979 | 6126 | rsrc_start = rsrc_id; |
|---|
| 5980 | 6127 | if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { |
|---|
| 5981 | | - phba->sli4_hba.scsi_xri_start = rsrc_start + |
|---|
| 6128 | + phba->sli4_hba.io_xri_start = rsrc_start + |
|---|
| 5982 | 6129 | lpfc_sli4_get_iocb_cnt(phba); |
|---|
| 5983 | | - phba->sli4_hba.nvme_xri_start = |
|---|
| 5984 | | - phba->sli4_hba.scsi_xri_start + |
|---|
| 5985 | | - phba->sli4_hba.scsi_xri_max; |
|---|
| 5986 | 6130 | } |
|---|
| 5987 | 6131 | |
|---|
| 5988 | 6132 | while (rsrc_id < (rsrc_start + rsrc_size)) { |
|---|
| .. | .. |
|---|
| 6055 | 6199 | dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; |
|---|
| 6056 | 6200 | if (bf_get(lpfc_mbox_hdr_status, |
|---|
| 6057 | 6201 | &dealloc_rsrc->header.cfg_shdr.response)) { |
|---|
| 6058 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, |
|---|
| 6202 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6059 | 6203 | "2919 Failed to release resource extents " |
|---|
| 6060 | 6204 | "for type %d - Status 0x%x Add'l Status 0x%x. " |
|---|
| 6061 | 6205 | "Resource memory not released.\n", |
|---|
| .. | .. |
|---|
| 6146 | 6290 | mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; |
|---|
| 6147 | 6291 | mbox->u.mqe.un.set_feature.param_len = 8; |
|---|
| 6148 | 6292 | break; |
|---|
| 6293 | + case LPFC_SET_DUAL_DUMP: |
|---|
| 6294 | + bf_set(lpfc_mbx_set_feature_dd, |
|---|
| 6295 | + &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP); |
|---|
| 6296 | + bf_set(lpfc_mbx_set_feature_ddquery, |
|---|
| 6297 | + &mbox->u.mqe.un.set_feature, 0); |
|---|
| 6298 | + mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP; |
|---|
| 6299 | + mbox->u.mqe.un.set_feature.param_len = 4; |
|---|
| 6300 | + break; |
|---|
| 6149 | 6301 | } |
|---|
| 6150 | 6302 | |
|---|
| 6151 | 6303 | return; |
|---|
| 6304 | +} |
|---|
| 6305 | + |
|---|
| 6306 | +/** |
|---|
| 6307 | + * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
|---|
| 6308 | + * @phba: Pointer to HBA context object. |
|---|
| 6309 | + * |
|---|
| 6310 | + * Disable FW logging into host memory on the adapter. To |
|---|
| 6311 | + * be done before reading logs from the host memory. |
|---|
| 6312 | + **/ |
|---|
| 6313 | +void |
|---|
| 6314 | +lpfc_ras_stop_fwlog(struct lpfc_hba *phba) |
|---|
| 6315 | +{ |
|---|
| 6316 | + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; |
|---|
| 6317 | + |
|---|
| 6318 | + spin_lock_irq(&phba->hbalock); |
|---|
| 6319 | + ras_fwlog->state = INACTIVE; |
|---|
| 6320 | + spin_unlock_irq(&phba->hbalock); |
|---|
| 6321 | + |
|---|
| 6322 | + /* Disable FW logging to host memory */ |
|---|
| 6323 | + writel(LPFC_CTL_PDEV_CTL_DDL_RAS, |
|---|
| 6324 | + phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); |
|---|
| 6325 | + |
|---|
| 6326 | + /* Wait 10ms for firmware to stop using DMA buffer */ |
|---|
| 6327 | + usleep_range(10 * 1000, 20 * 1000); |
|---|
| 6328 | +} |
|---|
| 6329 | + |
|---|
| 6330 | +/** |
|---|
| 6331 | + * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging. |
|---|
| 6332 | + * @phba: Pointer to HBA context object. |
|---|
| 6333 | + * |
|---|
| 6334 | + * This function is called to free memory allocated for RAS FW logging |
|---|
| 6335 | + * support in the driver. |
|---|
| 6336 | + **/ |
|---|
| 6337 | +void |
|---|
| 6338 | +lpfc_sli4_ras_dma_free(struct lpfc_hba *phba) |
|---|
| 6339 | +{ |
|---|
| 6340 | + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; |
|---|
| 6341 | + struct lpfc_dmabuf *dmabuf, *next; |
|---|
| 6342 | + |
|---|
| 6343 | + if (!list_empty(&ras_fwlog->fwlog_buff_list)) { |
|---|
| 6344 | + list_for_each_entry_safe(dmabuf, next, |
|---|
| 6345 | + &ras_fwlog->fwlog_buff_list, |
|---|
| 6346 | + list) { |
|---|
| 6347 | + list_del(&dmabuf->list); |
|---|
| 6348 | + dma_free_coherent(&phba->pcidev->dev, |
|---|
| 6349 | + LPFC_RAS_MAX_ENTRY_SIZE, |
|---|
| 6350 | + dmabuf->virt, dmabuf->phys); |
|---|
| 6351 | + kfree(dmabuf); |
|---|
| 6352 | + } |
|---|
| 6353 | + } |
|---|
| 6354 | + |
|---|
| 6355 | + if (ras_fwlog->lwpd.virt) { |
|---|
| 6356 | + dma_free_coherent(&phba->pcidev->dev, |
|---|
| 6357 | + sizeof(uint32_t) * 2, |
|---|
| 6358 | + ras_fwlog->lwpd.virt, |
|---|
| 6359 | + ras_fwlog->lwpd.phys); |
|---|
| 6360 | + ras_fwlog->lwpd.virt = NULL; |
|---|
| 6361 | + } |
|---|
| 6362 | + |
|---|
| 6363 | + spin_lock_irq(&phba->hbalock); |
|---|
| 6364 | + ras_fwlog->state = INACTIVE; |
|---|
| 6365 | + spin_unlock_irq(&phba->hbalock); |
|---|
| 6366 | +} |
|---|
| 6367 | + |
|---|
| 6368 | +/** |
|---|
| 6369 | + * lpfc_sli4_ras_dma_alloc - Allocate DMA memory for FW logging support
|---|
| 6370 | + * @phba: Pointer to HBA context object. |
|---|
| 6371 | + * @fwlog_buff_count: Count of buffers to be created. |
|---|
| 6372 | + * |
|---|
| 6373 | + * This routine allocates DMA memory for the Log Write Position Data (LWPD)
|---|
| 6374 | + * and the buffers used by the adapter to post FW log updates to host memory.
|---|
| 6375 | + * Buffer count is calculated based on module param ras_fwlog_buffsize |
|---|
| 6376 | + * Size of each buffer posted to FW is 64K. |
|---|
| 6377 | + **/ |
|---|
| 6378 | + |
|---|
| 6379 | +static int |
|---|
| 6380 | +lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, |
|---|
| 6381 | + uint32_t fwlog_buff_count) |
|---|
| 6382 | +{ |
|---|
| 6383 | + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; |
|---|
| 6384 | + struct lpfc_dmabuf *dmabuf; |
|---|
| 6385 | + int rc = 0, i = 0; |
|---|
| 6386 | + |
|---|
| 6387 | + /* Initialize List */ |
|---|
| 6388 | + INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list); |
|---|
| 6389 | + |
|---|
| 6390 | + /* Allocate memory for the LWPD */ |
|---|
| 6391 | + ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev, |
|---|
| 6392 | + sizeof(uint32_t) * 2, |
|---|
| 6393 | + &ras_fwlog->lwpd.phys, |
|---|
| 6394 | + GFP_KERNEL); |
|---|
| 6395 | + if (!ras_fwlog->lwpd.virt) { |
|---|
| 6396 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6397 | + "6185 LWPD Memory Alloc Failed\n"); |
|---|
| 6398 | + |
|---|
| 6399 | + return -ENOMEM; |
|---|
| 6400 | + } |
|---|
| 6401 | + |
|---|
| 6402 | + ras_fwlog->fw_buffcount = fwlog_buff_count; |
|---|
| 6403 | + for (i = 0; i < ras_fwlog->fw_buffcount; i++) { |
|---|
| 6404 | + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), |
|---|
| 6405 | + GFP_KERNEL); |
|---|
| 6406 | + if (!dmabuf) { |
|---|
| 6407 | + rc = -ENOMEM; |
|---|
| 6408 | + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
|---|
| 6409 | + "6186 Memory Alloc failed FW logging"); |
|---|
| 6410 | + goto free_mem; |
|---|
| 6411 | + } |
|---|
| 6412 | + |
|---|
| 6413 | + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, |
|---|
| 6414 | + LPFC_RAS_MAX_ENTRY_SIZE, |
|---|
| 6415 | + &dmabuf->phys, GFP_KERNEL); |
|---|
| 6416 | + if (!dmabuf->virt) { |
|---|
| 6417 | + kfree(dmabuf); |
|---|
| 6418 | + rc = -ENOMEM; |
|---|
| 6419 | + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
|---|
| 6420 | + "6187 DMA Alloc Failed FW logging"); |
|---|
| 6421 | + goto free_mem; |
|---|
| 6422 | + } |
|---|
| 6423 | + dmabuf->buffer_tag = i; |
|---|
| 6424 | + list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list); |
|---|
| 6425 | + } |
|---|
| 6426 | + |
|---|
| 6427 | +free_mem: |
|---|
| 6428 | + if (rc) |
|---|
| 6429 | + lpfc_sli4_ras_dma_free(phba); |
|---|
| 6430 | + |
|---|
| 6431 | + return rc; |
|---|
| 6432 | +} |
|---|
| 6433 | + |
|---|
| 6434 | +/** |
|---|
| 6435 | + * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
|---|
| 6436 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 6437 | + * @pmb: pointer to the driver internal queue element for mailbox command. |
|---|
| 6438 | + * |
|---|
| 6439 | + * Completion handler for driver's RAS MBX command to the device. |
|---|
| 6440 | + **/ |
|---|
| 6441 | +static void |
|---|
| 6442 | +lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
|---|
| 6443 | +{ |
|---|
| 6444 | + MAILBOX_t *mb; |
|---|
| 6445 | + union lpfc_sli4_cfg_shdr *shdr; |
|---|
| 6446 | + uint32_t shdr_status, shdr_add_status; |
|---|
| 6447 | + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; |
|---|
| 6448 | + |
|---|
| 6449 | + mb = &pmb->u.mb; |
|---|
| 6450 | + |
|---|
| 6451 | + shdr = (union lpfc_sli4_cfg_shdr *) |
|---|
| 6452 | + &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; |
|---|
| 6453 | + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 6454 | + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 6455 | + |
|---|
| 6456 | + if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { |
|---|
| 6457 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6458 | + "6188 FW LOG mailbox " |
|---|
| 6459 | + "completed with status x%x add_status x%x," |
|---|
| 6460 | + " mbx status x%x\n", |
|---|
| 6461 | + shdr_status, shdr_add_status, mb->mbxStatus); |
|---|
| 6462 | + |
|---|
| 6463 | + ras_fwlog->ras_hwsupport = false; |
|---|
| 6464 | + goto disable_ras; |
|---|
| 6465 | + } |
|---|
| 6466 | + |
|---|
| 6467 | + spin_lock_irq(&phba->hbalock); |
|---|
| 6468 | + ras_fwlog->state = ACTIVE; |
|---|
| 6469 | + spin_unlock_irq(&phba->hbalock); |
|---|
| 6470 | + mempool_free(pmb, phba->mbox_mem_pool); |
|---|
| 6471 | + |
|---|
| 6472 | + return; |
|---|
| 6473 | + |
|---|
| 6474 | +disable_ras: |
|---|
| 6475 | + /* Free RAS DMA memory */ |
|---|
| 6476 | + lpfc_sli4_ras_dma_free(phba); |
|---|
| 6477 | + mempool_free(pmb, phba->mbox_mem_pool); |
|---|
| 6478 | +} |
|---|
| 6479 | + |
|---|
| 6480 | +/** |
|---|
| 6481 | + * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
|---|
| 6482 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 6483 | + * @fwlog_level: Logging verbosity level. |
|---|
| 6484 | + * @fwlog_enable: Enable/Disable logging. |
|---|
| 6485 | + * |
|---|
| 6486 | + * Initialize memory and post mailbox command to enable FW logging in host |
|---|
| 6487 | + * memory. |
|---|
| 6488 | + **/ |
|---|
| 6489 | +int |
|---|
| 6490 | +lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, |
|---|
| 6491 | + uint32_t fwlog_level, |
|---|
| 6492 | + uint32_t fwlog_enable) |
|---|
| 6493 | +{ |
|---|
| 6494 | + struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; |
|---|
| 6495 | + struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; |
|---|
| 6496 | + struct lpfc_dmabuf *dmabuf; |
|---|
| 6497 | + LPFC_MBOXQ_t *mbox; |
|---|
| 6498 | + uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; |
|---|
| 6499 | + int rc = 0; |
|---|
| 6500 | + |
|---|
| 6501 | + spin_lock_irq(&phba->hbalock); |
|---|
| 6502 | + ras_fwlog->state = INACTIVE; |
|---|
| 6503 | + spin_unlock_irq(&phba->hbalock); |
|---|
| 6504 | + |
|---|
| 6505 | + fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * |
|---|
| 6506 | + phba->cfg_ras_fwlog_buffsize); |
|---|
| 6507 | + fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); |
|---|
| 6508 | + |
|---|
| 6509 | + /* |
|---|
| 6510 | + * If re-enabling FW logging support use earlier allocated |
|---|
| 6511 | + * DMA buffers while posting MBX command. |
|---|
| 6512 | + **/ |
|---|
| 6513 | + if (!ras_fwlog->lwpd.virt) { |
|---|
| 6514 | + rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); |
|---|
| 6515 | + if (rc) { |
|---|
| 6516 | + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
|---|
| 6517 | + "6189 FW Log Memory Allocation Failed"); |
|---|
| 6518 | + return rc; |
|---|
| 6519 | + } |
|---|
| 6520 | + } |
|---|
| 6521 | + |
|---|
| 6522 | + /* Setup Mailbox command */ |
|---|
| 6523 | + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 6524 | + if (!mbox) { |
|---|
| 6525 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6526 | + "6190 RAS MBX Alloc Failed"); |
|---|
| 6527 | + rc = -ENOMEM; |
|---|
| 6528 | + goto mem_free; |
|---|
| 6529 | + } |
|---|
| 6530 | + |
|---|
| 6531 | + ras_fwlog->fw_loglevel = fwlog_level; |
|---|
| 6532 | + len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - |
|---|
| 6533 | + sizeof(struct lpfc_sli4_cfg_mhdr)); |
|---|
| 6534 | + |
|---|
| 6535 | + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, |
|---|
| 6536 | + LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, |
|---|
| 6537 | + len, LPFC_SLI4_MBX_EMBED); |
|---|
| 6538 | + |
|---|
| 6539 | + mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; |
|---|
| 6540 | + bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, |
|---|
| 6541 | + fwlog_enable); |
|---|
| 6542 | + bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, |
|---|
| 6543 | + ras_fwlog->fw_loglevel); |
|---|
| 6544 | + bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, |
|---|
| 6545 | + ras_fwlog->fw_buffcount); |
|---|
| 6546 | + bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, |
|---|
| 6547 | + LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); |
|---|
| 6548 | + |
|---|
| 6549 | + /* Update DMA buffer address */ |
|---|
| 6550 | + list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { |
|---|
| 6551 | + memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); |
|---|
| 6552 | + |
|---|
| 6553 | + mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = |
|---|
| 6554 | + putPaddrLow(dmabuf->phys); |
|---|
| 6555 | + |
|---|
| 6556 | + mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = |
|---|
| 6557 | + putPaddrHigh(dmabuf->phys); |
|---|
| 6558 | + } |
|---|
| 6559 | + |
|---|
| 6560 | + /* Update LWPD address */
|---|
| 6561 | + mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); |
|---|
| 6562 | + mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); |
|---|
| 6563 | + |
|---|
| 6564 | + spin_lock_irq(&phba->hbalock); |
|---|
| 6565 | + ras_fwlog->state = REG_INPROGRESS; |
|---|
| 6566 | + spin_unlock_irq(&phba->hbalock); |
|---|
| 6567 | + mbox->vport = phba->pport; |
|---|
| 6568 | + mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; |
|---|
| 6569 | + |
|---|
| 6570 | + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
|---|
| 6571 | + |
|---|
| 6572 | + if (rc == MBX_NOT_FINISHED) { |
|---|
| 6573 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6574 | + "6191 FW-Log Mailbox failed. " |
|---|
| 6575 | + "status %d mbxStatus : x%x", rc, |
|---|
| 6576 | + bf_get(lpfc_mqe_status, &mbox->u.mqe)); |
|---|
| 6577 | + mempool_free(mbox, phba->mbox_mem_pool); |
|---|
| 6578 | + rc = -EIO; |
|---|
| 6579 | + goto mem_free; |
|---|
| 6580 | + } else |
|---|
| 6581 | + rc = 0; |
|---|
| 6582 | +mem_free: |
|---|
| 6583 | + if (rc) |
|---|
| 6584 | + lpfc_sli4_ras_dma_free(phba); |
|---|
| 6585 | + |
|---|
| 6586 | + return rc; |
|---|
| 6587 | +} |
|---|
| 6588 | + |
|---|
| 6589 | +/** |
|---|
| 6590 | + * lpfc_sli4_ras_setup - Check if RAS supported on the adapter |
|---|
| 6591 | + * @phba: Pointer to HBA context object. |
|---|
| 6592 | + * |
|---|
| 6593 | + * Check if RAS is supported on the adapter and initialize it. |
|---|
| 6594 | + **/ |
|---|
| 6595 | +void |
|---|
| 6596 | +lpfc_sli4_ras_setup(struct lpfc_hba *phba) |
|---|
| 6597 | +{ |
|---|
| 6598 | + /* Check RAS FW Log needs to be enabled or not */ |
|---|
| 6599 | + if (lpfc_check_fwlog_support(phba)) |
|---|
| 6600 | + return; |
|---|
| 6601 | + |
|---|
| 6602 | + lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, |
|---|
| 6603 | + LPFC_RAS_ENABLE_LOGGING); |
|---|
| 6152 | 6604 | } |
|---|
| 6153 | 6605 | |
|---|
| 6154 | 6606 | /** |
|---|
| .. | .. |
|---|
| 6254 | 6706 | /* RPIs. */ |
|---|
| 6255 | 6707 | count = phba->sli4_hba.max_cfg_param.max_rpi; |
|---|
| 6256 | 6708 | if (count <= 0) { |
|---|
| 6257 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 6709 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6258 | 6710 | "3279 Invalid provisioning of " |
|---|
| 6259 | 6711 | "rpi:%d\n", count); |
|---|
| 6260 | 6712 | rc = -EINVAL; |
|---|
| .. | .. |
|---|
| 6282 | 6734 | /* VPIs. */ |
|---|
| 6283 | 6735 | count = phba->sli4_hba.max_cfg_param.max_vpi; |
|---|
| 6284 | 6736 | if (count <= 0) { |
|---|
| 6285 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 6737 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6286 | 6738 | "3280 Invalid provisioning of " |
|---|
| 6287 | 6739 | "vpi:%d\n", count); |
|---|
| 6288 | 6740 | rc = -EINVAL; |
|---|
| .. | .. |
|---|
| 6309 | 6761 | /* XRIs. */ |
|---|
| 6310 | 6762 | count = phba->sli4_hba.max_cfg_param.max_xri; |
|---|
| 6311 | 6763 | if (count <= 0) { |
|---|
| 6312 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 6764 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6313 | 6765 | "3281 Invalid provisioning of " |
|---|
| 6314 | 6766 | "xri:%d\n", count); |
|---|
| 6315 | 6767 | rc = -EINVAL; |
|---|
| .. | .. |
|---|
| 6338 | 6790 | /* VFIs. */ |
|---|
| 6339 | 6791 | count = phba->sli4_hba.max_cfg_param.max_vfi; |
|---|
| 6340 | 6792 | if (count <= 0) { |
|---|
| 6341 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 6793 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6342 | 6794 | "3282 Invalid provisioning of " |
|---|
| 6343 | 6795 | "vfi:%d\n", count); |
|---|
| 6344 | 6796 | rc = -EINVAL; |
|---|
| .. | .. |
|---|
| 6432 | 6884 | * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. |
|---|
| 6433 | 6885 | * @phba: Pointer to HBA context object. |
|---|
| 6434 | 6886 | * @type: The resource extent type. |
|---|
| 6435 | | - * @extnt_count: buffer to hold port extent count response |
|---|
| 6887 | + * @extnt_cnt: buffer to hold port extent count response |
|---|
| 6436 | 6888 | * @extnt_size: buffer to hold port extent size response. |
|---|
| 6437 | 6889 | * |
|---|
| 6438 | 6890 | * This function calls the port to read the host allocated extents |
|---|
| .. | .. |
|---|
| 6516 | 6968 | LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, |
|---|
| 6517 | 6969 | req_len, emb); |
|---|
| 6518 | 6970 | if (alloc_len < req_len) { |
|---|
| 6519 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 6971 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6520 | 6972 | "2983 Allocated DMA memory size (x%x) is " |
|---|
| 6521 | 6973 | "less than the requested DMA memory " |
|---|
| 6522 | 6974 | "size (x%x)\n", alloc_len, req_len); |
|---|
| .. | .. |
|---|
| 6559 | 7011 | } |
|---|
| 6560 | 7012 | |
|---|
| 6561 | 7013 | if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { |
|---|
| 6562 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, |
|---|
| 7014 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6563 | 7015 | "2984 Failed to read allocated resources " |
|---|
| 6564 | 7016 | "for type %d - Status 0x%x Add'l Status 0x%x.\n", |
|---|
| 6565 | 7017 | type, |
|---|
| .. | .. |
|---|
| 6576 | 7028 | /** |
|---|
| 6577 | 7029 | * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block |
|---|
| 6578 | 7030 | * @phba: pointer to lpfc hba data structure. |
|---|
| 6579 | | - * @pring: Pointer to driver SLI ring object. |
|---|
| 6580 | 7031 | * @sgl_list: linked link of sgl buffers to post |
|---|
| 6581 | 7032 | * @cnt: number of linked list buffers |
|---|
| 6582 | 7033 | * |
|---|
| .. | .. |
|---|
| 6714 | 7165 | spin_unlock(&phba->sli4_hba.sgl_list_lock); |
|---|
| 6715 | 7166 | spin_unlock_irq(&phba->hbalock); |
|---|
| 6716 | 7167 | } else { |
|---|
| 6717 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 7168 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6718 | 7169 | "3161 Failure to post sgl to port.\n"); |
|---|
| 6719 | 7170 | return -EIO; |
|---|
| 6720 | 7171 | } |
|---|
| .. | .. |
|---|
| 6723 | 7174 | return total_cnt; |
|---|
| 6724 | 7175 | } |
|---|
| 6725 | 7176 | |
|---|
| 6726 | | -void |
|---|
| 7177 | +/** |
|---|
| 7178 | + * lpfc_sli4_repost_io_sgl_list - Repost all the allocated IO buffer sgls
|---|
| 7179 | + * @phba: pointer to lpfc hba data structure.
|---|
| 7180 | + *
|---|
| 7181 | + * This routine walks the list of IO buffers that have been allocated and
|---|
| 7182 | + * reposts them to the port by using SGL block post. This is needed after a
|---|
| 7183 | + * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
|---|
| 7184 | + * is responsible for moving all IO buffers on the lpfc_abts_nvme_sgl_list
|---|
| 7185 | + * to the lpfc_io_buf_list. If the repost fails, reject all IO buffers.
|---|
| 7186 | + * |
|---|
| 7187 | + * Returns: 0 = success, non-zero failure. |
|---|
| 7188 | + **/ |
|---|
| 7189 | +static int |
|---|
| 7190 | +lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) |
|---|
| 7191 | +{ |
|---|
| 7192 | + LIST_HEAD(post_nblist); |
|---|
| 7193 | + int num_posted, rc = 0; |
|---|
| 7194 | + |
|---|
| 7195 | + /* get all NVME buffers need to repost to a local list */ |
|---|
| 7196 | + lpfc_io_buf_flush(phba, &post_nblist); |
|---|
| 7197 | + |
|---|
| 7198 | + /* post the list of nvme buffer sgls to port if available */ |
|---|
| 7199 | + if (!list_empty(&post_nblist)) { |
|---|
| 7200 | + num_posted = lpfc_sli4_post_io_sgl_list( |
|---|
| 7201 | + phba, &post_nblist, phba->sli4_hba.io_xri_cnt); |
|---|
| 7202 | + /* failed to post any nvme buffer, return error */ |
|---|
| 7203 | + if (num_posted == 0) |
|---|
| 7204 | + rc = -EIO; |
|---|
| 7205 | + } |
|---|
| 7206 | + return rc; |
|---|
| 7207 | +} |
|---|
| 7208 | + |
|---|
| 7209 | +static void |
|---|
| 6727 | 7210 | lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) |
|---|
| 6728 | 7211 | { |
|---|
| 6729 | 7212 | uint32_t len; |
|---|
| .. | .. |
|---|
| 6785 | 7268 | drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); |
|---|
| 6786 | 7269 | rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); |
|---|
| 6787 | 7270 | if (rc < 0) { |
|---|
| 6788 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 7271 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6789 | 7272 | "6421 Cannot post to HRQ %d: %x %x %x " |
|---|
| 6790 | 7273 | "DRQ %x %x\n", |
|---|
| 6791 | 7274 | hrq->queue_id, |
|---|
| .. | .. |
|---|
| 6806 | 7289 | } |
|---|
| 6807 | 7290 | |
|---|
| 6808 | 7291 | /** |
|---|
| 7292 | + * lpfc_init_idle_stat_hb - Initialize idle_stat tracking |
|---|
| 7293 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 7294 | + * |
|---|
| 7295 | + * This routine initializes the per-cq idle_stat to dynamically dictate |
|---|
| 7296 | + * polling decisions. |
|---|
| 7297 | + * |
|---|
| 7298 | + * Return codes: |
|---|
| 7299 | + * None |
|---|
| 7300 | + **/ |
|---|
| 7301 | +static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba) |
|---|
| 7302 | +{ |
|---|
| 7303 | + int i; |
|---|
| 7304 | + struct lpfc_sli4_hdw_queue *hdwq; |
|---|
| 7305 | + struct lpfc_queue *cq; |
|---|
| 7306 | + struct lpfc_idle_stat *idle_stat; |
|---|
| 7307 | + u64 wall; |
|---|
| 7308 | + |
|---|
| 7309 | + for_each_present_cpu(i) { |
|---|
| 7310 | + hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; |
|---|
| 7311 | + cq = hdwq->io_cq; |
|---|
| 7312 | + |
|---|
| 7313 | + /* Skip if we've already handled this cq's primary CPU */ |
|---|
| 7314 | + if (cq->chann != i) |
|---|
| 7315 | + continue; |
|---|
| 7316 | + |
|---|
| 7317 | + idle_stat = &phba->sli4_hba.idle_stat[i]; |
|---|
| 7318 | + |
|---|
| 7319 | + idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1); |
|---|
| 7320 | + idle_stat->prev_wall = wall; |
|---|
| 7321 | + |
|---|
| 7322 | + if (phba->nvmet_support) |
|---|
| 7323 | + cq->poll_mode = LPFC_QUEUE_WORK; |
|---|
| 7324 | + else |
|---|
| 7325 | + cq->poll_mode = LPFC_IRQ_POLL; |
|---|
| 7326 | + } |
|---|
| 7327 | + |
|---|
| 7328 | + if (!phba->nvmet_support) |
|---|
| 7329 | + schedule_delayed_work(&phba->idle_stat_delay_work, |
|---|
| 7330 | + msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); |
|---|
| 7331 | +} |
|---|
| 7332 | + |
|---|
| 7333 | +static void lpfc_sli4_dip(struct lpfc_hba *phba) |
|---|
| 7334 | +{ |
|---|
| 7335 | + uint32_t if_type; |
|---|
| 7336 | + |
|---|
| 7337 | + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
|---|
| 7338 | + if (if_type == LPFC_SLI_INTF_IF_TYPE_2 || |
|---|
| 7339 | + if_type == LPFC_SLI_INTF_IF_TYPE_6) { |
|---|
| 7340 | + struct lpfc_register reg_data; |
|---|
| 7341 | + |
|---|
| 7342 | + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, |
|---|
| 7343 | + ®_data.word0)) |
|---|
| 7344 | + return; |
|---|
| 7345 | + |
|---|
| 7346 | + if (bf_get(lpfc_sliport_status_dip, ®_data)) |
|---|
| 7347 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7348 | + "2904 Firmware Dump Image Present" |
|---|
| 7349 | + " on Adapter"); |
|---|
| 7350 | + } |
|---|
| 7351 | +} |
|---|
| 7352 | + |
|---|
| 7353 | +/** |
|---|
| 6809 | 7354 | * lpfc_sli4_hba_setup - SLI4 device initialization PCI function |
|---|
| 6810 | 7355 | * @phba: Pointer to HBA context object. |
|---|
| 6811 | 7356 | * |
|---|
| .. | .. |
|---|
| 6817 | 7362 | int |
|---|
| 6818 | 7363 | lpfc_sli4_hba_setup(struct lpfc_hba *phba) |
|---|
| 6819 | 7364 | { |
|---|
| 6820 | | - int rc, i, cnt; |
|---|
| 7365 | + int rc, i, cnt, len, dd; |
|---|
| 6821 | 7366 | LPFC_MBOXQ_t *mboxq; |
|---|
| 6822 | 7367 | struct lpfc_mqe *mqe; |
|---|
| 6823 | 7368 | uint8_t *vpd; |
|---|
| .. | .. |
|---|
| 6827 | 7372 | struct lpfc_vport *vport = phba->pport; |
|---|
| 6828 | 7373 | struct lpfc_dmabuf *mp; |
|---|
| 6829 | 7374 | struct lpfc_rqb *rqbp; |
|---|
| 7375 | + u32 flg; |
|---|
| 6830 | 7376 | |
|---|
| 6831 | 7377 | /* Perform a PCI function reset to start from clean */ |
|---|
| 6832 | 7378 | rc = lpfc_pci_function_reset(phba); |
|---|
| .. | .. |
|---|
| 6840 | 7386 | else { |
|---|
| 6841 | 7387 | spin_lock_irq(&phba->hbalock); |
|---|
| 6842 | 7388 | phba->sli.sli_flag |= LPFC_SLI_ACTIVE; |
|---|
| 7389 | + flg = phba->sli.sli_flag; |
|---|
| 6843 | 7390 | spin_unlock_irq(&phba->hbalock); |
|---|
| 7391 | + /* Allow a little time after setting SLI_ACTIVE for any polled |
|---|
| 7392 | + * MBX commands to complete via BSG. |
|---|
| 7393 | + */ |
|---|
| 7394 | + for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) { |
|---|
| 7395 | + msleep(20); |
|---|
| 7396 | + spin_lock_irq(&phba->hbalock); |
|---|
| 7397 | + flg = phba->sli.sli_flag; |
|---|
| 7398 | + spin_unlock_irq(&phba->hbalock); |
|---|
| 7399 | + } |
|---|
| 6844 | 7400 | } |
|---|
| 7401 | + |
|---|
| 7402 | + lpfc_sli4_dip(phba); |
|---|
| 6845 | 7403 | |
|---|
| 6846 | 7404 | /* |
|---|
| 6847 | 7405 | * Allocate a single mailbox container for initializing the |
|---|
| .. | .. |
|---|
| 6880 | 7438 | else |
|---|
| 6881 | 7439 | phba->hba_flag &= ~HBA_FIP_SUPPORT; |
|---|
| 6882 | 7440 | |
|---|
| 6883 | | - phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; |
|---|
| 7441 | + phba->hba_flag &= ~HBA_IOQ_FLUSH; |
|---|
| 6884 | 7442 | |
|---|
| 6885 | 7443 | if (phba->sli_rev != LPFC_SLI_REV4) { |
|---|
| 6886 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7444 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6887 | 7445 | "0376 READ_REV Error. SLI Level %d " |
|---|
| 6888 | 7446 | "FCoE enabled %d\n", |
|---|
| 6889 | 7447 | phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); |
|---|
| .. | .. |
|---|
| 6912 | 7470 | "3080 Successful retrieving SLI4 device " |
|---|
| 6913 | 7471 | "physical port name: %s.\n", phba->Port); |
|---|
| 6914 | 7472 | |
|---|
| 7473 | + rc = lpfc_sli4_get_ctl_attr(phba); |
|---|
| 7474 | + if (!rc) |
|---|
| 7475 | + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, |
|---|
| 7476 | + "8351 Successful retrieving SLI4 device " |
|---|
| 7477 | + "CTL ATTR\n"); |
|---|
| 7478 | + |
|---|
| 6915 | 7479 | /* |
|---|
| 6916 | 7480 | * Evaluate the read rev and vpd data. Populate the driver |
|---|
| 6917 | 7481 | * state with the results. If this routine fails, the failure |
|---|
| .. | .. |
|---|
| 6919 | 7483 | */ |
|---|
| 6920 | 7484 | rc = lpfc_parse_vpd(phba, vpd, vpd_size); |
|---|
| 6921 | 7485 | if (unlikely(!rc)) { |
|---|
| 6922 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7486 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 6923 | 7487 | "0377 Error %d parsing vpd. " |
|---|
| 6924 | 7488 | "Using defaults.\n", rc); |
|---|
| 6925 | 7489 | rc = 0; |
|---|
| .. | .. |
|---|
| 6964 | 7528 | phba->vpd.rev.opFwName, |
|---|
| 6965 | 7529 | phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, |
|---|
| 6966 | 7530 | phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); |
|---|
| 6967 | | - |
|---|
| 6968 | | - /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ |
|---|
| 6969 | | - rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); |
|---|
| 6970 | | - if (phba->pport->cfg_lun_queue_depth > rc) { |
|---|
| 6971 | | - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
|---|
| 6972 | | - "3362 LUN queue depth changed from %d to %d\n", |
|---|
| 6973 | | - phba->pport->cfg_lun_queue_depth, rc); |
|---|
| 6974 | | - phba->pport->cfg_lun_queue_depth = rc; |
|---|
| 6975 | | - } |
|---|
| 6976 | 7531 | |
|---|
| 6977 | 7532 | if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == |
|---|
| 6978 | 7533 | LPFC_SLI_INTF_IF_TYPE_0) { |
|---|
| .. | .. |
|---|
| 7062 | 7617 | phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); |
|---|
| 7063 | 7618 | spin_unlock_irq(&phba->hbalock); |
|---|
| 7064 | 7619 | |
|---|
| 7620 | + /* Always try to enable dual dump feature if we can */ |
|---|
| 7621 | + lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP); |
|---|
| 7622 | + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
|---|
| 7623 | + dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature); |
|---|
| 7624 | + if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP)) |
|---|
| 7625 | + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 7626 | + "6448 Dual Dump is enabled\n"); |
|---|
| 7627 | + else |
|---|
| 7628 | + lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT, |
|---|
| 7629 | + "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, " |
|---|
| 7630 | + "rc:x%x dd:x%x\n", |
|---|
| 7631 | + bf_get(lpfc_mqe_command, &mboxq->u.mqe), |
|---|
| 7632 | + lpfc_sli_config_mbox_subsys_get( |
|---|
| 7633 | + phba, mboxq), |
|---|
| 7634 | + lpfc_sli_config_mbox_opcode_get( |
|---|
| 7635 | + phba, mboxq), |
|---|
| 7636 | + rc, dd); |
|---|
| 7065 | 7637 | /* |
|---|
| 7066 | 7638 | * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent |
|---|
| 7067 | 7639 | * calls depends on these resources to complete port setup. |
|---|
| 7068 | 7640 | */ |
|---|
| 7069 | 7641 | rc = lpfc_sli4_alloc_resource_identifiers(phba); |
|---|
| 7070 | 7642 | if (rc) { |
|---|
| 7071 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7643 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7072 | 7644 | "2920 Failed to alloc Resource IDs " |
|---|
| 7073 | 7645 | "rc = x%x\n", rc); |
|---|
| 7074 | 7646 | goto out_free_mbox; |
|---|
| .. | .. |
|---|
| 7093 | 7665 | |
|---|
| 7094 | 7666 | mboxq->vport = vport; |
|---|
| 7095 | 7667 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
|---|
| 7096 | | - mp = (struct lpfc_dmabuf *) mboxq->context1; |
|---|
| 7668 | + mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; |
|---|
| 7097 | 7669 | if (rc == MBX_SUCCESS) { |
|---|
| 7098 | 7670 | memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); |
|---|
| 7099 | 7671 | rc = 0; |
|---|
| .. | .. |
|---|
| 7105 | 7677 | */ |
|---|
| 7106 | 7678 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
|---|
| 7107 | 7679 | kfree(mp); |
|---|
| 7108 | | - mboxq->context1 = NULL; |
|---|
| 7680 | + mboxq->ctx_buf = NULL; |
|---|
| 7109 | 7681 | if (unlikely(rc)) { |
|---|
| 7110 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7682 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7111 | 7683 | "0382 READ_SPARAM command failed " |
|---|
| 7112 | 7684 | "status %d, mbxStatus x%x\n", |
|---|
| 7113 | 7685 | rc, bf_get(lpfc_mqe_status, mqe)); |
|---|
| .. | .. |
|---|
| 7125 | 7697 | /* Create all the SLI4 queues */ |
|---|
| 7126 | 7698 | rc = lpfc_sli4_queue_create(phba); |
|---|
| 7127 | 7699 | if (rc) { |
|---|
| 7128 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 7700 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7129 | 7701 | "3089 Failed to allocate queues\n"); |
|---|
| 7130 | 7702 | rc = -ENODEV; |
|---|
| 7131 | 7703 | goto out_free_mbox; |
|---|
| .. | .. |
|---|
| 7133 | 7705 | /* Set up all the queues to the device */ |
|---|
| 7134 | 7706 | rc = lpfc_sli4_queue_setup(phba); |
|---|
| 7135 | 7707 | if (unlikely(rc)) { |
|---|
| 7136 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7708 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7137 | 7709 | "0381 Error %d during queue setup.\n ", rc); |
|---|
| 7138 | 7710 | goto out_stop_timers; |
|---|
| 7139 | 7711 | } |
|---|
| .. | .. |
|---|
| 7144 | 7716 | /* update host els xri-sgl sizes and mappings */ |
|---|
| 7145 | 7717 | rc = lpfc_sli4_els_sgl_update(phba); |
|---|
| 7146 | 7718 | if (unlikely(rc)) { |
|---|
| 7147 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7719 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7148 | 7720 | "1400 Failed to update xri-sgl size and " |
|---|
| 7149 | 7721 | "mapping: %d\n", rc); |
|---|
| 7150 | 7722 | goto out_destroy_queue; |
|---|
| .. | .. |
|---|
| 7154 | 7726 | rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, |
|---|
| 7155 | 7727 | phba->sli4_hba.els_xri_cnt); |
|---|
| 7156 | 7728 | if (unlikely(rc < 0)) { |
|---|
| 7157 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7729 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7158 | 7730 | "0582 Error %d during els sgl post " |
|---|
| 7159 | 7731 | "operation\n", rc); |
|---|
| 7160 | 7732 | rc = -ENODEV; |
|---|
| .. | .. |
|---|
| 7166 | 7738 | /* update host nvmet xri-sgl sizes and mappings */ |
|---|
| 7167 | 7739 | rc = lpfc_sli4_nvmet_sgl_update(phba); |
|---|
| 7168 | 7740 | if (unlikely(rc)) { |
|---|
| 7169 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7741 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7170 | 7742 | "6308 Failed to update nvmet-sgl size " |
|---|
| 7171 | 7743 | "and mapping: %d\n", rc); |
|---|
| 7172 | 7744 | goto out_destroy_queue; |
|---|
| .. | .. |
|---|
| 7178 | 7750 | &phba->sli4_hba.lpfc_nvmet_sgl_list, |
|---|
| 7179 | 7751 | phba->sli4_hba.nvmet_xri_cnt); |
|---|
| 7180 | 7752 | if (unlikely(rc < 0)) { |
|---|
| 7181 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7753 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7182 | 7754 | "3117 Error %d during nvmet " |
|---|
| 7183 | 7755 | "sgl post\n", rc); |
|---|
| 7184 | 7756 | rc = -ENODEV; |
|---|
| .. | .. |
|---|
| 7186 | 7758 | } |
|---|
| 7187 | 7759 | phba->sli4_hba.nvmet_xri_cnt = rc; |
|---|
| 7188 | 7760 | |
|---|
| 7189 | | - cnt = phba->cfg_iocb_cnt * 1024; |
|---|
| 7190 | | - /* We need 1 iocbq for every SGL, for IO processing */ |
|---|
| 7191 | | - cnt += phba->sli4_hba.nvmet_xri_cnt; |
|---|
| 7761 | + /* We allocate an iocbq for every receive context SGL. |
|---|
| 7762 | + * The additional allocation is for abort and ls handling. |
|---|
| 7763 | + */ |
|---|
| 7764 | + cnt = phba->sli4_hba.nvmet_xri_cnt + |
|---|
| 7765 | + phba->sli4_hba.max_cfg_param.max_xri; |
|---|
| 7192 | 7766 | } else { |
|---|
| 7193 | | - /* update host scsi xri-sgl sizes and mappings */ |
|---|
| 7194 | | - rc = lpfc_sli4_scsi_sgl_update(phba); |
|---|
| 7767 | + /* update host common xri-sgl sizes and mappings */ |
|---|
| 7768 | + rc = lpfc_sli4_io_sgl_update(phba); |
|---|
| 7195 | 7769 | if (unlikely(rc)) { |
|---|
| 7196 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7197 | | - "6309 Failed to update scsi-sgl size " |
|---|
| 7198 | | - "and mapping: %d\n", rc); |
|---|
| 7199 | | - goto out_destroy_queue; |
|---|
| 7200 | | - } |
|---|
| 7201 | | - |
|---|
| 7202 | | - /* update host nvme xri-sgl sizes and mappings */ |
|---|
| 7203 | | - rc = lpfc_sli4_nvme_sgl_update(phba); |
|---|
| 7204 | | - if (unlikely(rc)) { |
|---|
| 7205 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7770 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7206 | 7771 | "6082 Failed to update nvme-sgl size " |
|---|
| 7207 | 7772 | "and mapping: %d\n", rc); |
|---|
| 7208 | 7773 | goto out_destroy_queue; |
|---|
| 7209 | 7774 | } |
|---|
| 7210 | 7775 | |
|---|
| 7211 | | - cnt = phba->cfg_iocb_cnt * 1024; |
|---|
| 7776 | + /* register the allocated common sgl pool to the port */ |
|---|
| 7777 | + rc = lpfc_sli4_repost_io_sgl_list(phba); |
|---|
| 7778 | + if (unlikely(rc)) { |
|---|
| 7779 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7780 | + "6116 Error %d during nvme sgl post " |
|---|
| 7781 | + "operation\n", rc); |
|---|
| 7782 | + /* Some NVME buffers were moved to abort nvme list */ |
|---|
| 7783 | + /* A pci function reset will repost them */ |
|---|
| 7784 | + rc = -ENODEV; |
|---|
| 7785 | + goto out_destroy_queue; |
|---|
| 7786 | + } |
|---|
| 7787 | + /* Each lpfc_io_buf job structure has an iocbq element. |
|---|
| 7788 | + * This cnt provides for abort, els, ct and ls requests. |
|---|
| 7789 | + */ |
|---|
| 7790 | + cnt = phba->sli4_hba.max_cfg_param.max_xri; |
|---|
| 7212 | 7791 | } |
|---|
| 7213 | 7792 | |
|---|
| 7214 | 7793 | if (!phba->sli.iocbq_lookup) { |
|---|
| 7215 | 7794 | /* Initialize and populate the iocb list per host */ |
|---|
| 7216 | 7795 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
|---|
| 7217 | | - "2821 initialize iocb list %d total %d\n", |
|---|
| 7218 | | - phba->cfg_iocb_cnt, cnt); |
|---|
| 7796 | + "2821 initialize iocb list with %d entries\n", |
|---|
| 7797 | + cnt); |
|---|
| 7219 | 7798 | rc = lpfc_init_iocb_list(phba, cnt); |
|---|
| 7220 | 7799 | if (rc) { |
|---|
| 7221 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 7800 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7222 | 7801 | "1413 Failed to init iocb list.\n"); |
|---|
| 7223 | 7802 | goto out_destroy_queue; |
|---|
| 7224 | 7803 | } |
|---|
| .. | .. |
|---|
| 7244 | 7823 | } |
|---|
| 7245 | 7824 | } |
|---|
| 7246 | 7825 | |
|---|
| 7247 | | - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { |
|---|
| 7248 | | - /* register the allocated scsi sgl pool to the port */ |
|---|
| 7249 | | - rc = lpfc_sli4_repost_scsi_sgl_list(phba); |
|---|
| 7250 | | - if (unlikely(rc)) { |
|---|
| 7251 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7252 | | - "0383 Error %d during scsi sgl post " |
|---|
| 7253 | | - "operation\n", rc); |
|---|
| 7254 | | - /* Some Scsi buffers were moved to abort scsi list */ |
|---|
| 7255 | | - /* A pci function reset will repost them */ |
|---|
| 7256 | | - rc = -ENODEV; |
|---|
| 7257 | | - goto out_destroy_queue; |
|---|
| 7258 | | - } |
|---|
| 7259 | | - } |
|---|
| 7260 | | - |
|---|
| 7261 | | - if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && |
|---|
| 7262 | | - (phba->nvmet_support == 0)) { |
|---|
| 7263 | | - |
|---|
| 7264 | | - /* register the allocated nvme sgl pool to the port */ |
|---|
| 7265 | | - rc = lpfc_repost_nvme_sgl_list(phba); |
|---|
| 7266 | | - if (unlikely(rc)) { |
|---|
| 7267 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7268 | | - "6116 Error %d during nvme sgl post " |
|---|
| 7269 | | - "operation\n", rc); |
|---|
| 7270 | | - /* Some NVME buffers were moved to abort nvme list */ |
|---|
| 7271 | | - /* A pci function reset will repost them */ |
|---|
| 7272 | | - rc = -ENODEV; |
|---|
| 7273 | | - goto out_destroy_queue; |
|---|
| 7274 | | - } |
|---|
| 7275 | | - } |
|---|
| 7276 | | - |
|---|
| 7277 | 7826 | /* Post the rpi header region to the device. */ |
|---|
| 7278 | 7827 | rc = lpfc_sli4_post_all_rpi_hdrs(phba); |
|---|
| 7279 | 7828 | if (unlikely(rc)) { |
|---|
| 7280 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7829 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7281 | 7830 | "0393 Error %d during rpi post operation\n", |
|---|
| 7282 | 7831 | rc); |
|---|
| 7283 | 7832 | rc = -ENODEV; |
|---|
| .. | .. |
|---|
| 7323 | 7872 | lpfc_sli_read_link_ste(phba); |
|---|
| 7324 | 7873 | } |
|---|
| 7325 | 7874 | |
|---|
| 7326 | | - /* Arm the CQs and then EQs on device */ |
|---|
| 7327 | | - lpfc_sli4_arm_cqeq_intr(phba); |
|---|
| 7875 | + /* Don't post more new bufs if repost already recovered |
|---|
| 7876 | + * the nvme sgls. |
|---|
| 7877 | + */ |
|---|
| 7878 | + if (phba->nvmet_support == 0) { |
|---|
| 7879 | + if (phba->sli4_hba.io_xri_cnt == 0) { |
|---|
| 7880 | + len = lpfc_new_io_buf( |
|---|
| 7881 | + phba, phba->sli4_hba.io_xri_max); |
|---|
| 7882 | + if (len == 0) { |
|---|
| 7883 | + rc = -ENOMEM; |
|---|
| 7884 | + goto out_unset_queue; |
|---|
| 7885 | + } |
|---|
| 7328 | 7886 | |
|---|
| 7329 | | - /* Indicate device interrupt mode */ |
|---|
| 7330 | | - phba->sli4_hba.intr_enable = 1; |
|---|
| 7887 | + if (phba->cfg_xri_rebalancing) |
|---|
| 7888 | + lpfc_create_multixri_pools(phba); |
|---|
| 7889 | + } |
|---|
| 7890 | + } else { |
|---|
| 7891 | + phba->cfg_xri_rebalancing = 0; |
|---|
| 7892 | + } |
|---|
| 7331 | 7893 | |
|---|
| 7332 | 7894 | /* Allow asynchronous mailbox command to go through */ |
|---|
| 7333 | 7895 | spin_lock_irq(&phba->hbalock); |
|---|
| .. | .. |
|---|
| 7350 | 7912 | jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); |
|---|
| 7351 | 7913 | phba->hb_outstanding = 0; |
|---|
| 7352 | 7914 | phba->last_completion_time = jiffies; |
|---|
| 7915 | + |
|---|
| 7916 | + /* start eq_delay heartbeat */ |
|---|
| 7917 | + if (phba->cfg_auto_imax) |
|---|
| 7918 | + queue_delayed_work(phba->wq, &phba->eq_delay_work, |
|---|
| 7919 | + msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); |
|---|
| 7920 | + |
|---|
| 7921 | + /* start per phba idle_stat_delay heartbeat */ |
|---|
| 7922 | + lpfc_init_idle_stat_hb(phba); |
|---|
| 7353 | 7923 | |
|---|
| 7354 | 7924 | /* Start error attention (ERATT) polling timer */ |
|---|
| 7355 | 7925 | mod_timer(&phba->eratt_poll, |
|---|
| .. | .. |
|---|
| 7380 | 7950 | */ |
|---|
| 7381 | 7951 | spin_lock_irq(&phba->hbalock); |
|---|
| 7382 | 7952 | phba->link_state = LPFC_LINK_DOWN; |
|---|
| 7953 | + |
|---|
| 7954 | + /* Check if physical ports are trunked */ |
|---|
| 7955 | + if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) |
|---|
| 7956 | + phba->trunk_link.link0.state = LPFC_LINK_DOWN; |
|---|
| 7957 | + if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) |
|---|
| 7958 | + phba->trunk_link.link1.state = LPFC_LINK_DOWN; |
|---|
| 7959 | + if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) |
|---|
| 7960 | + phba->trunk_link.link2.state = LPFC_LINK_DOWN; |
|---|
| 7961 | + if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) |
|---|
| 7962 | + phba->trunk_link.link3.state = LPFC_LINK_DOWN; |
|---|
| 7383 | 7963 | spin_unlock_irq(&phba->hbalock); |
|---|
| 7964 | + |
|---|
| 7965 | + /* Arm the CQs and then EQs on device */ |
|---|
| 7966 | + lpfc_sli4_arm_cqeq_intr(phba); |
|---|
| 7967 | + |
|---|
| 7968 | + /* Indicate device interrupt mode */ |
|---|
| 7969 | + phba->sli4_hba.intr_enable = 1; |
|---|
| 7970 | + |
|---|
| 7384 | 7971 | if (!(phba->hba_flag & HBA_FCOE_MODE) && |
|---|
| 7385 | 7972 | (phba->hba_flag & LINK_DISABLED)) { |
|---|
| 7386 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, |
|---|
| 7973 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7387 | 7974 | "3103 Adapter Link is disabled.\n"); |
|---|
| 7388 | 7975 | lpfc_down_link(phba, mboxq); |
|---|
| 7389 | 7976 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
|---|
| 7390 | 7977 | if (rc != MBX_SUCCESS) { |
|---|
| 7391 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, |
|---|
| 7978 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7392 | 7979 | "3104 Adapter failed to issue " |
|---|
| 7393 | 7980 | "DOWN_LINK mbox cmd, rc:x%x\n", rc); |
|---|
| 7394 | | - goto out_unset_queue; |
|---|
| 7981 | + goto out_io_buff_free; |
|---|
| 7395 | 7982 | } |
|---|
| 7396 | 7983 | } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { |
|---|
| 7397 | 7984 | /* don't perform init_link on SLI4 FC port loopback test */ |
|---|
| 7398 | 7985 | if (!(phba->link_flag & LS_LOOPBACK_MODE)) { |
|---|
| 7399 | 7986 | rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); |
|---|
| 7400 | 7987 | if (rc) |
|---|
| 7401 | | - goto out_unset_queue; |
|---|
| 7988 | + goto out_io_buff_free; |
|---|
| 7402 | 7989 | } |
|---|
| 7403 | 7990 | } |
|---|
| 7404 | 7991 | mempool_free(mboxq, phba->mbox_mem_pool); |
|---|
| 7405 | 7992 | return rc; |
|---|
| 7993 | +out_io_buff_free: |
|---|
| 7994 | + /* Free allocated IO Buffers */ |
|---|
| 7995 | + lpfc_io_free(phba); |
|---|
| 7406 | 7996 | out_unset_queue: |
|---|
| 7407 | 7997 | /* Unset all the queues set up in this routine when error out */ |
|---|
| 7408 | 7998 | lpfc_sli4_queue_unset(phba); |
|---|
| .. | .. |
|---|
| 7419 | 8009 | |
|---|
| 7420 | 8010 | /** |
|---|
| 7421 | 8011 | * lpfc_mbox_timeout - Timeout call back function for mbox timer |
|---|
| 7422 | | - * @ptr: context object - pointer to hba structure. |
|---|
| 8012 | + * @t: Context to fetch pointer to hba structure from. |
|---|
| 7423 | 8013 | * |
|---|
| 7424 | 8014 | * This is the callback function for mailbox timer. The mailbox |
|---|
| 7425 | 8015 | * timer is armed when a new mailbox command is issued and the timer |
|---|
| .. | .. |
|---|
| 7473 | 8063 | mcq = phba->sli4_hba.mbx_cq; |
|---|
| 7474 | 8064 | idx = mcq->hba_index; |
|---|
| 7475 | 8065 | qe_valid = mcq->qe_valid; |
|---|
| 7476 | | - while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) { |
|---|
| 7477 | | - mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; |
|---|
| 8066 | + while (bf_get_le32(lpfc_cqe_valid, |
|---|
| 8067 | + (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) { |
|---|
| 8068 | + mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx)); |
|---|
| 7478 | 8069 | if (bf_get_le32(lpfc_trailer_completed, mcqe) && |
|---|
| 7479 | 8070 | (!bf_get_le32(lpfc_trailer_async, mcqe))) { |
|---|
| 7480 | 8071 | pending_completions = true; |
|---|
| .. | .. |
|---|
| 7503 | 8094 | * and will process all the completions associated with the eq for the |
|---|
| 7504 | 8095 | * mailbox completion queue. |
|---|
| 7505 | 8096 | **/ |
|---|
| 7506 | | -bool |
|---|
| 8097 | +static bool |
|---|
| 7507 | 8098 | lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) |
|---|
| 7508 | 8099 | { |
|---|
| 7509 | 8100 | struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; |
|---|
| 7510 | 8101 | uint32_t eqidx; |
|---|
| 7511 | 8102 | struct lpfc_queue *fpeq = NULL; |
|---|
| 7512 | | - struct lpfc_eqe *eqe; |
|---|
| 8103 | + struct lpfc_queue *eq; |
|---|
| 7513 | 8104 | bool mbox_pending; |
|---|
| 7514 | 8105 | |
|---|
| 7515 | 8106 | if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) |
|---|
| 7516 | 8107 | return false; |
|---|
| 7517 | 8108 | |
|---|
| 7518 | | - /* Find the eq associated with the mcq */ |
|---|
| 7519 | | - |
|---|
| 7520 | | - if (sli4_hba->hba_eq) |
|---|
| 7521 | | - for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) |
|---|
| 7522 | | - if (sli4_hba->hba_eq[eqidx]->queue_id == |
|---|
| 7523 | | - sli4_hba->mbx_cq->assoc_qid) { |
|---|
| 7524 | | - fpeq = sli4_hba->hba_eq[eqidx]; |
|---|
| 8109 | + /* Find the EQ associated with the mbox CQ */ |
|---|
| 8110 | + if (sli4_hba->hdwq) { |
|---|
| 8111 | + for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) { |
|---|
| 8112 | + eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq; |
|---|
| 8113 | + if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { |
|---|
| 8114 | + fpeq = eq; |
|---|
| 7525 | 8115 | break; |
|---|
| 7526 | 8116 | } |
|---|
| 8117 | + } |
|---|
| 8118 | + } |
|---|
| 7527 | 8119 | if (!fpeq) |
|---|
| 7528 | 8120 | return false; |
|---|
| 7529 | 8121 | |
|---|
| .. | .. |
|---|
| 7543 | 8135 | */ |
|---|
| 7544 | 8136 | |
|---|
| 7545 | 8137 | if (mbox_pending) |
|---|
| 7546 | | - while ((eqe = lpfc_sli4_eq_get(fpeq))) { |
|---|
| 7547 | | - lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); |
|---|
| 7548 | | - fpeq->EQ_processed++; |
|---|
| 7549 | | - } |
|---|
| 7550 | | - |
|---|
| 7551 | | - /* Always clear and re-arm the EQ */ |
|---|
| 7552 | | - |
|---|
| 7553 | | - sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM); |
|---|
| 8138 | + /* process and rearm the EQ */ |
|---|
| 8139 | + lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); |
|---|
| 8140 | + else |
|---|
| 8141 | + /* Always clear and re-arm the EQ */ |
|---|
| 8142 | + sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); |
|---|
| 7554 | 8143 | |
|---|
| 7555 | 8144 | return mbox_pending; |
|---|
| 7556 | 8145 | |
|---|
| .. | .. |
|---|
| 7594 | 8183 | } |
|---|
| 7595 | 8184 | |
|---|
| 7596 | 8185 | /* Mbox cmd <mbxCommand> timeout */ |
|---|
| 7597 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 7598 | | - "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", |
|---|
| 8186 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 8187 | + "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n", |
|---|
| 7599 | 8188 | mb->mbxCommand, |
|---|
| 7600 | 8189 | phba->pport->port_state, |
|---|
| 7601 | 8190 | phba->sli.sli_flag, |
|---|
| .. | .. |
|---|
| 7616 | 8205 | |
|---|
| 7617 | 8206 | lpfc_sli_abort_fcp_rings(phba); |
|---|
| 7618 | 8207 | |
|---|
| 7619 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 8208 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7620 | 8209 | "0345 Resetting board due to mailbox timeout\n"); |
|---|
| 7621 | 8210 | |
|---|
| 7622 | 8211 | /* Reset the HBA device */ |
|---|
| .. | .. |
|---|
| 7714 | 8303 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
|---|
| 7715 | 8304 | |
|---|
| 7716 | 8305 | /* Mbox command <mbxCommand> cannot issue */ |
|---|
| 7717 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 8306 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7718 | 8307 | "(%d):0311 Mailbox command x%x cannot " |
|---|
| 7719 | 8308 | "issue Data: x%x x%x\n", |
|---|
| 7720 | 8309 | pmbox->vport ? pmbox->vport->vpi : 0, |
|---|
| .. | .. |
|---|
| 7726 | 8315 | if (lpfc_readl(phba->HCregaddr, &hc_copy) || |
|---|
| 7727 | 8316 | !(hc_copy & HC_MBINT_ENA)) { |
|---|
| 7728 | 8317 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
|---|
| 7729 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 8318 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7730 | 8319 | "(%d):2528 Mailbox command x%x cannot " |
|---|
| 7731 | 8320 | "issue Data: x%x x%x\n", |
|---|
| 7732 | 8321 | pmbox->vport ? pmbox->vport->vpi : 0, |
|---|
| .. | .. |
|---|
| 7745 | 8334 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
|---|
| 7746 | 8335 | |
|---|
| 7747 | 8336 | /* Mbox command <mbxCommand> cannot issue */ |
|---|
| 7748 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 8337 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7749 | 8338 | "(%d):2529 Mailbox command x%x " |
|---|
| 7750 | 8339 | "cannot issue Data: x%x x%x\n", |
|---|
| 7751 | 8340 | pmbox->vport ? pmbox->vport->vpi : 0, |
|---|
| .. | .. |
|---|
| 7757 | 8346 | if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { |
|---|
| 7758 | 8347 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
|---|
| 7759 | 8348 | /* Mbox command <mbxCommand> cannot issue */ |
|---|
| 7760 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 8349 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7761 | 8350 | "(%d):2530 Mailbox command x%x " |
|---|
| 7762 | 8351 | "cannot issue Data: x%x x%x\n", |
|---|
| 7763 | 8352 | pmbox->vport ? pmbox->vport->vpi : 0, |
|---|
| .. | .. |
|---|
| 7810 | 8399 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
|---|
| 7811 | 8400 | spin_unlock_irqrestore(&phba->hbalock, drvr_flag); |
|---|
| 7812 | 8401 | /* Mbox command <mbxCommand> cannot issue */ |
|---|
| 7813 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 8402 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 7814 | 8403 | "(%d):2531 Mailbox command x%x " |
|---|
| 7815 | 8404 | "cannot issue Data: x%x x%x\n", |
|---|
| 7816 | 8405 | pmbox->vport ? pmbox->vport->vpi : 0, |
|---|
| .. | .. |
|---|
| 7865 | 8454 | } |
|---|
| 7866 | 8455 | |
|---|
| 7867 | 8456 | /* Copy the mailbox extension data */ |
|---|
| 7868 | | - if (pmbox->in_ext_byte_len && pmbox->context2) { |
|---|
| 7869 | | - lpfc_sli_pcimem_bcopy(pmbox->context2, |
|---|
| 7870 | | - (uint8_t *)phba->mbox_ext, |
|---|
| 7871 | | - pmbox->in_ext_byte_len); |
|---|
| 8457 | + if (pmbox->in_ext_byte_len && pmbox->ctx_buf) { |
|---|
| 8458 | + lpfc_sli_pcimem_bcopy(pmbox->ctx_buf, |
|---|
| 8459 | + (uint8_t *)phba->mbox_ext, |
|---|
| 8460 | + pmbox->in_ext_byte_len); |
|---|
| 7872 | 8461 | } |
|---|
| 7873 | 8462 | /* Copy command data to host SLIM area */ |
|---|
| 7874 | 8463 | lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); |
|---|
| .. | .. |
|---|
| 7879 | 8468 | = MAILBOX_HBA_EXT_OFFSET; |
|---|
| 7880 | 8469 | |
|---|
| 7881 | 8470 | /* Copy the mailbox extension data */ |
|---|
| 7882 | | - if (pmbox->in_ext_byte_len && pmbox->context2) |
|---|
| 8471 | + if (pmbox->in_ext_byte_len && pmbox->ctx_buf) |
|---|
| 7883 | 8472 | lpfc_memcpy_to_slim(phba->MBslimaddr + |
|---|
| 7884 | 8473 | MAILBOX_HBA_EXT_OFFSET, |
|---|
| 7885 | | - pmbox->context2, pmbox->in_ext_byte_len); |
|---|
| 8474 | + pmbox->ctx_buf, pmbox->in_ext_byte_len); |
|---|
| 7886 | 8475 | |
|---|
| 7887 | 8476 | if (mbx->mbxCommand == MBX_CONFIG_PORT) |
|---|
| 7888 | 8477 | /* copy command data into host mbox for cmpl */ |
|---|
| .. | .. |
|---|
| 8005 | 8594 | lpfc_sli_pcimem_bcopy(phba->mbox, mbx, |
|---|
| 8006 | 8595 | MAILBOX_CMD_SIZE); |
|---|
| 8007 | 8596 | /* Copy the mailbox extension data */ |
|---|
| 8008 | | - if (pmbox->out_ext_byte_len && pmbox->context2) { |
|---|
| 8597 | + if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { |
|---|
| 8009 | 8598 | lpfc_sli_pcimem_bcopy(phba->mbox_ext, |
|---|
| 8010 | | - pmbox->context2, |
|---|
| 8599 | + pmbox->ctx_buf, |
|---|
| 8011 | 8600 | pmbox->out_ext_byte_len); |
|---|
| 8012 | 8601 | } |
|---|
| 8013 | 8602 | } else { |
|---|
| .. | .. |
|---|
| 8015 | 8604 | lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, |
|---|
| 8016 | 8605 | MAILBOX_CMD_SIZE); |
|---|
| 8017 | 8606 | /* Copy the mailbox extension data */ |
|---|
| 8018 | | - if (pmbox->out_ext_byte_len && pmbox->context2) { |
|---|
| 8019 | | - lpfc_memcpy_from_slim(pmbox->context2, |
|---|
| 8607 | + if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { |
|---|
| 8608 | + lpfc_memcpy_from_slim( |
|---|
| 8609 | + pmbox->ctx_buf, |
|---|
| 8020 | 8610 | phba->MBslimaddr + |
|---|
| 8021 | 8611 | MAILBOX_HBA_EXT_OFFSET, |
|---|
| 8022 | 8612 | pmbox->out_ext_byte_len); |
|---|
| .. | .. |
|---|
| 8127 | 8717 | psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; |
|---|
| 8128 | 8718 | spin_unlock_irq(&phba->hbalock); |
|---|
| 8129 | 8719 | |
|---|
| 8130 | | - /* wake up worker thread to post asynchronlous mailbox command */ |
|---|
| 8720 | + /* wake up worker thread to post asynchronous mailbox command */ |
|---|
| 8131 | 8721 | lpfc_worker_wake_up(phba); |
|---|
| 8132 | 8722 | } |
|---|
| 8133 | 8723 | |
|---|
| .. | .. |
|---|
| 8156 | 8746 | bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); |
|---|
| 8157 | 8747 | db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); |
|---|
| 8158 | 8748 | if (!db_ready) |
|---|
| 8159 | | - msleep(2); |
|---|
| 8749 | + mdelay(2); |
|---|
| 8160 | 8750 | |
|---|
| 8161 | 8751 | if (time_after(jiffies, timeout)) |
|---|
| 8162 | 8752 | return MBXERR_ERROR; |
|---|
| .. | .. |
|---|
| 8200 | 8790 | spin_lock_irqsave(&phba->hbalock, iflag); |
|---|
| 8201 | 8791 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { |
|---|
| 8202 | 8792 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
|---|
| 8203 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 8793 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 8204 | 8794 | "(%d):2532 Mailbox command x%x (x%x/x%x) " |
|---|
| 8205 | 8795 | "cannot issue Data: x%x x%x\n", |
|---|
| 8206 | 8796 | mboxq->vport ? mboxq->vport->vpi : 0, |
|---|
| .. | .. |
|---|
| 8219 | 8809 | rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); |
|---|
| 8220 | 8810 | if (rc) |
|---|
| 8221 | 8811 | goto exit; |
|---|
| 8222 | | - |
|---|
| 8223 | 8812 | /* |
|---|
| 8224 | 8813 | * Initialize the bootstrap memory region to avoid stale data areas |
|---|
| 8225 | 8814 | * in the mailbox post. Then copy the caller's mailbox contents to |
|---|
| .. | .. |
|---|
| 8300 | 8889 | /** |
|---|
| 8301 | 8890 | * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware |
|---|
| 8302 | 8891 | * @phba: Pointer to HBA context object. |
|---|
| 8303 | | - * @pmbox: Pointer to mailbox object. |
|---|
| 8892 | + * @mboxq: Pointer to mailbox object. |
|---|
| 8304 | 8893 | * @flag: Flag indicating how the mailbox need to be processed. |
|---|
| 8305 | 8894 | * |
|---|
| 8306 | 8895 | * This function is called by discovery code and HBA management code to submit |
|---|
| .. | .. |
|---|
| 8322 | 8911 | |
|---|
| 8323 | 8912 | rc = lpfc_mbox_dev_check(phba); |
|---|
| 8324 | 8913 | if (unlikely(rc)) { |
|---|
| 8325 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 8914 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 8326 | 8915 | "(%d):2544 Mailbox command x%x (x%x/x%x) " |
|---|
| 8327 | 8916 | "cannot issue Data: x%x x%x\n", |
|---|
| 8328 | 8917 | mboxq->vport ? mboxq->vport->vpi : 0, |
|---|
| .. | .. |
|---|
| 8344 | 8933 | "(%d):2541 Mailbox command x%x " |
|---|
| 8345 | 8934 | "(x%x/x%x) failure: " |
|---|
| 8346 | 8935 | "mqe_sta: x%x mcqe_sta: x%x/x%x " |
|---|
| 8347 | | - "Data: x%x x%x\n,", |
|---|
| 8936 | + "Data: x%x x%x\n", |
|---|
| 8348 | 8937 | mboxq->vport ? mboxq->vport->vpi : 0, |
|---|
| 8349 | 8938 | mboxq->u.mb.mbxCommand, |
|---|
| 8350 | 8939 | lpfc_sli_config_mbox_subsys_get(phba, |
|---|
| .. | .. |
|---|
| 8378 | 8967 | "(%d):2597 Sync Mailbox command " |
|---|
| 8379 | 8968 | "x%x (x%x/x%x) failure: " |
|---|
| 8380 | 8969 | "mqe_sta: x%x mcqe_sta: x%x/x%x " |
|---|
| 8381 | | - "Data: x%x x%x\n,", |
|---|
| 8970 | + "Data: x%x x%x\n", |
|---|
| 8382 | 8971 | mboxq->vport ? mboxq->vport->vpi : 0, |
|---|
| 8383 | 8972 | mboxq->u.mb.mbxCommand, |
|---|
| 8384 | 8973 | lpfc_sli_config_mbox_subsys_get(phba, |
|---|
| .. | .. |
|---|
| 8396 | 8985 | return rc; |
|---|
| 8397 | 8986 | } |
|---|
| 8398 | 8987 | |
|---|
| 8399 | | - /* Now, interrupt mode asynchrous mailbox command */ |
|---|
| 8988 | + /* Now, interrupt mode asynchronous mailbox command */ |
|---|
| 8400 | 8989 | rc = lpfc_mbox_cmd_check(phba, mboxq); |
|---|
| 8401 | 8990 | if (rc) { |
|---|
| 8402 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 8991 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 8403 | 8992 | "(%d):2543 Mailbox command x%x (x%x/x%x) " |
|---|
| 8404 | 8993 | "cannot issue Data: x%x x%x\n", |
|---|
| 8405 | 8994 | mboxq->vport ? mboxq->vport->vpi : 0, |
|---|
| .. | .. |
|---|
| 8467 | 9056 | } |
|---|
| 8468 | 9057 | if (unlikely(phba->sli.mbox_active)) { |
|---|
| 8469 | 9058 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 8470 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 9059 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 8471 | 9060 | "0384 There is pending active mailbox cmd\n"); |
|---|
| 8472 | 9061 | return MBX_NOT_FINISHED; |
|---|
| 8473 | 9062 | } |
|---|
| .. | .. |
|---|
| 8528 | 9117 | /* Post the mailbox command to the port */ |
|---|
| 8529 | 9118 | rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); |
|---|
| 8530 | 9119 | if (rc != MBX_SUCCESS) { |
|---|
| 8531 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
|---|
| 9120 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 8532 | 9121 | "(%d):2533 Mailbox command x%x (x%x/x%x) " |
|---|
| 8533 | 9122 | "cannot issue Data: x%x x%x\n", |
|---|
| 8534 | 9123 | mboxq->vport ? mboxq->vport->vpi : 0, |
|---|
| .. | .. |
|---|
| 8604 | 9193 | phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; |
|---|
| 8605 | 9194 | break; |
|---|
| 8606 | 9195 | default: |
|---|
| 8607 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 9196 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 8608 | 9197 | "1420 Invalid HBA PCI-device group: 0x%x\n", |
|---|
| 8609 | 9198 | dev_grp); |
|---|
| 8610 | 9199 | return -ENODEV; |
|---|
| .. | .. |
|---|
| 8619 | 9208 | * @pring: Pointer to driver SLI ring object. |
|---|
| 8620 | 9209 | * @piocb: Pointer to address of newly added command iocb. |
|---|
| 8621 | 9210 | * |
|---|
| 8622 | | - * This function is called with hbalock held to add a command |
|---|
| 9211 | + * This function is called with hbalock held for SLI3 ports or |
|---|
| 9212 | + * the ring lock held for SLI4 ports to add a command |
|---|
| 8623 | 9213 | * iocb to the txq when SLI layer cannot submit the command iocb |
|---|
| 8624 | 9214 | * to the ring. |
|---|
| 8625 | 9215 | **/ |
|---|
| .. | .. |
|---|
| 8627 | 9217 | __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
|---|
| 8628 | 9218 | struct lpfc_iocbq *piocb) |
|---|
| 8629 | 9219 | { |
|---|
| 8630 | | - lockdep_assert_held(&phba->hbalock); |
|---|
| 9220 | + if (phba->sli_rev == LPFC_SLI_REV4) |
|---|
| 9221 | + lockdep_assert_held(&pring->ring_lock); |
|---|
| 9222 | + else |
|---|
| 9223 | + lockdep_assert_held(&phba->hbalock); |
|---|
| 8631 | 9224 | /* Insert the caller's iocb in the txq tail for later processing. */ |
|---|
| 8632 | 9225 | list_add_tail(&piocb->list, &pring->txq); |
|---|
| 8633 | 9226 | } |
|---|
| .. | .. |
|---|
| 8701 | 9294 | if (piocb->iocb_cmpl && (!piocb->vport) && |
|---|
| 8702 | 9295 | (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && |
|---|
| 8703 | 9296 | (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { |
|---|
| 8704 | | - lpfc_printf_log(phba, KERN_ERR, |
|---|
| 8705 | | - LOG_SLI | LOG_VPORT, |
|---|
| 9297 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 8706 | 9298 | "1807 IOCB x%x failed. No vport\n", |
|---|
| 8707 | 9299 | piocb->iocb.ulpCommand); |
|---|
| 8708 | 9300 | dump_stack(); |
|---|
| .. | .. |
|---|
| 8755 | 9347 | */ |
|---|
| 8756 | 9348 | if (piocb->iocb_cmpl) |
|---|
| 8757 | 9349 | piocb->iocb_cmpl = NULL; |
|---|
| 8758 | | - /*FALLTHROUGH*/ |
|---|
| 9350 | + fallthrough; |
|---|
| 8759 | 9351 | case CMD_CREATE_XRI_CR: |
|---|
| 8760 | 9352 | case CMD_CLOSE_XRI_CN: |
|---|
| 8761 | 9353 | case CMD_CLOSE_XRI_CX: |
|---|
| .. | .. |
|---|
| 8803 | 9395 | /** |
|---|
| 8804 | 9396 | * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. |
|---|
| 8805 | 9397 | * @phba: Pointer to HBA context object. |
|---|
| 8806 | | - * @piocb: Pointer to command iocb. |
|---|
| 9398 | + * @piocbq: Pointer to command iocb. |
|---|
| 8807 | 9399 | * @sglq: Pointer to the scatter gather queue object. |
|---|
| 8808 | 9400 | * |
|---|
| 8809 | 9401 | * This routine converts the bpl or bde that is in the IOCB |
|---|
| .. | .. |
|---|
| 8911 | 9503 | /** |
|---|
| 8912 | 9504 | * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. |
|---|
| 8913 | 9505 | * @phba: Pointer to HBA context object. |
|---|
| 8914 | | - * @piocb: Pointer to command iocb. |
|---|
| 9506 | + * @iocbq: Pointer to command iocb. |
|---|
| 8915 | 9507 | * @wqe: Pointer to the work queue entry. |
|---|
| 8916 | 9508 | * |
|---|
| 8917 | 9509 | * This routine converts the iocb command to its Work Queue Entry |
|---|
| .. | .. |
|---|
| 8956 | 9548 | memset(wqe, 0, sizeof(union lpfc_wqe128)); |
|---|
| 8957 | 9549 | /* Some of the fields are in the right position already */ |
|---|
| 8958 | 9550 | memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); |
|---|
| 8959 | | - if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { |
|---|
| 8960 | | - /* The ct field has moved so reset */ |
|---|
| 8961 | | - wqe->generic.wqe_com.word7 = 0; |
|---|
| 8962 | | - wqe->generic.wqe_com.word10 = 0; |
|---|
| 8963 | | - } |
|---|
| 9551 | + /* The ct field has moved so reset */ |
|---|
| 9552 | + wqe->generic.wqe_com.word7 = 0; |
|---|
| 9553 | + wqe->generic.wqe_com.word10 = 0; |
|---|
| 8964 | 9554 | |
|---|
| 8965 | 9555 | abort_tag = (uint32_t) iocbq->iotag; |
|---|
| 8966 | 9556 | xritag = iocbq->sli4_xritag; |
|---|
| .. | .. |
|---|
| 8999 | 9589 | else |
|---|
| 9000 | 9590 | ndlp = (struct lpfc_nodelist *)iocbq->context1; |
|---|
| 9001 | 9591 | if (!iocbq->iocb.ulpLe) { |
|---|
| 9002 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 9592 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 9003 | 9593 | "2007 Only Limited Edition cmd Format" |
|---|
| 9004 | 9594 | " supported 0x%x\n", |
|---|
| 9005 | 9595 | iocbq->iocb.ulpCommand); |
|---|
| .. | .. |
|---|
| 9030 | 9620 | if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { |
|---|
| 9031 | 9621 | if (pcmd && (*pcmd == ELS_CMD_FLOGI || |
|---|
| 9032 | 9622 | *pcmd == ELS_CMD_SCR || |
|---|
| 9623 | + *pcmd == ELS_CMD_RDF || |
|---|
| 9624 | + *pcmd == ELS_CMD_RSCN_XMT || |
|---|
| 9033 | 9625 | *pcmd == ELS_CMD_FDISC || |
|---|
| 9034 | 9626 | *pcmd == ELS_CMD_LOGO || |
|---|
| 9035 | 9627 | *pcmd == ELS_CMD_PLOGI)) { |
|---|
| .. | .. |
|---|
| 9069 | 9661 | cmnd = CMD_XMIT_SEQUENCE64_CR; |
|---|
| 9070 | 9662 | if (phba->link_flag & LS_LOOPBACK_MODE) |
|---|
| 9071 | 9663 | bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); |
|---|
| 9664 | + fallthrough; |
|---|
| 9072 | 9665 | case CMD_XMIT_SEQUENCE64_CR: |
|---|
| 9073 | 9666 | /* word3 iocb=io_tag32 wqe=reserved */ |
|---|
| 9074 | 9667 | wqe->xmit_sequence.rsvd3 = 0; |
|---|
| .. | .. |
|---|
| 9137 | 9730 | bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); |
|---|
| 9138 | 9731 | |
|---|
| 9139 | 9732 | if (phba->fcp_embed_io) { |
|---|
| 9140 | | - struct lpfc_scsi_buf *lpfc_cmd; |
|---|
| 9733 | + struct lpfc_io_buf *lpfc_cmd; |
|---|
| 9141 | 9734 | struct sli4_sge *sgl; |
|---|
| 9142 | 9735 | struct fcp_cmnd *fcp_cmnd; |
|---|
| 9143 | 9736 | uint32_t *ptr; |
|---|
| .. | .. |
|---|
| 9145 | 9738 | /* 128 byte wqe support here */ |
|---|
| 9146 | 9739 | |
|---|
| 9147 | 9740 | lpfc_cmd = iocbq->context1; |
|---|
| 9148 | | - sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; |
|---|
| 9741 | + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; |
|---|
| 9149 | 9742 | fcp_cmnd = lpfc_cmd->fcp_cmnd; |
|---|
| 9150 | 9743 | |
|---|
| 9151 | 9744 | /* Word 0-2 - FCP_CMND */ |
|---|
| .. | .. |
|---|
| 9201 | 9794 | bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); |
|---|
| 9202 | 9795 | |
|---|
| 9203 | 9796 | if (phba->fcp_embed_io) { |
|---|
| 9204 | | - struct lpfc_scsi_buf *lpfc_cmd; |
|---|
| 9797 | + struct lpfc_io_buf *lpfc_cmd; |
|---|
| 9205 | 9798 | struct sli4_sge *sgl; |
|---|
| 9206 | 9799 | struct fcp_cmnd *fcp_cmnd; |
|---|
| 9207 | 9800 | uint32_t *ptr; |
|---|
| .. | .. |
|---|
| 9209 | 9802 | /* 128 byte wqe support here */ |
|---|
| 9210 | 9803 | |
|---|
| 9211 | 9804 | lpfc_cmd = iocbq->context1; |
|---|
| 9212 | | - sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; |
|---|
| 9805 | + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; |
|---|
| 9213 | 9806 | fcp_cmnd = lpfc_cmd->fcp_cmnd; |
|---|
| 9214 | 9807 | |
|---|
| 9215 | 9808 | /* Word 0-2 - FCP_CMND */ |
|---|
| .. | .. |
|---|
| 9258 | 9851 | /* Note, word 10 is already initialized to 0 */ |
|---|
| 9259 | 9852 | |
|---|
| 9260 | 9853 | if (phba->fcp_embed_io) { |
|---|
| 9261 | | - struct lpfc_scsi_buf *lpfc_cmd; |
|---|
| 9854 | + struct lpfc_io_buf *lpfc_cmd; |
|---|
| 9262 | 9855 | struct sli4_sge *sgl; |
|---|
| 9263 | 9856 | struct fcp_cmnd *fcp_cmnd; |
|---|
| 9264 | 9857 | uint32_t *ptr; |
|---|
| .. | .. |
|---|
| 9266 | 9859 | /* 128 byte wqe support here */ |
|---|
| 9267 | 9860 | |
|---|
| 9268 | 9861 | lpfc_cmd = iocbq->context1; |
|---|
| 9269 | | - sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; |
|---|
| 9862 | + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; |
|---|
| 9270 | 9863 | fcp_cmnd = lpfc_cmd->fcp_cmnd; |
|---|
| 9271 | 9864 | |
|---|
| 9272 | 9865 | /* Word 0-2 - FCP_CMND */ |
|---|
| .. | .. |
|---|
| 9304 | 9897 | /* word6 context tag copied in memcpy */ |
|---|
| 9305 | 9898 | if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { |
|---|
| 9306 | 9899 | ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); |
|---|
| 9307 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 9900 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 9308 | 9901 | "2015 Invalid CT %x command 0x%x\n", |
|---|
| 9309 | 9902 | ct, iocbq->iocb.ulpCommand); |
|---|
| 9310 | 9903 | return IOCB_ERROR; |
|---|
| .. | .. |
|---|
| 9417 | 10010 | * we re-construct this WQE here based on information in |
|---|
| 9418 | 10011 | * iocbq from scratch. |
|---|
| 9419 | 10012 | */ |
|---|
| 9420 | | - memset(wqe, 0, sizeof(union lpfc_wqe)); |
|---|
| 10013 | + memset(wqe, 0, sizeof(*wqe)); |
|---|
| 9421 | 10014 | /* OX_ID is invariable to who sent ABTS to CT exchange */ |
|---|
| 9422 | 10015 | bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, |
|---|
| 9423 | 10016 | bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); |
|---|
| .. | .. |
|---|
| 9464 | 10057 | |
|---|
| 9465 | 10058 | break; |
|---|
| 9466 | 10059 | case CMD_SEND_FRAME: |
|---|
| 10060 | + bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME); |
|---|
| 10061 | + bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */ |
|---|
| 10062 | + bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */ |
|---|
| 10063 | + bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1); |
|---|
| 10064 | + bf_set(wqe_xbl, &wqe->generic.wqe_com, 1); |
|---|
| 10065 | + bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); |
|---|
| 10066 | + bf_set(wqe_xc, &wqe->generic.wqe_com, 1); |
|---|
| 10067 | + bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA); |
|---|
| 10068 | + bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); |
|---|
| 9467 | 10069 | bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); |
|---|
| 9468 | 10070 | bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); |
|---|
| 9469 | 10071 | return 0; |
|---|
| .. | .. |
|---|
| 9474 | 10076 | case CMD_FCP_TRSP64_CX: /* Target mode rcv */ |
|---|
| 9475 | 10077 | case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ |
|---|
| 9476 | 10078 | default: |
|---|
| 9477 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 10079 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 9478 | 10080 | "2014 Invalid command 0x%x\n", |
|---|
| 9479 | 10081 | iocbq->iocb.ulpCommand); |
|---|
| 9480 | 10082 | return IOCB_ERROR; |
|---|
| .. | .. |
|---|
| 9509 | 10111 | * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue |
|---|
| 9510 | 10112 | * an iocb command to an HBA with SLI-4 interface spec. |
|---|
| 9511 | 10113 | * |
|---|
| 9512 | | - * This function is called with hbalock held. The function will return success |
|---|
| 10114 | + * This function is called with ringlock held. The function will return success |
|---|
| 9513 | 10115 | * after it successfully submit the iocb to firmware or after adding to the |
|---|
| 9514 | 10116 | * txq. |
|---|
| 9515 | 10117 | **/ |
|---|
| .. | .. |
|---|
| 9525 | 10127 | /* Get the WQ */ |
|---|
| 9526 | 10128 | if ((piocb->iocb_flag & LPFC_IO_FCP) || |
|---|
| 9527 | 10129 | (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { |
|---|
| 9528 | | - if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) |
|---|
| 9529 | | - wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx]; |
|---|
| 9530 | | - else |
|---|
| 9531 | | - wq = phba->sli4_hba.oas_wq; |
|---|
| 10130 | + wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq; |
|---|
| 9532 | 10131 | } else { |
|---|
| 9533 | 10132 | wq = phba->sli4_hba.els_wq; |
|---|
| 9534 | 10133 | } |
|---|
| .. | .. |
|---|
| 9540 | 10139 | * The WQE can be either 64 or 128 bytes, |
|---|
| 9541 | 10140 | */ |
|---|
| 9542 | 10141 | |
|---|
| 9543 | | - lockdep_assert_held(&phba->hbalock); |
|---|
| 10142 | + lockdep_assert_held(&pring->ring_lock); |
|---|
| 9544 | 10143 | |
|---|
| 9545 | 10144 | if (piocb->sli4_xritag == NO_XRI) { |
|---|
| 9546 | 10145 | if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || |
|---|
| .. | .. |
|---|
| 9598 | 10197 | return 0; |
|---|
| 9599 | 10198 | } |
|---|
| 9600 | 10199 | |
|---|
| 9601 | | -/** |
|---|
| 10200 | +/* |
|---|
| 9602 | 10201 | * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb |
|---|
| 9603 | 10202 | * |
|---|
| 9604 | 10203 | * This routine wraps the actual lockless version for issusing IOCB function |
|---|
| .. | .. |
|---|
| 9639 | 10238 | phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; |
|---|
| 9640 | 10239 | break; |
|---|
| 9641 | 10240 | default: |
|---|
| 9642 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 10241 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 9643 | 10242 | "1419 Invalid HBA PCI-device group: 0x%x\n", |
|---|
| 9644 | 10243 | dev_grp); |
|---|
| 9645 | 10244 | return -ENODEV; |
|---|
| .. | .. |
|---|
| 9662 | 10261 | struct lpfc_sli_ring * |
|---|
| 9663 | 10262 | lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) |
|---|
| 9664 | 10263 | { |
|---|
| 10264 | + struct lpfc_io_buf *lpfc_cmd; |
|---|
| 10265 | + |
|---|
| 9665 | 10266 | if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { |
|---|
| 9666 | | - if (!(phba->cfg_fof) || |
|---|
| 9667 | | - (!(piocb->iocb_flag & LPFC_IO_FOF))) { |
|---|
| 9668 | | - if (unlikely(!phba->sli4_hba.fcp_wq)) |
|---|
| 9669 | | - return NULL; |
|---|
| 9670 | | - /* |
|---|
| 9671 | | - * for abort iocb hba_wqidx should already |
|---|
| 9672 | | - * be setup based on what work queue we used. |
|---|
| 9673 | | - */ |
|---|
| 9674 | | - if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { |
|---|
| 9675 | | - piocb->hba_wqidx = |
|---|
| 9676 | | - lpfc_sli4_scmd_to_wqidx_distr(phba, |
|---|
| 9677 | | - piocb->context1); |
|---|
| 9678 | | - piocb->hba_wqidx = piocb->hba_wqidx % |
|---|
| 9679 | | - phba->cfg_fcp_io_channel; |
|---|
| 9680 | | - } |
|---|
| 9681 | | - return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring; |
|---|
| 9682 | | - } else { |
|---|
| 9683 | | - if (unlikely(!phba->sli4_hba.oas_wq)) |
|---|
| 9684 | | - return NULL; |
|---|
| 9685 | | - piocb->hba_wqidx = 0; |
|---|
| 9686 | | - return phba->sli4_hba.oas_wq->pring; |
|---|
| 10267 | + if (unlikely(!phba->sli4_hba.hdwq)) |
|---|
| 10268 | + return NULL; |
|---|
| 10269 | + /* |
|---|
| 10270 | + * for abort iocb hba_wqidx should already |
|---|
| 10271 | + * be setup based on what work queue we used. |
|---|
| 10272 | + */ |
|---|
| 10273 | + if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { |
|---|
| 10274 | + lpfc_cmd = (struct lpfc_io_buf *)piocb->context1; |
|---|
| 10275 | + piocb->hba_wqidx = lpfc_cmd->hdwq_no; |
|---|
| 9687 | 10276 | } |
|---|
| 10277 | + return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring; |
|---|
| 9688 | 10278 | } else { |
|---|
| 9689 | 10279 | if (unlikely(!phba->sli4_hba.els_wq)) |
|---|
| 9690 | 10280 | return NULL; |
|---|
| .. | .. |
|---|
| 9696 | 10286 | /** |
|---|
| 9697 | 10287 | * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb |
|---|
| 9698 | 10288 | * @phba: Pointer to HBA context object. |
|---|
| 9699 | | - * @pring: Pointer to driver SLI ring object. |
|---|
| 10289 | + * @ring_number: Ring number |
|---|
| 9700 | 10290 | * @piocb: Pointer to command iocb. |
|---|
| 9701 | 10291 | * @flag: Flag indicating if this command can be put into txq. |
|---|
| 9702 | 10292 | * |
|---|
| .. | .. |
|---|
| 9710 | 10300 | lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, |
|---|
| 9711 | 10301 | struct lpfc_iocbq *piocb, uint32_t flag) |
|---|
| 9712 | 10302 | { |
|---|
| 9713 | | - struct lpfc_hba_eq_hdl *hba_eq_hdl; |
|---|
| 9714 | 10303 | struct lpfc_sli_ring *pring; |
|---|
| 9715 | | - struct lpfc_queue *fpeq; |
|---|
| 9716 | | - struct lpfc_eqe *eqe; |
|---|
| 10304 | + struct lpfc_queue *eq; |
|---|
| 9717 | 10305 | unsigned long iflags; |
|---|
| 9718 | | - int rc, idx; |
|---|
| 10306 | + int rc; |
|---|
| 9719 | 10307 | |
|---|
| 9720 | 10308 | if (phba->sli_rev == LPFC_SLI_REV4) { |
|---|
| 10309 | + eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq; |
|---|
| 10310 | + |
|---|
| 9721 | 10311 | pring = lpfc_sli4_calc_ring(phba, piocb); |
|---|
| 9722 | 10312 | if (unlikely(pring == NULL)) |
|---|
| 9723 | 10313 | return IOCB_ERROR; |
|---|
| .. | .. |
|---|
| 9726 | 10316 | rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); |
|---|
| 9727 | 10317 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| 9728 | 10318 | |
|---|
| 9729 | | - if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) { |
|---|
| 9730 | | - idx = piocb->hba_wqidx; |
|---|
| 9731 | | - hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx]; |
|---|
| 9732 | | - |
|---|
| 9733 | | - if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) { |
|---|
| 9734 | | - |
|---|
| 9735 | | - /* Get associated EQ with this index */ |
|---|
| 9736 | | - fpeq = phba->sli4_hba.hba_eq[idx]; |
|---|
| 9737 | | - |
|---|
| 9738 | | - /* Turn off interrupts from this EQ */ |
|---|
| 9739 | | - phba->sli4_hba.sli4_eq_clr_intr(fpeq); |
|---|
| 9740 | | - |
|---|
| 9741 | | - /* |
|---|
| 9742 | | - * Process all the events on FCP EQ |
|---|
| 9743 | | - */ |
|---|
| 9744 | | - while ((eqe = lpfc_sli4_eq_get(fpeq))) { |
|---|
| 9745 | | - lpfc_sli4_hba_handle_eqe(phba, |
|---|
| 9746 | | - eqe, idx); |
|---|
| 9747 | | - fpeq->EQ_processed++; |
|---|
| 9748 | | - } |
|---|
| 9749 | | - |
|---|
| 9750 | | - /* Always clear and re-arm the EQ */ |
|---|
| 9751 | | - phba->sli4_hba.sli4_eq_release(fpeq, |
|---|
| 9752 | | - LPFC_QUEUE_REARM); |
|---|
| 9753 | | - } |
|---|
| 9754 | | - atomic_inc(&hba_eq_hdl->hba_eq_in_use); |
|---|
| 9755 | | - } |
|---|
| 10319 | + lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH); |
|---|
| 9756 | 10320 | } else { |
|---|
| 9757 | 10321 | /* For now, SLI2/3 will still use hbalock */ |
|---|
| 9758 | 10322 | spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| .. | .. |
|---|
| 9806 | 10370 | pring->prt[0].type = phba->cfg_multi_ring_type; |
|---|
| 9807 | 10371 | pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; |
|---|
| 9808 | 10372 | return 0; |
|---|
| 10373 | +} |
|---|
| 10374 | + |
|---|
| 10375 | +static void |
|---|
| 10376 | +lpfc_sli_post_recovery_event(struct lpfc_hba *phba, |
|---|
| 10377 | + struct lpfc_nodelist *ndlp) |
|---|
| 10378 | +{ |
|---|
| 10379 | + unsigned long iflags; |
|---|
| 10380 | + struct lpfc_work_evt *evtp = &ndlp->recovery_evt; |
|---|
| 10381 | + |
|---|
| 10382 | + spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 10383 | + if (!list_empty(&evtp->evt_listp)) { |
|---|
| 10384 | + spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 10385 | + return; |
|---|
| 10386 | + } |
|---|
| 10387 | + |
|---|
| 10388 | + /* Incrementing the reference count until the queued work is done. */ |
|---|
| 10389 | + evtp->evt_arg1 = lpfc_nlp_get(ndlp); |
|---|
| 10390 | + if (!evtp->evt_arg1) { |
|---|
| 10391 | + spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 10392 | + return; |
|---|
| 10393 | + } |
|---|
| 10394 | + evtp->evt = LPFC_EVT_RECOVER_PORT; |
|---|
| 10395 | + list_add_tail(&evtp->evt_listp, &phba->work_list); |
|---|
| 10396 | + spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 10397 | + |
|---|
| 10398 | + lpfc_worker_wake_up(phba); |
|---|
| 9809 | 10399 | } |
|---|
| 9810 | 10400 | |
|---|
| 9811 | 10401 | /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. |
|---|
| .. | .. |
|---|
| 9871 | 10461 | struct lpfc_nodelist *ndlp, |
|---|
| 9872 | 10462 | struct sli4_wcqe_xri_aborted *axri) |
|---|
| 9873 | 10463 | { |
|---|
| 9874 | | - struct lpfc_vport *vport; |
|---|
| 9875 | 10464 | uint32_t ext_status = 0; |
|---|
| 9876 | 10465 | |
|---|
| 9877 | 10466 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { |
|---|
| .. | .. |
|---|
| 9881 | 10470 | return; |
|---|
| 9882 | 10471 | } |
|---|
| 9883 | 10472 | |
|---|
| 9884 | | - vport = ndlp->vport; |
|---|
| 9885 | 10473 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
|---|
| 9886 | 10474 | "3116 Port generated FCP XRI ABORT event on " |
|---|
| 9887 | 10475 | "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", |
|---|
| .. | .. |
|---|
| 9898 | 10486 | ext_status = axri->parameter & IOERR_PARAM_MASK; |
|---|
| 9899 | 10487 | if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && |
|---|
| 9900 | 10488 | ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) |
|---|
| 9901 | | - lpfc_sli_abts_recover_port(vport, ndlp); |
|---|
| 10489 | + lpfc_sli_post_recovery_event(phba, ndlp); |
|---|
| 9902 | 10490 | } |
|---|
| 9903 | 10491 | |
|---|
| 9904 | 10492 | /** |
|---|
| .. | .. |
|---|
| 9934 | 10522 | temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; |
|---|
| 9935 | 10523 | if (evt_code == ASYNC_TEMP_WARN) { |
|---|
| 9936 | 10524 | temp_event_data.event_code = LPFC_THRESHOLD_TEMP; |
|---|
| 9937 | | - lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, |
|---|
| 10525 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 9938 | 10526 | "0347 Adapter is very hot, please take " |
|---|
| 9939 | 10527 | "corrective action. temperature : %d Celsius\n", |
|---|
| 9940 | 10528 | (uint32_t) icmd->ulpContext); |
|---|
| 9941 | 10529 | } else { |
|---|
| 9942 | 10530 | temp_event_data.event_code = LPFC_NORMAL_TEMP; |
|---|
| 9943 | | - lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, |
|---|
| 10531 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 9944 | 10532 | "0340 Adapter temperature is OK now. " |
|---|
| 9945 | 10533 | "temperature : %d Celsius\n", |
|---|
| 9946 | 10534 | (uint32_t) icmd->ulpContext); |
|---|
| .. | .. |
|---|
| 9957 | 10545 | break; |
|---|
| 9958 | 10546 | default: |
|---|
| 9959 | 10547 | iocb_w = (uint32_t *) icmd; |
|---|
| 9960 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 10548 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 9961 | 10549 | "0346 Ring %d handler: unexpected ASYNC_STATUS" |
|---|
| 9962 | 10550 | " evt_code 0x%x\n" |
|---|
| 9963 | 10551 | "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" |
|---|
| .. | .. |
|---|
| 10167 | 10755 | INIT_LIST_HEAD(&psli->mboxq); |
|---|
| 10168 | 10756 | INIT_LIST_HEAD(&psli->mboxq_cmpl); |
|---|
| 10169 | 10757 | /* Initialize list headers for txq and txcmplq as double linked lists */ |
|---|
| 10170 | | - for (i = 0; i < phba->cfg_fcp_io_channel; i++) { |
|---|
| 10171 | | - pring = phba->sli4_hba.fcp_wq[i]->pring; |
|---|
| 10758 | + for (i = 0; i < phba->cfg_hdw_queue; i++) { |
|---|
| 10759 | + pring = phba->sli4_hba.hdwq[i].io_wq->pring; |
|---|
| 10172 | 10760 | pring->flag = 0; |
|---|
| 10173 | 10761 | pring->ringno = LPFC_FCP_RING; |
|---|
| 10174 | | - INIT_LIST_HEAD(&pring->txq); |
|---|
| 10175 | | - INIT_LIST_HEAD(&pring->txcmplq); |
|---|
| 10176 | | - INIT_LIST_HEAD(&pring->iocb_continueq); |
|---|
| 10177 | | - spin_lock_init(&pring->ring_lock); |
|---|
| 10178 | | - } |
|---|
| 10179 | | - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { |
|---|
| 10180 | | - pring = phba->sli4_hba.nvme_wq[i]->pring; |
|---|
| 10181 | | - pring->flag = 0; |
|---|
| 10182 | | - pring->ringno = LPFC_FCP_RING; |
|---|
| 10762 | + pring->txcmplq_cnt = 0; |
|---|
| 10183 | 10763 | INIT_LIST_HEAD(&pring->txq); |
|---|
| 10184 | 10764 | INIT_LIST_HEAD(&pring->txcmplq); |
|---|
| 10185 | 10765 | INIT_LIST_HEAD(&pring->iocb_continueq); |
|---|
| .. | .. |
|---|
| 10188 | 10768 | pring = phba->sli4_hba.els_wq->pring; |
|---|
| 10189 | 10769 | pring->flag = 0; |
|---|
| 10190 | 10770 | pring->ringno = LPFC_ELS_RING; |
|---|
| 10771 | + pring->txcmplq_cnt = 0; |
|---|
| 10191 | 10772 | INIT_LIST_HEAD(&pring->txq); |
|---|
| 10192 | 10773 | INIT_LIST_HEAD(&pring->txcmplq); |
|---|
| 10193 | 10774 | INIT_LIST_HEAD(&pring->iocb_continueq); |
|---|
| 10194 | 10775 | spin_lock_init(&pring->ring_lock); |
|---|
| 10195 | 10776 | |
|---|
| 10196 | | - if (phba->cfg_nvme_io_channel) { |
|---|
| 10777 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
|---|
| 10197 | 10778 | pring = phba->sli4_hba.nvmels_wq->pring; |
|---|
| 10198 | 10779 | pring->flag = 0; |
|---|
| 10199 | 10780 | pring->ringno = LPFC_ELS_RING; |
|---|
| 10200 | | - INIT_LIST_HEAD(&pring->txq); |
|---|
| 10201 | | - INIT_LIST_HEAD(&pring->txcmplq); |
|---|
| 10202 | | - INIT_LIST_HEAD(&pring->iocb_continueq); |
|---|
| 10203 | | - spin_lock_init(&pring->ring_lock); |
|---|
| 10204 | | - } |
|---|
| 10205 | | - |
|---|
| 10206 | | - if (phba->cfg_fof) { |
|---|
| 10207 | | - pring = phba->sli4_hba.oas_wq->pring; |
|---|
| 10208 | | - pring->flag = 0; |
|---|
| 10209 | | - pring->ringno = LPFC_FCP_RING; |
|---|
| 10781 | + pring->txcmplq_cnt = 0; |
|---|
| 10210 | 10782 | INIT_LIST_HEAD(&pring->txq); |
|---|
| 10211 | 10783 | INIT_LIST_HEAD(&pring->txcmplq); |
|---|
| 10212 | 10784 | INIT_LIST_HEAD(&pring->iocb_continueq); |
|---|
| .. | .. |
|---|
| 10279 | 10851 | LPFC_MBOXQ_t *pmb; |
|---|
| 10280 | 10852 | unsigned long iflag; |
|---|
| 10281 | 10853 | |
|---|
| 10854 | + /* Disable softirqs, including timers from obtaining phba->hbalock */ |
|---|
| 10855 | + local_bh_disable(); |
|---|
| 10856 | + |
|---|
| 10282 | 10857 | /* Flush all the mailbox commands in the mbox system */ |
|---|
| 10283 | 10858 | spin_lock_irqsave(&phba->hbalock, iflag); |
|---|
| 10859 | + |
|---|
| 10284 | 10860 | /* The pending mailbox command queue */ |
|---|
| 10285 | 10861 | list_splice_init(&phba->sli.mboxq, &completions); |
|---|
| 10286 | 10862 | /* The outstanding active mailbox command */ |
|---|
| .. | .. |
|---|
| 10292 | 10868 | /* The completed mailbox command queue */ |
|---|
| 10293 | 10869 | list_splice_init(&phba->sli.mboxq_cmpl, &completions); |
|---|
| 10294 | 10870 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
|---|
| 10871 | + |
|---|
| 10872 | + /* Enable softirqs again, done with phba->hbalock */ |
|---|
| 10873 | + local_bh_enable(); |
|---|
| 10295 | 10874 | |
|---|
| 10296 | 10875 | /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ |
|---|
| 10297 | 10876 | while (!list_empty(&completions)) { |
|---|
| .. | .. |
|---|
| 10376 | 10955 | set_bit(LPFC_DATA_READY, &phba->data_flags); |
|---|
| 10377 | 10956 | } |
|---|
| 10378 | 10957 | prev_pring_flag = pring->flag; |
|---|
| 10379 | | - spin_lock_irq(&pring->ring_lock); |
|---|
| 10958 | + spin_lock(&pring->ring_lock); |
|---|
| 10380 | 10959 | list_for_each_entry_safe(iocb, next_iocb, |
|---|
| 10381 | 10960 | &pring->txq, list) { |
|---|
| 10382 | 10961 | if (iocb->vport != vport) |
|---|
| 10383 | 10962 | continue; |
|---|
| 10384 | 10963 | list_move_tail(&iocb->list, &completions); |
|---|
| 10385 | 10964 | } |
|---|
| 10386 | | - spin_unlock_irq(&pring->ring_lock); |
|---|
| 10965 | + spin_unlock(&pring->ring_lock); |
|---|
| 10387 | 10966 | list_for_each_entry_safe(iocb, next_iocb, |
|---|
| 10388 | 10967 | &pring->txcmplq, list) { |
|---|
| 10389 | 10968 | if (iocb->vport != vport) |
|---|
| .. | .. |
|---|
| 10432 | 11011 | |
|---|
| 10433 | 11012 | lpfc_hba_down_prep(phba); |
|---|
| 10434 | 11013 | |
|---|
| 11014 | + /* Disable softirqs, including timers from obtaining phba->hbalock */ |
|---|
| 11015 | + local_bh_disable(); |
|---|
| 11016 | + |
|---|
| 10435 | 11017 | lpfc_fabric_abort_hba(phba); |
|---|
| 10436 | 11018 | |
|---|
| 10437 | 11019 | spin_lock_irqsave(&phba->hbalock, flags); |
|---|
| .. | .. |
|---|
| 10456 | 11038 | pring = qp->pring; |
|---|
| 10457 | 11039 | if (!pring) |
|---|
| 10458 | 11040 | continue; |
|---|
| 10459 | | - spin_lock_irq(&pring->ring_lock); |
|---|
| 11041 | + spin_lock(&pring->ring_lock); |
|---|
| 10460 | 11042 | list_splice_init(&pring->txq, &completions); |
|---|
| 10461 | | - spin_unlock_irq(&pring->ring_lock); |
|---|
| 11043 | + spin_unlock(&pring->ring_lock); |
|---|
| 10462 | 11044 | if (pring == phba->sli4_hba.els_wq->pring) { |
|---|
| 10463 | 11045 | pring->flag |= LPFC_DEFERRED_RING_EVENT; |
|---|
| 10464 | 11046 | /* Set the lpfc data pending flag */ |
|---|
| .. | .. |
|---|
| 10484 | 11066 | lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); |
|---|
| 10485 | 11067 | kfree(buf_ptr); |
|---|
| 10486 | 11068 | } |
|---|
| 11069 | + |
|---|
| 11070 | + /* Enable softirqs again, done with phba->hbalock */ |
|---|
| 11071 | + local_bh_enable(); |
|---|
| 10487 | 11072 | |
|---|
| 10488 | 11073 | /* Return any active mbox cmds */ |
|---|
| 10489 | 11074 | del_timer_sync(&psli->mbox_tmo); |
|---|
| .. | .. |
|---|
| 10634 | 11219 | } |
|---|
| 10635 | 11220 | |
|---|
| 10636 | 11221 | spin_unlock_irq(&phba->hbalock); |
|---|
| 10637 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 11222 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 10638 | 11223 | "0402 Cannot find virtual addr for buffer tag on " |
|---|
| 10639 | | - "ring %d Data x%lx x%p x%p x%x\n", |
|---|
| 11224 | + "ring %d Data x%lx x%px x%px x%x\n", |
|---|
| 10640 | 11225 | pring->ringno, (unsigned long) tag, |
|---|
| 10641 | 11226 | slp->next, slp->prev, pring->postbufq_cnt); |
|---|
| 10642 | 11227 | |
|---|
| .. | .. |
|---|
| 10678 | 11263 | } |
|---|
| 10679 | 11264 | |
|---|
| 10680 | 11265 | spin_unlock_irq(&phba->hbalock); |
|---|
| 10681 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 11266 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 10682 | 11267 | "0410 Cannot find virtual addr for mapped buf on " |
|---|
| 10683 | | - "ring %d Data x%llx x%p x%p x%x\n", |
|---|
| 11268 | + "ring %d Data x%llx x%px x%px x%x\n", |
|---|
| 10684 | 11269 | pring->ringno, (unsigned long long)phys, |
|---|
| 10685 | 11270 | slp->next, slp->prev, pring->postbufq_cnt); |
|---|
| 10686 | 11271 | return NULL; |
|---|
| .. | .. |
|---|
| 10735 | 11320 | abort_iocb = phba->sli.iocbq_lookup[abort_context]; |
|---|
| 10736 | 11321 | |
|---|
| 10737 | 11322 | lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, |
|---|
| 10738 | | - "0327 Cannot abort els iocb %p " |
|---|
| 11323 | + "0327 Cannot abort els iocb x%px " |
|---|
| 10739 | 11324 | "with tag %x context %x, abort status %x, " |
|---|
| 10740 | 11325 | "abort code %x\n", |
|---|
| 10741 | 11326 | abort_iocb, abort_iotag, abort_context, |
|---|
| .. | .. |
|---|
| 10789 | 11374 | * request, this function issues abort out unconditionally. This function is |
|---|
| 10790 | 11375 | * called with hbalock held. The function returns 0 when it fails due to |
|---|
| 10791 | 11376 | * memory allocation failure or when the command iocb is an abort request. |
|---|
| 11377 | + * The hbalock is asserted held in the code path calling this routine. |
|---|
| 10792 | 11378 | **/ |
|---|
| 10793 | 11379 | static int |
|---|
| 10794 | 11380 | lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
|---|
| .. | .. |
|---|
| 10801 | 11387 | int retval; |
|---|
| 10802 | 11388 | unsigned long iflags; |
|---|
| 10803 | 11389 | struct lpfc_nodelist *ndlp; |
|---|
| 10804 | | - |
|---|
| 10805 | | - lockdep_assert_held(&phba->hbalock); |
|---|
| 10806 | 11390 | |
|---|
| 10807 | 11391 | /* |
|---|
| 10808 | 11392 | * There are certain command types we don't want to abort. And we |
|---|
| .. | .. |
|---|
| 10956 | 11540 | } |
|---|
| 10957 | 11541 | |
|---|
| 10958 | 11542 | /** |
|---|
| 10959 | | - * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb |
|---|
| 10960 | | - * @phba: Pointer to HBA context object. |
|---|
| 10961 | | - * @pring: Pointer to driver SLI ring object. |
|---|
| 10962 | | - * @cmdiocb: Pointer to driver command iocb object. |
|---|
| 10963 | | - * |
|---|
| 10964 | | - * This function issues an abort iocb for the provided command iocb down to |
|---|
| 10965 | | - * the port. Other than the case the outstanding command iocb is an abort |
|---|
| 10966 | | - * request, this function issues abort out unconditionally. This function is |
|---|
| 10967 | | - * called with hbalock held. The function returns 0 when it fails due to |
|---|
| 10968 | | - * memory allocation failure or when the command iocb is an abort request. |
|---|
| 10969 | | - **/ |
|---|
| 10970 | | -static int |
|---|
| 10971 | | -lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
|---|
| 10972 | | - struct lpfc_iocbq *cmdiocb) |
|---|
| 10973 | | -{ |
|---|
| 10974 | | - struct lpfc_vport *vport = cmdiocb->vport; |
|---|
| 10975 | | - struct lpfc_iocbq *abtsiocbp; |
|---|
| 10976 | | - union lpfc_wqe128 *abts_wqe; |
|---|
| 10977 | | - int retval; |
|---|
| 10978 | | - |
|---|
| 10979 | | - /* |
|---|
| 10980 | | - * There are certain command types we don't want to abort. And we |
|---|
| 10981 | | - * don't want to abort commands that are already in the process of |
|---|
| 10982 | | - * being aborted. |
|---|
| 10983 | | - */ |
|---|
| 10984 | | - if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || |
|---|
| 10985 | | - cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || |
|---|
| 10986 | | - (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) |
|---|
| 10987 | | - return 0; |
|---|
| 10988 | | - |
|---|
| 10989 | | - /* issue ABTS for this io based on iotag */ |
|---|
| 10990 | | - abtsiocbp = __lpfc_sli_get_iocbq(phba); |
|---|
| 10991 | | - if (abtsiocbp == NULL) |
|---|
| 10992 | | - return 0; |
|---|
| 10993 | | - |
|---|
| 10994 | | - /* This signals the response to set the correct status |
|---|
| 10995 | | - * before calling the completion handler |
|---|
| 10996 | | - */ |
|---|
| 10997 | | - cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; |
|---|
| 10998 | | - |
|---|
| 10999 | | - /* Complete prepping the abort wqe and issue to the FW. */ |
|---|
| 11000 | | - abts_wqe = &abtsiocbp->wqe; |
|---|
| 11001 | | - |
|---|
| 11002 | | - /* Clear any stale WQE contents */ |
|---|
| 11003 | | - memset(abts_wqe, 0, sizeof(union lpfc_wqe)); |
|---|
| 11004 | | - bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); |
|---|
| 11005 | | - |
|---|
| 11006 | | - /* word 7 */ |
|---|
| 11007 | | - bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); |
|---|
| 11008 | | - bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, |
|---|
| 11009 | | - cmdiocb->iocb.ulpClass); |
|---|
| 11010 | | - |
|---|
| 11011 | | - /* word 8 - tell the FW to abort the IO associated with this |
|---|
| 11012 | | - * outstanding exchange ID. |
|---|
| 11013 | | - */ |
|---|
| 11014 | | - abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag; |
|---|
| 11015 | | - |
|---|
| 11016 | | - /* word 9 - this is the iotag for the abts_wqe completion. */ |
|---|
| 11017 | | - bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, |
|---|
| 11018 | | - abtsiocbp->iotag); |
|---|
| 11019 | | - |
|---|
| 11020 | | - /* word 10 */ |
|---|
| 11021 | | - bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); |
|---|
| 11022 | | - bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); |
|---|
| 11023 | | - |
|---|
| 11024 | | - /* word 11 */ |
|---|
| 11025 | | - bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); |
|---|
| 11026 | | - bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); |
|---|
| 11027 | | - bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); |
|---|
| 11028 | | - |
|---|
| 11029 | | - /* ABTS WQE must go to the same WQ as the WQE to be aborted */ |
|---|
| 11030 | | - abtsiocbp->iocb_flag |= LPFC_IO_NVME; |
|---|
| 11031 | | - abtsiocbp->vport = vport; |
|---|
| 11032 | | - abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; |
|---|
| 11033 | | - retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp); |
|---|
| 11034 | | - if (retval) { |
|---|
| 11035 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, |
|---|
| 11036 | | - "6147 Failed abts issue_wqe with status x%x " |
|---|
| 11037 | | - "for oxid x%x\n", |
|---|
| 11038 | | - retval, cmdiocb->sli4_xritag); |
|---|
| 11039 | | - lpfc_sli_release_iocbq(phba, abtsiocbp); |
|---|
| 11040 | | - return retval; |
|---|
| 11041 | | - } |
|---|
| 11042 | | - |
|---|
| 11043 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, |
|---|
| 11044 | | - "6148 Drv Abort NVME Request Issued for " |
|---|
| 11045 | | - "ox_id x%x on reqtag x%x\n", |
|---|
| 11046 | | - cmdiocb->sli4_xritag, |
|---|
| 11047 | | - abtsiocbp->iotag); |
|---|
| 11048 | | - |
|---|
| 11049 | | - return retval; |
|---|
| 11050 | | -} |
|---|
| 11051 | | - |
|---|
| 11052 | | -/** |
|---|
| 11053 | 11543 | * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. |
|---|
| 11054 | 11544 | * @phba: pointer to lpfc HBA data structure. |
|---|
| 11055 | 11545 | * |
|---|
| .. | .. |
|---|
| 11105 | 11595 | uint16_t tgt_id, uint64_t lun_id, |
|---|
| 11106 | 11596 | lpfc_ctx_cmd ctx_cmd) |
|---|
| 11107 | 11597 | { |
|---|
| 11108 | | - struct lpfc_scsi_buf *lpfc_cmd; |
|---|
| 11598 | + struct lpfc_io_buf *lpfc_cmd; |
|---|
| 11599 | + IOCB_t *icmd = NULL; |
|---|
| 11109 | 11600 | int rc = 1; |
|---|
| 11110 | 11601 | |
|---|
| 11111 | 11602 | if (iocbq->vport != vport) |
|---|
| 11112 | 11603 | return rc; |
|---|
| 11113 | 11604 | |
|---|
| 11114 | | - if (!(iocbq->iocb_flag & LPFC_IO_FCP) || |
|---|
| 11115 | | - !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) |
|---|
| 11605 | + if (!(iocbq->iocb_flag & LPFC_IO_FCP) || |
|---|
| 11606 | + !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) || |
|---|
| 11607 | + iocbq->iocb_flag & LPFC_DRIVER_ABORTED) |
|---|
| 11116 | 11608 | return rc; |
|---|
| 11117 | 11609 | |
|---|
| 11118 | | - lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); |
|---|
| 11610 | + icmd = &iocbq->iocb; |
|---|
| 11611 | + if (icmd->ulpCommand == CMD_ABORT_XRI_CN || |
|---|
| 11612 | + icmd->ulpCommand == CMD_CLOSE_XRI_CN) |
|---|
| 11613 | + return rc; |
|---|
| 11614 | + |
|---|
| 11615 | + lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); |
|---|
| 11119 | 11616 | |
|---|
| 11120 | 11617 | if (lpfc_cmd->pCmd == NULL) |
|---|
| 11121 | 11618 | return rc; |
|---|
| .. | .. |
|---|
| 11244 | 11741 | int i; |
|---|
| 11245 | 11742 | |
|---|
| 11246 | 11743 | /* all I/Os are in process of being flushed */ |
|---|
| 11247 | | - if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) |
|---|
| 11744 | + if (phba->hba_flag & HBA_IOQ_FLUSH) |
|---|
| 11248 | 11745 | return errcnt; |
|---|
| 11249 | 11746 | |
|---|
| 11250 | 11747 | for (i = 1; i <= phba->sli.last_iotag; i++) { |
|---|
| .. | .. |
|---|
| 11321 | 11818 | * @pring: Pointer to driver SLI ring object. |
|---|
| 11322 | 11819 | * @tgt_id: SCSI ID of the target. |
|---|
| 11323 | 11820 | * @lun_id: LUN ID of the scsi device. |
|---|
| 11324 | | - * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. |
|---|
| 11821 | + * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. |
|---|
| 11325 | 11822 | * |
|---|
| 11326 | 11823 | * This function sends an abort command for every SCSI command |
|---|
| 11327 | 11824 | * associated with the given virtual port pending on the ring |
|---|
| .. | .. |
|---|
| 11342 | 11839 | uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) |
|---|
| 11343 | 11840 | { |
|---|
| 11344 | 11841 | struct lpfc_hba *phba = vport->phba; |
|---|
| 11345 | | - struct lpfc_scsi_buf *lpfc_cmd; |
|---|
| 11842 | + struct lpfc_io_buf *lpfc_cmd; |
|---|
| 11346 | 11843 | struct lpfc_iocbq *abtsiocbq; |
|---|
| 11347 | 11844 | struct lpfc_nodelist *ndlp; |
|---|
| 11348 | 11845 | struct lpfc_iocbq *iocbq; |
|---|
| 11349 | 11846 | IOCB_t *icmd; |
|---|
| 11350 | 11847 | int sum, i, ret_val; |
|---|
| 11351 | 11848 | unsigned long iflags; |
|---|
| 11352 | | - struct lpfc_sli_ring *pring_s4; |
|---|
| 11849 | + struct lpfc_sli_ring *pring_s4 = NULL; |
|---|
| 11353 | 11850 | |
|---|
| 11354 | 11851 | spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 11355 | 11852 | |
|---|
| 11356 | 11853 | /* all I/Os are in process of being flushed */ |
|---|
| 11357 | | - if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { |
|---|
| 11854 | + if (phba->hba_flag & HBA_IOQ_FLUSH) { |
|---|
| 11358 | 11855 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 11359 | 11856 | return 0; |
|---|
| 11360 | 11857 | } |
|---|
| .. | .. |
|---|
| 11367 | 11864 | cmd) != 0) |
|---|
| 11368 | 11865 | continue; |
|---|
| 11369 | 11866 | |
|---|
| 11867 | + /* Guard against IO completion being called at same time */ |
|---|
| 11868 | + lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); |
|---|
| 11869 | + spin_lock(&lpfc_cmd->buf_lock); |
|---|
| 11870 | + |
|---|
| 11871 | + if (!lpfc_cmd->pCmd) { |
|---|
| 11872 | + spin_unlock(&lpfc_cmd->buf_lock); |
|---|
| 11873 | + continue; |
|---|
| 11874 | + } |
|---|
| 11875 | + |
|---|
| 11876 | + if (phba->sli_rev == LPFC_SLI_REV4) { |
|---|
| 11877 | + pring_s4 = |
|---|
| 11878 | + phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring; |
|---|
| 11879 | + if (!pring_s4) { |
|---|
| 11880 | + spin_unlock(&lpfc_cmd->buf_lock); |
|---|
| 11881 | + continue; |
|---|
| 11882 | + } |
|---|
| 11883 | + /* Note: both hbalock and ring_lock must be set here */ |
|---|
| 11884 | + spin_lock(&pring_s4->ring_lock); |
|---|
| 11885 | + } |
|---|
| 11886 | + |
|---|
| 11370 | 11887 | /* |
|---|
| 11371 | 11888 | * If the iocbq is already being aborted, don't take a second |
|---|
| 11372 | 11889 | * action, but do count it. |
|---|
| 11373 | 11890 | */ |
|---|
| 11374 | | - if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) |
|---|
| 11891 | + if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) || |
|---|
| 11892 | + !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { |
|---|
| 11893 | + if (phba->sli_rev == LPFC_SLI_REV4) |
|---|
| 11894 | + spin_unlock(&pring_s4->ring_lock); |
|---|
| 11895 | + spin_unlock(&lpfc_cmd->buf_lock); |
|---|
| 11375 | 11896 | continue; |
|---|
| 11897 | + } |
|---|
| 11376 | 11898 | |
|---|
| 11377 | 11899 | /* issue ABTS for this IOCB based on iotag */ |
|---|
| 11378 | 11900 | abtsiocbq = __lpfc_sli_get_iocbq(phba); |
|---|
| 11379 | | - if (abtsiocbq == NULL) |
|---|
| 11901 | + if (!abtsiocbq) { |
|---|
| 11902 | + if (phba->sli_rev == LPFC_SLI_REV4) |
|---|
| 11903 | + spin_unlock(&pring_s4->ring_lock); |
|---|
| 11904 | + spin_unlock(&lpfc_cmd->buf_lock); |
|---|
| 11380 | 11905 | continue; |
|---|
| 11906 | + } |
|---|
| 11381 | 11907 | |
|---|
| 11382 | 11908 | icmd = &iocbq->iocb; |
|---|
| 11383 | 11909 | abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; |
|---|
| .. | .. |
|---|
| 11398 | 11924 | if (iocbq->iocb_flag & LPFC_IO_FOF) |
|---|
| 11399 | 11925 | abtsiocbq->iocb_flag |= LPFC_IO_FOF; |
|---|
| 11400 | 11926 | |
|---|
| 11401 | | - lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); |
|---|
| 11402 | 11927 | ndlp = lpfc_cmd->rdata->pnode; |
|---|
| 11403 | 11928 | |
|---|
| 11404 | 11929 | if (lpfc_is_link_up(phba) && |
|---|
| .. | .. |
|---|
| 11417 | 11942 | iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; |
|---|
| 11418 | 11943 | |
|---|
| 11419 | 11944 | if (phba->sli_rev == LPFC_SLI_REV4) { |
|---|
| 11420 | | - pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq); |
|---|
| 11421 | | - if (!pring_s4) |
|---|
| 11422 | | - continue; |
|---|
| 11423 | | - /* Note: both hbalock and ring_lock must be set here */ |
|---|
| 11424 | | - spin_lock(&pring_s4->ring_lock); |
|---|
| 11425 | 11945 | ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, |
|---|
| 11426 | 11946 | abtsiocbq, 0); |
|---|
| 11427 | 11947 | spin_unlock(&pring_s4->ring_lock); |
|---|
| .. | .. |
|---|
| 11430 | 11950 | abtsiocbq, 0); |
|---|
| 11431 | 11951 | } |
|---|
| 11432 | 11952 | |
|---|
| 11953 | + spin_unlock(&lpfc_cmd->buf_lock); |
|---|
| 11433 | 11954 | |
|---|
| 11434 | 11955 | if (ret_val == IOCB_ERROR) |
|---|
| 11435 | 11956 | __lpfc_sli_release_iocbq(phba, abtsiocbq); |
|---|
| .. | .. |
|---|
| 11464 | 11985 | { |
|---|
| 11465 | 11986 | wait_queue_head_t *pdone_q; |
|---|
| 11466 | 11987 | unsigned long iflags; |
|---|
| 11467 | | - struct lpfc_scsi_buf *lpfc_cmd; |
|---|
| 11988 | + struct lpfc_io_buf *lpfc_cmd; |
|---|
| 11468 | 11989 | |
|---|
| 11469 | 11990 | spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 11470 | 11991 | if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { |
|---|
| .. | .. |
|---|
| 11493 | 12014 | /* Set the exchange busy flag for task management commands */ |
|---|
| 11494 | 12015 | if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && |
|---|
| 11495 | 12016 | !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { |
|---|
| 11496 | | - lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, |
|---|
| 12017 | + lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, |
|---|
| 11497 | 12018 | cur_iocbq); |
|---|
| 11498 | | - lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; |
|---|
| 12019 | + if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY)) |
|---|
| 12020 | + lpfc_cmd->flags |= LPFC_SBUF_XBUSY; |
|---|
| 12021 | + else |
|---|
| 12022 | + lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; |
|---|
| 11499 | 12023 | } |
|---|
| 11500 | 12024 | |
|---|
| 11501 | 12025 | pdone_q = cmdiocbq->context_un.wait_queue; |
|---|
| .. | .. |
|---|
| 11534 | 12058 | /** |
|---|
| 11535 | 12059 | * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands |
|---|
| 11536 | 12060 | * @phba: Pointer to HBA context object.. |
|---|
| 11537 | | - * @pring: Pointer to sli ring. |
|---|
| 12061 | + * @ring_number: Ring number |
|---|
| 11538 | 12062 | * @piocb: Pointer to command iocb. |
|---|
| 11539 | 12063 | * @prspiocbq: Pointer to response iocb. |
|---|
| 11540 | 12064 | * @timeout: Timeout in number of seconds. |
|---|
| .. | .. |
|---|
| 11640 | 12164 | * completed. Not that it completed successfully. |
|---|
| 11641 | 12165 | * */ |
|---|
| 11642 | 12166 | } else if (timeleft == 0) { |
|---|
| 11643 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 12167 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 11644 | 12168 | "0338 IOCB wait timeout error - no " |
|---|
| 11645 | 12169 | "wake response Data x%x\n", timeout); |
|---|
| 11646 | 12170 | retval = IOCB_TIMEDOUT; |
|---|
| 11647 | 12171 | } else { |
|---|
| 11648 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 12172 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 11649 | 12173 | "0330 IOCB wake NOT set, " |
|---|
| 11650 | 12174 | "Data x%x x%lx\n", |
|---|
| 11651 | 12175 | timeout, (timeleft / jiffies)); |
|---|
| .. | .. |
|---|
| 11754 | 12278 | /** |
|---|
| 11755 | 12279 | * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system |
|---|
| 11756 | 12280 | * @phba: Pointer to HBA context. |
|---|
| 12281 | + * @mbx_action: Mailbox shutdown options. |
|---|
| 11757 | 12282 | * |
|---|
| 11758 | 12283 | * This function is called to shutdown the driver's mailbox sub-system. |
|---|
| 11759 | 12284 | * It first marks the mailbox sub-system is in a block state to prevent |
|---|
| .. | .. |
|---|
| 11780 | 12305 | } |
|---|
| 11781 | 12306 | timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; |
|---|
| 11782 | 12307 | |
|---|
| 12308 | + /* Disable softirqs, including timers from obtaining phba->hbalock */ |
|---|
| 12309 | + local_bh_disable(); |
|---|
| 12310 | + |
|---|
| 11783 | 12311 | spin_lock_irq(&phba->hbalock); |
|---|
| 11784 | 12312 | psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; |
|---|
| 11785 | 12313 | |
|---|
| .. | .. |
|---|
| 11793 | 12321 | 1000) + jiffies; |
|---|
| 11794 | 12322 | spin_unlock_irq(&phba->hbalock); |
|---|
| 11795 | 12323 | |
|---|
| 12324 | + /* Enable softirqs again, done with phba->hbalock */ |
|---|
| 12325 | + local_bh_enable(); |
|---|
| 12326 | + |
|---|
| 11796 | 12327 | while (phba->sli.mbox_active) { |
|---|
| 11797 | 12328 | /* Check active mailbox complete status every 2ms */ |
|---|
| 11798 | 12329 | msleep(2); |
|---|
| .. | .. |
|---|
| 11802 | 12333 | */ |
|---|
| 11803 | 12334 | break; |
|---|
| 11804 | 12335 | } |
|---|
| 11805 | | - } else |
|---|
| 12336 | + } else { |
|---|
| 11806 | 12337 | spin_unlock_irq(&phba->hbalock); |
|---|
| 12338 | + |
|---|
| 12339 | + /* Enable softirqs again, done with phba->hbalock */ |
|---|
| 12340 | + local_bh_enable(); |
|---|
| 12341 | + } |
|---|
| 11807 | 12342 | |
|---|
| 11808 | 12343 | lpfc_sli_mbox_sys_flush(phba); |
|---|
| 11809 | 12344 | } |
|---|
| .. | .. |
|---|
| 11878 | 12413 | uint32_t uerr_sta_hi, uerr_sta_lo; |
|---|
| 11879 | 12414 | uint32_t if_type, portsmphr; |
|---|
| 11880 | 12415 | struct lpfc_register portstat_reg; |
|---|
| 12416 | + u32 logmask; |
|---|
| 11881 | 12417 | |
|---|
| 11882 | 12418 | /* |
|---|
| 11883 | 12419 | * For now, use the SLI4 device internal unrecoverable error |
|---|
| .. | .. |
|---|
| 11897 | 12433 | } |
|---|
| 11898 | 12434 | if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || |
|---|
| 11899 | 12435 | (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { |
|---|
| 11900 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 12436 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 11901 | 12437 | "1423 HBA Unrecoverable error: " |
|---|
| 11902 | 12438 | "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " |
|---|
| 11903 | 12439 | "ue_mask_lo_reg=0x%x, " |
|---|
| .. | .. |
|---|
| 11928 | 12464 | readl(phba->sli4_hba.u.if_type2.ERR1regaddr); |
|---|
| 11929 | 12465 | phba->work_status[1] = |
|---|
| 11930 | 12466 | readl(phba->sli4_hba.u.if_type2.ERR2regaddr); |
|---|
| 11931 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 12467 | + logmask = LOG_TRACE_EVENT; |
|---|
| 12468 | + if (phba->work_status[0] == |
|---|
| 12469 | + SLIPORT_ERR1_REG_ERR_CODE_2 && |
|---|
| 12470 | + phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART) |
|---|
| 12471 | + logmask = LOG_SLI; |
|---|
| 12472 | + lpfc_printf_log(phba, KERN_ERR, logmask, |
|---|
| 11932 | 12473 | "2885 Port Status Event: " |
|---|
| 11933 | 12474 | "port status reg 0x%x, " |
|---|
| 11934 | 12475 | "port smphr reg 0x%x, " |
|---|
| .. | .. |
|---|
| 11944 | 12485 | break; |
|---|
| 11945 | 12486 | case LPFC_SLI_INTF_IF_TYPE_1: |
|---|
| 11946 | 12487 | default: |
|---|
| 11947 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 12488 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 11948 | 12489 | "2886 HBA Error Attention on unsupported " |
|---|
| 11949 | 12490 | "if type %d.", if_type); |
|---|
| 11950 | 12491 | return 1; |
|---|
| .. | .. |
|---|
| 12008 | 12549 | ha_copy = lpfc_sli4_eratt_read(phba); |
|---|
| 12009 | 12550 | break; |
|---|
| 12010 | 12551 | default: |
|---|
| 12011 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 12552 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 12012 | 12553 | "0299 Invalid SLI revision (%d)\n", |
|---|
| 12013 | 12554 | phba->sli_rev); |
|---|
| 12014 | 12555 | ha_copy = 0; |
|---|
| .. | .. |
|---|
| 12241 | 12782 | * Stray Mailbox Interrupt, mbxCommand <cmd> |
|---|
| 12242 | 12783 | * mbxStatus <status> |
|---|
| 12243 | 12784 | */ |
|---|
| 12244 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | |
|---|
| 12245 | | - LOG_SLI, |
|---|
| 12785 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 12246 | 12786 | "(%d):0304 Stray Mailbox " |
|---|
| 12247 | 12787 | "Interrupt mbxCommand x%x " |
|---|
| 12248 | 12788 | "mbxStatus x%x\n", |
|---|
| .. | .. |
|---|
| 12260 | 12800 | lpfc_sli_pcimem_bcopy(mbox, pmbox, |
|---|
| 12261 | 12801 | MAILBOX_CMD_SIZE); |
|---|
| 12262 | 12802 | if (pmb->out_ext_byte_len && |
|---|
| 12263 | | - pmb->context2) |
|---|
| 12803 | + pmb->ctx_buf) |
|---|
| 12264 | 12804 | lpfc_sli_pcimem_bcopy( |
|---|
| 12265 | 12805 | phba->mbox_ext, |
|---|
| 12266 | | - pmb->context2, |
|---|
| 12806 | + pmb->ctx_buf, |
|---|
| 12267 | 12807 | pmb->out_ext_byte_len); |
|---|
| 12268 | 12808 | } |
|---|
| 12269 | 12809 | if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { |
|---|
| .. | .. |
|---|
| 12278 | 12818 | |
|---|
| 12279 | 12819 | if (!pmbox->mbxStatus) { |
|---|
| 12280 | 12820 | mp = (struct lpfc_dmabuf *) |
|---|
| 12281 | | - (pmb->context1); |
|---|
| 12821 | + (pmb->ctx_buf); |
|---|
| 12282 | 12822 | ndlp = (struct lpfc_nodelist *) |
|---|
| 12283 | | - pmb->context2; |
|---|
| 12823 | + pmb->ctx_ndlp; |
|---|
| 12284 | 12824 | |
|---|
| 12285 | 12825 | /* Reg_LOGIN of dflt RPI was |
|---|
| 12286 | 12826 | * successful. new lets get |
|---|
| .. | .. |
|---|
| 12293 | 12833 | pmb); |
|---|
| 12294 | 12834 | pmb->mbox_cmpl = |
|---|
| 12295 | 12835 | lpfc_mbx_cmpl_dflt_rpi; |
|---|
| 12296 | | - pmb->context1 = mp; |
|---|
| 12297 | | - pmb->context2 = ndlp; |
|---|
| 12836 | + pmb->ctx_buf = mp; |
|---|
| 12837 | + pmb->ctx_ndlp = ndlp; |
|---|
| 12298 | 12838 | pmb->vport = vport; |
|---|
| 12299 | 12839 | rc = lpfc_sli_issue_mbox(phba, |
|---|
| 12300 | 12840 | pmb, |
|---|
| .. | .. |
|---|
| 12302 | 12842 | if (rc != MBX_BUSY) |
|---|
| 12303 | 12843 | lpfc_printf_log(phba, |
|---|
| 12304 | 12844 | KERN_ERR, |
|---|
| 12305 | | - LOG_MBOX | LOG_SLI, |
|---|
| 12845 | + LOG_TRACE_EVENT, |
|---|
| 12306 | 12846 | "0350 rc should have" |
|---|
| 12307 | 12847 | "been MBX_BUSY\n"); |
|---|
| 12308 | 12848 | if (rc != MBX_NOT_FINISHED) |
|---|
| .. | .. |
|---|
| 12331 | 12871 | MBX_NOWAIT); |
|---|
| 12332 | 12872 | } while (rc == MBX_NOT_FINISHED); |
|---|
| 12333 | 12873 | if (rc != MBX_SUCCESS) |
|---|
| 12334 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | |
|---|
| 12335 | | - LOG_SLI, "0349 rc should be " |
|---|
| 12874 | + lpfc_printf_log(phba, KERN_ERR, |
|---|
| 12875 | + LOG_TRACE_EVENT, |
|---|
| 12876 | + "0349 rc should be " |
|---|
| 12336 | 12877 | "MBX_SUCCESS\n"); |
|---|
| 12337 | 12878 | } |
|---|
| 12338 | 12879 | |
|---|
| .. | .. |
|---|
| 12557 | 13098 | } /* lpfc_sli_intr_handler */ |
|---|
| 12558 | 13099 | |
|---|
| 12559 | 13100 | /** |
|---|
| 12560 | | - * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event |
|---|
| 12561 | | - * @phba: pointer to lpfc hba data structure. |
|---|
| 12562 | | - * |
|---|
| 12563 | | - * This routine is invoked by the worker thread to process all the pending |
|---|
| 12564 | | - * SLI4 FCP abort XRI events. |
|---|
| 12565 | | - **/ |
|---|
| 12566 | | -void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) |
|---|
| 12567 | | -{ |
|---|
| 12568 | | - struct lpfc_cq_event *cq_event; |
|---|
| 12569 | | - |
|---|
| 12570 | | - /* First, declare the fcp xri abort event has been handled */ |
|---|
| 12571 | | - spin_lock_irq(&phba->hbalock); |
|---|
| 12572 | | - phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; |
|---|
| 12573 | | - spin_unlock_irq(&phba->hbalock); |
|---|
| 12574 | | - /* Now, handle all the fcp xri abort events */ |
|---|
| 12575 | | - while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { |
|---|
| 12576 | | - /* Get the first event from the head of the event queue */ |
|---|
| 12577 | | - spin_lock_irq(&phba->hbalock); |
|---|
| 12578 | | - list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, |
|---|
| 12579 | | - cq_event, struct lpfc_cq_event, list); |
|---|
| 12580 | | - spin_unlock_irq(&phba->hbalock); |
|---|
| 12581 | | - /* Notify aborted XRI for FCP work queue */ |
|---|
| 12582 | | - lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); |
|---|
| 12583 | | - /* Free the event processed back to the free pool */ |
|---|
| 12584 | | - lpfc_sli4_cq_event_release(phba, cq_event); |
|---|
| 12585 | | - } |
|---|
| 12586 | | -} |
|---|
| 12587 | | - |
|---|
| 12588 | | -/** |
|---|
| 12589 | 13101 | * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event |
|---|
| 12590 | 13102 | * @phba: pointer to lpfc hba data structure. |
|---|
| 12591 | 13103 | * |
|---|
| .. | .. |
|---|
| 12595 | 13107 | void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) |
|---|
| 12596 | 13108 | { |
|---|
| 12597 | 13109 | struct lpfc_cq_event *cq_event; |
|---|
| 13110 | + unsigned long iflags; |
|---|
| 12598 | 13111 | |
|---|
| 12599 | 13112 | /* First, declare the els xri abort event has been handled */ |
|---|
| 12600 | | - spin_lock_irq(&phba->hbalock); |
|---|
| 13113 | + spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 12601 | 13114 | phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; |
|---|
| 12602 | | - spin_unlock_irq(&phba->hbalock); |
|---|
| 13115 | + spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 13116 | + |
|---|
| 12603 | 13117 | /* Now, handle all the els xri abort events */ |
|---|
| 13118 | + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); |
|---|
| 12604 | 13119 | while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { |
|---|
| 12605 | 13120 | /* Get the first event from the head of the event queue */ |
|---|
| 12606 | | - spin_lock_irq(&phba->hbalock); |
|---|
| 12607 | 13121 | list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, |
|---|
| 12608 | 13122 | cq_event, struct lpfc_cq_event, list); |
|---|
| 12609 | | - spin_unlock_irq(&phba->hbalock); |
|---|
| 13123 | + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, |
|---|
| 13124 | + iflags); |
|---|
| 12610 | 13125 | /* Notify aborted XRI for ELS work queue */ |
|---|
| 12611 | 13126 | lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); |
|---|
| 13127 | + |
|---|
| 12612 | 13128 | /* Free the event processed back to the free pool */ |
|---|
| 12613 | 13129 | lpfc_sli4_cq_event_release(phba, cq_event); |
|---|
| 13130 | + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, |
|---|
| 13131 | + iflags); |
|---|
| 12614 | 13132 | } |
|---|
| 13133 | + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags); |
|---|
| 12615 | 13134 | } |
|---|
| 12616 | 13135 | |
|---|
| 12617 | 13136 | /** |
|---|
| .. | .. |
|---|
| 12733 | 13252 | /** |
|---|
| 12734 | 13253 | * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe |
|---|
| 12735 | 13254 | * @phba: Pointer to HBA context object. |
|---|
| 12736 | | - * @wcqe: Pointer to work-queue completion queue entry. |
|---|
| 13255 | + * @irspiocbq: Pointer to work-queue completion queue entry. |
|---|
| 12737 | 13256 | * |
|---|
| 12738 | 13257 | * This routine handles an ELS work-queue completion event and construct |
|---|
| 12739 | 13258 | * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common |
|---|
| .. | .. |
|---|
| 12755 | 13274 | return NULL; |
|---|
| 12756 | 13275 | |
|---|
| 12757 | 13276 | wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; |
|---|
| 12758 | | - spin_lock_irqsave(&pring->ring_lock, iflags); |
|---|
| 12759 | 13277 | pring->stats.iocb_event++; |
|---|
| 12760 | 13278 | /* Look up the ELS command IOCB and create pseudo response IOCB */ |
|---|
| 12761 | 13279 | cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, |
|---|
| 12762 | 13280 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); |
|---|
| 12763 | 13281 | if (unlikely(!cmdiocbq)) { |
|---|
| 12764 | | - spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| 12765 | 13282 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
|---|
| 12766 | 13283 | "0386 ELS complete with no corresponding " |
|---|
| 12767 | 13284 | "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", |
|---|
| .. | .. |
|---|
| 12771 | 13288 | return NULL; |
|---|
| 12772 | 13289 | } |
|---|
| 12773 | 13290 | |
|---|
| 13291 | + spin_lock_irqsave(&pring->ring_lock, iflags); |
|---|
| 12774 | 13292 | /* Put the iocb back on the txcmplq */ |
|---|
| 12775 | 13293 | lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); |
|---|
| 12776 | 13294 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| .. | .. |
|---|
| 12789 | 13307 | /* Allocate a new internal CQ_EVENT entry */ |
|---|
| 12790 | 13308 | cq_event = lpfc_sli4_cq_event_alloc(phba); |
|---|
| 12791 | 13309 | if (!cq_event) { |
|---|
| 12792 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13310 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 12793 | 13311 | "0602 Failed to alloc CQ_EVENT entry\n"); |
|---|
| 12794 | 13312 | return NULL; |
|---|
| 12795 | 13313 | } |
|---|
| .. | .. |
|---|
| 12800 | 13318 | } |
|---|
| 12801 | 13319 | |
|---|
| 12802 | 13320 | /** |
|---|
| 12803 | | - * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event |
|---|
| 13321 | + * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event |
|---|
| 12804 | 13322 | * @phba: Pointer to HBA context object. |
|---|
| 12805 | | - * @cqe: Pointer to mailbox completion queue entry. |
|---|
| 13323 | + * @mcqe: Pointer to mailbox completion queue entry. |
|---|
| 12806 | 13324 | * |
|---|
| 12807 | | - * This routine process a mailbox completion queue entry with asynchrous |
|---|
| 13325 | + * This routine process a mailbox completion queue entry with asynchronous |
|---|
| 12808 | 13326 | * event. |
|---|
| 12809 | 13327 | * |
|---|
| 12810 | 13328 | * Return: true if work posted to worker thread, otherwise false. |
|---|
| .. | .. |
|---|
| 12823 | 13341 | cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); |
|---|
| 12824 | 13342 | if (!cq_event) |
|---|
| 12825 | 13343 | return false; |
|---|
| 12826 | | - spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 13344 | + |
|---|
| 13345 | + spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags); |
|---|
| 12827 | 13346 | list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); |
|---|
| 13347 | + spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags); |
|---|
| 13348 | + |
|---|
| 12828 | 13349 | /* Set the async event flag */ |
|---|
| 13350 | + spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 12829 | 13351 | phba->hba_flag |= ASYNC_EVENT; |
|---|
| 12830 | 13352 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 12831 | 13353 | |
|---|
| .. | .. |
|---|
| 12835 | 13357 | /** |
|---|
| 12836 | 13358 | * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event |
|---|
| 12837 | 13359 | * @phba: Pointer to HBA context object. |
|---|
| 12838 | | - * @cqe: Pointer to mailbox completion queue entry. |
|---|
| 13360 | + * @mcqe: Pointer to mailbox completion queue entry. |
|---|
| 12839 | 13361 | * |
|---|
| 12840 | 13362 | * This routine process a mailbox completion queue entry with mailbox |
|---|
| 12841 | 13363 | * completion event. |
|---|
| .. | .. |
|---|
| 12864 | 13386 | spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 12865 | 13387 | pmb = phba->sli.mbox_active; |
|---|
| 12866 | 13388 | if (unlikely(!pmb)) { |
|---|
| 12867 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, |
|---|
| 13389 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 12868 | 13390 | "1832 No pending MBOX command to handle\n"); |
|---|
| 12869 | 13391 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 12870 | 13392 | goto out_no_mqe_complete; |
|---|
| .. | .. |
|---|
| 12900 | 13422 | mcqe_status, |
|---|
| 12901 | 13423 | pmbox->un.varWords[0], 0); |
|---|
| 12902 | 13424 | if (mcqe_status == MB_CQE_STATUS_SUCCESS) { |
|---|
| 12903 | | - mp = (struct lpfc_dmabuf *)(pmb->context1); |
|---|
| 12904 | | - ndlp = (struct lpfc_nodelist *)pmb->context2; |
|---|
| 13425 | + mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); |
|---|
| 13426 | + ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; |
|---|
| 12905 | 13427 | /* Reg_LOGIN of dflt RPI was successful. Now lets get |
|---|
| 12906 | 13428 | * RID of the PPI using the same mbox buffer. |
|---|
| 12907 | 13429 | */ |
|---|
| 12908 | 13430 | lpfc_unreg_login(phba, vport->vpi, |
|---|
| 12909 | 13431 | pmbox->un.varWords[0], pmb); |
|---|
| 12910 | 13432 | pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; |
|---|
| 12911 | | - pmb->context1 = mp; |
|---|
| 12912 | | - pmb->context2 = ndlp; |
|---|
| 13433 | + pmb->ctx_buf = mp; |
|---|
| 13434 | + pmb->ctx_ndlp = ndlp; |
|---|
| 12913 | 13435 | pmb->vport = vport; |
|---|
| 12914 | 13436 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); |
|---|
| 12915 | 13437 | if (rc != MBX_BUSY) |
|---|
| 12916 | | - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | |
|---|
| 12917 | | - LOG_SLI, "0385 rc should " |
|---|
| 13438 | + lpfc_printf_log(phba, KERN_ERR, |
|---|
| 13439 | + LOG_TRACE_EVENT, |
|---|
| 13440 | + "0385 rc should " |
|---|
| 12918 | 13441 | "have been MBX_BUSY\n"); |
|---|
| 12919 | 13442 | if (rc != MBX_NOT_FINISHED) |
|---|
| 12920 | 13443 | goto send_current_mbox; |
|---|
| .. | .. |
|---|
| 12955 | 13478 | /** |
|---|
| 12956 | 13479 | * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry |
|---|
| 12957 | 13480 | * @phba: Pointer to HBA context object. |
|---|
| 13481 | + * @cq: Pointer to associated CQ |
|---|
| 12958 | 13482 | * @cqe: Pointer to mailbox completion queue entry. |
|---|
| 12959 | 13483 | * |
|---|
| 12960 | 13484 | * This routine process a mailbox completion queue entry, it invokes the |
|---|
| 12961 | | - * proper mailbox complete handling or asynchrous event handling routine |
|---|
| 13485 | + * proper mailbox complete handling or asynchronous event handling routine |
|---|
| 12962 | 13486 | * according to the MCQE's async bit. |
|---|
| 12963 | 13487 | * |
|---|
| 12964 | 13488 | * Return: true if work posted to worker thread, otherwise false. |
|---|
| 12965 | 13489 | **/ |
|---|
| 12966 | 13490 | static bool |
|---|
| 12967 | | -lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) |
|---|
| 13491 | +lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
|---|
| 13492 | + struct lpfc_cqe *cqe) |
|---|
| 12968 | 13493 | { |
|---|
| 12969 | 13494 | struct lpfc_mcqe mcqe; |
|---|
| 12970 | 13495 | bool workposted; |
|---|
| 13496 | + |
|---|
| 13497 | + cq->CQ_mbox++; |
|---|
| 12971 | 13498 | |
|---|
| 12972 | 13499 | /* Copy the mailbox MCQE and convert endian order as needed */ |
|---|
| 12973 | 13500 | lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); |
|---|
| .. | .. |
|---|
| 12999 | 13526 | struct lpfc_sli_ring *pring = cq->pring; |
|---|
| 13000 | 13527 | int txq_cnt = 0; |
|---|
| 13001 | 13528 | int txcmplq_cnt = 0; |
|---|
| 13002 | | - int fcp_txcmplq_cnt = 0; |
|---|
| 13003 | 13529 | |
|---|
| 13004 | 13530 | /* Check for response status */ |
|---|
| 13005 | 13531 | if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { |
|---|
| .. | .. |
|---|
| 13019 | 13545 | txq_cnt++; |
|---|
| 13020 | 13546 | if (!list_empty(&pring->txcmplq)) |
|---|
| 13021 | 13547 | txcmplq_cnt++; |
|---|
| 13022 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13548 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13023 | 13549 | "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " |
|---|
| 13024 | | - "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", |
|---|
| 13550 | + "els_txcmplq_cnt=%d\n", |
|---|
| 13025 | 13551 | txq_cnt, phba->iocb_cnt, |
|---|
| 13026 | | - fcp_txcmplq_cnt, |
|---|
| 13027 | 13552 | txcmplq_cnt); |
|---|
| 13028 | 13553 | return false; |
|---|
| 13029 | 13554 | } |
|---|
| .. | .. |
|---|
| 13086 | 13611 | unsigned long iflags; |
|---|
| 13087 | 13612 | |
|---|
| 13088 | 13613 | switch (cq->subtype) { |
|---|
| 13089 | | - case LPFC_FCP: |
|---|
| 13090 | | - cq_event = lpfc_cq_event_setup( |
|---|
| 13091 | | - phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); |
|---|
| 13092 | | - if (!cq_event) |
|---|
| 13093 | | - return false; |
|---|
| 13094 | | - spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 13095 | | - list_add_tail(&cq_event->list, |
|---|
| 13096 | | - &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); |
|---|
| 13097 | | - /* Set the fcp xri abort event flag */ |
|---|
| 13098 | | - phba->hba_flag |= FCP_XRI_ABORT_EVENT; |
|---|
| 13099 | | - spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 13100 | | - workposted = true; |
|---|
| 13614 | + case LPFC_IO: |
|---|
| 13615 | + lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq); |
|---|
| 13616 | + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { |
|---|
| 13617 | + /* Notify aborted XRI for NVME work queue */ |
|---|
| 13618 | + if (phba->nvmet_support) |
|---|
| 13619 | + lpfc_sli4_nvmet_xri_aborted(phba, wcqe); |
|---|
| 13620 | + } |
|---|
| 13621 | + workposted = false; |
|---|
| 13101 | 13622 | break; |
|---|
| 13102 | 13623 | case LPFC_NVME_LS: /* NVME LS uses ELS resources */ |
|---|
| 13103 | 13624 | case LPFC_ELS: |
|---|
| 13104 | | - cq_event = lpfc_cq_event_setup( |
|---|
| 13105 | | - phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); |
|---|
| 13106 | | - if (!cq_event) |
|---|
| 13107 | | - return false; |
|---|
| 13108 | | - spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 13625 | + cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe)); |
|---|
| 13626 | + if (!cq_event) { |
|---|
| 13627 | + workposted = false; |
|---|
| 13628 | + break; |
|---|
| 13629 | + } |
|---|
| 13630 | + cq_event->hdwq = cq->hdwq; |
|---|
| 13631 | + spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, |
|---|
| 13632 | + iflags); |
|---|
| 13109 | 13633 | list_add_tail(&cq_event->list, |
|---|
| 13110 | 13634 | &phba->sli4_hba.sp_els_xri_aborted_work_queue); |
|---|
| 13111 | 13635 | /* Set the els xri abort event flag */ |
|---|
| 13112 | 13636 | phba->hba_flag |= ELS_XRI_ABORT_EVENT; |
|---|
| 13113 | | - spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 13637 | + spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, |
|---|
| 13638 | + iflags); |
|---|
| 13114 | 13639 | workposted = true; |
|---|
| 13115 | 13640 | break; |
|---|
| 13116 | | - case LPFC_NVME: |
|---|
| 13117 | | - /* Notify aborted XRI for NVME work queue */ |
|---|
| 13118 | | - if (phba->nvmet_support) |
|---|
| 13119 | | - lpfc_sli4_nvmet_xri_aborted(phba, wcqe); |
|---|
| 13120 | | - else |
|---|
| 13121 | | - lpfc_sli4_nvme_xri_aborted(phba, wcqe); |
|---|
| 13122 | | - |
|---|
| 13123 | | - workposted = false; |
|---|
| 13124 | | - break; |
|---|
| 13125 | 13641 | default: |
|---|
| 13126 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13642 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13127 | 13643 | "0603 Invalid CQ subtype %d: " |
|---|
| 13128 | 13644 | "%08x %08x %08x %08x\n", |
|---|
| 13129 | 13645 | cq->subtype, wcqe->word0, wcqe->parameter, |
|---|
| .. | .. |
|---|
| 13133 | 13649 | } |
|---|
| 13134 | 13650 | return workposted; |
|---|
| 13135 | 13651 | } |
|---|
| 13652 | + |
|---|
| 13653 | +#define FC_RCTL_MDS_DIAGS 0xF4 |
|---|
| 13136 | 13654 | |
|---|
| 13137 | 13655 | /** |
|---|
| 13138 | 13656 | * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry |
|---|
| .. | .. |
|---|
| 13169 | 13687 | status = bf_get(lpfc_rcqe_status, rcqe); |
|---|
| 13170 | 13688 | switch (status) { |
|---|
| 13171 | 13689 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
|---|
| 13172 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13690 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13173 | 13691 | "2537 Receive Frame Truncated!!\n"); |
|---|
| 13692 | + fallthrough; |
|---|
| 13174 | 13693 | case FC_STATUS_RQ_SUCCESS: |
|---|
| 13175 | 13694 | spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 13176 | 13695 | lpfc_sli4_rq_release(hrq, drq); |
|---|
| .. | .. |
|---|
| 13184 | 13703 | hrq->RQ_buf_posted--; |
|---|
| 13185 | 13704 | memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); |
|---|
| 13186 | 13705 | |
|---|
| 13187 | | - /* If a NVME LS event (type 0x28), treat it as Fast path */ |
|---|
| 13188 | 13706 | fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; |
|---|
| 13189 | 13707 | |
|---|
| 13190 | | - /* save off the frame for the word thread to process */ |
|---|
| 13708 | + if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || |
|---|
| 13709 | + fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { |
|---|
| 13710 | + spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 13711 | + /* Handle MDS Loopback frames */ |
|---|
| 13712 | + if (!(phba->pport->load_flag & FC_UNLOADING)) |
|---|
| 13713 | + lpfc_sli4_handle_mds_loopback(phba->pport, |
|---|
| 13714 | + dma_buf); |
|---|
| 13715 | + else |
|---|
| 13716 | + lpfc_in_buf_free(phba, &dma_buf->dbuf); |
|---|
| 13717 | + break; |
|---|
| 13718 | + } |
|---|
| 13719 | + |
|---|
| 13720 | + /* save off the frame for the work thread to process */ |
|---|
| 13191 | 13721 | list_add_tail(&dma_buf->cq_event.list, |
|---|
| 13192 | 13722 | &phba->sli4_hba.sp_queue_event); |
|---|
| 13193 | 13723 | /* Frame received */ |
|---|
| .. | .. |
|---|
| 13198 | 13728 | case FC_STATUS_INSUFF_BUF_FRM_DISC: |
|---|
| 13199 | 13729 | if (phba->nvmet_support) { |
|---|
| 13200 | 13730 | tgtp = phba->targetport->private; |
|---|
| 13201 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, |
|---|
| 13731 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13202 | 13732 | "6402 RQE Error x%x, posted %d err_cnt " |
|---|
| 13203 | 13733 | "%d: %x %x %x\n", |
|---|
| 13204 | 13734 | status, hrq->RQ_buf_posted, |
|---|
| .. | .. |
|---|
| 13207 | 13737 | atomic_read(&tgtp->rcv_fcp_cmd_out), |
|---|
| 13208 | 13738 | atomic_read(&tgtp->xmt_fcp_release)); |
|---|
| 13209 | 13739 | } |
|---|
| 13210 | | - /* fallthrough */ |
|---|
| 13740 | + fallthrough; |
|---|
| 13211 | 13741 | |
|---|
| 13212 | 13742 | case FC_STATUS_INSUFF_BUF_NEED_BUF: |
|---|
| 13213 | 13743 | hrq->RQ_no_posted_buf++; |
|---|
| .. | .. |
|---|
| 13226 | 13756 | * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry |
|---|
| 13227 | 13757 | * @phba: Pointer to HBA context object. |
|---|
| 13228 | 13758 | * @cq: Pointer to the completion queue. |
|---|
| 13229 | | - * @wcqe: Pointer to a completion queue entry. |
|---|
| 13759 | + * @cqe: Pointer to a completion queue entry. |
|---|
| 13230 | 13760 | * |
|---|
| 13231 | 13761 | * This routine process a slow-path work-queue or receive queue completion queue |
|---|
| 13232 | 13762 | * entry. |
|---|
| .. | .. |
|---|
| 13270 | 13800 | (struct lpfc_rcqe *)&cqevt); |
|---|
| 13271 | 13801 | break; |
|---|
| 13272 | 13802 | default: |
|---|
| 13273 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13803 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13274 | 13804 | "0388 Not a valid WCQE code: x%x\n", |
|---|
| 13275 | 13805 | bf_get(lpfc_cqe_code, &cqevt)); |
|---|
| 13276 | 13806 | break; |
|---|
| .. | .. |
|---|
| 13282 | 13812 | * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry |
|---|
| 13283 | 13813 | * @phba: Pointer to HBA context object. |
|---|
| 13284 | 13814 | * @eqe: Pointer to fast-path event queue entry. |
|---|
| 13815 | + * @speq: Pointer to slow-path event queue. |
|---|
| 13285 | 13816 | * |
|---|
| 13286 | 13817 | * This routine process a event queue entry from the slow-path event queue. |
|---|
| 13287 | 13818 | * It will check the MajorCode and MinorCode to determine this is for a |
|---|
| .. | .. |
|---|
| 13297 | 13828 | { |
|---|
| 13298 | 13829 | struct lpfc_queue *cq = NULL, *childq; |
|---|
| 13299 | 13830 | uint16_t cqid; |
|---|
| 13831 | + int ret = 0; |
|---|
| 13300 | 13832 | |
|---|
| 13301 | 13833 | /* Get the reference to the corresponding CQ */ |
|---|
| 13302 | 13834 | cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); |
|---|
| .. | .. |
|---|
| 13309 | 13841 | } |
|---|
| 13310 | 13842 | if (unlikely(!cq)) { |
|---|
| 13311 | 13843 | if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) |
|---|
| 13312 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13844 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13313 | 13845 | "0365 Slow-path CQ identifier " |
|---|
| 13314 | 13846 | "(%d) does not exist\n", cqid); |
|---|
| 13315 | 13847 | return; |
|---|
| .. | .. |
|---|
| 13318 | 13850 | /* Save EQ associated with this CQ */ |
|---|
| 13319 | 13851 | cq->assoc_qp = speq; |
|---|
| 13320 | 13852 | |
|---|
| 13321 | | - if (!queue_work(phba->wq, &cq->spwork)) |
|---|
| 13322 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13323 | | - "0390 Cannot schedule soft IRQ " |
|---|
| 13853 | + if (is_kdump_kernel()) |
|---|
| 13854 | + ret = queue_work(phba->wq, &cq->spwork); |
|---|
| 13855 | + else |
|---|
| 13856 | + ret = queue_work_on(cq->chann, phba->wq, &cq->spwork); |
|---|
| 13857 | + |
|---|
| 13858 | + if (!ret) |
|---|
| 13859 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13860 | + "0390 Cannot schedule queue work " |
|---|
| 13324 | 13861 | "for CQ eqcqid=%d, cqid=%d on CPU %d\n", |
|---|
| 13325 | | - cqid, cq->queue_id, smp_processor_id()); |
|---|
| 13862 | + cqid, cq->queue_id, raw_smp_processor_id()); |
|---|
| 13863 | +} |
|---|
| 13864 | + |
|---|
| 13865 | +/** |
|---|
| 13866 | + * __lpfc_sli4_process_cq - Process elements of a CQ |
|---|
| 13867 | + * @phba: Pointer to HBA context object. |
|---|
| 13868 | + * @cq: Pointer to CQ to be processed |
|---|
| 13869 | + * @handler: Routine to process each cqe |
|---|
| 13870 | + * @delay: Pointer to usdelay to set in case of rescheduling of the handler |
|---|
| 13871 | + * @poll_mode: Polling mode we were called from |
|---|
| 13872 | + * |
|---|
| 13873 | + * This routine processes completion queue entries in a CQ. While a valid |
|---|
| 13874 | + * queue element is found, the handler is called. During processing checks |
|---|
| 13875 | + * are made for periodic doorbell writes to let the hardware know of |
|---|
| 13876 | + * element consumption. |
|---|
| 13877 | + * |
|---|
| 13878 | + * If the max limit on cqes to process is hit, or there are no more valid |
|---|
| 13879 | + * entries, the loop stops. If we processed a sufficient number of elements, |
|---|
| 13880 | + * meaning there is sufficient load, rather than rearming and generating |
|---|
| 13881 | + * another interrupt, a cq rescheduling delay will be set. A delay of 0 |
|---|
| 13882 | + * indicates no rescheduling. |
|---|
| 13883 | + * |
|---|
| 13884 | + * Returns True if work scheduled, False otherwise. |
|---|
| 13885 | + **/ |
|---|
| 13886 | +static bool |
|---|
| 13887 | +__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, |
|---|
| 13888 | + bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, |
|---|
| 13889 | + struct lpfc_cqe *), unsigned long *delay, |
|---|
| 13890 | + enum lpfc_poll_mode poll_mode) |
|---|
| 13891 | +{ |
|---|
| 13892 | + struct lpfc_cqe *cqe; |
|---|
| 13893 | + bool workposted = false; |
|---|
| 13894 | + int count = 0, consumed = 0; |
|---|
| 13895 | + bool arm = true; |
|---|
| 13896 | + |
|---|
| 13897 | + /* default - no reschedule */ |
|---|
| 13898 | + *delay = 0; |
|---|
| 13899 | + |
|---|
| 13900 | + if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) |
|---|
| 13901 | + goto rearm_and_exit; |
|---|
| 13902 | + |
|---|
| 13903 | + /* Process all the entries to the CQ */ |
|---|
| 13904 | + cq->q_flag = 0; |
|---|
| 13905 | + cqe = lpfc_sli4_cq_get(cq); |
|---|
| 13906 | + while (cqe) { |
|---|
| 13907 | + workposted |= handler(phba, cq, cqe); |
|---|
| 13908 | + __lpfc_sli4_consume_cqe(phba, cq, cqe); |
|---|
| 13909 | + |
|---|
| 13910 | + consumed++; |
|---|
| 13911 | + if (!(++count % cq->max_proc_limit)) |
|---|
| 13912 | + break; |
|---|
| 13913 | + |
|---|
| 13914 | + if (!(count % cq->notify_interval)) { |
|---|
| 13915 | + phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, |
|---|
| 13916 | + LPFC_QUEUE_NOARM); |
|---|
| 13917 | + consumed = 0; |
|---|
| 13918 | + cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK; |
|---|
| 13919 | + } |
|---|
| 13920 | + |
|---|
| 13921 | + if (count == LPFC_NVMET_CQ_NOTIFY) |
|---|
| 13922 | + cq->q_flag |= HBA_NVMET_CQ_NOTIFY; |
|---|
| 13923 | + |
|---|
| 13924 | + cqe = lpfc_sli4_cq_get(cq); |
|---|
| 13925 | + } |
|---|
| 13926 | + if (count >= phba->cfg_cq_poll_threshold) { |
|---|
| 13927 | + *delay = 1; |
|---|
| 13928 | + arm = false; |
|---|
| 13929 | + } |
|---|
| 13930 | + |
|---|
| 13931 | + /* Note: complete the irq_poll softirq before rearming CQ */ |
|---|
| 13932 | + if (poll_mode == LPFC_IRQ_POLL) |
|---|
| 13933 | + irq_poll_complete(&cq->iop); |
|---|
| 13934 | + |
|---|
| 13935 | + /* Track the max number of CQEs processed in 1 EQ */ |
|---|
| 13936 | + if (count > cq->CQ_max_cqe) |
|---|
| 13937 | + cq->CQ_max_cqe = count; |
|---|
| 13938 | + |
|---|
| 13939 | + cq->assoc_qp->EQ_cqe_cnt += count; |
|---|
| 13940 | + |
|---|
| 13941 | + /* Catch the no cq entry condition */ |
|---|
| 13942 | + if (unlikely(count == 0)) |
|---|
| 13943 | + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
|---|
| 13944 | + "0369 No entry from completion queue " |
|---|
| 13945 | + "qid=%d\n", cq->queue_id); |
|---|
| 13946 | + |
|---|
| 13947 | + xchg(&cq->queue_claimed, 0); |
|---|
| 13948 | + |
|---|
| 13949 | +rearm_and_exit: |
|---|
| 13950 | + phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, |
|---|
| 13951 | + arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); |
|---|
| 13952 | + |
|---|
| 13953 | + return workposted; |
|---|
| 13326 | 13954 | } |
|---|
| 13327 | 13955 | |
|---|
| 13328 | 13956 | /** |
|---|
| 13329 | 13957 | * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry |
|---|
| 13330 | | - * @phba: Pointer to HBA context object. |
|---|
| 13958 | + * @cq: pointer to CQ to process |
|---|
| 13331 | 13959 | * |
|---|
| 13332 | | - * This routine process a event queue entry from the slow-path event queue. |
|---|
| 13333 | | - * It will check the MajorCode and MinorCode to determine this is for a |
|---|
| 13334 | | - * completion event on a completion queue, if not, an error shall be logged |
|---|
| 13335 | | - * and just return. Otherwise, it will get to the corresponding completion |
|---|
| 13336 | | - * queue and process all the entries on that completion queue, rearm the |
|---|
| 13337 | | - * completion queue, and then return. |
|---|
| 13960 | + * This routine calls the cq processing routine with a handler specific |
|---|
| 13961 | + * to the type of queue bound to it. |
|---|
| 13338 | 13962 | * |
|---|
| 13963 | + * The CQ routine returns two values: the first is the calling status, |
|---|
| 13964 | + * which indicates whether work was queued to the background discovery |
|---|
| 13965 | + * thread. If true, the routine should wakeup the discovery thread; |
|---|
| 13966 | + * the second is the delay parameter. If non-zero, rather than rearming |
|---|
| 13967 | + * the CQ and yet another interrupt, the CQ handler should be queued so |
|---|
| 13968 | + * that it is processed in a subsequent polling action. The value of |
|---|
| 13969 | + * the delay indicates when to reschedule it. |
|---|
| 13339 | 13970 | **/ |
|---|
| 13340 | 13971 | static void |
|---|
| 13341 | | -lpfc_sli4_sp_process_cq(struct work_struct *work) |
|---|
| 13972 | +__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) |
|---|
| 13342 | 13973 | { |
|---|
| 13343 | | - struct lpfc_queue *cq = |
|---|
| 13344 | | - container_of(work, struct lpfc_queue, spwork); |
|---|
| 13345 | 13974 | struct lpfc_hba *phba = cq->phba; |
|---|
| 13346 | | - struct lpfc_cqe *cqe; |
|---|
| 13975 | + unsigned long delay; |
|---|
| 13347 | 13976 | bool workposted = false; |
|---|
| 13348 | | - int ccount = 0; |
|---|
| 13977 | + int ret = 0; |
|---|
| 13349 | 13978 | |
|---|
| 13350 | | - /* Process all the entries to the CQ */ |
|---|
| 13979 | + /* Process and rearm the CQ */ |
|---|
| 13351 | 13980 | switch (cq->type) { |
|---|
| 13352 | 13981 | case LPFC_MCQ: |
|---|
| 13353 | | - while ((cqe = lpfc_sli4_cq_get(cq))) { |
|---|
| 13354 | | - workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); |
|---|
| 13355 | | - if (!(++ccount % cq->entry_repost)) |
|---|
| 13356 | | - break; |
|---|
| 13357 | | - cq->CQ_mbox++; |
|---|
| 13358 | | - } |
|---|
| 13982 | + workposted |= __lpfc_sli4_process_cq(phba, cq, |
|---|
| 13983 | + lpfc_sli4_sp_handle_mcqe, |
|---|
| 13984 | + &delay, LPFC_QUEUE_WORK); |
|---|
| 13359 | 13985 | break; |
|---|
| 13360 | 13986 | case LPFC_WCQ: |
|---|
| 13361 | | - while ((cqe = lpfc_sli4_cq_get(cq))) { |
|---|
| 13362 | | - if (cq->subtype == LPFC_FCP || |
|---|
| 13363 | | - cq->subtype == LPFC_NVME) { |
|---|
| 13364 | | -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
|---|
| 13365 | | - if (phba->ktime_on) |
|---|
| 13366 | | - cq->isr_timestamp = ktime_get_ns(); |
|---|
| 13367 | | - else |
|---|
| 13368 | | - cq->isr_timestamp = 0; |
|---|
| 13369 | | -#endif |
|---|
| 13370 | | - workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, |
|---|
| 13371 | | - cqe); |
|---|
| 13372 | | - } else { |
|---|
| 13373 | | - workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, |
|---|
| 13374 | | - cqe); |
|---|
| 13375 | | - } |
|---|
| 13376 | | - if (!(++ccount % cq->entry_repost)) |
|---|
| 13377 | | - break; |
|---|
| 13378 | | - } |
|---|
| 13379 | | - |
|---|
| 13380 | | - /* Track the max number of CQEs processed in 1 EQ */ |
|---|
| 13381 | | - if (ccount > cq->CQ_max_cqe) |
|---|
| 13382 | | - cq->CQ_max_cqe = ccount; |
|---|
| 13987 | + if (cq->subtype == LPFC_IO) |
|---|
| 13988 | + workposted |= __lpfc_sli4_process_cq(phba, cq, |
|---|
| 13989 | + lpfc_sli4_fp_handle_cqe, |
|---|
| 13990 | + &delay, LPFC_QUEUE_WORK); |
|---|
| 13991 | + else |
|---|
| 13992 | + workposted |= __lpfc_sli4_process_cq(phba, cq, |
|---|
| 13993 | + lpfc_sli4_sp_handle_cqe, |
|---|
| 13994 | + &delay, LPFC_QUEUE_WORK); |
|---|
| 13383 | 13995 | break; |
|---|
| 13384 | 13996 | default: |
|---|
| 13385 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13997 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13386 | 13998 | "0370 Invalid completion queue type (%d)\n", |
|---|
| 13387 | 13999 | cq->type); |
|---|
| 13388 | 14000 | return; |
|---|
| 13389 | 14001 | } |
|---|
| 13390 | 14002 | |
|---|
| 13391 | | - /* Catch the no cq entry condition, log an error */ |
|---|
| 13392 | | - if (unlikely(ccount == 0)) |
|---|
| 13393 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13394 | | - "0371 No entry from the CQ: identifier " |
|---|
| 13395 | | - "(x%x), type (%d)\n", cq->queue_id, cq->type); |
|---|
| 13396 | | - |
|---|
| 13397 | | - /* In any case, flash and re-arm the RCQ */ |
|---|
| 13398 | | - phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM); |
|---|
| 14003 | + if (delay) { |
|---|
| 14004 | + if (is_kdump_kernel()) |
|---|
| 14005 | + ret = queue_delayed_work(phba->wq, &cq->sched_spwork, |
|---|
| 14006 | + delay); |
|---|
| 14007 | + else |
|---|
| 14008 | + ret = queue_delayed_work_on(cq->chann, phba->wq, |
|---|
| 14009 | + &cq->sched_spwork, delay); |
|---|
| 14010 | + if (!ret) |
|---|
| 14011 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 14012 | + "0394 Cannot schedule queue work " |
|---|
| 14013 | + "for cqid=%d on CPU %d\n", |
|---|
| 14014 | + cq->queue_id, cq->chann); |
|---|
| 14015 | + } |
|---|
| 13399 | 14016 | |
|---|
| 13400 | 14017 | /* wake up worker thread if there are works to be done */ |
|---|
| 13401 | 14018 | if (workposted) |
|---|
| 13402 | 14019 | lpfc_worker_wake_up(phba); |
|---|
| 14020 | +} |
|---|
| 14021 | + |
|---|
| 14022 | +/** |
|---|
| 14023 | + * lpfc_sli4_sp_process_cq - slow-path work handler when started by |
|---|
| 14024 | + * interrupt |
|---|
| 14025 | + * @work: pointer to work element |
|---|
| 14026 | + * |
|---|
| 14027 | + * translates from the work handler and calls the slow-path handler. |
|---|
| 14028 | + **/ |
|---|
| 14029 | +static void |
|---|
| 14030 | +lpfc_sli4_sp_process_cq(struct work_struct *work) |
|---|
| 14031 | +{ |
|---|
| 14032 | + struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); |
|---|
| 14033 | + |
|---|
| 14034 | + __lpfc_sli4_sp_process_cq(cq); |
|---|
| 14035 | +} |
|---|
| 14036 | + |
|---|
| 14037 | +/** |
|---|
| 14038 | + * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer |
|---|
| 14039 | + * @work: pointer to work element |
|---|
| 14040 | + * |
|---|
| 14041 | + * translates from the work handler and calls the slow-path handler. |
|---|
| 14042 | + **/ |
|---|
| 14043 | +static void |
|---|
| 14044 | +lpfc_sli4_dly_sp_process_cq(struct work_struct *work) |
|---|
| 14045 | +{ |
|---|
| 14046 | + struct lpfc_queue *cq = container_of(to_delayed_work(work), |
|---|
| 14047 | + struct lpfc_queue, sched_spwork); |
|---|
| 14048 | + |
|---|
| 14049 | + __lpfc_sli4_sp_process_cq(cq); |
|---|
| 13403 | 14050 | } |
|---|
| 13404 | 14051 | |
|---|
| 13405 | 14052 | /** |
|---|
| .. | .. |
|---|
| 13431 | 14078 | IOERR_NO_RESOURCES)) |
|---|
| 13432 | 14079 | phba->lpfc_rampdown_queue_depth(phba); |
|---|
| 13433 | 14080 | |
|---|
| 13434 | | - /* Log the error status */ |
|---|
| 14081 | + /* Log the cmpl status */ |
|---|
| 13435 | 14082 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
|---|
| 13436 | | - "0373 FCP CQE error: status=x%x: " |
|---|
| 14083 | + "0373 FCP CQE cmpl: status=x%x: " |
|---|
| 13437 | 14084 | "CQE: %08x %08x %08x %08x\n", |
|---|
| 13438 | 14085 | bf_get(lpfc_wcqe_c_status, wcqe), |
|---|
| 13439 | 14086 | wcqe->word0, wcqe->total_data_placed, |
|---|
| .. | .. |
|---|
| 13443 | 14090 | /* Look up the FCP command IOCB and create pseudo response IOCB */ |
|---|
| 13444 | 14091 | spin_lock_irqsave(&pring->ring_lock, iflags); |
|---|
| 13445 | 14092 | pring->stats.iocb_event++; |
|---|
| 14093 | + spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| 13446 | 14094 | cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, |
|---|
| 13447 | 14095 | bf_get(lpfc_wcqe_c_request_tag, wcqe)); |
|---|
| 13448 | | - spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| 13449 | 14096 | if (unlikely(!cmdiocbq)) { |
|---|
| 13450 | 14097 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
|---|
| 13451 | 14098 | "0374 FCP complete with no corresponding " |
|---|
| .. | .. |
|---|
| 13527 | 14174 | /** |
|---|
| 13528 | 14175 | * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry |
|---|
| 13529 | 14176 | * @phba: Pointer to HBA context object. |
|---|
| 14177 | + * @cq: Pointer to completion queue. |
|---|
| 13530 | 14178 | * @rcqe: Pointer to receive-queue completion queue entry. |
|---|
| 13531 | 14179 | * |
|---|
| 13532 | 14180 | * This routine process a receive-queue completion queue entry. |
|---|
| .. | .. |
|---|
| 13571 | 14219 | status = bf_get(lpfc_rcqe_status, rcqe); |
|---|
| 13572 | 14220 | switch (status) { |
|---|
| 13573 | 14221 | case FC_STATUS_RQ_BUF_LEN_EXCEEDED: |
|---|
| 13574 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 14222 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13575 | 14223 | "6126 Receive Frame Truncated!!\n"); |
|---|
| 13576 | | - /* Drop thru */ |
|---|
| 14224 | + fallthrough; |
|---|
| 13577 | 14225 | case FC_STATUS_RQ_SUCCESS: |
|---|
| 13578 | 14226 | spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 13579 | 14227 | lpfc_sli4_rq_release(hrq, drq); |
|---|
| .. | .. |
|---|
| 13590 | 14238 | |
|---|
| 13591 | 14239 | /* Just some basic sanity checks on FCP Command frame */ |
|---|
| 13592 | 14240 | fctl = (fc_hdr->fh_f_ctl[0] << 16 | |
|---|
| 13593 | | - fc_hdr->fh_f_ctl[1] << 8 | |
|---|
| 13594 | | - fc_hdr->fh_f_ctl[2]); |
|---|
| 14241 | + fc_hdr->fh_f_ctl[1] << 8 | |
|---|
| 14242 | + fc_hdr->fh_f_ctl[2]); |
|---|
| 13595 | 14243 | if (((fctl & |
|---|
| 13596 | 14244 | (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != |
|---|
| 13597 | 14245 | (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || |
|---|
| .. | .. |
|---|
| 13599 | 14247 | goto drop; |
|---|
| 13600 | 14248 | |
|---|
| 13601 | 14249 | if (fc_hdr->fh_type == FC_TYPE_FCP) { |
|---|
| 13602 | | - dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); |
|---|
| 14250 | + dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); |
|---|
| 13603 | 14251 | lpfc_nvmet_unsol_fcp_event( |
|---|
| 13604 | | - phba, idx, dma_buf, |
|---|
| 13605 | | - cq->isr_timestamp); |
|---|
| 14252 | + phba, idx, dma_buf, cq->isr_timestamp, |
|---|
| 14253 | + cq->q_flag & HBA_NVMET_CQ_NOTIFY); |
|---|
| 13606 | 14254 | return false; |
|---|
| 13607 | 14255 | } |
|---|
| 13608 | 14256 | drop: |
|---|
| 13609 | | - lpfc_in_buf_free(phba, &dma_buf->dbuf); |
|---|
| 14257 | + lpfc_rq_buf_free(phba, &dma_buf->hbuf); |
|---|
| 13610 | 14258 | break; |
|---|
| 13611 | 14259 | case FC_STATUS_INSUFF_BUF_FRM_DISC: |
|---|
| 13612 | 14260 | if (phba->nvmet_support) { |
|---|
| 13613 | 14261 | tgtp = phba->targetport->private; |
|---|
| 13614 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, |
|---|
| 14262 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13615 | 14263 | "6401 RQE Error x%x, posted %d err_cnt " |
|---|
| 13616 | 14264 | "%d: %x %x %x\n", |
|---|
| 13617 | 14265 | status, hrq->RQ_buf_posted, |
|---|
| .. | .. |
|---|
| 13620 | 14268 | atomic_read(&tgtp->rcv_fcp_cmd_out), |
|---|
| 13621 | 14269 | atomic_read(&tgtp->xmt_fcp_release)); |
|---|
| 13622 | 14270 | } |
|---|
| 13623 | | - /* fallthrough */ |
|---|
| 14271 | + fallthrough; |
|---|
| 13624 | 14272 | |
|---|
| 13625 | 14273 | case FC_STATUS_INSUFF_BUF_NEED_BUF: |
|---|
| 13626 | 14274 | hrq->RQ_no_posted_buf++; |
|---|
| .. | .. |
|---|
| 13633 | 14281 | |
|---|
| 13634 | 14282 | /** |
|---|
| 13635 | 14283 | * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry |
|---|
| 14284 | + * @phba: adapter with cq |
|---|
| 13636 | 14285 | * @cq: Pointer to the completion queue. |
|---|
| 13637 | | - * @eqe: Pointer to fast-path completion queue entry. |
|---|
| 14286 | + * @cqe: Pointer to fast-path completion queue entry. |
|---|
| 13638 | 14287 | * |
|---|
| 13639 | 14288 | * This routine process a fast-path work queue completion entry from fast-path |
|---|
| 13640 | 14289 | * event queue for FCP command response completion. |
|---|
| 14290 | + * |
|---|
| 14291 | + * Return: true if work posted to worker thread, otherwise false. |
|---|
| 13641 | 14292 | **/ |
|---|
| 13642 | | -static int |
|---|
| 14293 | +static bool |
|---|
| 13643 | 14294 | lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, |
|---|
| 13644 | 14295 | struct lpfc_cqe *cqe) |
|---|
| 13645 | 14296 | { |
|---|
| .. | .. |
|---|
| 13656 | 14307 | cq->CQ_wq++; |
|---|
| 13657 | 14308 | /* Process the WQ complete event */ |
|---|
| 13658 | 14309 | phba->last_completion_time = jiffies; |
|---|
| 13659 | | - if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) |
|---|
| 13660 | | - lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, |
|---|
| 13661 | | - (struct lpfc_wcqe_complete *)&wcqe); |
|---|
| 13662 | | - if (cq->subtype == LPFC_NVME_LS) |
|---|
| 14310 | + if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS) |
|---|
| 13663 | 14311 | lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, |
|---|
| 13664 | 14312 | (struct lpfc_wcqe_complete *)&wcqe); |
|---|
| 13665 | 14313 | break; |
|---|
| .. | .. |
|---|
| 13685 | 14333 | } |
|---|
| 13686 | 14334 | break; |
|---|
| 13687 | 14335 | default: |
|---|
| 13688 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 14336 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13689 | 14337 | "0144 Not a valid CQE code: x%x\n", |
|---|
| 13690 | 14338 | bf_get(lpfc_wcqe_c_code, &wcqe)); |
|---|
| 13691 | 14339 | break; |
|---|
| .. | .. |
|---|
| 13694 | 14342 | } |
|---|
| 13695 | 14343 | |
|---|
| 13696 | 14344 | /** |
|---|
| 14345 | + * lpfc_sli4_sched_cq_work - Schedules cq work |
|---|
| 14346 | + * @phba: Pointer to HBA context object. |
|---|
| 14347 | + * @cq: Pointer to CQ |
|---|
| 14348 | + * @cqid: CQ ID |
|---|
| 14349 | + * |
|---|
| 14350 | + * This routine checks the poll mode of the CQ corresponding to |
|---|
| 14351 | + * cq->chann, then either schedules a softirq or queue_work to complete |
|---|
| 14352 | + * cq work. |
|---|
| 14353 | + * |
|---|
| 14354 | + * queue_work path is taken if in NVMET mode, or if poll_mode is in |
|---|
| 14355 | + * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken. |
|---|
| 14356 | + * |
|---|
| 14357 | + **/ |
|---|
| 14358 | +static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba, |
|---|
| 14359 | + struct lpfc_queue *cq, uint16_t cqid) |
|---|
| 14360 | +{ |
|---|
| 14361 | + int ret = 0; |
|---|
| 14362 | + |
|---|
| 14363 | + switch (cq->poll_mode) { |
|---|
| 14364 | + case LPFC_IRQ_POLL: |
|---|
| 14365 | + irq_poll_sched(&cq->iop); |
|---|
| 14366 | + break; |
|---|
| 14367 | + case LPFC_QUEUE_WORK: |
|---|
| 14368 | + default: |
|---|
| 14369 | + if (is_kdump_kernel()) |
|---|
| 14370 | + ret = queue_work(phba->wq, &cq->irqwork); |
|---|
| 14371 | + else |
|---|
| 14372 | + ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork); |
|---|
| 14373 | + if (!ret) |
|---|
| 14374 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 14375 | + "0383 Cannot schedule queue work " |
|---|
| 14376 | + "for CQ eqcqid=%d, cqid=%d on CPU %d\n", |
|---|
| 14377 | + cqid, cq->queue_id, |
|---|
| 14378 | + raw_smp_processor_id()); |
|---|
| 14379 | + } |
|---|
| 14380 | +} |
|---|
| 14381 | + |
|---|
| 14382 | +/** |
|---|
| 13697 | 14383 | * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry |
|---|
| 13698 | 14384 | * @phba: Pointer to HBA context object. |
|---|
| 14385 | + * @eq: Pointer to the queue structure. |
|---|
| 13699 | 14386 | * @eqe: Pointer to fast-path event queue entry. |
|---|
| 13700 | 14387 | * |
|---|
| 13701 | 14388 | * This routine process a event queue entry from the fast-path event queue. |
|---|
| .. | .. |
|---|
| 13706 | 14393 | * completion queue, and then return. |
|---|
| 13707 | 14394 | **/ |
|---|
| 13708 | 14395 | static void |
|---|
| 13709 | | -lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, |
|---|
| 13710 | | - uint32_t qidx) |
|---|
| 14396 | +lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, |
|---|
| 14397 | + struct lpfc_eqe *eqe) |
|---|
| 13711 | 14398 | { |
|---|
| 13712 | 14399 | struct lpfc_queue *cq = NULL; |
|---|
| 14400 | + uint32_t qidx = eq->hdwq; |
|---|
| 13713 | 14401 | uint16_t cqid, id; |
|---|
| 13714 | 14402 | |
|---|
| 13715 | 14403 | if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { |
|---|
| 13716 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 14404 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13717 | 14405 | "0366 Not a valid completion " |
|---|
| 13718 | 14406 | "event: majorcode=x%x, minorcode=x%x\n", |
|---|
| 13719 | 14407 | bf_get_le32(lpfc_eqe_major_code, eqe), |
|---|
| .. | .. |
|---|
| 13724 | 14412 | /* Get the reference to the corresponding CQ */ |
|---|
| 13725 | 14413 | cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); |
|---|
| 13726 | 14414 | |
|---|
| 14415 | + /* Use the fast lookup method first */ |
|---|
| 14416 | + if (cqid <= phba->sli4_hba.cq_max) { |
|---|
| 14417 | + cq = phba->sli4_hba.cq_lookup[cqid]; |
|---|
| 14418 | + if (cq) |
|---|
| 14419 | + goto work_cq; |
|---|
| 14420 | + } |
|---|
| 14421 | + |
|---|
| 14422 | + /* Next check for NVMET completion */ |
|---|
| 13727 | 14423 | if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { |
|---|
| 13728 | 14424 | id = phba->sli4_hba.nvmet_cqset[0]->queue_id; |
|---|
| 13729 | 14425 | if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { |
|---|
| .. | .. |
|---|
| 13731 | 14427 | cq = phba->sli4_hba.nvmet_cqset[cqid - id]; |
|---|
| 13732 | 14428 | goto process_cq; |
|---|
| 13733 | 14429 | } |
|---|
| 13734 | | - } |
|---|
| 13735 | | - |
|---|
| 13736 | | - if (phba->sli4_hba.nvme_cq_map && |
|---|
| 13737 | | - (cqid == phba->sli4_hba.nvme_cq_map[qidx])) { |
|---|
| 13738 | | - /* Process NVME / NVMET command completion */ |
|---|
| 13739 | | - cq = phba->sli4_hba.nvme_cq[qidx]; |
|---|
| 13740 | | - goto process_cq; |
|---|
| 13741 | | - } |
|---|
| 13742 | | - |
|---|
| 13743 | | - if (phba->sli4_hba.fcp_cq_map && |
|---|
| 13744 | | - (cqid == phba->sli4_hba.fcp_cq_map[qidx])) { |
|---|
| 13745 | | - /* Process FCP command completion */ |
|---|
| 13746 | | - cq = phba->sli4_hba.fcp_cq[qidx]; |
|---|
| 13747 | | - goto process_cq; |
|---|
| 13748 | 14430 | } |
|---|
| 13749 | 14431 | |
|---|
| 13750 | 14432 | if (phba->sli4_hba.nvmels_cq && |
|---|
| .. | .. |
|---|
| 13755 | 14437 | |
|---|
| 13756 | 14438 | /* Otherwise this is a Slow path event */ |
|---|
| 13757 | 14439 | if (cq == NULL) { |
|---|
| 13758 | | - lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]); |
|---|
| 14440 | + lpfc_sli4_sp_handle_eqe(phba, eqe, |
|---|
| 14441 | + phba->sli4_hba.hdwq[qidx].hba_eq); |
|---|
| 13759 | 14442 | return; |
|---|
| 13760 | 14443 | } |
|---|
| 13761 | 14444 | |
|---|
| 13762 | 14445 | process_cq: |
|---|
| 13763 | 14446 | if (unlikely(cqid != cq->queue_id)) { |
|---|
| 13764 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 14447 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 13765 | 14448 | "0368 Miss-matched fast-path completion " |
|---|
| 13766 | 14449 | "queue identifier: eqcqid=%d, fcpcqid=%d\n", |
|---|
| 13767 | 14450 | cqid, cq->queue_id); |
|---|
| 13768 | 14451 | return; |
|---|
| 13769 | 14452 | } |
|---|
| 13770 | 14453 | |
|---|
| 13771 | | - /* Save EQ associated with this CQ */ |
|---|
| 13772 | | - cq->assoc_qp = phba->sli4_hba.hba_eq[qidx]; |
|---|
| 13773 | | - |
|---|
| 13774 | | - if (!queue_work(phba->wq, &cq->irqwork)) |
|---|
| 13775 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13776 | | - "0363 Cannot schedule soft IRQ " |
|---|
| 13777 | | - "for CQ eqcqid=%d, cqid=%d on CPU %d\n", |
|---|
| 13778 | | - cqid, cq->queue_id, smp_processor_id()); |
|---|
| 14454 | +work_cq: |
|---|
| 14455 | +#if defined(CONFIG_SCSI_LPFC_DEBUG_FS) |
|---|
| 14456 | + if (phba->ktime_on) |
|---|
| 14457 | + cq->isr_timestamp = ktime_get_ns(); |
|---|
| 14458 | + else |
|---|
| 14459 | + cq->isr_timestamp = 0; |
|---|
| 14460 | +#endif |
|---|
| 14461 | + lpfc_sli4_sched_cq_work(phba, cq, cqid); |
|---|
| 13779 | 14462 | } |
|---|
| 13780 | 14463 | |
|---|
| 13781 | 14464 | /** |
|---|
| 13782 | | - * lpfc_sli4_hba_process_cq - Process a fast-path event queue entry |
|---|
| 13783 | | - * @phba: Pointer to HBA context object. |
|---|
| 13784 | | - * @eqe: Pointer to fast-path event queue entry. |
|---|
| 14465 | + * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry |
|---|
| 14466 | + * @cq: Pointer to CQ to be processed |
|---|
| 14467 | + * @poll_mode: Enum lpfc_poll_state to determine poll mode |
|---|
| 13785 | 14468 | * |
|---|
| 13786 | | - * This routine process a event queue entry from the fast-path event queue. |
|---|
| 13787 | | - * It will check the MajorCode and MinorCode to determine this is for a |
|---|
| 13788 | | - * completion event on a completion queue, if not, an error shall be logged |
|---|
| 13789 | | - * and just return. Otherwise, it will get to the corresponding completion |
|---|
| 13790 | | - * queue and process all the entries on the completion queue, rearm the |
|---|
| 13791 | | - * completion queue, and then return. |
|---|
| 14469 | + * This routine calls the cq processing routine with the handler for |
|---|
| 14470 | + * fast path CQEs. |
|---|
| 14471 | + * |
|---|
| 14472 | + * The CQ routine returns two values: the first is the calling status, |
|---|
| 14473 | + * which indicates whether work was queued to the background discovery |
|---|
| 14474 | + * thread. If true, the routine should wakeup the discovery thread; |
|---|
| 14475 | + * the second is the delay parameter. If non-zero, rather than rearming |
|---|
| 14476 | + * the CQ and yet another interrupt, the CQ handler should be queued so |
|---|
| 14477 | + * that it is processed in a subsequent polling action. The value of |
|---|
| 14478 | + * the delay indicates when to reschedule it. |
|---|
| 13792 | 14479 | **/ |
|---|
| 13793 | 14480 | static void |
|---|
| 13794 | | -lpfc_sli4_hba_process_cq(struct work_struct *work) |
|---|
| 14481 | +__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq, |
|---|
| 14482 | + enum lpfc_poll_mode poll_mode) |
|---|
| 13795 | 14483 | { |
|---|
| 13796 | | - struct lpfc_queue *cq = |
|---|
| 13797 | | - container_of(work, struct lpfc_queue, irqwork); |
|---|
| 13798 | 14484 | struct lpfc_hba *phba = cq->phba; |
|---|
| 13799 | | - struct lpfc_cqe *cqe; |
|---|
| 14485 | + unsigned long delay; |
|---|
| 13800 | 14486 | bool workposted = false; |
|---|
| 13801 | | - int ccount = 0; |
|---|
| 14487 | + int ret = 0; |
|---|
| 13802 | 14488 | |
|---|
| 13803 | | - /* Process all the entries to the CQ */ |
|---|
| 13804 | | - while ((cqe = lpfc_sli4_cq_get(cq))) { |
|---|
| 13805 | | -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS |
|---|
| 13806 | | - if (phba->ktime_on) |
|---|
| 13807 | | - cq->isr_timestamp = ktime_get_ns(); |
|---|
| 14489 | + /* process and rearm the CQ */ |
|---|
| 14490 | + workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, |
|---|
| 14491 | + &delay, poll_mode); |
|---|
| 14492 | + |
|---|
| 14493 | + if (delay) { |
|---|
| 14494 | + if (is_kdump_kernel()) |
|---|
| 14495 | + ret = queue_delayed_work(phba->wq, &cq->sched_irqwork, |
|---|
| 14496 | + delay); |
|---|
| 13808 | 14497 | else |
|---|
| 13809 | | - cq->isr_timestamp = 0; |
|---|
| 13810 | | -#endif |
|---|
| 13811 | | - workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); |
|---|
| 13812 | | - if (!(++ccount % cq->entry_repost)) |
|---|
| 13813 | | - break; |
|---|
| 14498 | + ret = queue_delayed_work_on(cq->chann, phba->wq, |
|---|
| 14499 | + &cq->sched_irqwork, delay); |
|---|
| 14500 | + if (!ret) |
|---|
| 14501 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 14502 | + "0367 Cannot schedule queue work " |
|---|
| 14503 | + "for cqid=%d on CPU %d\n", |
|---|
| 14504 | + cq->queue_id, cq->chann); |
|---|
| 13814 | 14505 | } |
|---|
| 13815 | | - |
|---|
| 13816 | | - /* Track the max number of CQEs processed in 1 EQ */ |
|---|
| 13817 | | - if (ccount > cq->CQ_max_cqe) |
|---|
| 13818 | | - cq->CQ_max_cqe = ccount; |
|---|
| 13819 | | - cq->assoc_qp->EQ_cqe_cnt += ccount; |
|---|
| 13820 | | - |
|---|
| 13821 | | - /* Catch the no cq entry condition */ |
|---|
| 13822 | | - if (unlikely(ccount == 0)) |
|---|
| 13823 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13824 | | - "0369 No entry from fast-path completion " |
|---|
| 13825 | | - "queue fcpcqid=%d\n", cq->queue_id); |
|---|
| 13826 | | - |
|---|
| 13827 | | - /* In any case, flash and re-arm the CQ */ |
|---|
| 13828 | | - phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM); |
|---|
| 13829 | 14506 | |
|---|
| 13830 | 14507 | /* wake up worker thread if there are works to be done */ |
|---|
| 13831 | 14508 | if (workposted) |
|---|
| 13832 | 14509 | lpfc_worker_wake_up(phba); |
|---|
| 13833 | 14510 | } |
|---|
| 13834 | 14511 | |
|---|
| 13835 | | -static void |
|---|
| 13836 | | -lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) |
|---|
| 13837 | | -{ |
|---|
| 13838 | | - struct lpfc_eqe *eqe; |
|---|
| 13839 | | - |
|---|
| 13840 | | - /* walk all the EQ entries and drop on the floor */ |
|---|
| 13841 | | - while ((eqe = lpfc_sli4_eq_get(eq))) |
|---|
| 13842 | | - ; |
|---|
| 13843 | | - |
|---|
| 13844 | | - /* Clear and re-arm the EQ */ |
|---|
| 13845 | | - phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM); |
|---|
| 13846 | | -} |
|---|
| 13847 | | - |
|---|
| 13848 | | - |
|---|
| 13849 | 14512 | /** |
|---|
| 13850 | | - * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue |
|---|
| 13851 | | - * entry |
|---|
| 13852 | | - * @phba: Pointer to HBA context object. |
|---|
| 13853 | | - * @eqe: Pointer to fast-path event queue entry. |
|---|
| 14513 | + * lpfc_sli4_hba_process_cq - fast-path work handler when started by |
|---|
| 14514 | + * interrupt |
|---|
| 14515 | + * @work: pointer to work element |
|---|
| 13854 | 14516 | * |
|---|
| 13855 | | - * This routine process a event queue entry from the Flash Optimized Fabric |
|---|
| 13856 | | - * event queue. It will check the MajorCode and MinorCode to determine this |
|---|
| 13857 | | - * is for a completion event on a completion queue, if not, an error shall be |
|---|
| 13858 | | - * logged and just return. Otherwise, it will get to the corresponding |
|---|
| 13859 | | - * completion queue and process all the entries on the completion queue, rearm |
|---|
| 13860 | | - * the completion queue, and then return. |
|---|
| 14517 | + * translates from the work handler and calls the fast-path handler. |
|---|
| 13861 | 14518 | **/ |
|---|
| 13862 | 14519 | static void |
|---|
| 13863 | | -lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) |
|---|
| 14520 | +lpfc_sli4_hba_process_cq(struct work_struct *work) |
|---|
| 13864 | 14521 | { |
|---|
| 13865 | | - struct lpfc_queue *cq; |
|---|
| 13866 | | - uint16_t cqid; |
|---|
| 14522 | + struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); |
|---|
| 13867 | 14523 | |
|---|
| 13868 | | - if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { |
|---|
| 13869 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13870 | | - "9147 Not a valid completion " |
|---|
| 13871 | | - "event: majorcode=x%x, minorcode=x%x\n", |
|---|
| 13872 | | - bf_get_le32(lpfc_eqe_major_code, eqe), |
|---|
| 13873 | | - bf_get_le32(lpfc_eqe_minor_code, eqe)); |
|---|
| 13874 | | - return; |
|---|
| 13875 | | - } |
|---|
| 13876 | | - |
|---|
| 13877 | | - /* Get the reference to the corresponding CQ */ |
|---|
| 13878 | | - cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); |
|---|
| 13879 | | - |
|---|
| 13880 | | - /* Next check for OAS */ |
|---|
| 13881 | | - cq = phba->sli4_hba.oas_cq; |
|---|
| 13882 | | - if (unlikely(!cq)) { |
|---|
| 13883 | | - if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) |
|---|
| 13884 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13885 | | - "9148 OAS completion queue " |
|---|
| 13886 | | - "does not exist\n"); |
|---|
| 13887 | | - return; |
|---|
| 13888 | | - } |
|---|
| 13889 | | - |
|---|
| 13890 | | - if (unlikely(cqid != cq->queue_id)) { |
|---|
| 13891 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13892 | | - "9149 Miss-matched fast-path compl " |
|---|
| 13893 | | - "queue id: eqcqid=%d, fcpcqid=%d\n", |
|---|
| 13894 | | - cqid, cq->queue_id); |
|---|
| 13895 | | - return; |
|---|
| 13896 | | - } |
|---|
| 13897 | | - |
|---|
| 13898 | | - /* Save EQ associated with this CQ */ |
|---|
| 13899 | | - cq->assoc_qp = phba->sli4_hba.fof_eq; |
|---|
| 13900 | | - |
|---|
| 13901 | | - /* CQ work will be processed on CPU affinitized to this IRQ */ |
|---|
| 13902 | | - if (!queue_work(phba->wq, &cq->irqwork)) |
|---|
| 13903 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13904 | | - "0367 Cannot schedule soft IRQ " |
|---|
| 13905 | | - "for CQ eqcqid=%d, cqid=%d on CPU %d\n", |
|---|
| 13906 | | - cqid, cq->queue_id, smp_processor_id()); |
|---|
| 14524 | + __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK); |
|---|
| 13907 | 14525 | } |
|---|
| 13908 | 14526 | |
|---|
| 13909 | 14527 | /** |
|---|
| 13910 | | - * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device |
|---|
| 13911 | | - * @irq: Interrupt number. |
|---|
| 13912 | | - * @dev_id: The device context pointer. |
|---|
| 14528 | + * lpfc_sli4_hba_process_cq - fast-path work handler when started by timer |
|---|
| 14529 | + * @work: pointer to work element |
|---|
| 13913 | 14530 | * |
|---|
| 13914 | | - * This function is directly called from the PCI layer as an interrupt |
|---|
| 13915 | | - * service routine when device with SLI-4 interface spec is enabled with |
|---|
| 13916 | | - * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric |
|---|
| 13917 | | - * IOCB ring event in the HBA. However, when the device is enabled with either |
|---|
| 13918 | | - * MSI or Pin-IRQ interrupt mode, this function is called as part of the |
|---|
| 13919 | | - * device-level interrupt handler. When the PCI slot is in error recovery |
|---|
| 13920 | | - * or the HBA is undergoing initialization, the interrupt handler will not |
|---|
| 13921 | | - * process the interrupt. The Flash Optimized Fabric ring event are handled in |
|---|
| 13922 | | - * the intrrupt context. This function is called without any lock held. |
|---|
| 13923 | | - * It gets the hbalock to access and update SLI data structures. Note that, |
|---|
| 13924 | | - * the EQ to CQ are one-to-one map such that the EQ index is |
|---|
| 13925 | | - * equal to that of CQ index. |
|---|
| 13926 | | - * |
|---|
| 13927 | | - * This function returns IRQ_HANDLED when interrupt is handled else it |
|---|
| 13928 | | - * returns IRQ_NONE. |
|---|
| 14531 | + * translates from the work handler and calls the fast-path handler. |
|---|
| 13929 | 14532 | **/ |
|---|
| 13930 | | -irqreturn_t |
|---|
| 13931 | | -lpfc_sli4_fof_intr_handler(int irq, void *dev_id) |
|---|
| 14533 | +static void |
|---|
| 14534 | +lpfc_sli4_dly_hba_process_cq(struct work_struct *work) |
|---|
| 13932 | 14535 | { |
|---|
| 13933 | | - struct lpfc_hba *phba; |
|---|
| 13934 | | - struct lpfc_hba_eq_hdl *hba_eq_hdl; |
|---|
| 13935 | | - struct lpfc_queue *eq; |
|---|
| 13936 | | - struct lpfc_eqe *eqe; |
|---|
| 13937 | | - unsigned long iflag; |
|---|
| 13938 | | - int ecount = 0; |
|---|
| 14536 | + struct lpfc_queue *cq = container_of(to_delayed_work(work), |
|---|
| 14537 | + struct lpfc_queue, sched_irqwork); |
|---|
| 13939 | 14538 | |
|---|
| 13940 | | - /* Get the driver's phba structure from the dev_id */ |
|---|
| 13941 | | - hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; |
|---|
| 13942 | | - phba = hba_eq_hdl->phba; |
|---|
| 13943 | | - |
|---|
| 13944 | | - if (unlikely(!phba)) |
|---|
| 13945 | | - return IRQ_NONE; |
|---|
| 13946 | | - |
|---|
| 13947 | | - /* Get to the EQ struct associated with this vector */ |
|---|
| 13948 | | - eq = phba->sli4_hba.fof_eq; |
|---|
| 13949 | | - if (unlikely(!eq)) |
|---|
| 13950 | | - return IRQ_NONE; |
|---|
| 13951 | | - |
|---|
| 13952 | | - /* Check device state for handling interrupt */ |
|---|
| 13953 | | - if (unlikely(lpfc_intr_state_check(phba))) { |
|---|
| 13954 | | - /* Check again for link_state with lock held */ |
|---|
| 13955 | | - spin_lock_irqsave(&phba->hbalock, iflag); |
|---|
| 13956 | | - if (phba->link_state < LPFC_LINK_DOWN) |
|---|
| 13957 | | - /* Flush, clear interrupt, and rearm the EQ */ |
|---|
| 13958 | | - lpfc_sli4_eq_flush(phba, eq); |
|---|
| 13959 | | - spin_unlock_irqrestore(&phba->hbalock, iflag); |
|---|
| 13960 | | - return IRQ_NONE; |
|---|
| 13961 | | - } |
|---|
| 13962 | | - |
|---|
| 13963 | | - /* |
|---|
| 13964 | | - * Process all the event on FCP fast-path EQ |
|---|
| 13965 | | - */ |
|---|
| 13966 | | - while ((eqe = lpfc_sli4_eq_get(eq))) { |
|---|
| 13967 | | - lpfc_sli4_fof_handle_eqe(phba, eqe); |
|---|
| 13968 | | - if (!(++ecount % eq->entry_repost)) |
|---|
| 13969 | | - break; |
|---|
| 13970 | | - eq->EQ_processed++; |
|---|
| 13971 | | - } |
|---|
| 13972 | | - |
|---|
| 13973 | | - /* Track the max number of EQEs processed in 1 intr */ |
|---|
| 13974 | | - if (ecount > eq->EQ_max_eqe) |
|---|
| 13975 | | - eq->EQ_max_eqe = ecount; |
|---|
| 13976 | | - |
|---|
| 13977 | | - |
|---|
| 13978 | | - if (unlikely(ecount == 0)) { |
|---|
| 13979 | | - eq->EQ_no_entry++; |
|---|
| 13980 | | - |
|---|
| 13981 | | - if (phba->intr_type == MSIX) |
|---|
| 13982 | | - /* MSI-X treated interrupt served as no EQ share INT */ |
|---|
| 13983 | | - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
|---|
| 13984 | | - "9145 MSI-X interrupt with no EQE\n"); |
|---|
| 13985 | | - else { |
|---|
| 13986 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 13987 | | - "9146 ISR interrupt with no EQE\n"); |
|---|
| 13988 | | - /* Non MSI-X treated on interrupt as EQ share INT */ |
|---|
| 13989 | | - return IRQ_NONE; |
|---|
| 13990 | | - } |
|---|
| 13991 | | - } |
|---|
| 13992 | | - /* Always clear and re-arm the fast-path EQ */ |
|---|
| 13993 | | - phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM); |
|---|
| 13994 | | - return IRQ_HANDLED; |
|---|
| 14539 | + __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK); |
|---|
| 13995 | 14540 | } |
|---|
| 13996 | 14541 | |
|---|
| 13997 | 14542 | /** |
|---|
| .. | .. |
|---|
| 14026 | 14571 | struct lpfc_hba *phba; |
|---|
| 14027 | 14572 | struct lpfc_hba_eq_hdl *hba_eq_hdl; |
|---|
| 14028 | 14573 | struct lpfc_queue *fpeq; |
|---|
| 14029 | | - struct lpfc_eqe *eqe; |
|---|
| 14030 | 14574 | unsigned long iflag; |
|---|
| 14031 | 14575 | int ecount = 0; |
|---|
| 14032 | 14576 | int hba_eqidx; |
|---|
| 14577 | + struct lpfc_eq_intr_info *eqi; |
|---|
| 14033 | 14578 | |
|---|
| 14034 | 14579 | /* Get the driver's phba structure from the dev_id */ |
|---|
| 14035 | 14580 | hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; |
|---|
| .. | .. |
|---|
| 14038 | 14583 | |
|---|
| 14039 | 14584 | if (unlikely(!phba)) |
|---|
| 14040 | 14585 | return IRQ_NONE; |
|---|
| 14041 | | - if (unlikely(!phba->sli4_hba.hba_eq)) |
|---|
| 14586 | + if (unlikely(!phba->sli4_hba.hdwq)) |
|---|
| 14042 | 14587 | return IRQ_NONE; |
|---|
| 14043 | 14588 | |
|---|
| 14044 | 14589 | /* Get to the EQ struct associated with this vector */ |
|---|
| 14045 | | - fpeq = phba->sli4_hba.hba_eq[hba_eqidx]; |
|---|
| 14590 | + fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; |
|---|
| 14046 | 14591 | if (unlikely(!fpeq)) |
|---|
| 14047 | 14592 | return IRQ_NONE; |
|---|
| 14048 | | - |
|---|
| 14049 | | - if (lpfc_fcp_look_ahead) { |
|---|
| 14050 | | - if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) |
|---|
| 14051 | | - phba->sli4_hba.sli4_eq_clr_intr(fpeq); |
|---|
| 14052 | | - else { |
|---|
| 14053 | | - atomic_inc(&hba_eq_hdl->hba_eq_in_use); |
|---|
| 14054 | | - return IRQ_NONE; |
|---|
| 14055 | | - } |
|---|
| 14056 | | - } |
|---|
| 14057 | 14593 | |
|---|
| 14058 | 14594 | /* Check device state for handling interrupt */ |
|---|
| 14059 | 14595 | if (unlikely(lpfc_intr_state_check(phba))) { |
|---|
| .. | .. |
|---|
| 14061 | 14597 | spin_lock_irqsave(&phba->hbalock, iflag); |
|---|
| 14062 | 14598 | if (phba->link_state < LPFC_LINK_DOWN) |
|---|
| 14063 | 14599 | /* Flush, clear interrupt, and rearm the EQ */ |
|---|
| 14064 | | - lpfc_sli4_eq_flush(phba, fpeq); |
|---|
| 14600 | + lpfc_sli4_eqcq_flush(phba, fpeq); |
|---|
| 14065 | 14601 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
|---|
| 14066 | | - if (lpfc_fcp_look_ahead) |
|---|
| 14067 | | - atomic_inc(&hba_eq_hdl->hba_eq_in_use); |
|---|
| 14068 | 14602 | return IRQ_NONE; |
|---|
| 14069 | 14603 | } |
|---|
| 14070 | 14604 | |
|---|
| 14071 | | - /* |
|---|
| 14072 | | - * Process all the event on FCP fast-path EQ |
|---|
| 14073 | | - */ |
|---|
| 14074 | | - while ((eqe = lpfc_sli4_eq_get(fpeq))) { |
|---|
| 14075 | | - lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); |
|---|
| 14076 | | - if (!(++ecount % fpeq->entry_repost)) |
|---|
| 14077 | | - break; |
|---|
| 14078 | | - fpeq->EQ_processed++; |
|---|
| 14079 | | - } |
|---|
| 14605 | + eqi = this_cpu_ptr(phba->sli4_hba.eq_info); |
|---|
| 14606 | + eqi->icnt++; |
|---|
| 14080 | 14607 | |
|---|
| 14081 | | - /* Track the max number of EQEs processed in 1 intr */ |
|---|
| 14082 | | - if (ecount > fpeq->EQ_max_eqe) |
|---|
| 14083 | | - fpeq->EQ_max_eqe = ecount; |
|---|
| 14608 | + fpeq->last_cpu = raw_smp_processor_id(); |
|---|
| 14084 | 14609 | |
|---|
| 14085 | | - /* Always clear and re-arm the fast-path EQ */ |
|---|
| 14086 | | - phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM); |
|---|
| 14610 | + if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && |
|---|
| 14611 | + fpeq->q_flag & HBA_EQ_DELAY_CHK && |
|---|
| 14612 | + phba->cfg_auto_imax && |
|---|
| 14613 | + fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && |
|---|
| 14614 | + phba->sli.sli_flag & LPFC_SLI_USE_EQDR) |
|---|
| 14615 | + lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); |
|---|
| 14616 | + |
|---|
| 14617 | + /* process and rearm the EQ */ |
|---|
| 14618 | + ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); |
|---|
| 14087 | 14619 | |
|---|
| 14088 | 14620 | if (unlikely(ecount == 0)) { |
|---|
| 14089 | 14621 | fpeq->EQ_no_entry++; |
|---|
| 14090 | | - |
|---|
| 14091 | | - if (lpfc_fcp_look_ahead) { |
|---|
| 14092 | | - atomic_inc(&hba_eq_hdl->hba_eq_in_use); |
|---|
| 14093 | | - return IRQ_NONE; |
|---|
| 14094 | | - } |
|---|
| 14095 | | - |
|---|
| 14096 | 14622 | if (phba->intr_type == MSIX) |
|---|
| 14097 | 14623 | /* MSI-X treated interrupt served as no EQ share INT */ |
|---|
| 14098 | 14624 | lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, |
|---|
| .. | .. |
|---|
| 14101 | 14627 | /* Non MSI-X treated on interrupt as EQ share INT */ |
|---|
| 14102 | 14628 | return IRQ_NONE; |
|---|
| 14103 | 14629 | } |
|---|
| 14104 | | - |
|---|
| 14105 | | - if (lpfc_fcp_look_ahead) |
|---|
| 14106 | | - atomic_inc(&hba_eq_hdl->hba_eq_in_use); |
|---|
| 14107 | 14630 | |
|---|
| 14108 | 14631 | return IRQ_HANDLED; |
|---|
| 14109 | 14632 | } /* lpfc_sli4_fp_intr_handler */ |
|---|
| .. | .. |
|---|
| 14142 | 14665 | /* |
|---|
| 14143 | 14666 | * Invoke fast-path host attention interrupt handling as appropriate. |
|---|
| 14144 | 14667 | */ |
|---|
| 14145 | | - for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { |
|---|
| 14668 | + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { |
|---|
| 14146 | 14669 | hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, |
|---|
| 14147 | | - &phba->sli4_hba.hba_eq_hdl[qidx]); |
|---|
| 14148 | | - if (hba_irq_rc == IRQ_HANDLED) |
|---|
| 14149 | | - hba_handled |= true; |
|---|
| 14150 | | - } |
|---|
| 14151 | | - |
|---|
| 14152 | | - if (phba->cfg_fof) { |
|---|
| 14153 | | - hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, |
|---|
| 14154 | 14670 | &phba->sli4_hba.hba_eq_hdl[qidx]); |
|---|
| 14155 | 14671 | if (hba_irq_rc == IRQ_HANDLED) |
|---|
| 14156 | 14672 | hba_handled |= true; |
|---|
| .. | .. |
|---|
| 14158 | 14674 | |
|---|
| 14159 | 14675 | return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; |
|---|
| 14160 | 14676 | } /* lpfc_sli4_intr_handler */ |
|---|
| 14677 | + |
|---|
| 14678 | +void lpfc_sli4_poll_hbtimer(struct timer_list *t) |
|---|
| 14679 | +{ |
|---|
| 14680 | + struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer); |
|---|
| 14681 | + struct lpfc_queue *eq; |
|---|
| 14682 | + int i = 0; |
|---|
| 14683 | + |
|---|
| 14684 | + rcu_read_lock(); |
|---|
| 14685 | + |
|---|
| 14686 | + list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list) |
|---|
| 14687 | + i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH); |
|---|
| 14688 | + if (!list_empty(&phba->poll_list)) |
|---|
| 14689 | + mod_timer(&phba->cpuhp_poll_timer, |
|---|
| 14690 | + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); |
|---|
| 14691 | + |
|---|
| 14692 | + rcu_read_unlock(); |
|---|
| 14693 | +} |
|---|
| 14694 | + |
|---|
| 14695 | +inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path) |
|---|
| 14696 | +{ |
|---|
| 14697 | + struct lpfc_hba *phba = eq->phba; |
|---|
| 14698 | + int i = 0; |
|---|
| 14699 | + |
|---|
| 14700 | + /* |
|---|
| 14701 | + * Unlocking an irq is one of the entry point to check |
|---|
| 14702 | + * for re-schedule, but we are good for io submission |
|---|
| 14703 | + * path as midlayer does a get_cpu to glue us in. Flush |
|---|
| 14704 | + * out the invalidate queue so we can see the updated |
|---|
| 14705 | + * value for flag. |
|---|
| 14706 | + */ |
|---|
| 14707 | + smp_rmb(); |
|---|
| 14708 | + |
|---|
| 14709 | + if (READ_ONCE(eq->mode) == LPFC_EQ_POLL) |
|---|
| 14710 | + /* We will not likely get the completion for the caller |
|---|
| 14711 | + * during this iteration but I guess that's fine. |
|---|
| 14712 | + * Future io's coming on this eq should be able to |
|---|
| 14713 | + * pick it up. As for the case of single io's, they |
|---|
| 14714 | + * will be handled through a sched from polling timer |
|---|
| 14715 | + * function which is currently triggered every 1msec. |
|---|
| 14716 | + */ |
|---|
| 14717 | + i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM); |
|---|
| 14718 | + |
|---|
| 14719 | + return i; |
|---|
| 14720 | +} |
|---|
| 14721 | + |
|---|
| 14722 | +static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq) |
|---|
| 14723 | +{ |
|---|
| 14724 | + struct lpfc_hba *phba = eq->phba; |
|---|
| 14725 | + |
|---|
| 14726 | + /* kickstart slowpath processing if needed */ |
|---|
| 14727 | + if (list_empty(&phba->poll_list)) |
|---|
| 14728 | + mod_timer(&phba->cpuhp_poll_timer, |
|---|
| 14729 | + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); |
|---|
| 14730 | + |
|---|
| 14731 | + list_add_rcu(&eq->_poll_list, &phba->poll_list); |
|---|
| 14732 | + synchronize_rcu(); |
|---|
| 14733 | +} |
|---|
| 14734 | + |
|---|
| 14735 | +static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq) |
|---|
| 14736 | +{ |
|---|
| 14737 | + struct lpfc_hba *phba = eq->phba; |
|---|
| 14738 | + |
|---|
| 14739 | + /* Disable slowpath processing for this eq. Kick start the eq |
|---|
| 14740 | + * by RE-ARMING the eq's ASAP |
|---|
| 14741 | + */ |
|---|
| 14742 | + list_del_rcu(&eq->_poll_list); |
|---|
| 14743 | + synchronize_rcu(); |
|---|
| 14744 | + |
|---|
| 14745 | + if (list_empty(&phba->poll_list)) |
|---|
| 14746 | + del_timer_sync(&phba->cpuhp_poll_timer); |
|---|
| 14747 | +} |
|---|
| 14748 | + |
|---|
| 14749 | +void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba) |
|---|
| 14750 | +{ |
|---|
| 14751 | + struct lpfc_queue *eq, *next; |
|---|
| 14752 | + |
|---|
| 14753 | + list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) |
|---|
| 14754 | + list_del(&eq->_poll_list); |
|---|
| 14755 | + |
|---|
| 14756 | + INIT_LIST_HEAD(&phba->poll_list); |
|---|
| 14757 | + synchronize_rcu(); |
|---|
| 14758 | +} |
|---|
| 14759 | + |
|---|
| 14760 | +static inline void |
|---|
| 14761 | +__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode) |
|---|
| 14762 | +{ |
|---|
| 14763 | + if (mode == eq->mode) |
|---|
| 14764 | + return; |
|---|
| 14765 | + /* |
|---|
| 14766 | + * currently this function is only called during a hotplug |
|---|
| 14767 | + * event and the cpu on which this function is executing |
|---|
| 14768 | + * is going offline. By now the hotplug has instructed |
|---|
| 14769 | + * the scheduler to remove this cpu from cpu active mask. |
|---|
| 14770 | + * So we don't need to worry about being put aside by the |
|---|
| 14771 | + * scheduler for a high priority process. Yes, the inter- |
|---|
| 14772 | + * rupts could come but they are known to retire ASAP. |
|---|
| 14773 | + */ |
|---|
| 14774 | + |
|---|
| 14775 | + /* Disable polling in the fastpath */ |
|---|
| 14776 | + WRITE_ONCE(eq->mode, mode); |
|---|
| 14777 | + /* flush out the store buffer */ |
|---|
| 14778 | + smp_wmb(); |
|---|
| 14779 | + |
|---|
| 14780 | + /* |
|---|
| 14781 | + * Add this eq to the polling list and start polling. For |
|---|
| 14782 | + * a grace period both interrupt handler and poller will |
|---|
| 14783 | + * try to process the eq _but_ that's fine. We have a |
|---|
| 14784 | + * synchronization mechanism in place (queue_claimed) to |
|---|
| 14785 | + * deal with it. This is just a draining phase for inter- |
|---|
| 14786 | + * rupt handler (not eq's) as we have guaranteed through |
|---|
| 14787 | + * barrier that all the CPUs have seen the new CQ_POLLED |
|---|
| 14788 | + * state. which will effectively disable the REARMING of |
|---|
| 14789 | + * the EQ. The whole idea is eq's die off eventually as |
|---|
| 14790 | + * we are not rearming EQ's anymore. |
|---|
| 14791 | + */ |
|---|
| 14792 | + mode ? lpfc_sli4_add_to_poll_list(eq) : |
|---|
| 14793 | + lpfc_sli4_remove_from_poll_list(eq); |
|---|
| 14794 | +} |
|---|
| 14795 | + |
|---|
| 14796 | +void lpfc_sli4_start_polling(struct lpfc_queue *eq) |
|---|
| 14797 | +{ |
|---|
| 14798 | + __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL); |
|---|
| 14799 | +} |
|---|
| 14800 | + |
|---|
| 14801 | +void lpfc_sli4_stop_polling(struct lpfc_queue *eq) |
|---|
| 14802 | +{ |
|---|
| 14803 | + struct lpfc_hba *phba = eq->phba; |
|---|
| 14804 | + |
|---|
| 14805 | + __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT); |
|---|
| 14806 | + |
|---|
| 14807 | + /* Kick start for the pending io's in h/w. |
|---|
| 14808 | + * Once we switch back to interrupt processing on a eq |
|---|
| 14809 | + * the io path completion will only arm eq's when it |
|---|
| 14810 | + * receives a completion. But since eq's are in dis- |
|---|
| 14811 | + * armed state it doesn't receive a completion. This |
|---|
| 14812 | + * creates a deadlock scenario. |
|---|
| 14813 | + */ |
|---|
| 14814 | + phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM); |
|---|
| 14815 | +} |
|---|
| 14161 | 14816 | |
|---|
| 14162 | 14817 | /** |
|---|
| 14163 | 14818 | * lpfc_sli4_queue_free - free a queue structure and associated memory |
|---|
| .. | .. |
|---|
| 14175 | 14830 | if (!queue) |
|---|
| 14176 | 14831 | return; |
|---|
| 14177 | 14832 | |
|---|
| 14833 | + if (!list_empty(&queue->wq_list)) |
|---|
| 14834 | + list_del(&queue->wq_list); |
|---|
| 14835 | + |
|---|
| 14178 | 14836 | while (!list_empty(&queue->page_list)) { |
|---|
| 14179 | 14837 | list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, |
|---|
| 14180 | 14838 | list); |
|---|
| .. | .. |
|---|
| 14187 | 14845 | kfree(queue->rqbp); |
|---|
| 14188 | 14846 | } |
|---|
| 14189 | 14847 | |
|---|
| 14190 | | - if (!list_empty(&queue->wq_list)) |
|---|
| 14191 | | - list_del(&queue->wq_list); |
|---|
| 14848 | + if (!list_empty(&queue->cpu_list)) |
|---|
| 14849 | + list_del(&queue->cpu_list); |
|---|
| 14192 | 14850 | |
|---|
| 14193 | 14851 | kfree(queue); |
|---|
| 14194 | 14852 | return; |
|---|
| .. | .. |
|---|
| 14199 | 14857 | * @phba: The HBA that this queue is being created on. |
|---|
| 14200 | 14858 | * @page_size: The size of a queue page |
|---|
| 14201 | 14859 | * @entry_size: The size of each queue entry for this queue. |
|---|
| 14202 | | - * @entry count: The number of entries that this queue will handle. |
|---|
| 14860 | + * @entry_count: The number of entries that this queue will handle. |
|---|
| 14861 | + * @cpu: The cpu that will primarily utilize this queue. |
|---|
| 14203 | 14862 | * |
|---|
| 14204 | 14863 | * This function allocates a queue structure and the DMAable memory used for |
|---|
| 14205 | 14864 | * the host resident queue. This function must be called before creating the |
|---|
| .. | .. |
|---|
| 14207 | 14866 | **/ |
|---|
| 14208 | 14867 | struct lpfc_queue * |
|---|
| 14209 | 14868 | lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, |
|---|
| 14210 | | - uint32_t entry_size, uint32_t entry_count) |
|---|
| 14869 | + uint32_t entry_size, uint32_t entry_count, int cpu) |
|---|
| 14211 | 14870 | { |
|---|
| 14212 | 14871 | struct lpfc_queue *queue; |
|---|
| 14213 | 14872 | struct lpfc_dmabuf *dmabuf; |
|---|
| 14214 | | - int x, total_qe_count; |
|---|
| 14215 | | - void *dma_pointer; |
|---|
| 14216 | 14873 | uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
|---|
| 14874 | + uint16_t x, pgcnt; |
|---|
| 14217 | 14875 | |
|---|
| 14218 | 14876 | if (!phba->sli4_hba.pc_sli4_params.supported) |
|---|
| 14219 | 14877 | hw_page_size = page_size; |
|---|
| 14220 | 14878 | |
|---|
| 14221 | | - queue = kzalloc(sizeof(struct lpfc_queue) + |
|---|
| 14222 | | - (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); |
|---|
| 14223 | | - if (!queue) |
|---|
| 14224 | | - return NULL; |
|---|
| 14225 | | - queue->page_count = (ALIGN(entry_size * entry_count, |
|---|
| 14226 | | - hw_page_size))/hw_page_size; |
|---|
| 14879 | + pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size; |
|---|
| 14227 | 14880 | |
|---|
| 14228 | 14881 | /* If needed, Adjust page count to match the max the adapter supports */ |
|---|
| 14229 | | - if (phba->sli4_hba.pc_sli4_params.wqpcnt && |
|---|
| 14230 | | - (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)) |
|---|
| 14231 | | - queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt; |
|---|
| 14882 | + if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt) |
|---|
| 14883 | + pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt; |
|---|
| 14884 | + |
|---|
| 14885 | + queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt), |
|---|
| 14886 | + GFP_KERNEL, cpu_to_node(cpu)); |
|---|
| 14887 | + if (!queue) |
|---|
| 14888 | + return NULL; |
|---|
| 14232 | 14889 | |
|---|
| 14233 | 14890 | INIT_LIST_HEAD(&queue->list); |
|---|
| 14891 | + INIT_LIST_HEAD(&queue->_poll_list); |
|---|
| 14234 | 14892 | INIT_LIST_HEAD(&queue->wq_list); |
|---|
| 14235 | 14893 | INIT_LIST_HEAD(&queue->wqfull_list); |
|---|
| 14236 | 14894 | INIT_LIST_HEAD(&queue->page_list); |
|---|
| 14237 | 14895 | INIT_LIST_HEAD(&queue->child_list); |
|---|
| 14896 | + INIT_LIST_HEAD(&queue->cpu_list); |
|---|
| 14238 | 14897 | |
|---|
| 14239 | 14898 | /* Set queue parameters now. If the system cannot provide memory |
|---|
| 14240 | 14899 | * resources, the free routine needs to know what was allocated. |
|---|
| 14241 | 14900 | */ |
|---|
| 14901 | + queue->page_count = pgcnt; |
|---|
| 14902 | + queue->q_pgs = (void **)&queue[1]; |
|---|
| 14903 | + queue->entry_cnt_per_pg = hw_page_size / entry_size; |
|---|
| 14242 | 14904 | queue->entry_size = entry_size; |
|---|
| 14243 | 14905 | queue->entry_count = entry_count; |
|---|
| 14244 | 14906 | queue->page_size = hw_page_size; |
|---|
| 14245 | 14907 | queue->phba = phba; |
|---|
| 14246 | 14908 | |
|---|
| 14247 | | - for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { |
|---|
| 14248 | | - dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
|---|
| 14909 | + for (x = 0; x < queue->page_count; x++) { |
|---|
| 14910 | + dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL, |
|---|
| 14911 | + dev_to_node(&phba->pcidev->dev)); |
|---|
| 14249 | 14912 | if (!dmabuf) |
|---|
| 14250 | 14913 | goto out_fail; |
|---|
| 14251 | | - dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, |
|---|
| 14252 | | - hw_page_size, &dmabuf->phys, |
|---|
| 14253 | | - GFP_KERNEL); |
|---|
| 14914 | + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, |
|---|
| 14915 | + hw_page_size, &dmabuf->phys, |
|---|
| 14916 | + GFP_KERNEL); |
|---|
| 14254 | 14917 | if (!dmabuf->virt) { |
|---|
| 14255 | 14918 | kfree(dmabuf); |
|---|
| 14256 | 14919 | goto out_fail; |
|---|
| 14257 | 14920 | } |
|---|
| 14258 | 14921 | dmabuf->buffer_tag = x; |
|---|
| 14259 | 14922 | list_add_tail(&dmabuf->list, &queue->page_list); |
|---|
| 14260 | | - /* initialize queue's entry array */ |
|---|
| 14261 | | - dma_pointer = dmabuf->virt; |
|---|
| 14262 | | - for (; total_qe_count < entry_count && |
|---|
| 14263 | | - dma_pointer < (hw_page_size + dmabuf->virt); |
|---|
| 14264 | | - total_qe_count++, dma_pointer += entry_size) { |
|---|
| 14265 | | - queue->qe[total_qe_count].address = dma_pointer; |
|---|
| 14266 | | - } |
|---|
| 14923 | + /* use lpfc_sli4_qe to index a particular entry in this page */ |
|---|
| 14924 | + queue->q_pgs[x] = dmabuf->virt; |
|---|
| 14267 | 14925 | } |
|---|
| 14268 | 14926 | INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq); |
|---|
| 14269 | 14927 | INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq); |
|---|
| 14928 | + INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq); |
|---|
| 14929 | + INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq); |
|---|
| 14270 | 14930 | |
|---|
| 14271 | | - /* entry_repost will be set during q creation */ |
|---|
| 14931 | + /* notify_interval will be set during q creation */ |
|---|
| 14272 | 14932 | |
|---|
| 14273 | 14933 | return queue; |
|---|
| 14274 | 14934 | out_fail: |
|---|
| .. | .. |
|---|
| 14305 | 14965 | } |
|---|
| 14306 | 14966 | |
|---|
| 14307 | 14967 | /** |
|---|
| 14308 | | - * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs |
|---|
| 14309 | | - * @phba: HBA structure that indicates port to create a queue on. |
|---|
| 14310 | | - * @startq: The starting FCP EQ to modify |
|---|
| 14968 | + * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs |
|---|
| 14969 | + * @phba: HBA structure that EQs are on. |
|---|
| 14970 | + * @startq: The starting EQ index to modify |
|---|
| 14971 | + * @numq: The number of EQs (consecutive indexes) to modify |
|---|
| 14972 | + * @usdelay: amount of delay |
|---|
| 14311 | 14973 | * |
|---|
| 14312 | | - * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. |
|---|
| 14313 | | - * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be |
|---|
| 14314 | | - * updated in one mailbox command. |
|---|
| 14974 | + * This function revises the EQ delay on 1 or more EQs. The EQ delay |
|---|
| 14975 | + * is set either by writing to a register (if supported by the SLI Port) |
|---|
| 14976 | + * or by mailbox command. The mailbox command allows several EQs to be |
|---|
| 14977 | + * updated at once. |
|---|
| 14315 | 14978 | * |
|---|
| 14316 | | - * The @phba struct is used to send mailbox command to HBA. The @startq |
|---|
| 14317 | | - * is used to get the starting FCP EQ to change. |
|---|
| 14318 | | - * This function is asynchronous and will wait for the mailbox |
|---|
| 14319 | | - * command to finish before continuing. |
|---|
| 14979 | + * The @phba struct is used to send a mailbox command to HBA. The @startq |
|---|
| 14980 | + * is used to get the starting EQ index to change. The @numq value is |
|---|
| 14981 | + * used to specify how many consecutive EQ indexes, starting at EQ index, |
|---|
| 14982 | + * are to be changed. This function is asynchronous and will wait for any |
|---|
| 14983 | + * mailbox commands to finish before returning. |
|---|
| 14320 | 14984 | * |
|---|
| 14321 | | - * On success this function will return a zero. If unable to allocate enough |
|---|
| 14322 | | - * memory this function will return -ENOMEM. If the queue create mailbox command |
|---|
| 14323 | | - * fails this function will return -ENXIO. |
|---|
| 14985 | + * On success this function will return a zero. If unable to allocate |
|---|
| 14986 | + * enough memory this function will return -ENOMEM. If a mailbox command |
|---|
| 14987 | + * fails this function will return -ENXIO. Note: on ENXIO, some EQs may |
|---|
| 14988 | + * have had their delay multiplier changed. |
|---|
| 14324 | 14989 | **/ |
|---|
| 14325 | | -int |
|---|
| 14990 | +void |
|---|
| 14326 | 14991 | lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, |
|---|
| 14327 | | - uint32_t numq, uint32_t imax) |
|---|
| 14992 | + uint32_t numq, uint32_t usdelay) |
|---|
| 14328 | 14993 | { |
|---|
| 14329 | 14994 | struct lpfc_mbx_modify_eq_delay *eq_delay; |
|---|
| 14330 | 14995 | LPFC_MBOXQ_t *mbox; |
|---|
| 14331 | 14996 | struct lpfc_queue *eq; |
|---|
| 14332 | | - int cnt, rc, length, status = 0; |
|---|
| 14997 | + int cnt = 0, rc, length; |
|---|
| 14333 | 14998 | uint32_t shdr_status, shdr_add_status; |
|---|
| 14334 | | - uint32_t result, val; |
|---|
| 14999 | + uint32_t dmult; |
|---|
| 14335 | 15000 | int qidx; |
|---|
| 14336 | 15001 | union lpfc_sli4_cfg_shdr *shdr; |
|---|
| 14337 | | - uint16_t dmult; |
|---|
| 14338 | 15002 | |
|---|
| 14339 | | - if (startq >= phba->io_channel_irqs) |
|---|
| 14340 | | - return 0; |
|---|
| 15003 | + if (startq >= phba->cfg_irq_chann) |
|---|
| 15004 | + return; |
|---|
| 15005 | + |
|---|
| 15006 | + if (usdelay > 0xFFFF) { |
|---|
| 15007 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME, |
|---|
| 15008 | + "6429 usdelay %d too large. Scaled down to " |
|---|
| 15009 | + "0xFFFF.\n", usdelay); |
|---|
| 15010 | + usdelay = 0xFFFF; |
|---|
| 15011 | + } |
|---|
| 15012 | + |
|---|
| 15013 | + /* set values by EQ_DELAY register if supported */ |
|---|
| 15014 | + if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { |
|---|
| 15015 | + for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { |
|---|
| 15016 | + eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; |
|---|
| 15017 | + if (!eq) |
|---|
| 15018 | + continue; |
|---|
| 15019 | + |
|---|
| 15020 | + lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay); |
|---|
| 15021 | + |
|---|
| 15022 | + if (++cnt >= numq) |
|---|
| 15023 | + break; |
|---|
| 15024 | + } |
|---|
| 15025 | + return; |
|---|
| 15026 | + } |
|---|
| 15027 | + |
|---|
| 15028 | + /* Otherwise, set values by mailbox cmd */ |
|---|
| 14341 | 15029 | |
|---|
| 14342 | 15030 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 14343 | | - if (!mbox) |
|---|
| 14344 | | - return -ENOMEM; |
|---|
| 15031 | + if (!mbox) { |
|---|
| 15032 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15033 | + "6428 Failed allocating mailbox cmd buffer." |
|---|
| 15034 | + " EQ delay was not set.\n"); |
|---|
| 15035 | + return; |
|---|
| 15036 | + } |
|---|
| 14345 | 15037 | length = (sizeof(struct lpfc_mbx_modify_eq_delay) - |
|---|
| 14346 | 15038 | sizeof(struct lpfc_sli4_cfg_mhdr)); |
|---|
| 14347 | 15039 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
|---|
| .. | .. |
|---|
| 14350 | 15042 | eq_delay = &mbox->u.mqe.un.eq_delay; |
|---|
| 14351 | 15043 | |
|---|
| 14352 | 15044 | /* Calculate delay multiper from maximum interrupt per second */ |
|---|
| 14353 | | - result = imax / phba->io_channel_irqs; |
|---|
| 14354 | | - if (result > LPFC_DMULT_CONST || result == 0) |
|---|
| 14355 | | - dmult = 0; |
|---|
| 14356 | | - else |
|---|
| 14357 | | - dmult = LPFC_DMULT_CONST/result - 1; |
|---|
| 15045 | + dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC; |
|---|
| 15046 | + if (dmult) |
|---|
| 15047 | + dmult--; |
|---|
| 14358 | 15048 | if (dmult > LPFC_DMULT_MAX) |
|---|
| 14359 | 15049 | dmult = LPFC_DMULT_MAX; |
|---|
| 14360 | 15050 | |
|---|
| 14361 | | - cnt = 0; |
|---|
| 14362 | | - for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) { |
|---|
| 14363 | | - eq = phba->sli4_hba.hba_eq[qidx]; |
|---|
| 15051 | + for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { |
|---|
| 15052 | + eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; |
|---|
| 14364 | 15053 | if (!eq) |
|---|
| 14365 | 15054 | continue; |
|---|
| 14366 | | - eq->q_mode = imax; |
|---|
| 15055 | + eq->q_mode = usdelay; |
|---|
| 14367 | 15056 | eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; |
|---|
| 14368 | 15057 | eq_delay->u.request.eq[cnt].phase = 0; |
|---|
| 14369 | 15058 | eq_delay->u.request.eq[cnt].delay_multi = dmult; |
|---|
| 14370 | | - cnt++; |
|---|
| 14371 | 15059 | |
|---|
| 14372 | | - /* q_mode is only used for auto_imax */ |
|---|
| 14373 | | - if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { |
|---|
| 14374 | | - /* Use EQ Delay Register method for q_mode */ |
|---|
| 14375 | | - |
|---|
| 14376 | | - /* Convert for EQ Delay register */ |
|---|
| 14377 | | - val = phba->cfg_fcp_imax; |
|---|
| 14378 | | - if (val) { |
|---|
| 14379 | | - /* First, interrupts per sec per EQ */ |
|---|
| 14380 | | - val = phba->cfg_fcp_imax / |
|---|
| 14381 | | - phba->io_channel_irqs; |
|---|
| 14382 | | - |
|---|
| 14383 | | - /* us delay between each interrupt */ |
|---|
| 14384 | | - val = LPFC_SEC_TO_USEC / val; |
|---|
| 14385 | | - } |
|---|
| 14386 | | - eq->q_mode = val; |
|---|
| 14387 | | - } else { |
|---|
| 14388 | | - eq->q_mode = imax; |
|---|
| 14389 | | - } |
|---|
| 14390 | | - |
|---|
| 14391 | | - if (cnt >= numq) |
|---|
| 15060 | + if (++cnt >= numq) |
|---|
| 14392 | 15061 | break; |
|---|
| 14393 | 15062 | } |
|---|
| 14394 | 15063 | eq_delay->u.request.num_eq = cnt; |
|---|
| 14395 | 15064 | |
|---|
| 14396 | 15065 | mbox->vport = phba->pport; |
|---|
| 14397 | 15066 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
|---|
| 14398 | | - mbox->context1 = NULL; |
|---|
| 15067 | + mbox->ctx_buf = NULL; |
|---|
| 15068 | + mbox->ctx_ndlp = NULL; |
|---|
| 14399 | 15069 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
|---|
| 14400 | 15070 | shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; |
|---|
| 14401 | 15071 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 14402 | 15072 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 14403 | 15073 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 14404 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 15074 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 14405 | 15075 | "2512 MODIFY_EQ_DELAY mailbox failed with " |
|---|
| 14406 | 15076 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 14407 | 15077 | shdr_status, shdr_add_status, rc); |
|---|
| 14408 | | - status = -ENXIO; |
|---|
| 14409 | 15078 | } |
|---|
| 14410 | 15079 | mempool_free(mbox, phba->mbox_mem_pool); |
|---|
| 14411 | | - return status; |
|---|
| 15080 | + return; |
|---|
| 14412 | 15081 | } |
|---|
| 14413 | 15082 | |
|---|
| 14414 | 15083 | /** |
|---|
| .. | .. |
|---|
| 14479 | 15148 | dmult); |
|---|
| 14480 | 15149 | switch (eq->entry_count) { |
|---|
| 14481 | 15150 | default: |
|---|
| 14482 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 15151 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 14483 | 15152 | "0360 Unsupported EQ count. (%d)\n", |
|---|
| 14484 | 15153 | eq->entry_count); |
|---|
| 14485 | | - if (eq->entry_count < 256) |
|---|
| 14486 | | - return -EINVAL; |
|---|
| 14487 | | - /* otherwise default to smallest count (drop through) */ |
|---|
| 15154 | + if (eq->entry_count < 256) { |
|---|
| 15155 | + status = -EINVAL; |
|---|
| 15156 | + goto out; |
|---|
| 15157 | + } |
|---|
| 15158 | + fallthrough; /* otherwise default to smallest count */ |
|---|
| 14488 | 15159 | case 256: |
|---|
| 14489 | 15160 | bf_set(lpfc_eq_context_count, &eq_create->u.request.context, |
|---|
| 14490 | 15161 | LPFC_EQ_CNT_256); |
|---|
| .. | .. |
|---|
| 14515 | 15186 | } |
|---|
| 14516 | 15187 | mbox->vport = phba->pport; |
|---|
| 14517 | 15188 | mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
|---|
| 14518 | | - mbox->context1 = NULL; |
|---|
| 15189 | + mbox->ctx_buf = NULL; |
|---|
| 15190 | + mbox->ctx_ndlp = NULL; |
|---|
| 14519 | 15191 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
|---|
| 14520 | 15192 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 14521 | 15193 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 14522 | 15194 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 14523 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 15195 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 14524 | 15196 | "2500 EQ_CREATE mailbox failed with " |
|---|
| 14525 | 15197 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 14526 | 15198 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 14532 | 15204 | if (eq->queue_id == 0xFFFF) |
|---|
| 14533 | 15205 | status = -ENXIO; |
|---|
| 14534 | 15206 | eq->host_index = 0; |
|---|
| 14535 | | - eq->hba_index = 0; |
|---|
| 14536 | | - eq->entry_repost = LPFC_EQ_REPOST; |
|---|
| 14537 | | - |
|---|
| 15207 | + eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; |
|---|
| 15208 | + eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; |
|---|
| 15209 | +out: |
|---|
| 14538 | 15210 | mempool_free(mbox, phba->mbox_mem_pool); |
|---|
| 14539 | 15211 | return status; |
|---|
| 15212 | +} |
|---|
| 15213 | + |
|---|
| 15214 | +static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget) |
|---|
| 15215 | +{ |
|---|
| 15216 | + struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop); |
|---|
| 15217 | + |
|---|
| 15218 | + __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL); |
|---|
| 15219 | + |
|---|
| 15220 | + return 1; |
|---|
| 14540 | 15221 | } |
|---|
| 14541 | 15222 | |
|---|
| 14542 | 15223 | /** |
|---|
| .. | .. |
|---|
| 14544 | 15225 | * @phba: HBA structure that indicates port to create a queue on. |
|---|
| 14545 | 15226 | * @cq: The queue structure to use to create the completion queue. |
|---|
| 14546 | 15227 | * @eq: The event queue to bind this completion queue to. |
|---|
| 15228 | + * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc). |
|---|
| 15229 | + * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). |
|---|
| 14547 | 15230 | * |
|---|
| 14548 | 15231 | * This function creates a completion queue, as detailed in @wq, on a port, |
|---|
| 14549 | 15232 | * described by @phba by sending a CQ_CREATE mailbox command to the HBA. |
|---|
| .. | .. |
|---|
| 14570 | 15253 | int rc, length, status = 0; |
|---|
| 14571 | 15254 | uint32_t shdr_status, shdr_add_status; |
|---|
| 14572 | 15255 | union lpfc_sli4_cfg_shdr *shdr; |
|---|
| 14573 | | - uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; |
|---|
| 14574 | 15256 | |
|---|
| 14575 | 15257 | /* sanity check on queue memory */ |
|---|
| 14576 | 15258 | if (!cq || !eq) |
|---|
| 14577 | 15259 | return -ENODEV; |
|---|
| 14578 | | - if (!phba->sli4_hba.pc_sli4_params.supported) |
|---|
| 14579 | | - hw_page_size = cq->page_size; |
|---|
| 14580 | 15260 | |
|---|
| 14581 | 15261 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 14582 | 15262 | if (!mbox) |
|---|
| .. | .. |
|---|
| 14617 | 15297 | LPFC_CQ_CNT_WORD7); |
|---|
| 14618 | 15298 | break; |
|---|
| 14619 | 15299 | } |
|---|
| 14620 | | - /* Fall Thru */ |
|---|
| 15300 | + fallthrough; |
|---|
| 14621 | 15301 | default: |
|---|
| 14622 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 15302 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 14623 | 15303 | "0361 Unsupported CQ count: " |
|---|
| 14624 | 15304 | "entry cnt %d sz %d pg cnt %d\n", |
|---|
| 14625 | 15305 | cq->entry_count, cq->entry_size, |
|---|
| .. | .. |
|---|
| 14628 | 15308 | status = -EINVAL; |
|---|
| 14629 | 15309 | goto out; |
|---|
| 14630 | 15310 | } |
|---|
| 14631 | | - /* otherwise default to smallest count (drop through) */ |
|---|
| 15311 | + fallthrough; /* otherwise default to smallest count */ |
|---|
| 14632 | 15312 | case 256: |
|---|
| 14633 | 15313 | bf_set(lpfc_cq_context_count, &cq_create->u.request.context, |
|---|
| 14634 | 15314 | LPFC_CQ_CNT_256); |
|---|
| .. | .. |
|---|
| 14655 | 15335 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 14656 | 15336 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 14657 | 15337 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 14658 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 15338 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 14659 | 15339 | "2501 CQ_CREATE mailbox failed with " |
|---|
| 14660 | 15340 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 14661 | 15341 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 14674 | 15354 | cq->subtype = subtype; |
|---|
| 14675 | 15355 | cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); |
|---|
| 14676 | 15356 | cq->assoc_qid = eq->queue_id; |
|---|
| 15357 | + cq->assoc_qp = eq; |
|---|
| 14677 | 15358 | cq->host_index = 0; |
|---|
| 14678 | | - cq->hba_index = 0; |
|---|
| 14679 | | - cq->entry_repost = LPFC_CQ_REPOST; |
|---|
| 15359 | + cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; |
|---|
| 15360 | + cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count); |
|---|
| 14680 | 15361 | |
|---|
| 15362 | + if (cq->queue_id > phba->sli4_hba.cq_max) |
|---|
| 15363 | + phba->sli4_hba.cq_max = cq->queue_id; |
|---|
| 15364 | + |
|---|
| 15365 | + irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler); |
|---|
| 14681 | 15366 | out: |
|---|
| 14682 | 15367 | mempool_free(mbox, phba->mbox_mem_pool); |
|---|
| 14683 | 15368 | return status; |
|---|
| .. | .. |
|---|
| 14687 | 15372 | * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ |
|---|
| 14688 | 15373 | * @phba: HBA structure that indicates port to create a queue on. |
|---|
| 14689 | 15374 | * @cqp: The queue structure array to use to create the completion queues. |
|---|
| 14690 | | - * @eqp: The event queue array to bind these completion queues to. |
|---|
| 15375 | + * @hdwq: The hardware queue array with the EQ to bind completion queues to. |
|---|
| 15376 | + * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc). |
|---|
| 15377 | + * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). |
|---|
| 14691 | 15378 | * |
|---|
| 14692 | 15379 | * This function creates a set of completion queue, s to support MRQ |
|---|
| 14693 | 15380 | * as detailed in @cqp, on a port, |
|---|
| .. | .. |
|---|
| 14707 | 15394 | **/ |
|---|
| 14708 | 15395 | int |
|---|
| 14709 | 15396 | lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, |
|---|
| 14710 | | - struct lpfc_queue **eqp, uint32_t type, uint32_t subtype) |
|---|
| 15397 | + struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, |
|---|
| 15398 | + uint32_t subtype) |
|---|
| 14711 | 15399 | { |
|---|
| 14712 | 15400 | struct lpfc_queue *cq; |
|---|
| 14713 | 15401 | struct lpfc_queue *eq; |
|---|
| .. | .. |
|---|
| 14722 | 15410 | |
|---|
| 14723 | 15411 | /* sanity check on queue memory */ |
|---|
| 14724 | 15412 | numcq = phba->cfg_nvmet_mrq; |
|---|
| 14725 | | - if (!cqp || !eqp || !numcq) |
|---|
| 15413 | + if (!cqp || !hdwq || !numcq) |
|---|
| 14726 | 15414 | return -ENODEV; |
|---|
| 14727 | 15415 | |
|---|
| 14728 | 15416 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| .. | .. |
|---|
| 14736 | 15424 | LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, |
|---|
| 14737 | 15425 | LPFC_SLI4_MBX_NEMBED); |
|---|
| 14738 | 15426 | if (alloclen < length) { |
|---|
| 14739 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 15427 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 14740 | 15428 | "3098 Allocated DMA memory size (%d) is " |
|---|
| 14741 | 15429 | "less than the requested DMA memory size " |
|---|
| 14742 | 15430 | "(%d)\n", alloclen, length); |
|---|
| .. | .. |
|---|
| 14749 | 15437 | |
|---|
| 14750 | 15438 | for (idx = 0; idx < numcq; idx++) { |
|---|
| 14751 | 15439 | cq = cqp[idx]; |
|---|
| 14752 | | - eq = eqp[idx]; |
|---|
| 15440 | + eq = hdwq[idx].hba_eq; |
|---|
| 14753 | 15441 | if (!cq || !eq) { |
|---|
| 14754 | 15442 | status = -ENOMEM; |
|---|
| 14755 | 15443 | goto out; |
|---|
| .. | .. |
|---|
| 14788 | 15476 | LPFC_CQ_CNT_WORD7); |
|---|
| 14789 | 15477 | break; |
|---|
| 14790 | 15478 | } |
|---|
| 14791 | | - /* Fall Thru */ |
|---|
| 15479 | + fallthrough; |
|---|
| 14792 | 15480 | default: |
|---|
| 14793 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 15481 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 14794 | 15482 | "3118 Bad CQ count. (%d)\n", |
|---|
| 14795 | 15483 | cq->entry_count); |
|---|
| 14796 | 15484 | if (cq->entry_count < 256) { |
|---|
| 14797 | 15485 | status = -EINVAL; |
|---|
| 14798 | 15486 | goto out; |
|---|
| 14799 | 15487 | } |
|---|
| 14800 | | - /* otherwise default to smallest (drop thru) */ |
|---|
| 15488 | + fallthrough; /* otherwise default to smallest */ |
|---|
| 14801 | 15489 | case 256: |
|---|
| 14802 | 15490 | bf_set(lpfc_mbx_cq_create_set_cqe_cnt, |
|---|
| 14803 | 15491 | &cq_set->u.request, LPFC_CQ_CNT_256); |
|---|
| .. | .. |
|---|
| 14882 | 15570 | cq->type = type; |
|---|
| 14883 | 15571 | cq->subtype = subtype; |
|---|
| 14884 | 15572 | cq->assoc_qid = eq->queue_id; |
|---|
| 15573 | + cq->assoc_qp = eq; |
|---|
| 14885 | 15574 | cq->host_index = 0; |
|---|
| 14886 | | - cq->hba_index = 0; |
|---|
| 14887 | | - cq->entry_repost = LPFC_CQ_REPOST; |
|---|
| 15575 | + cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; |
|---|
| 15576 | + cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, |
|---|
| 15577 | + cq->entry_count); |
|---|
| 14888 | 15578 | cq->chann = idx; |
|---|
| 14889 | 15579 | |
|---|
| 14890 | 15580 | rc = 0; |
|---|
| .. | .. |
|---|
| 14906 | 15596 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 14907 | 15597 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 14908 | 15598 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 14909 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 15599 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 14910 | 15600 | "3119 CQ_CREATE_SET mailbox failed with " |
|---|
| 14911 | 15601 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 14912 | 15602 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 14922 | 15612 | for (idx = 0; idx < numcq; idx++) { |
|---|
| 14923 | 15613 | cq = cqp[idx]; |
|---|
| 14924 | 15614 | cq->queue_id = rc + idx; |
|---|
| 15615 | + if (cq->queue_id > phba->sli4_hba.cq_max) |
|---|
| 15616 | + phba->sli4_hba.cq_max = cq->queue_id; |
|---|
| 14925 | 15617 | } |
|---|
| 14926 | 15618 | |
|---|
| 14927 | 15619 | out: |
|---|
| .. | .. |
|---|
| 15062 | 15754 | cq->queue_id); |
|---|
| 15063 | 15755 | switch (mq->entry_count) { |
|---|
| 15064 | 15756 | default: |
|---|
| 15065 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 15757 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15066 | 15758 | "0362 Unsupported MQ count. (%d)\n", |
|---|
| 15067 | 15759 | mq->entry_count); |
|---|
| 15068 | 15760 | if (mq->entry_count < 16) { |
|---|
| 15069 | 15761 | status = -EINVAL; |
|---|
| 15070 | 15762 | goto out; |
|---|
| 15071 | 15763 | } |
|---|
| 15072 | | - /* otherwise default to smallest count (drop through) */ |
|---|
| 15764 | + fallthrough; /* otherwise default to smallest count */ |
|---|
| 15073 | 15765 | case 16: |
|---|
| 15074 | 15766 | bf_set(lpfc_mq_context_ring_size, |
|---|
| 15075 | 15767 | &mq_create_ext->u.request.context, |
|---|
| .. | .. |
|---|
| 15118 | 15810 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 15119 | 15811 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 15120 | 15812 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 15121 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 15813 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15122 | 15814 | "2502 MQ_CREATE mailbox failed with " |
|---|
| 15123 | 15815 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 15124 | 15816 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 15134 | 15826 | mq->subtype = subtype; |
|---|
| 15135 | 15827 | mq->host_index = 0; |
|---|
| 15136 | 15828 | mq->hba_index = 0; |
|---|
| 15137 | | - mq->entry_repost = LPFC_MQ_REPOST; |
|---|
| 15138 | 15829 | |
|---|
| 15139 | 15830 | /* link the mq onto the parent cq child list */ |
|---|
| 15140 | 15831 | list_add_tail(&mq->list, &cq->child_list); |
|---|
| .. | .. |
|---|
| 15182 | 15873 | uint16_t pci_barset; |
|---|
| 15183 | 15874 | uint8_t dpp_barset; |
|---|
| 15184 | 15875 | uint32_t dpp_offset; |
|---|
| 15185 | | - unsigned long pg_addr; |
|---|
| 15186 | 15876 | uint8_t wq_create_version; |
|---|
| 15877 | +#ifdef CONFIG_X86 |
|---|
| 15878 | + unsigned long pg_addr; |
|---|
| 15879 | +#endif |
|---|
| 15187 | 15880 | |
|---|
| 15188 | 15881 | /* sanity check on queue memory */ |
|---|
| 15189 | 15882 | if (!wq || !cq) |
|---|
| .. | .. |
|---|
| 15268 | 15961 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 15269 | 15962 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 15270 | 15963 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 15271 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 15964 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15272 | 15965 | "2503 WQ_CREATE mailbox failed with " |
|---|
| 15273 | 15966 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 15274 | 15967 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 15295 | 15988 | &wq_create->u.response); |
|---|
| 15296 | 15989 | if ((wq->db_format != LPFC_DB_LIST_FORMAT) && |
|---|
| 15297 | 15990 | (wq->db_format != LPFC_DB_RING_FORMAT)) { |
|---|
| 15298 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 15991 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15299 | 15992 | "3265 WQ[%d] doorbell format " |
|---|
| 15300 | 15993 | "not supported: x%x\n", |
|---|
| 15301 | 15994 | wq->queue_id, wq->db_format); |
|---|
| .. | .. |
|---|
| 15307 | 16000 | bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, |
|---|
| 15308 | 16001 | pci_barset); |
|---|
| 15309 | 16002 | if (!bar_memmap_p) { |
|---|
| 15310 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16003 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15311 | 16004 | "3263 WQ[%d] failed to memmap " |
|---|
| 15312 | 16005 | "pci barset:x%x\n", |
|---|
| 15313 | 16006 | wq->queue_id, pci_barset); |
|---|
| .. | .. |
|---|
| 15317 | 16010 | db_offset = wq_create->u.response.doorbell_offset; |
|---|
| 15318 | 16011 | if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && |
|---|
| 15319 | 16012 | (db_offset != LPFC_ULP1_WQ_DOORBELL)) { |
|---|
| 15320 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16013 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15321 | 16014 | "3252 WQ[%d] doorbell offset " |
|---|
| 15322 | 16015 | "not supported: x%x\n", |
|---|
| 15323 | 16016 | wq->queue_id, db_offset); |
|---|
| .. | .. |
|---|
| 15341 | 16034 | bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, |
|---|
| 15342 | 16035 | pci_barset); |
|---|
| 15343 | 16036 | if (!bar_memmap_p) { |
|---|
| 15344 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16037 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15345 | 16038 | "3267 WQ[%d] failed to memmap " |
|---|
| 15346 | 16039 | "pci barset:x%x\n", |
|---|
| 15347 | 16040 | wq->queue_id, pci_barset); |
|---|
| .. | .. |
|---|
| 15357 | 16050 | bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, |
|---|
| 15358 | 16051 | dpp_barset); |
|---|
| 15359 | 16052 | if (!bar_memmap_p) { |
|---|
| 15360 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16053 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15361 | 16054 | "3268 WQ[%d] failed to memmap " |
|---|
| 15362 | 16055 | "pci barset:x%x\n", |
|---|
| 15363 | 16056 | wq->queue_id, dpp_barset); |
|---|
| .. | .. |
|---|
| 15373 | 16066 | wq->queue_id, pci_barset, db_offset, |
|---|
| 15374 | 16067 | wq->dpp_id, dpp_barset, dpp_offset); |
|---|
| 15375 | 16068 | |
|---|
| 16069 | +#ifdef CONFIG_X86 |
|---|
| 15376 | 16070 | /* Enable combined writes for DPP aperture */ |
|---|
| 15377 | 16071 | pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; |
|---|
| 15378 | | -#ifdef CONFIG_X86 |
|---|
| 15379 | 16072 | rc = set_memory_wc(pg_addr, 1); |
|---|
| 15380 | 16073 | if (rc) { |
|---|
| 15381 | 16074 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| .. | .. |
|---|
| 15400 | 16093 | wq->subtype = subtype; |
|---|
| 15401 | 16094 | wq->host_index = 0; |
|---|
| 15402 | 16095 | wq->hba_index = 0; |
|---|
| 15403 | | - wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; |
|---|
| 16096 | + wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; |
|---|
| 15404 | 16097 | |
|---|
| 15405 | 16098 | /* link the wq onto the parent cq child list */ |
|---|
| 15406 | 16099 | list_add_tail(&wq->list, &cq->child_list); |
|---|
| .. | .. |
|---|
| 15415 | 16108 | * @hrq: The queue structure to use to create the header receive queue. |
|---|
| 15416 | 16109 | * @drq: The queue structure to use to create the data receive queue. |
|---|
| 15417 | 16110 | * @cq: The completion queue to bind this work queue to. |
|---|
| 16111 | + * @subtype: The subtype of the work queue indicating its functionality. |
|---|
| 15418 | 16112 | * |
|---|
| 15419 | 16113 | * This function creates a receive buffer queue pair , as detailed in @hrq and |
|---|
| 15420 | 16114 | * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command |
|---|
| .. | .. |
|---|
| 15481 | 16175 | } else { |
|---|
| 15482 | 16176 | switch (hrq->entry_count) { |
|---|
| 15483 | 16177 | default: |
|---|
| 15484 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 16178 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15485 | 16179 | "2535 Unsupported RQ count. (%d)\n", |
|---|
| 15486 | 16180 | hrq->entry_count); |
|---|
| 15487 | 16181 | if (hrq->entry_count < 512) { |
|---|
| 15488 | 16182 | status = -EINVAL; |
|---|
| 15489 | 16183 | goto out; |
|---|
| 15490 | 16184 | } |
|---|
| 15491 | | - /* otherwise default to smallest count (drop through) */ |
|---|
| 16185 | + fallthrough; /* otherwise default to smallest count */ |
|---|
| 15492 | 16186 | case 512: |
|---|
| 15493 | 16187 | bf_set(lpfc_rq_context_rqe_count, |
|---|
| 15494 | 16188 | &rq_create->u.request.context, |
|---|
| .. | .. |
|---|
| 15532 | 16226 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 15533 | 16227 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 15534 | 16228 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 15535 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16229 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15536 | 16230 | "2504 RQ_CREATE mailbox failed with " |
|---|
| 15537 | 16231 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 15538 | 16232 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 15550 | 16244 | &rq_create->u.response); |
|---|
| 15551 | 16245 | if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && |
|---|
| 15552 | 16246 | (hrq->db_format != LPFC_DB_RING_FORMAT)) { |
|---|
| 15553 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16247 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15554 | 16248 | "3262 RQ [%d] doorbell format not " |
|---|
| 15555 | 16249 | "supported: x%x\n", hrq->queue_id, |
|---|
| 15556 | 16250 | hrq->db_format); |
|---|
| .. | .. |
|---|
| 15562 | 16256 | &rq_create->u.response); |
|---|
| 15563 | 16257 | bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); |
|---|
| 15564 | 16258 | if (!bar_memmap_p) { |
|---|
| 15565 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16259 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15566 | 16260 | "3269 RQ[%d] failed to memmap pci " |
|---|
| 15567 | 16261 | "barset:x%x\n", hrq->queue_id, |
|---|
| 15568 | 16262 | pci_barset); |
|---|
| .. | .. |
|---|
| 15573 | 16267 | db_offset = rq_create->u.response.doorbell_offset; |
|---|
| 15574 | 16268 | if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && |
|---|
| 15575 | 16269 | (db_offset != LPFC_ULP1_RQ_DOORBELL)) { |
|---|
| 15576 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16270 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15577 | 16271 | "3270 RQ[%d] doorbell offset not " |
|---|
| 15578 | 16272 | "supported: x%x\n", hrq->queue_id, |
|---|
| 15579 | 16273 | db_offset); |
|---|
| .. | .. |
|---|
| 15594 | 16288 | hrq->subtype = subtype; |
|---|
| 15595 | 16289 | hrq->host_index = 0; |
|---|
| 15596 | 16290 | hrq->hba_index = 0; |
|---|
| 15597 | | - hrq->entry_repost = LPFC_RQ_REPOST; |
|---|
| 16291 | + hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; |
|---|
| 15598 | 16292 | |
|---|
| 15599 | 16293 | /* now create the data queue */ |
|---|
| 15600 | 16294 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
|---|
| .. | .. |
|---|
| 15618 | 16312 | } else { |
|---|
| 15619 | 16313 | switch (drq->entry_count) { |
|---|
| 15620 | 16314 | default: |
|---|
| 15621 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 16315 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15622 | 16316 | "2536 Unsupported RQ count. (%d)\n", |
|---|
| 15623 | 16317 | drq->entry_count); |
|---|
| 15624 | 16318 | if (drq->entry_count < 512) { |
|---|
| 15625 | 16319 | status = -EINVAL; |
|---|
| 15626 | 16320 | goto out; |
|---|
| 15627 | 16321 | } |
|---|
| 15628 | | - /* otherwise default to smallest count (drop through) */ |
|---|
| 16322 | + fallthrough; /* otherwise default to smallest count */ |
|---|
| 15629 | 16323 | case 512: |
|---|
| 15630 | 16324 | bf_set(lpfc_rq_context_rqe_count, |
|---|
| 15631 | 16325 | &rq_create->u.request.context, |
|---|
| .. | .. |
|---|
| 15687 | 16381 | drq->subtype = subtype; |
|---|
| 15688 | 16382 | drq->host_index = 0; |
|---|
| 15689 | 16383 | drq->hba_index = 0; |
|---|
| 15690 | | - drq->entry_repost = LPFC_RQ_REPOST; |
|---|
| 16384 | + drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; |
|---|
| 15691 | 16385 | |
|---|
| 15692 | 16386 | /* link the header and data RQs onto the parent cq child list */ |
|---|
| 15693 | 16387 | list_add_tail(&hrq->list, &cq->child_list); |
|---|
| .. | .. |
|---|
| 15704 | 16398 | * @hrqp: The queue structure array to use to create the header receive queues. |
|---|
| 15705 | 16399 | * @drqp: The queue structure array to use to create the data receive queues. |
|---|
| 15706 | 16400 | * @cqp: The completion queue array to bind these receive queues to. |
|---|
| 16401 | + * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc). |
|---|
| 15707 | 16402 | * |
|---|
| 15708 | 16403 | * This function creates a receive buffer queue pair , as detailed in @hrq and |
|---|
| 15709 | 16404 | * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command |
|---|
| .. | .. |
|---|
| 15755 | 16450 | LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, |
|---|
| 15756 | 16451 | LPFC_SLI4_MBX_NEMBED); |
|---|
| 15757 | 16452 | if (alloclen < length) { |
|---|
| 15758 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 16453 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15759 | 16454 | "3099 Allocated DMA memory size (%d) is " |
|---|
| 15760 | 16455 | "less than the requested DMA memory size " |
|---|
| 15761 | 16456 | "(%d)\n", alloclen, length); |
|---|
| .. | .. |
|---|
| 15845 | 16540 | hrq->subtype = subtype; |
|---|
| 15846 | 16541 | hrq->host_index = 0; |
|---|
| 15847 | 16542 | hrq->hba_index = 0; |
|---|
| 15848 | | - hrq->entry_repost = LPFC_RQ_REPOST; |
|---|
| 16543 | + hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; |
|---|
| 15849 | 16544 | |
|---|
| 15850 | 16545 | drq->db_format = LPFC_DB_RING_FORMAT; |
|---|
| 15851 | 16546 | drq->db_regaddr = phba->sli4_hba.RQDBregaddr; |
|---|
| .. | .. |
|---|
| 15854 | 16549 | drq->subtype = subtype; |
|---|
| 15855 | 16550 | drq->host_index = 0; |
|---|
| 15856 | 16551 | drq->hba_index = 0; |
|---|
| 15857 | | - drq->entry_repost = LPFC_RQ_REPOST; |
|---|
| 16552 | + drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; |
|---|
| 15858 | 16553 | |
|---|
| 15859 | 16554 | list_add_tail(&hrq->list, &cq->child_list); |
|---|
| 15860 | 16555 | list_add_tail(&drq->list, &cq->child_list); |
|---|
| .. | .. |
|---|
| 15865 | 16560 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 15866 | 16561 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 15867 | 16562 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 15868 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16563 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15869 | 16564 | "3120 RQ_CREATE mailbox failed with " |
|---|
| 15870 | 16565 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 15871 | 16566 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 15893 | 16588 | |
|---|
| 15894 | 16589 | /** |
|---|
| 15895 | 16590 | * lpfc_eq_destroy - Destroy an event Queue on the HBA |
|---|
| 16591 | + * @phba: HBA structure that indicates port to destroy a queue on. |
|---|
| 15896 | 16592 | * @eq: The queue structure associated with the queue to destroy. |
|---|
| 15897 | 16593 | * |
|---|
| 15898 | 16594 | * This function destroys a queue, as detailed in @eq by sending an mailbox |
|---|
| .. | .. |
|---|
| 15914 | 16610 | /* sanity check on queue memory */ |
|---|
| 15915 | 16611 | if (!eq) |
|---|
| 15916 | 16612 | return -ENODEV; |
|---|
| 16613 | + |
|---|
| 15917 | 16614 | mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 15918 | 16615 | if (!mbox) |
|---|
| 15919 | 16616 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 15934 | 16631 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 15935 | 16632 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 15936 | 16633 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 15937 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16634 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15938 | 16635 | "2505 EQ_DESTROY mailbox failed with " |
|---|
| 15939 | 16636 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 15940 | 16637 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 15949 | 16646 | |
|---|
| 15950 | 16647 | /** |
|---|
| 15951 | 16648 | * lpfc_cq_destroy - Destroy a Completion Queue on the HBA |
|---|
| 16649 | + * @phba: HBA structure that indicates port to destroy a queue on. |
|---|
| 15952 | 16650 | * @cq: The queue structure associated with the queue to destroy. |
|---|
| 15953 | 16651 | * |
|---|
| 15954 | 16652 | * This function destroys a queue, as detailed in @cq by sending an mailbox |
|---|
| .. | .. |
|---|
| 15989 | 16687 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 15990 | 16688 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 15991 | 16689 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 15992 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16690 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 15993 | 16691 | "2506 CQ_DESTROY mailbox failed with " |
|---|
| 15994 | 16692 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 15995 | 16693 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 16003 | 16701 | |
|---|
| 16004 | 16702 | /** |
|---|
| 16005 | 16703 | * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA |
|---|
| 16006 | | - * @qm: The queue structure associated with the queue to destroy. |
|---|
| 16704 | + * @phba: HBA structure that indicates port to destroy a queue on. |
|---|
| 16705 | + * @mq: The queue structure associated with the queue to destroy. |
|---|
| 16007 | 16706 | * |
|---|
| 16008 | 16707 | * This function destroys a queue, as detailed in @mq by sending an mailbox |
|---|
| 16009 | 16708 | * command, specific to the type of queue, to the HBA. |
|---|
| .. | .. |
|---|
| 16043 | 16742 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 16044 | 16743 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 16045 | 16744 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 16046 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16745 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 16047 | 16746 | "2507 MQ_DESTROY mailbox failed with " |
|---|
| 16048 | 16747 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 16049 | 16748 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 16057 | 16756 | |
|---|
| 16058 | 16757 | /** |
|---|
| 16059 | 16758 | * lpfc_wq_destroy - Destroy a Work Queue on the HBA |
|---|
| 16759 | + * @phba: HBA structure that indicates port to destroy a queue on. |
|---|
| 16060 | 16760 | * @wq: The queue structure associated with the queue to destroy. |
|---|
| 16061 | 16761 | * |
|---|
| 16062 | 16762 | * This function destroys a queue, as detailed in @wq by sending an mailbox |
|---|
| .. | .. |
|---|
| 16096 | 16796 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 16097 | 16797 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 16098 | 16798 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 16099 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16799 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 16100 | 16800 | "2508 WQ_DESTROY mailbox failed with " |
|---|
| 16101 | 16801 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 16102 | 16802 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 16112 | 16812 | |
|---|
| 16113 | 16813 | /** |
|---|
| 16114 | 16814 | * lpfc_rq_destroy - Destroy a Receive Queue on the HBA |
|---|
| 16115 | | - * @rq: The queue structure associated with the queue to destroy. |
|---|
| 16815 | + * @phba: HBA structure that indicates port to destroy a queue on. |
|---|
| 16816 | + * @hrq: The queue structure associated with the queue to destroy. |
|---|
| 16817 | + * @drq: The queue structure associated with the queue to destroy. |
|---|
| 16116 | 16818 | * |
|---|
| 16117 | 16819 | * This function destroys a queue, as detailed in @rq by sending an mailbox |
|---|
| 16118 | 16820 | * command, specific to the type of queue, to the HBA. |
|---|
| .. | .. |
|---|
| 16153 | 16855 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 16154 | 16856 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 16155 | 16857 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 16156 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16858 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 16157 | 16859 | "2509 RQ_DESTROY mailbox failed with " |
|---|
| 16158 | 16860 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 16159 | 16861 | shdr_status, shdr_add_status, rc); |
|---|
| 16160 | | - if (rc != MBX_TIMEOUT) |
|---|
| 16161 | | - mempool_free(mbox, hrq->phba->mbox_mem_pool); |
|---|
| 16862 | + mempool_free(mbox, hrq->phba->mbox_mem_pool); |
|---|
| 16162 | 16863 | return -ENXIO; |
|---|
| 16163 | 16864 | } |
|---|
| 16164 | 16865 | bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, |
|---|
| .. | .. |
|---|
| 16169 | 16870 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 16170 | 16871 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 16171 | 16872 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 16172 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16873 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 16173 | 16874 | "2510 RQ_DESTROY mailbox failed with " |
|---|
| 16174 | 16875 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 16175 | 16876 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 16217 | 16918 | union lpfc_sli4_cfg_shdr *shdr; |
|---|
| 16218 | 16919 | |
|---|
| 16219 | 16920 | if (xritag == NO_XRI) { |
|---|
| 16220 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 16921 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 16221 | 16922 | "0364 Invalid param:\n"); |
|---|
| 16222 | 16923 | return -EINVAL; |
|---|
| 16223 | 16924 | } |
|---|
| .. | .. |
|---|
| 16255 | 16956 | shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; |
|---|
| 16256 | 16957 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 16257 | 16958 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 16258 | | - if (rc != MBX_TIMEOUT) |
|---|
| 16959 | + if (!phba->sli4_hba.intr_enable) |
|---|
| 16960 | + mempool_free(mbox, phba->mbox_mem_pool); |
|---|
| 16961 | + else if (rc != MBX_TIMEOUT) |
|---|
| 16259 | 16962 | mempool_free(mbox, phba->mbox_mem_pool); |
|---|
| 16260 | 16963 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 16261 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16964 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 16262 | 16965 | "2511 POST_SGL mailbox failed with " |
|---|
| 16263 | 16966 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 16264 | 16967 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 16305 | 17008 | /** |
|---|
| 16306 | 17009 | * lpfc_sli4_free_xri - Release an xri for reuse. |
|---|
| 16307 | 17010 | * @phba: pointer to lpfc hba data structure. |
|---|
| 17011 | + * @xri: xri to release. |
|---|
| 16308 | 17012 | * |
|---|
| 16309 | 17013 | * This routine is invoked to release an xri to the pool of |
|---|
| 16310 | 17014 | * available rpis maintained by the driver. |
|---|
| .. | .. |
|---|
| 16320 | 17024 | /** |
|---|
| 16321 | 17025 | * lpfc_sli4_free_xri - Release an xri for reuse. |
|---|
| 16322 | 17026 | * @phba: pointer to lpfc hba data structure. |
|---|
| 17027 | + * @xri: xri to release. |
|---|
| 16323 | 17028 | * |
|---|
| 16324 | 17029 | * This routine is invoked to release an xri to the pool of |
|---|
| 16325 | 17030 | * available rpis maintained by the driver. |
|---|
| .. | .. |
|---|
| 16362 | 17067 | * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. |
|---|
| 16363 | 17068 | * @phba: pointer to lpfc hba data structure. |
|---|
| 16364 | 17069 | * @post_sgl_list: pointer to els sgl entry list. |
|---|
| 16365 | | - * @count: number of els sgl entries on the list. |
|---|
| 17070 | + * @post_cnt: number of els sgl entries on the list. |
|---|
| 16366 | 17071 | * |
|---|
| 16367 | 17072 | * This routine is invoked to post a block of driver's sgl pages to the |
|---|
| 16368 | 17073 | * HBA using non-embedded mailbox command. No Lock is held. This routine |
|---|
| .. | .. |
|---|
| 16389 | 17094 | reqlen = post_cnt * sizeof(struct sgl_page_pairs) + |
|---|
| 16390 | 17095 | sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); |
|---|
| 16391 | 17096 | if (reqlen > SLI4_PAGE_SIZE) { |
|---|
| 16392 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 17097 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 16393 | 17098 | "2559 Block sgl registration required DMA " |
|---|
| 16394 | 17099 | "size (%d) great than a page\n", reqlen); |
|---|
| 16395 | 17100 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 16405 | 17110 | LPFC_SLI4_MBX_NEMBED); |
|---|
| 16406 | 17111 | |
|---|
| 16407 | 17112 | if (alloclen < reqlen) { |
|---|
| 16408 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 17113 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 16409 | 17114 | "0285 Allocated DMA memory size (%d) is " |
|---|
| 16410 | 17115 | "less than the requested DMA memory " |
|---|
| 16411 | 17116 | "size (%d)\n", alloclen, reqlen); |
|---|
| .. | .. |
|---|
| 16450 | 17155 | shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; |
|---|
| 16451 | 17156 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 16452 | 17157 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 16453 | | - if (rc != MBX_TIMEOUT) |
|---|
| 17158 | + if (!phba->sli4_hba.intr_enable) |
|---|
| 17159 | + lpfc_sli4_mbox_cmd_free(phba, mbox); |
|---|
| 17160 | + else if (rc != MBX_TIMEOUT) |
|---|
| 16454 | 17161 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
|---|
| 16455 | 17162 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 16456 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 17163 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 16457 | 17164 | "2513 POST_SGL_BLOCK mailbox command failed " |
|---|
| 16458 | 17165 | "status x%x add_status x%x mbx status x%x\n", |
|---|
| 16459 | 17166 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 16463 | 17170 | } |
|---|
| 16464 | 17171 | |
|---|
| 16465 | 17172 | /** |
|---|
| 16466 | | - * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware |
|---|
| 17173 | + * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware |
|---|
| 16467 | 17174 | * @phba: pointer to lpfc hba data structure. |
|---|
| 16468 | | - * @sblist: pointer to scsi buffer list. |
|---|
| 17175 | + * @nblist: pointer to nvme buffer list. |
|---|
| 16469 | 17176 | * @count: number of scsi buffers on the list. |
|---|
| 16470 | 17177 | * |
|---|
| 16471 | 17178 | * This routine is invoked to post a block of @count scsi sgl pages from a |
|---|
| 16472 | | - * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. |
|---|
| 17179 | + * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. |
|---|
| 16473 | 17180 | * No Lock is held. |
|---|
| 16474 | 17181 | * |
|---|
| 16475 | 17182 | **/ |
|---|
| 16476 | | -int |
|---|
| 16477 | | -lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, |
|---|
| 16478 | | - struct list_head *sblist, |
|---|
| 16479 | | - int count) |
|---|
| 17183 | +static int |
|---|
| 17184 | +lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, |
|---|
| 17185 | + int count) |
|---|
| 16480 | 17186 | { |
|---|
| 16481 | | - struct lpfc_scsi_buf *psb; |
|---|
| 17187 | + struct lpfc_io_buf *lpfc_ncmd; |
|---|
| 16482 | 17188 | struct lpfc_mbx_post_uembed_sgl_page1 *sgl; |
|---|
| 16483 | 17189 | struct sgl_page_pairs *sgl_pg_pairs; |
|---|
| 16484 | 17190 | void *viraddr; |
|---|
| .. | .. |
|---|
| 16496 | 17202 | sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); |
|---|
| 16497 | 17203 | if (reqlen > SLI4_PAGE_SIZE) { |
|---|
| 16498 | 17204 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
|---|
| 16499 | | - "0217 Block sgl registration required DMA " |
|---|
| 17205 | + "6118 Block sgl registration required DMA " |
|---|
| 16500 | 17206 | "size (%d) great than a page\n", reqlen); |
|---|
| 16501 | 17207 | return -ENOMEM; |
|---|
| 16502 | 17208 | } |
|---|
| 16503 | 17209 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 16504 | 17210 | if (!mbox) { |
|---|
| 16505 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16506 | | - "0283 Failed to allocate mbox cmd memory\n"); |
|---|
| 17211 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17212 | + "6119 Failed to allocate mbox cmd memory\n"); |
|---|
| 16507 | 17213 | return -ENOMEM; |
|---|
| 16508 | 17214 | } |
|---|
| 16509 | 17215 | |
|---|
| 16510 | 17216 | /* Allocate DMA memory and set up the non-embedded mailbox command */ |
|---|
| 16511 | 17217 | alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, |
|---|
| 16512 | | - LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, |
|---|
| 16513 | | - LPFC_SLI4_MBX_NEMBED); |
|---|
| 17218 | + LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, |
|---|
| 17219 | + reqlen, LPFC_SLI4_MBX_NEMBED); |
|---|
| 16514 | 17220 | |
|---|
| 16515 | 17221 | if (alloclen < reqlen) { |
|---|
| 16516 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 16517 | | - "2561 Allocated DMA memory size (%d) is " |
|---|
| 17222 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17223 | + "6120 Allocated DMA memory size (%d) is " |
|---|
| 16518 | 17224 | "less than the requested DMA memory " |
|---|
| 16519 | 17225 | "size (%d)\n", alloclen, reqlen); |
|---|
| 16520 | 17226 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
|---|
| .. | .. |
|---|
| 16529 | 17235 | sgl_pg_pairs = &sgl->sgl_pg_pairs; |
|---|
| 16530 | 17236 | |
|---|
| 16531 | 17237 | pg_pairs = 0; |
|---|
| 16532 | | - list_for_each_entry(psb, sblist, list) { |
|---|
| 17238 | + list_for_each_entry(lpfc_ncmd, nblist, list) { |
|---|
| 16533 | 17239 | /* Set up the sge entry */ |
|---|
| 16534 | 17240 | sgl_pg_pairs->sgl_pg0_addr_lo = |
|---|
| 16535 | | - cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); |
|---|
| 17241 | + cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); |
|---|
| 16536 | 17242 | sgl_pg_pairs->sgl_pg0_addr_hi = |
|---|
| 16537 | | - cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); |
|---|
| 17243 | + cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); |
|---|
| 16538 | 17244 | if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) |
|---|
| 16539 | | - pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; |
|---|
| 17245 | + pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + |
|---|
| 17246 | + SGL_PAGE_SIZE; |
|---|
| 16540 | 17247 | else |
|---|
| 16541 | 17248 | pdma_phys_bpl1 = 0; |
|---|
| 16542 | 17249 | sgl_pg_pairs->sgl_pg1_addr_lo = |
|---|
| .. | .. |
|---|
| 16545 | 17252 | cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); |
|---|
| 16546 | 17253 | /* Keep the first xritag on the list */ |
|---|
| 16547 | 17254 | if (pg_pairs == 0) |
|---|
| 16548 | | - xritag_start = psb->cur_iocbq.sli4_xritag; |
|---|
| 17255 | + xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; |
|---|
| 16549 | 17256 | sgl_pg_pairs++; |
|---|
| 16550 | 17257 | pg_pairs++; |
|---|
| 16551 | 17258 | } |
|---|
| .. | .. |
|---|
| 16554 | 17261 | /* Perform endian conversion if necessary */ |
|---|
| 16555 | 17262 | sgl->word0 = cpu_to_le32(sgl->word0); |
|---|
| 16556 | 17263 | |
|---|
| 16557 | | - if (!phba->sli4_hba.intr_enable) |
|---|
| 17264 | + if (!phba->sli4_hba.intr_enable) { |
|---|
| 16558 | 17265 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); |
|---|
| 16559 | | - else { |
|---|
| 17266 | + } else { |
|---|
| 16560 | 17267 | mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); |
|---|
| 16561 | 17268 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); |
|---|
| 16562 | 17269 | } |
|---|
| 16563 | | - shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; |
|---|
| 17270 | + shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; |
|---|
| 16564 | 17271 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 16565 | 17272 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 16566 | | - if (rc != MBX_TIMEOUT) |
|---|
| 17273 | + if (!phba->sli4_hba.intr_enable) |
|---|
| 17274 | + lpfc_sli4_mbox_cmd_free(phba, mbox); |
|---|
| 17275 | + else if (rc != MBX_TIMEOUT) |
|---|
| 16567 | 17276 | lpfc_sli4_mbox_cmd_free(phba, mbox); |
|---|
| 16568 | 17277 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 16569 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 16570 | | - "2564 POST_SGL_BLOCK mailbox command failed " |
|---|
| 17278 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17279 | + "6125 POST_SGL_BLOCK mailbox command failed " |
|---|
| 16571 | 17280 | "status x%x add_status x%x mbx status x%x\n", |
|---|
| 16572 | 17281 | shdr_status, shdr_add_status, rc); |
|---|
| 16573 | 17282 | rc = -ENXIO; |
|---|
| 16574 | 17283 | } |
|---|
| 16575 | 17284 | return rc; |
|---|
| 17285 | +} |
|---|
| 17286 | + |
|---|
| 17287 | +/** |
|---|
| 17288 | + * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list |
|---|
| 17289 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 17290 | + * @post_nblist: pointer to the nvme buffer list. |
|---|
| 17291 | + * @sb_count: number of nvme buffers. |
|---|
| 17292 | + * |
|---|
| 17293 | + * This routine walks a list of nvme buffers that was passed in. It attempts |
|---|
| 17294 | + * to construct blocks of nvme buffer sgls which contains contiguous xris and |
|---|
| 17295 | + * uses the non-embedded SGL block post mailbox commands to post to the port. |
|---|
| 17296 | + * For single NVME buffer sgl with non-contiguous xri, if any, it shall use |
|---|
| 17297 | + * embedded SGL post mailbox command for posting. The @post_nblist passed in |
|---|
| 17298 | + * must be local list, thus no lock is needed when manipulate the list. |
|---|
| 17299 | + * |
|---|
| 17300 | + * Returns: 0 = failure, non-zero number of successfully posted buffers. |
|---|
| 17301 | + **/ |
|---|
| 17302 | +int |
|---|
| 17303 | +lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, |
|---|
| 17304 | + struct list_head *post_nblist, int sb_count) |
|---|
| 17305 | +{ |
|---|
| 17306 | + struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; |
|---|
| 17307 | + int status, sgl_size; |
|---|
| 17308 | + int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; |
|---|
| 17309 | + dma_addr_t pdma_phys_sgl1; |
|---|
| 17310 | + int last_xritag = NO_XRI; |
|---|
| 17311 | + int cur_xritag; |
|---|
| 17312 | + LIST_HEAD(prep_nblist); |
|---|
| 17313 | + LIST_HEAD(blck_nblist); |
|---|
| 17314 | + LIST_HEAD(nvme_nblist); |
|---|
| 17315 | + |
|---|
| 17316 | + /* sanity check */ |
|---|
| 17317 | + if (sb_count <= 0) |
|---|
| 17318 | + return -EINVAL; |
|---|
| 17319 | + |
|---|
| 17320 | + sgl_size = phba->cfg_sg_dma_buf_size; |
|---|
| 17321 | + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { |
|---|
| 17322 | + list_del_init(&lpfc_ncmd->list); |
|---|
| 17323 | + block_cnt++; |
|---|
| 17324 | + if ((last_xritag != NO_XRI) && |
|---|
| 17325 | + (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { |
|---|
| 17326 | + /* a hole in xri block, form a sgl posting block */ |
|---|
| 17327 | + list_splice_init(&prep_nblist, &blck_nblist); |
|---|
| 17328 | + post_cnt = block_cnt - 1; |
|---|
| 17329 | + /* prepare list for next posting block */ |
|---|
| 17330 | + list_add_tail(&lpfc_ncmd->list, &prep_nblist); |
|---|
| 17331 | + block_cnt = 1; |
|---|
| 17332 | + } else { |
|---|
| 17333 | + /* prepare list for next posting block */ |
|---|
| 17334 | + list_add_tail(&lpfc_ncmd->list, &prep_nblist); |
|---|
| 17335 | + /* enough sgls for non-embed sgl mbox command */ |
|---|
| 17336 | + if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { |
|---|
| 17337 | + list_splice_init(&prep_nblist, &blck_nblist); |
|---|
| 17338 | + post_cnt = block_cnt; |
|---|
| 17339 | + block_cnt = 0; |
|---|
| 17340 | + } |
|---|
| 17341 | + } |
|---|
| 17342 | + num_posting++; |
|---|
| 17343 | + last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; |
|---|
| 17344 | + |
|---|
| 17345 | + /* end of repost sgl list condition for NVME buffers */ |
|---|
| 17346 | + if (num_posting == sb_count) { |
|---|
| 17347 | + if (post_cnt == 0) { |
|---|
| 17348 | + /* last sgl posting block */ |
|---|
| 17349 | + list_splice_init(&prep_nblist, &blck_nblist); |
|---|
| 17350 | + post_cnt = block_cnt; |
|---|
| 17351 | + } else if (block_cnt == 1) { |
|---|
| 17352 | + /* last single sgl with non-contiguous xri */ |
|---|
| 17353 | + if (sgl_size > SGL_PAGE_SIZE) |
|---|
| 17354 | + pdma_phys_sgl1 = |
|---|
| 17355 | + lpfc_ncmd->dma_phys_sgl + |
|---|
| 17356 | + SGL_PAGE_SIZE; |
|---|
| 17357 | + else |
|---|
| 17358 | + pdma_phys_sgl1 = 0; |
|---|
| 17359 | + cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; |
|---|
| 17360 | + status = lpfc_sli4_post_sgl( |
|---|
| 17361 | + phba, lpfc_ncmd->dma_phys_sgl, |
|---|
| 17362 | + pdma_phys_sgl1, cur_xritag); |
|---|
| 17363 | + if (status) { |
|---|
| 17364 | + /* Post error. Buffer unavailable. */ |
|---|
| 17365 | + lpfc_ncmd->flags |= |
|---|
| 17366 | + LPFC_SBUF_NOT_POSTED; |
|---|
| 17367 | + } else { |
|---|
| 17368 | + /* Post success. Buffer available. */ 
|---|
| 17369 | + lpfc_ncmd->flags &= |
|---|
| 17370 | + ~LPFC_SBUF_NOT_POSTED; |
|---|
| 17371 | + lpfc_ncmd->status = IOSTAT_SUCCESS; |
|---|
| 17372 | + num_posted++; |
|---|
| 17373 | + } |
|---|
| 17374 | + /* success, put on NVME buffer sgl list */ |
|---|
| 17375 | + list_add_tail(&lpfc_ncmd->list, &nvme_nblist); |
|---|
| 17376 | + } |
|---|
| 17377 | + } |
|---|
| 17378 | + |
|---|
| 17379 | + /* continue until a nembed page worth of sgls */ |
|---|
| 17380 | + if (post_cnt == 0) |
|---|
| 17381 | + continue; |
|---|
| 17382 | + |
|---|
| 17383 | + /* post block of NVME buffer list sgls */ |
|---|
| 17384 | + status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, |
|---|
| 17385 | + post_cnt); |
|---|
| 17386 | + |
|---|
| 17387 | + /* don't reset xritag due to hole in xri block */ 
|---|
| 17388 | + if (block_cnt == 0) |
|---|
| 17389 | + last_xritag = NO_XRI; |
|---|
| 17390 | + |
|---|
| 17391 | + /* reset NVME buffer post count for next round of posting */ |
|---|
| 17392 | + post_cnt = 0; |
|---|
| 17393 | + |
|---|
| 17394 | + /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ |
|---|
| 17395 | + while (!list_empty(&blck_nblist)) { |
|---|
| 17396 | + list_remove_head(&blck_nblist, lpfc_ncmd, |
|---|
| 17397 | + struct lpfc_io_buf, list); |
|---|
| 17398 | + if (status) { |
|---|
| 17399 | + /* Post error. Mark buffer unavailable. */ |
|---|
| 17400 | + lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; |
|---|
| 17401 | + } else { |
|---|
| 17402 | + /* Post success, Mark buffer available. */ |
|---|
| 17403 | + lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; |
|---|
| 17404 | + lpfc_ncmd->status = IOSTAT_SUCCESS; |
|---|
| 17405 | + num_posted++; |
|---|
| 17406 | + } |
|---|
| 17407 | + list_add_tail(&lpfc_ncmd->list, &nvme_nblist); |
|---|
| 17408 | + } |
|---|
| 17409 | + } |
|---|
| 17410 | + /* Push NVME buffers with sgl posted to the available list */ |
|---|
| 17411 | + lpfc_io_buf_replenish(phba, &nvme_nblist); |
|---|
| 17412 | + |
|---|
| 17413 | + return num_posted; |
|---|
| 16576 | 17414 | } |
|---|
| 16577 | 17415 | |
|---|
| 16578 | 17416 | /** |
|---|
| .. | .. |
|---|
| 16607 | 17445 | case FC_RCTL_ELS_REP: /* extended link services reply */ |
|---|
| 16608 | 17446 | case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ |
|---|
| 16609 | 17447 | case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ |
|---|
| 16610 | | - case FC_RCTL_BA_NOP: /* basic link service NOP */ |
|---|
| 16611 | 17448 | case FC_RCTL_BA_ABTS: /* basic link service abort */ |
|---|
| 16612 | 17449 | case FC_RCTL_BA_RMC: /* remove connection */ |
|---|
| 16613 | 17450 | case FC_RCTL_BA_ACC: /* basic accept */ |
|---|
| .. | .. |
|---|
| 16628 | 17465 | fc_vft_hdr = (struct fc_vft_header *)fc_hdr; |
|---|
| 16629 | 17466 | fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; |
|---|
| 16630 | 17467 | return lpfc_fc_frame_check(phba, fc_hdr); |
|---|
| 17468 | + case FC_RCTL_BA_NOP: /* basic link service NOP */ |
|---|
| 16631 | 17469 | default: |
|---|
| 16632 | 17470 | goto drop; |
|---|
| 16633 | 17471 | } |
|---|
| 16634 | | - |
|---|
| 16635 | | -#define FC_TYPE_VENDOR_UNIQUE 0xFF |
|---|
| 16636 | 17472 | |
|---|
| 16637 | 17473 | switch (fc_hdr->fh_type) { |
|---|
| 16638 | 17474 | case FC_TYPE_BLS: |
|---|
| .. | .. |
|---|
| 16640 | 17476 | case FC_TYPE_FCP: |
|---|
| 16641 | 17477 | case FC_TYPE_CT: |
|---|
| 16642 | 17478 | case FC_TYPE_NVME: |
|---|
| 16643 | | - case FC_TYPE_VENDOR_UNIQUE: |
|---|
| 16644 | 17479 | break; |
|---|
| 16645 | 17480 | case FC_TYPE_IP: |
|---|
| 16646 | 17481 | case FC_TYPE_ILS: |
|---|
| .. | .. |
|---|
| 16687 | 17522 | * @phba: Pointer to the HBA structure to search for the vport on |
|---|
| 16688 | 17523 | * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) |
|---|
| 16689 | 17524 | * @fcfi: The FC Fabric ID that the frame came from |
|---|
| 17525 | + * @did: Destination ID to match against |
|---|
| 16690 | 17526 | * |
|---|
| 16691 | 17527 | * This function searches the @phba for a vport that matches the content of the |
|---|
| 16692 | 17528 | * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the |
|---|
| .. | .. |
|---|
| 16824 | 17660 | |
|---|
| 16825 | 17661 | /** |
|---|
| 16826 | 17662 | * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences |
|---|
| 17663 | + * @vport: pointer to a virtual port 
|---|
| 16827 | 17664 | * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame |
|---|
| 16828 | 17665 | * |
|---|
| 16829 | 17666 | * This function searches through the existing incomplete sequences that have |
|---|
| .. | .. |
|---|
| 17024 | 17861 | |
|---|
| 17025 | 17862 | /* Failure means BLS ABORT RSP did not get delivered to remote node*/ |
|---|
| 17026 | 17863 | if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) |
|---|
| 17027 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 17864 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17028 | 17865 | "3154 BLS ABORT RSP failed, data: x%x/x%x\n", |
|---|
| 17029 | 17866 | rsp_iocbq->iocb.ulpStatus, |
|---|
| 17030 | 17867 | rsp_iocbq->iocb.un.ulpWord[4]); |
|---|
| .. | .. |
|---|
| 17053 | 17890 | |
|---|
| 17054 | 17891 | /** |
|---|
| 17055 | 17892 | * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort |
|---|
| 17056 | | - * @phba: Pointer to HBA context object. |
|---|
| 17893 | + * @vport: pointer to a virtual port. 
|---|
| 17057 | 17894 | * @fc_hdr: pointer to a FC frame header. |
|---|
| 17895 | + * @aborted: was the partially assembled receive sequence successfully aborted |
|---|
| 17058 | 17896 | * |
|---|
| 17059 | 17897 | * This function sends a basic response to a previous unsol sequence abort |
|---|
| 17060 | 17898 | * event after aborting the sequence handling. |
|---|
| .. | .. |
|---|
| 17123 | 17961 | icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; |
|---|
| 17124 | 17962 | ctiocb->context1 = lpfc_nlp_get(ndlp); |
|---|
| 17125 | 17963 | |
|---|
| 17126 | | - ctiocb->iocb_cmpl = NULL; |
|---|
| 17127 | 17964 | ctiocb->vport = phba->pport; |
|---|
| 17128 | 17965 | ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; |
|---|
| 17129 | 17966 | ctiocb->sli4_lxritag = NO_XRI; |
|---|
| .. | .. |
|---|
| 17187 | 18024 | |
|---|
| 17188 | 18025 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); |
|---|
| 17189 | 18026 | if (rc == IOCB_ERROR) { |
|---|
| 17190 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
|---|
| 18027 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17191 | 18028 | "2925 Failed to issue CT ABTS RSP x%x on " |
|---|
| 17192 | 18029 | "xri x%x, Data x%x\n", |
|---|
| 17193 | 18030 | icmd->un.xseq64.w5.hcsw.Rctl, oxid, |
|---|
| .. | .. |
|---|
| 17207 | 18044 | * receive sequence is only partially assembled by the driver, it shall abort 
|---|
| 17208 | 18045 | * the partially assembled frames for the sequence. Otherwise, if the |
|---|
| 17209 | 18046 | * unsolicited receive sequence has been completely assembled and passed to |
|---|
| 17210 | | - * the Upper Layer Protocol (UPL), it then mark the per oxid status for the |
|---|
| 18047 | + * the Upper Layer Protocol (ULP), it then marks the per oxid status for the 
|---|
| 17211 | 18048 | * unsolicited sequence has been aborted. After that, it will issue a basic |
|---|
| 17212 | 18049 | * accept to accept the abort. |
|---|
| 17213 | 18050 | **/ |
|---|
| .. | .. |
|---|
| 17294 | 18131 | /** |
|---|
| 17295 | 18132 | * lpfc_prep_seq - Prep sequence for ULP processing |
|---|
| 17296 | 18133 | * @vport: Pointer to the vport on which this sequence was received |
|---|
| 17297 | | - * @dmabuf: pointer to a dmabuf that describes the FC sequence |
|---|
| 18134 | + * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence |
|---|
| 17298 | 18135 | * |
|---|
| 17299 | 18136 | * This function takes a sequence, described by a list of frames, and creates |
|---|
| 17300 | 18137 | * a list of iocbq structures to describe the sequence. This iocbq list will be |
|---|
| .. | .. |
|---|
| 17437 | 18274 | fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; |
|---|
| 17438 | 18275 | iocbq = lpfc_prep_seq(vport, seq_dmabuf); |
|---|
| 17439 | 18276 | if (!iocbq) { |
|---|
| 17440 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 18277 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17441 | 18278 | "2707 Ring %d handler: Failed to allocate " |
|---|
| 17442 | 18279 | "iocb Rctl x%x Type x%x received\n", |
|---|
| 17443 | 18280 | LPFC_ELS_RING, |
|---|
| .. | .. |
|---|
| 17447 | 18284 | if (!lpfc_complete_unsol_iocb(phba, |
|---|
| 17448 | 18285 | phba->sli4_hba.els_wq->pring, |
|---|
| 17449 | 18286 | iocbq, fc_hdr->fh_r_ctl, |
|---|
| 17450 | | - fc_hdr->fh_type)) |
|---|
| 17451 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 18287 | + fc_hdr->fh_type)) { |
|---|
| 18288 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17452 | 18289 | "2540 Ring %d handler: unexpected Rctl " |
|---|
| 17453 | 18290 | "x%x Type x%x received\n", |
|---|
| 17454 | 18291 | LPFC_ELS_RING, |
|---|
| 17455 | 18292 | fc_hdr->fh_r_ctl, fc_hdr->fh_type); |
|---|
| 18293 | + lpfc_in_buf_free(phba, &seq_dmabuf->dbuf); |
|---|
| 18294 | + } |
|---|
| 17456 | 18295 | |
|---|
| 17457 | 18296 | /* Free iocb created in lpfc_prep_seq */ |
|---|
| 17458 | 18297 | list_for_each_entry_safe(curr_iocb, next_iocb, |
|---|
| .. | .. |
|---|
| 17473 | 18312 | dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); |
|---|
| 17474 | 18313 | kfree(pcmd); |
|---|
| 17475 | 18314 | lpfc_sli_release_iocbq(phba, cmdiocb); |
|---|
| 18315 | + lpfc_drain_txq(phba); |
|---|
| 17476 | 18316 | } |
|---|
| 17477 | 18317 | |
|---|
| 17478 | 18318 | static void |
|---|
| .. | .. |
|---|
| 17486 | 18326 | struct lpfc_dmabuf *pcmd = NULL; |
|---|
| 17487 | 18327 | uint32_t frame_len; |
|---|
| 17488 | 18328 | int rc; |
|---|
| 18329 | + unsigned long iflags; |
|---|
| 17489 | 18330 | |
|---|
| 17490 | 18331 | fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; |
|---|
| 17491 | 18332 | frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); |
|---|
| 17492 | 18333 | |
|---|
| 17493 | 18334 | /* Send the received frame back */ |
|---|
| 17494 | 18335 | iocbq = lpfc_sli_get_iocbq(phba); |
|---|
| 17495 | | - if (!iocbq) |
|---|
| 17496 | | - goto exit; |
|---|
| 18336 | + if (!iocbq) { |
|---|
| 18337 | + /* Queue cq event and wakeup worker thread to process it */ |
|---|
| 18338 | + spin_lock_irqsave(&phba->hbalock, iflags); |
|---|
| 18339 | + list_add_tail(&dmabuf->cq_event.list, |
|---|
| 18340 | + &phba->sli4_hba.sp_queue_event); |
|---|
| 18341 | + phba->hba_flag |= HBA_SP_QUEUE_EVT; |
|---|
| 18342 | + spin_unlock_irqrestore(&phba->hbalock, iflags); |
|---|
| 18343 | + lpfc_worker_wake_up(phba); |
|---|
| 18344 | + return; |
|---|
| 18345 | + } |
|---|
| 17497 | 18346 | |
|---|
| 17498 | 18347 | /* Allocate buffer for command payload */ |
|---|
| 17499 | 18348 | pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); |
|---|
| .. | .. |
|---|
| 17557 | 18406 | /** |
|---|
| 17558 | 18407 | * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware |
|---|
| 17559 | 18408 | * @phba: Pointer to HBA context object. |
|---|
| 18409 | + * @dmabuf: Pointer to a dmabuf that describes the FC sequence. |
|---|
| 17560 | 18410 | * |
|---|
| 17561 | 18411 | * This function is called with no lock held. This function processes all |
|---|
| 17562 | 18412 | * the received buffers and gives it to upper layers when a received buffer |
|---|
| .. | .. |
|---|
| 17578 | 18428 | /* Process each received buffer */ |
|---|
| 17579 | 18429 | fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; |
|---|
| 17580 | 18430 | |
|---|
| 18431 | + if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || |
|---|
| 18432 | + fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { |
|---|
| 18433 | + vport = phba->pport; |
|---|
| 18434 | + /* Handle MDS Loopback frames */ |
|---|
| 18435 | + if (!(phba->pport->load_flag & FC_UNLOADING)) |
|---|
| 18436 | + lpfc_sli4_handle_mds_loopback(vport, dmabuf); |
|---|
| 18437 | + else |
|---|
| 18438 | + lpfc_in_buf_free(phba, &dmabuf->dbuf); |
|---|
| 18439 | + return; |
|---|
| 18440 | + } |
|---|
| 18441 | + |
|---|
| 17581 | 18442 | /* check to see if this a valid type of frame */ |
|---|
| 17582 | 18443 | if (lpfc_fc_frame_check(phba, fc_hdr)) { |
|---|
| 17583 | 18444 | lpfc_in_buf_free(phba, &dmabuf->dbuf); |
|---|
| .. | .. |
|---|
| 17594 | 18455 | |
|---|
| 17595 | 18456 | if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { |
|---|
| 17596 | 18457 | vport = phba->pport; |
|---|
| 18458 | + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
|---|
| 18459 | + "2023 MDS Loopback %d bytes\n", |
|---|
| 18460 | + bf_get(lpfc_rcqe_length, |
|---|
| 18461 | + &dmabuf->cq_event.cqe.rcqe_cmpl)); |
|---|
| 17597 | 18462 | /* Handle MDS Loopback frames */ |
|---|
| 17598 | 18463 | lpfc_sli4_handle_mds_loopback(vport, dmabuf); |
|---|
| 17599 | 18464 | return; |
|---|
| .. | .. |
|---|
| 17691 | 18556 | |
|---|
| 17692 | 18557 | rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); |
|---|
| 17693 | 18558 | if (rc != MBX_SUCCESS) { |
|---|
| 17694 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 18559 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17695 | 18560 | "2008 Error %d posting all rpi " |
|---|
| 17696 | 18561 | "headers\n", rc); |
|---|
| 17697 | 18562 | rc = -EIO; |
|---|
| .. | .. |
|---|
| 17737 | 18602 | /* The port is notified of the header region via a mailbox command. */ |
|---|
| 17738 | 18603 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 17739 | 18604 | if (!mboxq) { |
|---|
| 17740 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 18605 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17741 | 18606 | "2001 Unable to allocate memory for issuing " |
|---|
| 17742 | 18607 | "SLI_CONFIG_SPECIAL mailbox command\n"); |
|---|
| 17743 | 18608 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 17764 | 18629 | shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; |
|---|
| 17765 | 18630 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 17766 | 18631 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 17767 | | - if (rc != MBX_TIMEOUT) |
|---|
| 17768 | | - mempool_free(mboxq, phba->mbox_mem_pool); |
|---|
| 18632 | + mempool_free(mboxq, phba->mbox_mem_pool); |
|---|
| 17769 | 18633 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 17770 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 18634 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17771 | 18635 | "2514 POST_RPI_HDR mailbox failed with " |
|---|
| 17772 | 18636 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 17773 | 18637 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 17822 | 18686 | phba->sli4_hba.max_cfg_param.rpi_used++; |
|---|
| 17823 | 18687 | phba->sli4_hba.rpi_count++; |
|---|
| 17824 | 18688 | } |
|---|
| 17825 | | - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
|---|
| 17826 | | - "0001 rpi:%x max:%x lim:%x\n", |
|---|
| 18689 | + lpfc_printf_log(phba, KERN_INFO, |
|---|
| 18690 | + LOG_NODE | LOG_DISCOVERY, |
|---|
| 18691 | + "0001 Allocated rpi:x%x max:x%x lim:x%x\n", |
|---|
| 17827 | 18692 | (int) rpi, max_rpi, rpi_limit); |
|---|
| 17828 | 18693 | |
|---|
| 17829 | 18694 | /* |
|---|
| .. | .. |
|---|
| 17856 | 18721 | if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { |
|---|
| 17857 | 18722 | rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); |
|---|
| 17858 | 18723 | if (!rpi_hdr) { |
|---|
| 17859 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 18724 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17860 | 18725 | "2002 Error Could not grow rpi " |
|---|
| 17861 | 18726 | "count\n"); |
|---|
| 17862 | 18727 | } else { |
|---|
| .. | .. |
|---|
| 17872 | 18737 | /** |
|---|
| 17873 | 18738 | * lpfc_sli4_free_rpi - Release an rpi for reuse. |
|---|
| 17874 | 18739 | * @phba: pointer to lpfc hba data structure. |
|---|
| 18740 | + * @rpi: rpi to free |
|---|
| 17875 | 18741 | * |
|---|
| 17876 | 18742 | * This routine is invoked to release an rpi to the pool of |
|---|
| 17877 | 18743 | * available rpis maintained by the driver. |
|---|
| .. | .. |
|---|
| 17889 | 18755 | if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { |
|---|
| 17890 | 18756 | phba->sli4_hba.rpi_count--; |
|---|
| 17891 | 18757 | phba->sli4_hba.max_cfg_param.rpi_used--; |
|---|
| 18758 | + } else { |
|---|
| 18759 | + lpfc_printf_log(phba, KERN_INFO, |
|---|
| 18760 | + LOG_NODE | LOG_DISCOVERY, |
|---|
| 18761 | + "2016 rpi %x not inuse\n", |
|---|
| 18762 | + rpi); |
|---|
| 17892 | 18763 | } |
|---|
| 17893 | 18764 | } |
|---|
| 17894 | 18765 | |
|---|
| 17895 | 18766 | /** |
|---|
| 17896 | 18767 | * lpfc_sli4_free_rpi - Release an rpi for reuse. |
|---|
| 17897 | 18768 | * @phba: pointer to lpfc hba data structure. |
|---|
| 18769 | + * @rpi: rpi to free |
|---|
| 17898 | 18770 | * |
|---|
| 17899 | 18771 | * This routine is invoked to release an rpi to the pool of |
|---|
| 17900 | 18772 | * available rpis maintained by the driver. |
|---|
| .. | .. |
|---|
| 17924 | 18796 | |
|---|
| 17925 | 18797 | /** |
|---|
| 17926 | 18798 | * lpfc_sli4_resume_rpi - Remove the rpi bitmask region |
|---|
| 17927 | | - * @phba: pointer to lpfc hba data structure. |
|---|
| 18799 | + * @ndlp: pointer to lpfc nodelist data structure. |
|---|
| 18800 | + * @cmpl: completion call-back. |
|---|
| 18801 | + * @arg: data to load as MBox 'caller buffer information' |
|---|
| 17928 | 18802 | * |
|---|
| 17929 | 18803 | * This routine is invoked to remove the memory region that |
|---|
| 17930 | 18804 | * provided rpi via a bitmask. |
|---|
| .. | .. |
|---|
| 17946 | 18820 | lpfc_resume_rpi(mboxq, ndlp); |
|---|
| 17947 | 18821 | if (cmpl) { |
|---|
| 17948 | 18822 | mboxq->mbox_cmpl = cmpl; |
|---|
| 17949 | | - mboxq->context1 = arg; |
|---|
| 17950 | | - mboxq->context2 = ndlp; |
|---|
| 18823 | + mboxq->ctx_buf = arg; |
|---|
| 18824 | + mboxq->ctx_ndlp = ndlp; |
|---|
| 17951 | 18825 | } else |
|---|
| 17952 | 18826 | mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
|---|
| 17953 | 18827 | mboxq->vport = ndlp->vport; |
|---|
| 17954 | 18828 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); |
|---|
| 17955 | 18829 | if (rc == MBX_NOT_FINISHED) { |
|---|
| 17956 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 18830 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17957 | 18831 | "2010 Resume RPI Mailbox failed " |
|---|
| 17958 | 18832 | "status %d, mbxStatus x%x\n", rc, |
|---|
| 17959 | 18833 | bf_get(lpfc_mqe_status, &mboxq->u.mqe)); |
|---|
| .. | .. |
|---|
| 17988 | 18862 | mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); |
|---|
| 17989 | 18863 | rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); |
|---|
| 17990 | 18864 | if (rc != MBX_SUCCESS) { |
|---|
| 17991 | | - lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, |
|---|
| 18865 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 17992 | 18866 | "2022 INIT VPI Mailbox failed " |
|---|
| 17993 | 18867 | "status %d, mbxStatus x%x\n", rc, |
|---|
| 17994 | 18868 | bf_get(lpfc_mqe_status, &mboxq->u.mqe)); |
|---|
| .. | .. |
|---|
| 18024 | 18898 | |
|---|
| 18025 | 18899 | if ((shdr_status || shdr_add_status) && |
|---|
| 18026 | 18900 | (shdr_status != STATUS_FCF_IN_USE)) |
|---|
| 18027 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 18901 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 18028 | 18902 | "2558 ADD_FCF_RECORD mailbox failed with " |
|---|
| 18029 | 18903 | "status x%x add_status x%x\n", |
|---|
| 18030 | 18904 | shdr_status, shdr_add_status); |
|---|
| .. | .. |
|---|
| 18054 | 18928 | |
|---|
| 18055 | 18929 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 18056 | 18930 | if (!mboxq) { |
|---|
| 18057 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 18931 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 18058 | 18932 | "2009 Failed to allocate mbox for ADD_FCF cmd\n"); |
|---|
| 18059 | 18933 | return -ENOMEM; |
|---|
| 18060 | 18934 | } |
|---|
| .. | .. |
|---|
| 18067 | 18941 | LPFC_MBOX_OPCODE_FCOE_ADD_FCF, |
|---|
| 18068 | 18942 | req_len, LPFC_SLI4_MBX_NEMBED); |
|---|
| 18069 | 18943 | if (alloc_len < req_len) { |
|---|
| 18070 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 18944 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 18071 | 18945 | "2523 Allocated DMA memory size (x%x) is " |
|---|
| 18072 | 18946 | "less than the requested DMA memory " |
|---|
| 18073 | 18947 | "size (x%x)\n", alloc_len, req_len); |
|---|
| .. | .. |
|---|
| 18100 | 18974 | mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; |
|---|
| 18101 | 18975 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); |
|---|
| 18102 | 18976 | if (rc == MBX_NOT_FINISHED) { |
|---|
| 18103 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 18977 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 18104 | 18978 | "2515 ADD_FCF_RECORD mailbox failed with " |
|---|
| 18105 | 18979 | "status 0x%x\n", rc); |
|---|
| 18106 | 18980 | lpfc_sli4_mbox_cmd_free(phba, mboxq); |
|---|
| .. | .. |
|---|
| 18173 | 19047 | phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; |
|---|
| 18174 | 19048 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 18175 | 19049 | if (!mboxq) { |
|---|
| 18176 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 19050 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 18177 | 19051 | "2000 Failed to allocate mbox for " |
|---|
| 18178 | 19052 | "READ_FCF cmd\n"); |
|---|
| 18179 | 19053 | error = -ENOMEM; |
|---|
| .. | .. |
|---|
| 18308 | 19182 | |
|---|
| 18309 | 19183 | /** |
|---|
| 18310 | 19184 | * lpfc_check_next_fcf_pri_level |
|---|
| 18311 | | - * phba pointer to the lpfc_hba struct for this port. |
|---|
| 19185 | + * @phba: pointer to the lpfc_hba struct for this port. |
|---|
| 18312 | 19186 | * This routine is called from the lpfc_sli4_fcf_rr_next_index_get |
|---|
| 18313 | 19187 | * routine when the rr_bmask is empty. The FCF indices are put into the 
|---|
| 18314 | 19188 | * rr_bmask based on their priority level. Starting from the highest priority |
|---|
| .. | .. |
|---|
| 18473 | 19347 | /** |
|---|
| 18474 | 19348 | * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index |
|---|
| 18475 | 19349 | * @phba: pointer to lpfc hba data structure. |
|---|
| 19350 | + * @fcf_index: index into the FCF table to 'set' |
|---|
| 18476 | 19351 | * |
|---|
| 18477 | 19352 | * This routine sets the FCF record index in to the eligible bmask for |
|---|
| 18478 | 19353 | * roundrobin failover search. It checks to make sure that the index |
|---|
| .. | .. |
|---|
| 18505 | 19380 | /** |
|---|
| 18506 | 19381 | * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index |
|---|
| 18507 | 19382 | * @phba: pointer to lpfc hba data structure. |
|---|
| 19383 | + * @fcf_index: index into the FCF table to 'clear' |
|---|
| 18508 | 19384 | * |
|---|
| 18509 | 19385 | * This routine clears the FCF record index from the eligible bmask for |
|---|
| 18510 | 19386 | * roundrobin failover search. It checks to make sure that the index |
|---|
| .. | .. |
|---|
| 18542 | 19418 | /** |
|---|
| 18543 | 19419 | * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table |
|---|
| 18544 | 19420 | * @phba: pointer to lpfc hba data structure. |
|---|
| 19421 | + * @mbox: An allocated pointer to type LPFC_MBOXQ_t |
|---|
| 18545 | 19422 | * |
|---|
| 18546 | 19423 | * This routine is the completion routine for the rediscover FCF table mailbox |
|---|
| 18547 | 19424 | * command. If the mailbox command returned failure, it will try to stop the |
|---|
| .. | .. |
|---|
| 18616 | 19493 | |
|---|
| 18617 | 19494 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 18618 | 19495 | if (!mbox) { |
|---|
| 18619 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 19496 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 18620 | 19497 | "2745 Failed to allocate mbox for " |
|---|
| 18621 | 19498 | "requesting FCF rediscover.\n"); |
|---|
| 18622 | 19499 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 18684 | 19561 | LPFC_MBOXQ_t *pmb = NULL; |
|---|
| 18685 | 19562 | MAILBOX_t *mb; |
|---|
| 18686 | 19563 | uint32_t offset = 0; |
|---|
| 18687 | | - int rc; |
|---|
| 19564 | + int i, rc; |
|---|
| 18688 | 19565 | |
|---|
| 18689 | 19566 | if (!rgn23_data) |
|---|
| 18690 | 19567 | return 0; |
|---|
| 18691 | 19568 | |
|---|
| 18692 | 19569 | pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 18693 | 19570 | if (!pmb) { |
|---|
| 18694 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 19571 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 18695 | 19572 | "2600 failed to allocate mailbox memory\n"); |
|---|
| 18696 | 19573 | return 0; |
|---|
| 18697 | 19574 | } |
|---|
| .. | .. |
|---|
| 18714 | 19591 | */ |
|---|
| 18715 | 19592 | if (mb->un.varDmp.word_cnt == 0) |
|---|
| 18716 | 19593 | break; |
|---|
| 18717 | | - if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) |
|---|
| 18718 | | - mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; |
|---|
| 18719 | 19594 | |
|---|
| 19595 | + i = mb->un.varDmp.word_cnt * sizeof(uint32_t); |
|---|
| 19596 | + if (offset + i > DMP_RGN23_SIZE) |
|---|
| 19597 | + i = DMP_RGN23_SIZE - offset; |
|---|
| 18720 | 19598 | lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, |
|---|
| 18721 | | - rgn23_data + offset, |
|---|
| 18722 | | - mb->un.varDmp.word_cnt); |
|---|
| 18723 | | - offset += mb->un.varDmp.word_cnt; |
|---|
| 18724 | | - } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); |
|---|
| 19599 | + rgn23_data + offset, i); |
|---|
| 19600 | + offset += i; |
|---|
| 19601 | + } while (offset < DMP_RGN23_SIZE); |
|---|
| 18725 | 19602 | |
|---|
| 18726 | 19603 | mempool_free(pmb, phba->mbox_mem_pool); |
|---|
| 18727 | 19604 | return offset; |
|---|
| .. | .. |
|---|
| 18750 | 19627 | |
|---|
| 18751 | 19628 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 18752 | 19629 | if (!mboxq) { |
|---|
| 18753 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 19630 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 18754 | 19631 | "3105 failed to allocate mailbox memory\n"); |
|---|
| 18755 | 19632 | return 0; |
|---|
| 18756 | 19633 | } |
|---|
| .. | .. |
|---|
| 18758 | 19635 | if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) |
|---|
| 18759 | 19636 | goto out; |
|---|
| 18760 | 19637 | mqe = &mboxq->u.mqe; |
|---|
| 18761 | | - mp = (struct lpfc_dmabuf *) mboxq->context1; |
|---|
| 19638 | + mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; |
|---|
| 18762 | 19639 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
|---|
| 18763 | 19640 | if (rc) |
|---|
| 18764 | 19641 | goto out; |
|---|
| .. | .. |
|---|
| 18814 | 19691 | |
|---|
| 18815 | 19692 | /* Check the region signature first */ |
|---|
| 18816 | 19693 | if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { |
|---|
| 18817 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 19694 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 18818 | 19695 | "2619 Config region 23 has bad signature\n"); |
|---|
| 18819 | 19696 | goto out; |
|---|
| 18820 | 19697 | } |
|---|
| .. | .. |
|---|
| 18822 | 19699 | |
|---|
| 18823 | 19700 | /* Check the data structure version */ |
|---|
| 18824 | 19701 | if (rgn23_data[offset] != LPFC_REGION23_VERSION) { |
|---|
| 18825 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 19702 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 18826 | 19703 | "2620 Config region 23 has bad version\n"); |
|---|
| 18827 | 19704 | goto out; |
|---|
| 18828 | 19705 | } |
|---|
| .. | .. |
|---|
| 18903 | 19780 | struct lpfc_mbx_wr_object *wr_object; |
|---|
| 18904 | 19781 | LPFC_MBOXQ_t *mbox; |
|---|
| 18905 | 19782 | int rc = 0, i = 0; |
|---|
| 18906 | | - uint32_t shdr_status, shdr_add_status; |
|---|
| 19783 | + uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf; |
|---|
| 18907 | 19784 | uint32_t mbox_tmo; |
|---|
| 18908 | | - union lpfc_sli4_cfg_shdr *shdr; |
|---|
| 18909 | 19785 | struct lpfc_dmabuf *dmabuf; |
|---|
| 18910 | 19786 | uint32_t written = 0; |
|---|
| 19787 | + bool check_change_status = false; |
|---|
| 18911 | 19788 | |
|---|
| 18912 | 19789 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
|---|
| 18913 | 19790 | if (!mbox) |
|---|
| .. | .. |
|---|
| 18935 | 19812 | (size - written); |
|---|
| 18936 | 19813 | written += (size - written); |
|---|
| 18937 | 19814 | bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); |
|---|
| 19815 | + bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1); |
|---|
| 19816 | + check_change_status = true; |
|---|
| 18938 | 19817 | } else { |
|---|
| 18939 | 19818 | wr_object->u.request.bde[i].tus.f.bdeSize = |
|---|
| 18940 | 19819 | SLI4_PAGE_SIZE; |
|---|
| .. | .. |
|---|
| 18951 | 19830 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); |
|---|
| 18952 | 19831 | } |
|---|
| 18953 | 19832 | /* The IOCTL status is embedded in the mailbox subheader. */ |
|---|
| 18954 | | - shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr; |
|---|
| 18955 | | - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
|---|
| 18956 | | - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); |
|---|
| 18957 | | - if (rc != MBX_TIMEOUT) |
|---|
| 19833 | + shdr_status = bf_get(lpfc_mbox_hdr_status, |
|---|
| 19834 | + &wr_object->header.cfg_shdr.response); |
|---|
| 19835 | + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, |
|---|
| 19836 | + &wr_object->header.cfg_shdr.response); |
|---|
| 19837 | + if (check_change_status) { |
|---|
| 19838 | + shdr_change_status = bf_get(lpfc_wr_object_change_status, |
|---|
| 19839 | + &wr_object->u.response); |
|---|
| 19840 | + |
|---|
| 19841 | + if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || |
|---|
| 19842 | + shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { |
|---|
| 19843 | + shdr_csf = bf_get(lpfc_wr_object_csf, |
|---|
| 19844 | + &wr_object->u.response); |
|---|
| 19845 | + if (shdr_csf) |
|---|
| 19846 | + shdr_change_status = |
|---|
| 19847 | + LPFC_CHANGE_STATUS_PCI_RESET; |
|---|
| 19848 | + } |
|---|
| 19849 | + |
|---|
| 19850 | + switch (shdr_change_status) { |
|---|
| 19851 | + case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): |
|---|
| 19852 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
|---|
| 19853 | + "3198 Firmware write complete: System " |
|---|
| 19854 | + "reboot required to instantiate\n"); |
|---|
| 19855 | + break; |
|---|
| 19856 | + case (LPFC_CHANGE_STATUS_FW_RESET): |
|---|
| 19857 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
|---|
| 19858 | + "3199 Firmware write complete: Firmware" |
|---|
| 19859 | + " reset required to instantiate\n"); |
|---|
| 19860 | + break; |
|---|
| 19861 | + case (LPFC_CHANGE_STATUS_PORT_MIGRATION): |
|---|
| 19862 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
|---|
| 19863 | + "3200 Firmware write complete: Port " |
|---|
| 19864 | + "Migration or PCI Reset required to " |
|---|
| 19865 | + "instantiate\n"); |
|---|
| 19866 | + break; |
|---|
| 19867 | + case (LPFC_CHANGE_STATUS_PCI_RESET): |
|---|
| 19868 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
|---|
| 19869 | + "3201 Firmware write complete: PCI " |
|---|
| 19870 | + "Reset required to instantiate\n"); |
|---|
| 19871 | + break; |
|---|
| 19872 | + default: |
|---|
| 19873 | + break; |
|---|
| 19874 | + } |
|---|
| 19875 | + } |
|---|
| 19876 | + if (!phba->sli4_hba.intr_enable) |
|---|
| 19877 | + mempool_free(mbox, phba->mbox_mem_pool); |
|---|
| 19878 | + else if (rc != MBX_TIMEOUT) |
|---|
| 18958 | 19879 | mempool_free(mbox, phba->mbox_mem_pool); |
|---|
| 18959 | 19880 | if (shdr_status || shdr_add_status || rc) { |
|---|
| 18960 | | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
|---|
| 19881 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 18961 | 19882 | "3025 Write Object mailbox failed with " |
|---|
| 18962 | 19883 | "status x%x add_status x%x, mbx status x%x\n", |
|---|
| 18963 | 19884 | shdr_status, shdr_add_status, rc); |
|---|
| .. | .. |
|---|
| 19009 | 19930 | (mb->u.mb.mbxCommand == MBX_REG_VPI)) |
|---|
| 19010 | 19931 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
|---|
| 19011 | 19932 | if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { |
|---|
| 19012 | | - act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; |
|---|
| 19933 | + act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; |
|---|
| 19013 | 19934 | /* Put reference count for delayed processing */ |
|---|
| 19014 | 19935 | act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); |
|---|
| 19015 | 19936 | /* Unregister the RPI when mailbox complete */ |
|---|
| .. | .. |
|---|
| 19034 | 19955 | |
|---|
| 19035 | 19956 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
|---|
| 19036 | 19957 | if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { |
|---|
| 19037 | | - ndlp = (struct lpfc_nodelist *)mb->context2; |
|---|
| 19958 | + ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; |
|---|
| 19038 | 19959 | /* Unregister the RPI when mailbox complete */ |
|---|
| 19039 | 19960 | mb->mbox_flag |= LPFC_MBX_IMED_UNREG; |
|---|
| 19040 | 19961 | restart_loop = 1; |
|---|
| .. | .. |
|---|
| 19054 | 19975 | while (!list_empty(&mbox_cmd_list)) { |
|---|
| 19055 | 19976 | list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); |
|---|
| 19056 | 19977 | if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { |
|---|
| 19057 | | - mp = (struct lpfc_dmabuf *) (mb->context1); |
|---|
| 19978 | + mp = (struct lpfc_dmabuf *)(mb->ctx_buf); |
|---|
| 19058 | 19979 | if (mp) { |
|---|
| 19059 | 19980 | __lpfc_mbuf_free(phba, mp->virt, mp->phys); |
|---|
| 19060 | 19981 | kfree(mp); |
|---|
| 19061 | 19982 | } |
|---|
| 19062 | | - ndlp = (struct lpfc_nodelist *) mb->context2; |
|---|
| 19063 | | - mb->context2 = NULL; |
|---|
| 19983 | + mb->ctx_buf = NULL; |
|---|
| 19984 | + ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; |
|---|
| 19985 | + mb->ctx_ndlp = NULL; |
|---|
| 19064 | 19986 | if (ndlp) { |
|---|
| 19065 | 19987 | spin_lock(shost->host_lock); |
|---|
| 19066 | 19988 | ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; |
|---|
| .. | .. |
|---|
| 19106 | 20028 | |
|---|
| 19107 | 20029 | if (phba->link_flag & LS_MDS_LOOPBACK) { |
|---|
| 19108 | 20030 | /* MDS WQE are posted only to first WQ*/ |
|---|
| 19109 | | - wq = phba->sli4_hba.fcp_wq[0]; |
|---|
| 20031 | + wq = phba->sli4_hba.hdwq[0].io_wq; |
|---|
| 19110 | 20032 | if (unlikely(!wq)) |
|---|
| 19111 | 20033 | return 0; |
|---|
| 19112 | 20034 | pring = wq->pring; |
|---|
| .. | .. |
|---|
| 19136 | 20058 | piocbq = lpfc_sli_ringtx_get(phba, pring); |
|---|
| 19137 | 20059 | if (!piocbq) { |
|---|
| 19138 | 20060 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| 19139 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 20061 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 19140 | 20062 | "2823 txq empty and txq_cnt is %d\n ", |
|---|
| 19141 | 20063 | txq_cnt); |
|---|
| 19142 | 20064 | break; |
|---|
| .. | .. |
|---|
| 19165 | 20087 | |
|---|
| 19166 | 20088 | if (fail_msg) { |
|---|
| 19167 | 20089 | /* Failed means we can't issue and need to cancel */ |
|---|
| 19168 | | - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
|---|
| 20090 | + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, |
|---|
| 19169 | 20091 | "2822 IOCB failed %s iotag 0x%x " |
|---|
| 19170 | 20092 | "xri 0x%x\n", |
|---|
| 19171 | 20093 | fail_msg, |
|---|
| .. | .. |
|---|
| 19186 | 20108 | /** |
|---|
| 19187 | 20109 | * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. |
|---|
| 19188 | 20110 | * @phba: Pointer to HBA context object. |
|---|
| 19189 | | - * @pwqe: Pointer to command WQE. |
|---|
| 20111 | + * @pwqeq: Pointer to command WQE. |
|---|
| 19190 | 20112 | * @sglq: Pointer to the scatter gather queue object. |
|---|
| 19191 | 20113 | * |
|---|
| 19192 | 20114 | * This routine converts the bpl or bde that is in the WQE |
|---|
| .. | .. |
|---|
| 19311 | 20233 | /** |
|---|
| 19312 | 20234 | * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) |
|---|
| 19313 | 20235 | * @phba: Pointer to HBA context object. |
|---|
| 19314 | | - * @ring_number: Base sli ring number |
|---|
| 20236 | + * @qp: Pointer to HDW queue. |
|---|
| 19315 | 20237 | * @pwqe: Pointer to command WQE. |
|---|
| 19316 | 20238 | **/ |
|---|
| 19317 | 20239 | int |
|---|
| 19318 | | -lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, |
|---|
| 20240 | +lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, |
|---|
| 19319 | 20241 | struct lpfc_iocbq *pwqe) |
|---|
| 19320 | 20242 | { |
|---|
| 19321 | 20243 | union lpfc_wqe128 *wqe = &pwqe->wqe; |
|---|
| 19322 | | - struct lpfc_nvmet_rcv_ctx *ctxp; |
|---|
| 20244 | + struct lpfc_async_xchg_ctx *ctxp; |
|---|
| 19323 | 20245 | struct lpfc_queue *wq; |
|---|
| 19324 | 20246 | struct lpfc_sglq *sglq; |
|---|
| 19325 | 20247 | struct lpfc_sli_ring *pring; |
|---|
| .. | .. |
|---|
| 19329 | 20251 | /* NVME_LS and NVME_LS ABTS requests. */ |
|---|
| 19330 | 20252 | if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { |
|---|
| 19331 | 20253 | pring = phba->sli4_hba.nvmels_wq->pring; |
|---|
| 19332 | | - spin_lock_irqsave(&pring->ring_lock, iflags); |
|---|
| 20254 | + lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, |
|---|
| 20255 | + qp, wq_access); |
|---|
| 19333 | 20256 | sglq = __lpfc_sli_get_els_sglq(phba, pwqe); |
|---|
| 19334 | 20257 | if (!sglq) { |
|---|
| 19335 | 20258 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| .. | .. |
|---|
| 19351 | 20274 | |
|---|
| 19352 | 20275 | lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); |
|---|
| 19353 | 20276 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| 20277 | + |
|---|
| 20278 | + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); |
|---|
| 19354 | 20279 | return 0; |
|---|
| 19355 | 20280 | } |
|---|
| 19356 | 20281 | |
|---|
| 19357 | 20282 | /* NVME_FCREQ and NVME_ABTS requests */ |
|---|
| 19358 | 20283 | if (pwqe->iocb_flag & LPFC_IO_NVME) { |
|---|
| 19359 | 20284 | /* Get the IO distribution (hba_wqidx) for WQ assignment. */ |
|---|
| 19360 | | - pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; |
|---|
| 20285 | + wq = qp->io_wq; |
|---|
| 20286 | + pring = wq->pring; |
|---|
| 19361 | 20287 | |
|---|
| 19362 | | - spin_lock_irqsave(&pring->ring_lock, iflags); |
|---|
| 19363 | | - wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; |
|---|
| 19364 | | - bf_set(wqe_cqid, &wqe->generic.wqe_com, |
|---|
| 19365 | | - phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); |
|---|
| 20288 | + bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); |
|---|
| 20289 | + |
|---|
| 20290 | + lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, |
|---|
| 20291 | + qp, wq_access); |
|---|
| 19366 | 20292 | ret = lpfc_sli4_wq_put(wq, wqe); |
|---|
| 19367 | 20293 | if (ret) { |
|---|
| 19368 | 20294 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| .. | .. |
|---|
| 19370 | 20296 | } |
|---|
| 19371 | 20297 | lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); |
|---|
| 19372 | 20298 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| 20299 | + |
|---|
| 20300 | + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); |
|---|
| 19373 | 20301 | return 0; |
|---|
| 19374 | 20302 | } |
|---|
| 19375 | 20303 | |
|---|
| 19376 | 20304 | /* NVMET requests */ |
|---|
| 19377 | 20305 | if (pwqe->iocb_flag & LPFC_IO_NVMET) { |
|---|
| 19378 | 20306 | /* Get the IO distribution (hba_wqidx) for WQ assignment. */ |
|---|
| 19379 | | - pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; |
|---|
| 20307 | + wq = qp->io_wq; |
|---|
| 20308 | + pring = wq->pring; |
|---|
| 19380 | 20309 | |
|---|
| 19381 | | - spin_lock_irqsave(&pring->ring_lock, iflags); |
|---|
| 19382 | 20310 | ctxp = pwqe->context2; |
|---|
| 19383 | 20311 | sglq = ctxp->ctxbuf->sglq; |
|---|
| 19384 | 20312 | if (pwqe->sli4_xritag == NO_XRI) { |
|---|
| .. | .. |
|---|
| 19387 | 20315 | } |
|---|
| 19388 | 20316 | bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, |
|---|
| 19389 | 20317 | pwqe->sli4_xritag); |
|---|
| 19390 | | - wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; |
|---|
| 19391 | | - bf_set(wqe_cqid, &wqe->generic.wqe_com, |
|---|
| 19392 | | - phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); |
|---|
| 20318 | + bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); |
|---|
| 20319 | + |
|---|
| 20320 | + lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, |
|---|
| 20321 | + qp, wq_access); |
|---|
| 19393 | 20322 | ret = lpfc_sli4_wq_put(wq, wqe); |
|---|
| 19394 | 20323 | if (ret) { |
|---|
| 19395 | 20324 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| .. | .. |
|---|
| 19397 | 20326 | } |
|---|
| 19398 | 20327 | lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); |
|---|
| 19399 | 20328 | spin_unlock_irqrestore(&pring->ring_lock, iflags); |
|---|
| 20329 | + |
|---|
| 20330 | + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); |
|---|
| 19400 | 20331 | return 0; |
|---|
| 19401 | 20332 | } |
|---|
| 19402 | 20333 | return WQE_ERROR; |
|---|
| 19403 | 20334 | } |
|---|
| 20335 | + |
|---|
#ifdef LPFC_MXP_STAT
/**
 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * Take a one-shot snapshot of the public pool, private pool and busy
 * (txcmplq) counts for the given hardware queue, 15 seconds after a test
 * case starts running.
 *
 * The user should call lpfc_debugfs_multixripools_write before running a
 * test case to clear stat_snapshot_taken. While the test case runs, this
 * routine is invoked from the heartbeat timer and bumps stat_snapshot_taken
 * each time; the snapshot itself is captured exactly when the counter
 * reaches LPFC_MXP_SNAPSHOT_TAKEN.
 **/
void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_sli4_hdw_queue *hdwq = &phba->sli4_hba.hdwq[hwqid];
	struct lpfc_multixri_pool *mxp = hdwq->p_multixri_pool;

	if (!mxp)
		return;

	if (mxp->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
		/* Capture pool occupancy and in-flight IO count right now */
		mxp->stat_pbl_count = mxp->pbl_pool.count;
		mxp->stat_pvt_count = mxp->pvt_pool.count;
		mxp->stat_busy_count = hdwq->io_wq->pring->txcmplq_cnt;
	}

	mxp->stat_snapshot_taken++;
}
#endif
|---|
| 20377 | + |
|---|
| 20378 | +/** |
|---|
| 20379 | + * lpfc_adjust_pvt_pool_count - Adjust private pool count |
|---|
| 20380 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 20381 | + * @hwqid: belong to which HWQ. |
|---|
| 20382 | + * |
|---|
| 20383 | + * This routine moves some XRIs from private to public pool when private pool |
|---|
| 20384 | + * is not busy. |
|---|
| 20385 | + **/ |
|---|
| 20386 | +void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) |
|---|
| 20387 | +{ |
|---|
| 20388 | + struct lpfc_multixri_pool *multixri_pool; |
|---|
| 20389 | + u32 io_req_count; |
|---|
| 20390 | + u32 prev_io_req_count; |
|---|
| 20391 | + |
|---|
| 20392 | + multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; |
|---|
| 20393 | + if (!multixri_pool) |
|---|
| 20394 | + return; |
|---|
| 20395 | + io_req_count = multixri_pool->io_req_count; |
|---|
| 20396 | + prev_io_req_count = multixri_pool->prev_io_req_count; |
|---|
| 20397 | + |
|---|
| 20398 | + if (prev_io_req_count != io_req_count) { |
|---|
| 20399 | + /* Private pool is busy */ |
|---|
| 20400 | + multixri_pool->prev_io_req_count = io_req_count; |
|---|
| 20401 | + } else { |
|---|
| 20402 | + /* Private pool is not busy. |
|---|
| 20403 | + * Move XRIs from private to public pool. |
|---|
| 20404 | + */ |
|---|
| 20405 | + lpfc_move_xri_pvt_to_pbl(phba, hwqid); |
|---|
| 20406 | + } |
|---|
| 20407 | +} |
|---|
| 20408 | + |
|---|
| 20409 | +/** |
|---|
| 20410 | + * lpfc_adjust_high_watermark - Adjust high watermark |
|---|
| 20411 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 20412 | + * @hwqid: belong to which HWQ. |
|---|
| 20413 | + * |
|---|
| 20414 | + * This routine sets high watermark as number of outstanding XRIs, |
|---|
| 20415 | + * but make sure the new value is between xri_limit/2 and xri_limit. |
|---|
| 20416 | + **/ |
|---|
| 20417 | +void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid) |
|---|
| 20418 | +{ |
|---|
| 20419 | + u32 new_watermark; |
|---|
| 20420 | + u32 watermark_max; |
|---|
| 20421 | + u32 watermark_min; |
|---|
| 20422 | + u32 xri_limit; |
|---|
| 20423 | + u32 txcmplq_cnt; |
|---|
| 20424 | + u32 abts_io_bufs; |
|---|
| 20425 | + struct lpfc_multixri_pool *multixri_pool; |
|---|
| 20426 | + struct lpfc_sli4_hdw_queue *qp; |
|---|
| 20427 | + |
|---|
| 20428 | + qp = &phba->sli4_hba.hdwq[hwqid]; |
|---|
| 20429 | + multixri_pool = qp->p_multixri_pool; |
|---|
| 20430 | + if (!multixri_pool) |
|---|
| 20431 | + return; |
|---|
| 20432 | + xri_limit = multixri_pool->xri_limit; |
|---|
| 20433 | + |
|---|
| 20434 | + watermark_max = xri_limit; |
|---|
| 20435 | + watermark_min = xri_limit / 2; |
|---|
| 20436 | + |
|---|
| 20437 | + txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; |
|---|
| 20438 | + abts_io_bufs = qp->abts_scsi_io_bufs; |
|---|
| 20439 | + abts_io_bufs += qp->abts_nvme_io_bufs; |
|---|
| 20440 | + |
|---|
| 20441 | + new_watermark = txcmplq_cnt + abts_io_bufs; |
|---|
| 20442 | + new_watermark = min(watermark_max, new_watermark); |
|---|
| 20443 | + new_watermark = max(watermark_min, new_watermark); |
|---|
| 20444 | + multixri_pool->pvt_pool.high_watermark = new_watermark; |
|---|
| 20445 | + |
|---|
| 20446 | +#ifdef LPFC_MXP_STAT |
|---|
| 20447 | + multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm, |
|---|
| 20448 | + new_watermark); |
|---|
| 20449 | +#endif |
|---|
| 20450 | +} |
|---|
| 20451 | + |
|---|
/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine is called from heartbeat timer when pvt_pool is idle.
 * All free XRIs are moved from private to public pool on hwqid with 2 steps.
 * The first step moves (all - low_watermark) amount of XRIs.
 * The second step moves the rest of XRIs.
 *
 * Lock order: pbl_pool->lock is taken first, then pvt_pool->lock nested
 * inside it - the same nesting used when moving XRIs in the other
 * direction, so the two paths cannot deadlock against each other.
 **/
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct list_head tmp_list;
	u32 tmp_count;

	qp = &phba->sli4_hba.hdwq[hwqid];
	pbl_pool = &qp->p_multixri_pool->pbl_pool;
	pvt_pool = &qp->p_multixri_pool->pvt_pool;
	tmp_count = 0;

	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);

	if (pvt_pool->count > pvt_pool->low_watermark) {
		/* Step 1: move (all - low_watermark) from pvt_pool
		 * to pbl_pool
		 */

		/* Move low watermark of bufs from pvt_pool to tmp_list */
		INIT_LIST_HEAD(&tmp_list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list, &tmp_list);
			tmp_count++;
			if (tmp_count >= pvt_pool->low_watermark)
				break;
		}

		/* Move all bufs from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);

		/* Move all bufs from tmp_list to pvt_pool, leaving exactly
		 * low_watermark bufs behind in the private pool
		 */
		list_splice(&tmp_list, &pvt_pool->list);

		pbl_pool->count += (pvt_pool->count - tmp_count);
		pvt_pool->count = tmp_count;
	} else {
		/* Step 2: move the rest from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);
		pbl_pool->count += pvt_pool->count;
		pvt_pool->count = 0;
	}

	spin_unlock(&pvt_pool->lock);
	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
}
|---|
| 20514 | + |
|---|
/**
 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to HDW queue
 * @pbl_pool: specified public free XRI pool
 * @pvt_pool: specified private free XRI pool
 * @count: number of XRIs to move
 *
 * This routine tries to move some free common bufs from the specified pbl_pool
 * to the specified pvt_pool. It might move less than count XRIs if there's not
 * enough in public pool.
 *
 * The public pool lock is only *tried* (spin_trylock_irqsave): a contended
 * pbl_pool is treated the same as an empty one so the caller can move on to
 * another HWQ's pool instead of spinning here.
 *
 * Return:
 *   true - if XRIs are successfully moved from the specified pbl_pool to the
 *          specified pvt_pool
 *   false - if the specified pbl_pool is empty or locked by someone else
 **/
static bool
_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
			  struct lpfc_pbl_pool *pbl_pool,
			  struct lpfc_pvt_pool *pvt_pool, u32 count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	int ret;

	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
	if (ret) {
		if (pbl_pool->count) {
			/* Move a batch of XRIs from public to private pool */
			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
			list_for_each_entry_safe(lpfc_ncmd,
						 lpfc_ncmd_next,
						 &pbl_pool->list,
						 list) {
				list_move_tail(&lpfc_ncmd->list,
					       &pvt_pool->list);
				pvt_pool->count++;
				pbl_pool->count--;
				count--;
				if (count == 0)
					break;
			}

			spin_unlock(&pvt_pool->lock);
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
			return true;
		}
		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
	}

	return false;
}
|---|
| 20569 | + |
|---|
/**
 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 * @count: number of XRIs to move
 *
 * This routine tries to find some free common bufs in one of public pools with
 * Round Robin method. The search always starts from local hwqid, then the next
 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found,
 * a batch of free common bufs are moved to private pool on hwqid.
 * It might move less than count XRIs if there's not enough in public pool.
 **/
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_multixri_pool *next_multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_sli4_hdw_queue *qp;
	u32 next_hwqid;
	u32 hwq_count;
	int ret;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	pbl_pool = &multixri_pool->pbl_pool;

	/* Check if local pbl_pool is available */
	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
	if (ret) {
#ifdef LPFC_MXP_STAT
		multixri_pool->local_pbl_hit_count++;
#endif
		return;
	}

	hwq_count = phba->cfg_hdw_queue;

	/* Get the next hwqid which was found last time */
	next_hwqid = multixri_pool->rrb_next_hwqid;

	do {
		/* Go to next hwq */
		next_hwqid = (next_hwqid + 1) % hwq_count;

		next_multixri_pool =
			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
		pbl_pool = &next_multixri_pool->pbl_pool;

		/* Check if the public free xri pool is available */
		ret = _lpfc_move_xri_pbl_to_pvt(
			phba, qp, pbl_pool, pvt_pool, count);

		/* Exit while-loop if success or all hwqid are checked
		 * (i.e. we have wrapped back around to the starting point
		 * without finding a usable public pool)
		 */
	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

	/* Starting point for the next time */
	multixri_pool->rrb_next_hwqid = next_hwqid;

	if (!ret) {
		/* stats: all public pools are empty*/
		multixri_pool->pbl_empty_count++;
	}

#ifdef LPFC_MXP_STAT
	if (ret) {
		/* Distinguish hits on the local pool from remote ones */
		if (next_hwqid == hwqid)
			multixri_pool->local_pbl_hit_count++;
		else
			multixri_pool->other_pbl_hit_count++;
	}
#endif
}
|---|
| 20644 | + |
|---|
| 20645 | +/** |
|---|
| 20646 | + * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark |
|---|
| 20647 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 20648 | + * @hwqid: belong to which HWQ. |
|---|
| 20649 | + * |
|---|
| 20650 | + * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than |
|---|
| 20651 | + * low watermark. |
|---|
| 20652 | + **/ |
|---|
| 20653 | +void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid) |
|---|
| 20654 | +{ |
|---|
| 20655 | + struct lpfc_multixri_pool *multixri_pool; |
|---|
| 20656 | + struct lpfc_pvt_pool *pvt_pool; |
|---|
| 20657 | + |
|---|
| 20658 | + multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; |
|---|
| 20659 | + pvt_pool = &multixri_pool->pvt_pool; |
|---|
| 20660 | + |
|---|
| 20661 | + if (pvt_pool->count < pvt_pool->low_watermark) |
|---|
| 20662 | + lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); |
|---|
| 20663 | +} |
|---|
| 20664 | + |
|---|
| 20665 | +/** |
|---|
| 20666 | + * lpfc_release_io_buf - Return one IO buf back to free pool |
|---|
| 20667 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 20668 | + * @lpfc_ncmd: IO buf to be returned. |
|---|
| 20669 | + * @qp: belong to which HWQ. |
|---|
| 20670 | + * |
|---|
| 20671 | + * This routine returns one IO buf back to free pool. If this is an urgent IO, |
|---|
| 20672 | + * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1, |
|---|
| 20673 | + * the IO buf is returned to pbl_pool or pvt_pool based on watermark and |
|---|
| 20674 | + * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to |
|---|
| 20675 | + * lpfc_io_buf_list_put. |
|---|
| 20676 | + **/ |
|---|
| 20677 | +void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, |
|---|
| 20678 | + struct lpfc_sli4_hdw_queue *qp) |
|---|
| 20679 | +{ |
|---|
| 20680 | + unsigned long iflag; |
|---|
| 20681 | + struct lpfc_pbl_pool *pbl_pool; |
|---|
| 20682 | + struct lpfc_pvt_pool *pvt_pool; |
|---|
| 20683 | + struct lpfc_epd_pool *epd_pool; |
|---|
| 20684 | + u32 txcmplq_cnt; |
|---|
| 20685 | + u32 xri_owned; |
|---|
| 20686 | + u32 xri_limit; |
|---|
| 20687 | + u32 abts_io_bufs; |
|---|
| 20688 | + |
|---|
| 20689 | + /* MUST zero fields if buffer is reused by another protocol */ |
|---|
| 20690 | + lpfc_ncmd->nvmeCmd = NULL; |
|---|
| 20691 | + lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; |
|---|
| 20692 | + lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL; |
|---|
| 20693 | + |
|---|
| 20694 | + if (phba->cfg_xpsgl && !phba->nvmet_support && |
|---|
| 20695 | + !list_empty(&lpfc_ncmd->dma_sgl_xtra_list)) |
|---|
| 20696 | + lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); |
|---|
| 20697 | + |
|---|
| 20698 | + if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list)) |
|---|
| 20699 | + lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); |
|---|
| 20700 | + |
|---|
| 20701 | + if (phba->cfg_xri_rebalancing) { |
|---|
| 20702 | + if (lpfc_ncmd->expedite) { |
|---|
| 20703 | + /* Return to expedite pool */ |
|---|
| 20704 | + epd_pool = &phba->epd_pool; |
|---|
| 20705 | + spin_lock_irqsave(&epd_pool->lock, iflag); |
|---|
| 20706 | + list_add_tail(&lpfc_ncmd->list, &epd_pool->list); |
|---|
| 20707 | + epd_pool->count++; |
|---|
| 20708 | + spin_unlock_irqrestore(&epd_pool->lock, iflag); |
|---|
| 20709 | + return; |
|---|
| 20710 | + } |
|---|
| 20711 | + |
|---|
| 20712 | + /* Avoid invalid access if an IO sneaks in and is being rejected |
|---|
| 20713 | + * just _after_ xri pools are destroyed in lpfc_offline. |
|---|
| 20714 | + * Nothing much can be done at this point. |
|---|
| 20715 | + */ |
|---|
| 20716 | + if (!qp->p_multixri_pool) |
|---|
| 20717 | + return; |
|---|
| 20718 | + |
|---|
| 20719 | + pbl_pool = &qp->p_multixri_pool->pbl_pool; |
|---|
| 20720 | + pvt_pool = &qp->p_multixri_pool->pvt_pool; |
|---|
| 20721 | + |
|---|
| 20722 | + txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; |
|---|
| 20723 | + abts_io_bufs = qp->abts_scsi_io_bufs; |
|---|
| 20724 | + abts_io_bufs += qp->abts_nvme_io_bufs; |
|---|
| 20725 | + |
|---|
| 20726 | + xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; |
|---|
| 20727 | + xri_limit = qp->p_multixri_pool->xri_limit; |
|---|
| 20728 | + |
|---|
| 20729 | +#ifdef LPFC_MXP_STAT |
|---|
| 20730 | + if (xri_owned <= xri_limit) |
|---|
| 20731 | + qp->p_multixri_pool->below_limit_count++; |
|---|
| 20732 | + else |
|---|
| 20733 | + qp->p_multixri_pool->above_limit_count++; |
|---|
| 20734 | +#endif |
|---|
| 20735 | + |
|---|
| 20736 | + /* XRI goes to either public or private free xri pool |
|---|
| 20737 | + * based on watermark and xri_limit |
|---|
| 20738 | + */ |
|---|
| 20739 | + if ((pvt_pool->count < pvt_pool->low_watermark) || |
|---|
| 20740 | + (xri_owned < xri_limit && |
|---|
| 20741 | + pvt_pool->count < pvt_pool->high_watermark)) { |
|---|
| 20742 | + lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, |
|---|
| 20743 | + qp, free_pvt_pool); |
|---|
| 20744 | + list_add_tail(&lpfc_ncmd->list, |
|---|
| 20745 | + &pvt_pool->list); |
|---|
| 20746 | + pvt_pool->count++; |
|---|
| 20747 | + spin_unlock_irqrestore(&pvt_pool->lock, iflag); |
|---|
| 20748 | + } else { |
|---|
| 20749 | + lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, |
|---|
| 20750 | + qp, free_pub_pool); |
|---|
| 20751 | + list_add_tail(&lpfc_ncmd->list, |
|---|
| 20752 | + &pbl_pool->list); |
|---|
| 20753 | + pbl_pool->count++; |
|---|
| 20754 | + spin_unlock_irqrestore(&pbl_pool->lock, iflag); |
|---|
| 20755 | + } |
|---|
| 20756 | + } else { |
|---|
| 20757 | + lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, |
|---|
| 20758 | + qp, free_xri); |
|---|
| 20759 | + list_add_tail(&lpfc_ncmd->list, |
|---|
| 20760 | + &qp->lpfc_io_buf_list_put); |
|---|
| 20761 | + qp->put_io_bufs++; |
|---|
| 20762 | + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, |
|---|
| 20763 | + iflag); |
|---|
| 20764 | + } |
|---|
| 20765 | +} |
|---|
| 20766 | + |
|---|
| 20767 | +/** |
|---|
| 20768 | + * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool |
|---|
| 20769 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 20770 | + * @qp: pointer to HDW queue |
|---|
| 20771 | + * @pvt_pool: pointer to private pool data structure. |
|---|
| 20772 | + * @ndlp: pointer to lpfc nodelist data structure. |
|---|
| 20773 | + * |
|---|
| 20774 | + * This routine tries to get one free IO buf from private pool. |
|---|
| 20775 | + * |
|---|
| 20776 | + * Return: |
|---|
| 20777 | + * pointer to one free IO buf - if private pool is not empty |
|---|
| 20778 | + * NULL - if private pool is empty |
|---|
| 20779 | + **/ |
|---|
| 20780 | +static struct lpfc_io_buf * |
|---|
| 20781 | +lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba, |
|---|
| 20782 | + struct lpfc_sli4_hdw_queue *qp, |
|---|
| 20783 | + struct lpfc_pvt_pool *pvt_pool, |
|---|
| 20784 | + struct lpfc_nodelist *ndlp) |
|---|
| 20785 | +{ |
|---|
| 20786 | + struct lpfc_io_buf *lpfc_ncmd; |
|---|
| 20787 | + struct lpfc_io_buf *lpfc_ncmd_next; |
|---|
| 20788 | + unsigned long iflag; |
|---|
| 20789 | + |
|---|
| 20790 | + lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool); |
|---|
| 20791 | + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, |
|---|
| 20792 | + &pvt_pool->list, list) { |
|---|
| 20793 | + if (lpfc_test_rrq_active( |
|---|
| 20794 | + phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag)) |
|---|
| 20795 | + continue; |
|---|
| 20796 | + list_del(&lpfc_ncmd->list); |
|---|
| 20797 | + pvt_pool->count--; |
|---|
| 20798 | + spin_unlock_irqrestore(&pvt_pool->lock, iflag); |
|---|
| 20799 | + return lpfc_ncmd; |
|---|
| 20800 | + } |
|---|
| 20801 | + spin_unlock_irqrestore(&pvt_pool->lock, iflag); |
|---|
| 20802 | + |
|---|
| 20803 | + return NULL; |
|---|
| 20804 | +} |
|---|
| 20805 | + |
|---|
| 20806 | +/** |
|---|
| 20807 | + * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool |
|---|
| 20808 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 20809 | + * |
|---|
| 20810 | + * This routine tries to get one free IO buf from expedite pool. |
|---|
| 20811 | + * |
|---|
| 20812 | + * Return: |
|---|
| 20813 | + * pointer to one free IO buf - if expedite pool is not empty |
|---|
| 20814 | + * NULL - if expedite pool is empty |
|---|
| 20815 | + **/ |
|---|
| 20816 | +static struct lpfc_io_buf * |
|---|
| 20817 | +lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba) |
|---|
| 20818 | +{ |
|---|
| 20819 | + struct lpfc_io_buf *lpfc_ncmd = NULL, *iter; |
|---|
| 20820 | + struct lpfc_io_buf *lpfc_ncmd_next; |
|---|
| 20821 | + unsigned long iflag; |
|---|
| 20822 | + struct lpfc_epd_pool *epd_pool; |
|---|
| 20823 | + |
|---|
| 20824 | + epd_pool = &phba->epd_pool; |
|---|
| 20825 | + |
|---|
| 20826 | + spin_lock_irqsave(&epd_pool->lock, iflag); |
|---|
| 20827 | + if (epd_pool->count > 0) { |
|---|
| 20828 | + list_for_each_entry_safe(iter, lpfc_ncmd_next, |
|---|
| 20829 | + &epd_pool->list, list) { |
|---|
| 20830 | + list_del(&iter->list); |
|---|
| 20831 | + epd_pool->count--; |
|---|
| 20832 | + lpfc_ncmd = iter; |
|---|
| 20833 | + break; |
|---|
| 20834 | + } |
|---|
| 20835 | + } |
|---|
| 20836 | + spin_unlock_irqrestore(&epd_pool->lock, iflag); |
|---|
| 20837 | + |
|---|
| 20838 | + return lpfc_ncmd; |
|---|
| 20839 | +} |
|---|
| 20840 | + |
|---|
| 20841 | +/** |
|---|
| 20842 | + * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs |
|---|
| 20843 | + * @phba: pointer to lpfc hba data structure. |
|---|
| 20844 | + * @ndlp: pointer to lpfc nodelist data structure. |
|---|
| 20845 | + * @hwqid: belong to which HWQ |
|---|
| 20846 | + * @expedite: 1 means this request is urgent. |
|---|
| 20847 | + * |
|---|
| 20848 | + * This routine will do the following actions and then return a pointer to |
|---|
| 20849 | + * one free IO buf. |
|---|
| 20850 | + * |
|---|
| 20851 | + * 1. If private free xri count is empty, move some XRIs from public to |
|---|
| 20852 | + * private pool. |
|---|
| 20853 | + * 2. Get one XRI from private free xri pool. |
|---|
| 20854 | + * 3. If we fail to get one from pvt_pool and this is an expedite request, |
|---|
| 20855 | + * get one free xri from expedite pool. |
|---|
| 20856 | + * |
|---|
| 20857 | + * Note: ndlp is only used on SCSI side for RRQ testing. |
|---|
| 20858 | + * The caller should pass NULL for ndlp on NVME side. |
|---|
| 20859 | + * |
|---|
| 20860 | + * Return: |
|---|
| 20861 | + * pointer to one free IO buf - if private pool is not empty |
|---|
| 20862 | + * NULL - if private pool is empty |
|---|
| 20863 | + **/ |
|---|
| 20864 | +static struct lpfc_io_buf * |
|---|
| 20865 | +lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba, |
|---|
| 20866 | + struct lpfc_nodelist *ndlp, |
|---|
| 20867 | + int hwqid, int expedite) |
|---|
| 20868 | +{ |
|---|
| 20869 | + struct lpfc_sli4_hdw_queue *qp; |
|---|
| 20870 | + struct lpfc_multixri_pool *multixri_pool; |
|---|
| 20871 | + struct lpfc_pvt_pool *pvt_pool; |
|---|
| 20872 | + struct lpfc_io_buf *lpfc_ncmd; |
|---|
| 20873 | + |
|---|
| 20874 | + qp = &phba->sli4_hba.hdwq[hwqid]; |
|---|
| 20875 | + lpfc_ncmd = NULL; |
|---|
| 20876 | + multixri_pool = qp->p_multixri_pool; |
|---|
| 20877 | + pvt_pool = &multixri_pool->pvt_pool; |
|---|
| 20878 | + multixri_pool->io_req_count++; |
|---|
| 20879 | + |
|---|
| 20880 | + /* If pvt_pool is empty, move some XRIs from public to private pool */ |
|---|
| 20881 | + if (pvt_pool->count == 0) |
|---|
| 20882 | + lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); |
|---|
| 20883 | + |
|---|
| 20884 | + /* Get one XRI from private free xri pool */ |
|---|
| 20885 | + lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp); |
|---|
| 20886 | + |
|---|
| 20887 | + if (lpfc_ncmd) { |
|---|
| 20888 | + lpfc_ncmd->hdwq = qp; |
|---|
| 20889 | + lpfc_ncmd->hdwq_no = hwqid; |
|---|
| 20890 | + } else if (expedite) { |
|---|
| 20891 | + /* If we fail to get one from pvt_pool and this is an expedite |
|---|
| 20892 | + * request, get one free xri from expedite pool. |
|---|
| 20893 | + */ |
|---|
| 20894 | + lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba); |
|---|
| 20895 | + } |
|---|
| 20896 | + |
|---|
| 20897 | + return lpfc_ncmd; |
|---|
| 20898 | +} |
|---|
| 20899 | + |
|---|
| 20900 | +static inline struct lpfc_io_buf * |
|---|
| 20901 | +lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx) |
|---|
| 20902 | +{ |
|---|
| 20903 | + struct lpfc_sli4_hdw_queue *qp; |
|---|
| 20904 | + struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next; |
|---|
| 20905 | + |
|---|
| 20906 | + qp = &phba->sli4_hba.hdwq[idx]; |
|---|
| 20907 | + list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, |
|---|
| 20908 | + &qp->lpfc_io_buf_list_get, list) { |
|---|
| 20909 | + if (lpfc_test_rrq_active(phba, ndlp, |
|---|
| 20910 | + lpfc_cmd->cur_iocbq.sli4_lxritag)) |
|---|
| 20911 | + continue; |
|---|
| 20912 | + |
|---|
| 20913 | + if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED) |
|---|
| 20914 | + continue; |
|---|
| 20915 | + |
|---|
| 20916 | + list_del_init(&lpfc_cmd->list); |
|---|
| 20917 | + qp->get_io_bufs--; |
|---|
| 20918 | + lpfc_cmd->hdwq = qp; |
|---|
| 20919 | + lpfc_cmd->hdwq_no = idx; |
|---|
| 20920 | + return lpfc_cmd; |
|---|
| 20921 | + } |
|---|
| 20922 | + return NULL; |
|---|
| 20923 | +} |
|---|
| 20924 | + |
|---|
| 20925 | +/** |
|---|
| 20926 | + * lpfc_get_io_buf - Get one IO buffer from free pool |
|---|
| 20927 | + * @phba: The HBA for which this call is being executed. |
|---|
| 20928 | + * @ndlp: pointer to lpfc nodelist data structure. |
|---|
| 20929 | + * @hwqid: belong to which HWQ |
|---|
| 20930 | + * @expedite: 1 means this request is urgent. |
|---|
| 20931 | + * |
|---|
| 20932 | + * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1, |
|---|
| 20933 | + * removes a IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes |
|---|
| 20934 | + * a IO buffer from head of @hdwq io_buf_list and returns to caller. |
|---|
| 20935 | + * |
|---|
| 20936 | + * Note: ndlp is only used on SCSI side for RRQ testing. |
|---|
| 20937 | + * The caller should pass NULL for ndlp on NVME side. |
|---|
| 20938 | + * |
|---|
| 20939 | + * Return codes: |
|---|
| 20940 | + * NULL - Error |
|---|
| 20941 | + * Pointer to lpfc_io_buf - Success |
|---|
| 20942 | + **/ |
|---|
| 20943 | +struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, |
|---|
| 20944 | + struct lpfc_nodelist *ndlp, |
|---|
| 20945 | + u32 hwqid, int expedite) |
|---|
| 20946 | +{ |
|---|
| 20947 | + struct lpfc_sli4_hdw_queue *qp; |
|---|
| 20948 | + unsigned long iflag; |
|---|
| 20949 | + struct lpfc_io_buf *lpfc_cmd; |
|---|
| 20950 | + |
|---|
| 20951 | + qp = &phba->sli4_hba.hdwq[hwqid]; |
|---|
| 20952 | + lpfc_cmd = NULL; |
|---|
| 20953 | + |
|---|
| 20954 | + if (phba->cfg_xri_rebalancing) |
|---|
| 20955 | + lpfc_cmd = lpfc_get_io_buf_from_multixri_pools( |
|---|
| 20956 | + phba, ndlp, hwqid, expedite); |
|---|
| 20957 | + else { |
|---|
| 20958 | + lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag, |
|---|
| 20959 | + qp, alloc_xri_get); |
|---|
| 20960 | + if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) |
|---|
| 20961 | + lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid); |
|---|
| 20962 | + if (!lpfc_cmd) { |
|---|
| 20963 | + lpfc_qp_spin_lock(&qp->io_buf_list_put_lock, |
|---|
| 20964 | + qp, alloc_xri_put); |
|---|
| 20965 | + list_splice(&qp->lpfc_io_buf_list_put, |
|---|
| 20966 | + &qp->lpfc_io_buf_list_get); |
|---|
| 20967 | + qp->get_io_bufs += qp->put_io_bufs; |
|---|
| 20968 | + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); |
|---|
| 20969 | + qp->put_io_bufs = 0; |
|---|
| 20970 | + spin_unlock(&qp->io_buf_list_put_lock); |
|---|
| 20971 | + if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || |
|---|
| 20972 | + expedite) |
|---|
| 20973 | + lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid); |
|---|
| 20974 | + } |
|---|
| 20975 | + spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag); |
|---|
| 20976 | + } |
|---|
| 20977 | + |
|---|
| 20978 | + return lpfc_cmd; |
|---|
| 20979 | +} |
|---|
| 20980 | + |
|---|
| 20981 | +/** |
|---|
| 20982 | + * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool |
|---|
| 20983 | + * @phba: The HBA for which this call is being executed. |
|---|
| 20984 | + * @lpfc_buf: IO buf structure to append the SGL chunk |
|---|
| 20985 | + * |
|---|
| 20986 | + * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool, |
|---|
| 20987 | + * and will allocate an SGL chunk if the pool is empty. |
|---|
| 20988 | + * |
|---|
| 20989 | + * Return codes: |
|---|
| 20990 | + * NULL - Error |
|---|
| 20991 | + * Pointer to sli4_hybrid_sgl - Success |
|---|
| 20992 | + **/ |
|---|
| 20993 | +struct sli4_hybrid_sgl * |
|---|
| 20994 | +lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) |
|---|
| 20995 | +{ |
|---|
| 20996 | + struct sli4_hybrid_sgl *list_entry = NULL; |
|---|
| 20997 | + struct sli4_hybrid_sgl *tmp = NULL; |
|---|
| 20998 | + struct sli4_hybrid_sgl *allocated_sgl = NULL; |
|---|
| 20999 | + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; |
|---|
| 21000 | + struct list_head *buf_list = &hdwq->sgl_list; |
|---|
| 21001 | + unsigned long iflags; |
|---|
| 21002 | + |
|---|
| 21003 | + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
|---|
| 21004 | + |
|---|
| 21005 | + if (likely(!list_empty(buf_list))) { |
|---|
| 21006 | + /* break off 1 chunk from the sgl_list */ |
|---|
| 21007 | + list_for_each_entry_safe(list_entry, tmp, |
|---|
| 21008 | + buf_list, list_node) { |
|---|
| 21009 | + list_move_tail(&list_entry->list_node, |
|---|
| 21010 | + &lpfc_buf->dma_sgl_xtra_list); |
|---|
| 21011 | + break; |
|---|
| 21012 | + } |
|---|
| 21013 | + } else { |
|---|
| 21014 | + /* allocate more */ |
|---|
| 21015 | + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); |
|---|
| 21016 | + tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, |
|---|
| 21017 | + cpu_to_node(hdwq->io_wq->chann)); |
|---|
| 21018 | + if (!tmp) { |
|---|
| 21019 | + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
|---|
| 21020 | + "8353 error kmalloc memory for HDWQ " |
|---|
| 21021 | + "%d %s\n", |
|---|
| 21022 | + lpfc_buf->hdwq_no, __func__); |
|---|
| 21023 | + return NULL; |
|---|
| 21024 | + } |
|---|
| 21025 | + |
|---|
| 21026 | + tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, |
|---|
| 21027 | + GFP_ATOMIC, &tmp->dma_phys_sgl); |
|---|
| 21028 | + if (!tmp->dma_sgl) { |
|---|
| 21029 | + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
|---|
| 21030 | + "8354 error pool_alloc memory for HDWQ " |
|---|
| 21031 | + "%d %s\n", |
|---|
| 21032 | + lpfc_buf->hdwq_no, __func__); |
|---|
| 21033 | + kfree(tmp); |
|---|
| 21034 | + return NULL; |
|---|
| 21035 | + } |
|---|
| 21036 | + |
|---|
| 21037 | + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
|---|
| 21038 | + list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list); |
|---|
| 21039 | + } |
|---|
| 21040 | + |
|---|
| 21041 | + allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list, |
|---|
| 21042 | + struct sli4_hybrid_sgl, |
|---|
| 21043 | + list_node); |
|---|
| 21044 | + |
|---|
| 21045 | + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); |
|---|
| 21046 | + |
|---|
| 21047 | + return allocated_sgl; |
|---|
| 21048 | +} |
|---|
| 21049 | + |
|---|
| 21050 | +/** |
|---|
| 21051 | + * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool |
|---|
| 21052 | + * @phba: The HBA for which this call is being executed. |
|---|
| 21053 | + * @lpfc_buf: IO buf structure with the SGL chunk |
|---|
| 21054 | + * |
|---|
| 21055 | + * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool. |
|---|
| 21056 | + * |
|---|
| 21057 | + * Return codes: |
|---|
| 21058 | + * 0 - Success |
|---|
| 21059 | + * -EINVAL - Error |
|---|
| 21060 | + **/ |
|---|
| 21061 | +int |
|---|
| 21062 | +lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) |
|---|
| 21063 | +{ |
|---|
| 21064 | + int rc = 0; |
|---|
| 21065 | + struct sli4_hybrid_sgl *list_entry = NULL; |
|---|
| 21066 | + struct sli4_hybrid_sgl *tmp = NULL; |
|---|
| 21067 | + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; |
|---|
| 21068 | + struct list_head *buf_list = &hdwq->sgl_list; |
|---|
| 21069 | + unsigned long iflags; |
|---|
| 21070 | + |
|---|
| 21071 | + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
|---|
| 21072 | + |
|---|
| 21073 | + if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) { |
|---|
| 21074 | + list_for_each_entry_safe(list_entry, tmp, |
|---|
| 21075 | + &lpfc_buf->dma_sgl_xtra_list, |
|---|
| 21076 | + list_node) { |
|---|
| 21077 | + list_move_tail(&list_entry->list_node, |
|---|
| 21078 | + buf_list); |
|---|
| 21079 | + } |
|---|
| 21080 | + } else { |
|---|
| 21081 | + rc = -EINVAL; |
|---|
| 21082 | + } |
|---|
| 21083 | + |
|---|
| 21084 | + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); |
|---|
| 21085 | + return rc; |
|---|
| 21086 | +} |
|---|
| 21087 | + |
|---|
| 21088 | +/** |
|---|
| 21089 | + * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool |
|---|
| 21090 | + * @phba: phba object |
|---|
| 21091 | + * @hdwq: hdwq to cleanup sgl buff resources on |
|---|
| 21092 | + * |
|---|
| 21093 | + * This routine frees all SGL chunks of hdwq SGL chunk pool. |
|---|
| 21094 | + * |
|---|
| 21095 | + * Return codes: |
|---|
| 21096 | + * None |
|---|
| 21097 | + **/ |
|---|
| 21098 | +void |
|---|
| 21099 | +lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba, |
|---|
| 21100 | + struct lpfc_sli4_hdw_queue *hdwq) |
|---|
| 21101 | +{ |
|---|
| 21102 | + struct list_head *buf_list = &hdwq->sgl_list; |
|---|
| 21103 | + struct sli4_hybrid_sgl *list_entry = NULL; |
|---|
| 21104 | + struct sli4_hybrid_sgl *tmp = NULL; |
|---|
| 21105 | + unsigned long iflags; |
|---|
| 21106 | + |
|---|
| 21107 | + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
|---|
| 21108 | + |
|---|
| 21109 | + /* Free sgl pool */ |
|---|
| 21110 | + list_for_each_entry_safe(list_entry, tmp, |
|---|
| 21111 | + buf_list, list_node) { |
|---|
| 21112 | + dma_pool_free(phba->lpfc_sg_dma_buf_pool, |
|---|
| 21113 | + list_entry->dma_sgl, |
|---|
| 21114 | + list_entry->dma_phys_sgl); |
|---|
| 21115 | + list_del(&list_entry->list_node); |
|---|
| 21116 | + kfree(list_entry); |
|---|
| 21117 | + } |
|---|
| 21118 | + |
|---|
| 21119 | + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); |
|---|
| 21120 | +} |
|---|
| 21121 | + |
|---|
| 21122 | +/** |
|---|
| 21123 | + * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq |
|---|
| 21124 | + * @phba: The HBA for which this call is being executed. |
|---|
| 21125 | + * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer |
|---|
| 21126 | + * |
|---|
| 21127 | + * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool, |
|---|
| 21128 | + * and will allocate an CMD/RSP buffer if the pool is empty. |
|---|
| 21129 | + * |
|---|
| 21130 | + * Return codes: |
|---|
| 21131 | + * NULL - Error |
|---|
| 21132 | + * Pointer to fcp_cmd_rsp_buf - Success |
|---|
| 21133 | + **/ |
|---|
| 21134 | +struct fcp_cmd_rsp_buf * |
|---|
| 21135 | +lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, |
|---|
| 21136 | + struct lpfc_io_buf *lpfc_buf) |
|---|
| 21137 | +{ |
|---|
| 21138 | + struct fcp_cmd_rsp_buf *list_entry = NULL; |
|---|
| 21139 | + struct fcp_cmd_rsp_buf *tmp = NULL; |
|---|
| 21140 | + struct fcp_cmd_rsp_buf *allocated_buf = NULL; |
|---|
| 21141 | + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; |
|---|
| 21142 | + struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; |
|---|
| 21143 | + unsigned long iflags; |
|---|
| 21144 | + |
|---|
| 21145 | + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
|---|
| 21146 | + |
|---|
| 21147 | + if (likely(!list_empty(buf_list))) { |
|---|
| 21148 | + /* break off 1 chunk from the list */ |
|---|
| 21149 | + list_for_each_entry_safe(list_entry, tmp, |
|---|
| 21150 | + buf_list, |
|---|
| 21151 | + list_node) { |
|---|
| 21152 | + list_move_tail(&list_entry->list_node, |
|---|
| 21153 | + &lpfc_buf->dma_cmd_rsp_list); |
|---|
| 21154 | + break; |
|---|
| 21155 | + } |
|---|
| 21156 | + } else { |
|---|
| 21157 | + /* allocate more */ |
|---|
| 21158 | + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); |
|---|
| 21159 | + tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, |
|---|
| 21160 | + cpu_to_node(hdwq->io_wq->chann)); |
|---|
| 21161 | + if (!tmp) { |
|---|
| 21162 | + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
|---|
| 21163 | + "8355 error kmalloc memory for HDWQ " |
|---|
| 21164 | + "%d %s\n", |
|---|
| 21165 | + lpfc_buf->hdwq_no, __func__); |
|---|
| 21166 | + return NULL; |
|---|
| 21167 | + } |
|---|
| 21168 | + |
|---|
| 21169 | + tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool, |
|---|
| 21170 | + GFP_ATOMIC, |
|---|
| 21171 | + &tmp->fcp_cmd_rsp_dma_handle); |
|---|
| 21172 | + |
|---|
| 21173 | + if (!tmp->fcp_cmnd) { |
|---|
| 21174 | + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
|---|
| 21175 | + "8356 error pool_alloc memory for HDWQ " |
|---|
| 21176 | + "%d %s\n", |
|---|
| 21177 | + lpfc_buf->hdwq_no, __func__); |
|---|
| 21178 | + kfree(tmp); |
|---|
| 21179 | + return NULL; |
|---|
| 21180 | + } |
|---|
| 21181 | + |
|---|
| 21182 | + tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd + |
|---|
| 21183 | + sizeof(struct fcp_cmnd)); |
|---|
| 21184 | + |
|---|
| 21185 | + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
|---|
| 21186 | + list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list); |
|---|
| 21187 | + } |
|---|
| 21188 | + |
|---|
| 21189 | + allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list, |
|---|
| 21190 | + struct fcp_cmd_rsp_buf, |
|---|
| 21191 | + list_node); |
|---|
| 21192 | + |
|---|
| 21193 | + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); |
|---|
| 21194 | + |
|---|
| 21195 | + return allocated_buf; |
|---|
| 21196 | +} |
|---|
| 21197 | + |
|---|
| 21198 | +/** |
|---|
| 21199 | + * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool |
|---|
| 21200 | + * @phba: The HBA for which this call is being executed. |
|---|
| 21201 | + * @lpfc_buf: IO buf structure with the CMD/RSP buf |
|---|
| 21202 | + * |
|---|
| 21203 | + * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool. |
|---|
| 21204 | + * |
|---|
| 21205 | + * Return codes: |
|---|
| 21206 | + * 0 - Success |
|---|
| 21207 | + * -EINVAL - Error |
|---|
| 21208 | + **/ |
|---|
| 21209 | +int |
|---|
| 21210 | +lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, |
|---|
| 21211 | + struct lpfc_io_buf *lpfc_buf) |
|---|
| 21212 | +{ |
|---|
| 21213 | + int rc = 0; |
|---|
| 21214 | + struct fcp_cmd_rsp_buf *list_entry = NULL; |
|---|
| 21215 | + struct fcp_cmd_rsp_buf *tmp = NULL; |
|---|
| 21216 | + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; |
|---|
| 21217 | + struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; |
|---|
| 21218 | + unsigned long iflags; |
|---|
| 21219 | + |
|---|
| 21220 | + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
|---|
| 21221 | + |
|---|
| 21222 | + if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) { |
|---|
| 21223 | + list_for_each_entry_safe(list_entry, tmp, |
|---|
| 21224 | + &lpfc_buf->dma_cmd_rsp_list, |
|---|
| 21225 | + list_node) { |
|---|
| 21226 | + list_move_tail(&list_entry->list_node, |
|---|
| 21227 | + buf_list); |
|---|
| 21228 | + } |
|---|
| 21229 | + } else { |
|---|
| 21230 | + rc = -EINVAL; |
|---|
| 21231 | + } |
|---|
| 21232 | + |
|---|
| 21233 | + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); |
|---|
| 21234 | + return rc; |
|---|
| 21235 | +} |
|---|
| 21236 | + |
|---|
| 21237 | +/** |
|---|
| 21238 | + * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool |
|---|
| 21239 | + * @phba: phba object |
|---|
| 21240 | + * @hdwq: hdwq to cleanup cmd rsp buff resources on |
|---|
| 21241 | + * |
|---|
| 21242 | + * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool. |
|---|
| 21243 | + * |
|---|
| 21244 | + * Return codes: |
|---|
| 21245 | + * None |
|---|
| 21246 | + **/ |
|---|
| 21247 | +void |
|---|
| 21248 | +lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, |
|---|
| 21249 | + struct lpfc_sli4_hdw_queue *hdwq) |
|---|
| 21250 | +{ |
|---|
| 21251 | + struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; |
|---|
| 21252 | + struct fcp_cmd_rsp_buf *list_entry = NULL; |
|---|
| 21253 | + struct fcp_cmd_rsp_buf *tmp = NULL; |
|---|
| 21254 | + unsigned long iflags; |
|---|
| 21255 | + |
|---|
| 21256 | + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); |
|---|
| 21257 | + |
|---|
| 21258 | + /* Free cmd_rsp buf pool */ |
|---|
| 21259 | + list_for_each_entry_safe(list_entry, tmp, |
|---|
| 21260 | + buf_list, |
|---|
| 21261 | + list_node) { |
|---|
| 21262 | + dma_pool_free(phba->lpfc_cmd_rsp_buf_pool, |
|---|
| 21263 | + list_entry->fcp_cmnd, |
|---|
| 21264 | + list_entry->fcp_cmd_rsp_dma_handle); |
|---|
| 21265 | + list_del(&list_entry->list_node); |
|---|
| 21266 | + kfree(list_entry); |
|---|
| 21267 | + } |
|---|
| 21268 | + |
|---|
| 21269 | + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); |
|---|
| 21270 | +} |
|---|