forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13, commit 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/scsi/lpfc/lpfc_sli.c
....@@ -1,7 +1,7 @@
11 /*******************************************************************
22 * This file is part of the Emulex Linux Device Driver for *
33 * Fibre Channel Host Bus Adapters. *
4
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
4
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
55 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
66 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
77 * EMULEX and SLI are trademarks of Emulex. *
....@@ -35,11 +35,10 @@
3535 #include <scsi/scsi_transport_fc.h>
3636 #include <scsi/fc/fc_fs.h>
3737 #include <linux/aer.h>
38
+#include <linux/crash_dump.h>
3839 #ifdef CONFIG_X86
3940 #include <asm/set_memory.h>
4041 #endif
41
-
42
-#include <linux/nvme-fc-driver.h>
4342
4443 #include "lpfc_hw4.h"
4544 #include "lpfc_hw.h"
....@@ -50,7 +49,6 @@
5049 #include "lpfc.h"
5150 #include "lpfc_scsi.h"
5251 #include "lpfc_nvme.h"
53
-#include "lpfc_nvmet.h"
5452 #include "lpfc_crtn.h"
5553 #include "lpfc_logmsg.h"
5654 #include "lpfc_compat.h"
....@@ -78,17 +76,19 @@
7876 struct hbq_dmabuf *);
7977 static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
8078 struct hbq_dmabuf *dmabuf);
81
-static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
82
- struct lpfc_cqe *);
79
+static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
80
+ struct lpfc_queue *cq, struct lpfc_cqe *cqe);
8381 static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
8482 int);
8583 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
86
- struct lpfc_eqe *eqe, uint32_t qidx);
84
+ struct lpfc_queue *eq,
85
+ struct lpfc_eqe *eqe);
8786 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
8887 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
89
-static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
90
- struct lpfc_sli_ring *pring,
91
- struct lpfc_iocbq *cmdiocb);
88
+static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
89
+static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
90
+ struct lpfc_queue *cq,
91
+ struct lpfc_cqe *cqe);
9292
9393 static IOCB_t *
9494 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
....@@ -110,7 +110,7 @@
110110 * endianness. This function can be called with or without
111111 * lock.
112112 **/
113
-void
113
+static void
114114 lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
115115 {
116116 uint64_t *src = srcp;
....@@ -150,7 +150,7 @@
150150 /* sanity check on queue memory */
151151 if (unlikely(!q))
152152 return -ENOMEM;
153
- temp_wqe = q->qe[q->host_index].wqe;
153
+ temp_wqe = lpfc_sli4_qe(q, q->host_index);
154154
155155 /* If the host has not yet processed the next entry then we are done */
156156 idx = ((q->host_index + 1) % q->entry_count);
....@@ -160,7 +160,7 @@
160160 }
161161 q->WQ_posted++;
162162 /* set consumption flag every once in a while */
163
- if (!((q->host_index + 1) % q->entry_repost))
163
+ if (!((q->host_index + 1) % q->notify_interval))
164164 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
165165 else
166166 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
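Note: the two hunks above switch WQE addressing to the lpfc_sli4_qe() accessor and rename entry_repost to notify_interval, which controls how often the adapter is asked to report consumption. A minimal userspace sketch of the underlying circular-queue post logic, using hypothetical names and sizes rather than the driver's struct lpfc_queue:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical ring model: host_index is where the host posts next,
 * hba_index is the last entry the hardware reported as consumed. */
struct ring {
	unsigned int host_index;
	unsigned int hba_index;
	unsigned int entry_count;     /* number of slots in the ring */
	unsigned int notify_interval; /* ask HW to report consumption this often */
};

/* Returns true if an entry was posted; mirrors the "queue full" test
 * ((host_index + 1) % entry_count == hba_index) used in lpfc_sli4_wq_put(). */
static bool ring_post(struct ring *q, bool *want_consumption_flag)
{
	unsigned int next = (q->host_index + 1) % q->entry_count;

	if (next == q->hba_index)
		return false;                 /* ring full, HW has not caught up */

	/* request a consumption report once every notify_interval posts */
	*want_consumption_flag = !((q->host_index + 1) % q->notify_interval);

	q->host_index = next;                 /* slot now belongs to the hardware */
	return true;
}

int main(void)
{
	struct ring q = { .entry_count = 8, .notify_interval = 4 };
	bool flag;

	while (ring_post(&q, &flag))
		printf("posted, host_index=%u, consumption flag=%d\n",
		       q.host_index, flag);
	return 0;
}

The full-ring test keeps one slot unused, so host_index == hba_index can only mean "empty", never "full".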
....@@ -228,31 +228,22 @@
228228 * This routine will update the HBA index of a queue to reflect consumption of
229229 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
230230 * an entry the host calls this function to update the queue's internal
231
- * pointers. This routine returns the number of entries that were consumed by
232
- * the HBA.
231
+ * pointers.
233232 **/
234
-static uint32_t
233
+static void
235234 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
236235 {
237
- uint32_t released = 0;
238
-
239236 /* sanity check on queue memory */
240237 if (unlikely(!q))
241
- return 0;
238
+ return;
242239
243
- if (q->hba_index == index)
244
- return 0;
245
- do {
246
- q->hba_index = ((q->hba_index + 1) % q->entry_count);
247
- released++;
248
- } while (q->hba_index != index);
249
- return released;
240
+ q->hba_index = index;
250241 }
251242
252243 /**
253244 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
254245 * @q: The Mailbox Queue to operate on.
255
- * @wqe: The Mailbox Queue Entry to put on the Work queue.
246
+ * @mqe: The Mailbox Queue Entry to put on the Work queue.
256247 *
257248 * This routine will copy the contents of @mqe to the next available entry on
258249 * the @q. This function will then ring the Work Queue Doorbell to signal the
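Note: lpfc_sli4_wq_release(), earlier in this hunk, no longer walks hba_index forward to count released entries; it simply records the index the hardware reported. If a caller still wanted the count the removed loop used to produce, it is recoverable with wraparound arithmetic. A small sketch with made-up index values:

#include <stdio.h>

/* Number of entries consumed between old_hba_index and new_index on a
 * ring of entry_count slots (what the removed do/while loop counted). */
static unsigned int ring_released(unsigned int old_hba_index,
				  unsigned int new_index,
				  unsigned int entry_count)
{
	return (new_index + entry_count - old_hba_index) % entry_count;
}

int main(void)
{
	/* e.g. old index 62, new index 3 on a 64-entry ring -> 5 entries */
	printf("%u entries released\n", ring_released(62, 3, 64));
	return 0;
}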
....@@ -270,7 +261,7 @@
270261 /* sanity check on queue memory */
271262 if (unlikely(!q))
272263 return -ENOMEM;
273
- temp_mqe = q->qe[q->host_index].mqe;
264
+ temp_mqe = lpfc_sli4_qe(q, q->host_index);
274265
275266 /* If the host has not yet processed the next entry then we are done */
276267 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
....@@ -325,29 +316,16 @@
325316 static struct lpfc_eqe *
326317 lpfc_sli4_eq_get(struct lpfc_queue *q)
327318 {
328
- struct lpfc_hba *phba;
329319 struct lpfc_eqe *eqe;
330
- uint32_t idx;
331320
332321 /* sanity check on queue memory */
333322 if (unlikely(!q))
334323 return NULL;
335
- phba = q->phba;
336
- eqe = q->qe[q->hba_index].eqe;
324
+ eqe = lpfc_sli4_qe(q, q->host_index);
337325
338326 /* If the next EQE is not valid then we are done */
339327 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
340328 return NULL;
341
- /* If the host has not yet processed the next entry then we are done */
342
- idx = ((q->hba_index + 1) % q->entry_count);
343
- if (idx == q->host_index)
344
- return NULL;
345
-
346
- q->hba_index = idx;
347
- /* if the index wrapped around, toggle the valid bit */
348
- if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index)
349
- q->qe_valid = (q->qe_valid) ? 0 : 1;
350
-
351329
352330 /*
353331 * insert barrier for instruction interlock : data from the hardware
....@@ -367,7 +345,7 @@
367345 * @q: The Event Queue to disable interrupts
368346 *
369347 **/
370
-inline void
348
+void
371349 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
372350 {
373351 struct lpfc_register doorbell;
....@@ -386,7 +364,7 @@
386364 * @q: The Event Queue to disable interrupts
387365 *
388366 **/
389
-inline void
367
+void
390368 lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
391369 {
392370 struct lpfc_register doorbell;
....@@ -397,44 +375,25 @@
397375 }
398376
399377 /**
400
- * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
378
+ * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
379
+ * @phba: adapter with EQ
401380 * @q: The Event Queue that the host has completed processing for.
381
+ * @count: Number of elements that have been consumed
402382 * @arm: Indicates whether the host wants to arms this CQ.
403383 *
404
- * This routine will mark all Event Queue Entries on @q, from the last
405
- * known completed entry to the last entry that was processed, as completed
406
- * by clearing the valid bit for each completion queue entry. Then it will
407
- * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
408
- * The internal host index in the @q will be updated by this routine to indicate
409
- * that the host has finished processing the entries. The @arm parameter
410
- * indicates that the queue should be rearmed when ringing the doorbell.
411
- *
412
- * This function will return the number of EQEs that were popped.
384
+ * This routine will notify the HBA, by ringing the doorbell, that count
385
+ * number of EQEs have been processed. The @arm parameter indicates whether
386
+ * the queue should be rearmed when ringing the doorbell.
413387 **/
414
-uint32_t
415
-lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
388
+void
389
+lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
390
+ uint32_t count, bool arm)
416391 {
417
- uint32_t released = 0;
418
- struct lpfc_hba *phba;
419
- struct lpfc_eqe *temp_eqe;
420392 struct lpfc_register doorbell;
421393
422394 /* sanity check on queue memory */
423
- if (unlikely(!q))
424
- return 0;
425
- phba = q->phba;
426
-
427
- /* while there are valid entries */
428
- while (q->hba_index != q->host_index) {
429
- if (!phba->sli4_hba.pc_sli4_params.eqav) {
430
- temp_eqe = q->qe[q->host_index].eqe;
431
- bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
432
- }
433
- released++;
434
- q->host_index = ((q->host_index + 1) % q->entry_count);
435
- }
436
- if (unlikely(released == 0 && !arm))
437
- return 0;
395
+ if (unlikely(!q || (count == 0 && !arm)))
396
+ return;
438397
439398 /* ring doorbell for number popped */
440399 doorbell.word0 = 0;
....@@ -442,7 +401,7 @@
442401 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
443402 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
444403 }
445
- bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
404
+ bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
446405 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
447406 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
448407 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
....@@ -451,60 +410,139 @@
451410 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
452411 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
453412 readl(q->phba->sli4_hba.EQDBregaddr);
454
- return released;
455413 }
456414
457415 /**
458
- * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ
416
+ * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
417
+ * @phba: adapter with EQ
459418 * @q: The Event Queue that the host has completed processing for.
419
+ * @count: Number of elements that have been consumed
460420 * @arm: Indicates whether the host wants to arms this CQ.
461421 *
462
- * This routine will mark all Event Queue Entries on @q, from the last
463
- * known completed entry to the last entry that was processed, as completed
464
- * by clearing the valid bit for each completion queue entry. Then it will
465
- * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
466
- * The internal host index in the @q will be updated by this routine to indicate
467
- * that the host has finished processing the entries. The @arm parameter
468
- * indicates that the queue should be rearmed when ringing the doorbell.
469
- *
470
- * This function will return the number of EQEs that were popped.
422
+ * This routine will notify the HBA, by ringing the doorbell, that count
423
+ * number of EQEs have been processed. The @arm parameter indicates whether
424
+ * the queue should be rearmed when ringing the doorbell.
471425 **/
472
-uint32_t
473
-lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
426
+void
427
+lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
428
+ uint32_t count, bool arm)
474429 {
475
- uint32_t released = 0;
476
- struct lpfc_hba *phba;
477
- struct lpfc_eqe *temp_eqe;
478430 struct lpfc_register doorbell;
479431
480432 /* sanity check on queue memory */
481
- if (unlikely(!q))
482
- return 0;
483
- phba = q->phba;
484
-
485
- /* while there are valid entries */
486
- while (q->hba_index != q->host_index) {
487
- if (!phba->sli4_hba.pc_sli4_params.eqav) {
488
- temp_eqe = q->qe[q->host_index].eqe;
489
- bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
490
- }
491
- released++;
492
- q->host_index = ((q->host_index + 1) % q->entry_count);
493
- }
494
- if (unlikely(released == 0 && !arm))
495
- return 0;
433
+ if (unlikely(!q || (count == 0 && !arm)))
434
+ return;
496435
497436 /* ring doorbell for number popped */
498437 doorbell.word0 = 0;
499438 if (arm)
500439 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
501
- bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released);
440
+ bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
502441 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
503442 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
504443 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
505444 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
506445 readl(q->phba->sli4_hba.EQDBregaddr);
507
- return released;
446
+}
447
+
448
+static void
449
+__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
450
+ struct lpfc_eqe *eqe)
451
+{
452
+ if (!phba->sli4_hba.pc_sli4_params.eqav)
453
+ bf_set_le32(lpfc_eqe_valid, eqe, 0);
454
+
455
+ eq->host_index = ((eq->host_index + 1) % eq->entry_count);
456
+
457
+ /* if the index wrapped around, toggle the valid bit */
458
+ if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
459
+ eq->qe_valid = (eq->qe_valid) ? 0 : 1;
460
+}
461
+
462
+static void
463
+lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
464
+{
465
+ struct lpfc_eqe *eqe = NULL;
466
+ u32 eq_count = 0, cq_count = 0;
467
+ struct lpfc_cqe *cqe = NULL;
468
+ struct lpfc_queue *cq = NULL, *childq = NULL;
469
+ int cqid = 0;
470
+
471
+ /* walk all the EQ entries and drop on the floor */
472
+ eqe = lpfc_sli4_eq_get(eq);
473
+ while (eqe) {
474
+ /* Get the reference to the corresponding CQ */
475
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
476
+ cq = NULL;
477
+
478
+ list_for_each_entry(childq, &eq->child_list, list) {
479
+ if (childq->queue_id == cqid) {
480
+ cq = childq;
481
+ break;
482
+ }
483
+ }
484
+ /* If CQ is valid, iterate through it and drop all the CQEs */
485
+ if (cq) {
486
+ cqe = lpfc_sli4_cq_get(cq);
487
+ while (cqe) {
488
+ __lpfc_sli4_consume_cqe(phba, cq, cqe);
489
+ cq_count++;
490
+ cqe = lpfc_sli4_cq_get(cq);
491
+ }
492
+ /* Clear and re-arm the CQ */
493
+ phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
494
+ LPFC_QUEUE_REARM);
495
+ cq_count = 0;
496
+ }
497
+ __lpfc_sli4_consume_eqe(phba, eq, eqe);
498
+ eq_count++;
499
+ eqe = lpfc_sli4_eq_get(eq);
500
+ }
501
+
502
+ /* Clear and re-arm the EQ */
503
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
504
+}
505
+
506
+static int
507
+lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
508
+ uint8_t rearm)
509
+{
510
+ struct lpfc_eqe *eqe;
511
+ int count = 0, consumed = 0;
512
+
513
+ if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
514
+ goto rearm_and_exit;
515
+
516
+ eqe = lpfc_sli4_eq_get(eq);
517
+ while (eqe) {
518
+ lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
519
+ __lpfc_sli4_consume_eqe(phba, eq, eqe);
520
+
521
+ consumed++;
522
+ if (!(++count % eq->max_proc_limit))
523
+ break;
524
+
525
+ if (!(count % eq->notify_interval)) {
526
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
527
+ LPFC_QUEUE_NOARM);
528
+ consumed = 0;
529
+ }
530
+
531
+ eqe = lpfc_sli4_eq_get(eq);
532
+ }
533
+ eq->EQ_processed += count;
534
+
535
+ /* Track the max number of EQEs processed in 1 intr */
536
+ if (count > eq->EQ_max_eqe)
537
+ eq->EQ_max_eqe = count;
538
+
539
+ xchg(&eq->queue_claimed, 0);
540
+
541
+rearm_and_exit:
542
+ /* Always clear the EQ. */
543
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
544
+
545
+ return count;
508546 }
509547
510548 /**
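Note: the new lpfc_sli4_process_eq() above combines several ideas: a cmpxchg() claim so only one context drains the EQ at a time, a per-call cap (max_proc_limit), partial doorbell writes every notify_interval entries, and a final doorbell write that optionally re-arms the queue. A standalone model of that shape, using C11 atomics and hypothetical fields in place of the driver's structures and LPFC_QUEUE_NOARM/LPFC_QUEUE_REARM flags:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical event-queue model; not the driver's struct lpfc_queue. */
struct eq {
	atomic_int queue_claimed;     /* 0 = free, 1 = being processed */
	unsigned int notify_interval; /* ring the doorbell (no arm) this often */
	unsigned int max_proc_limit;  /* cap on entries handled per call */
	unsigned int pending;         /* stand-in for valid EQEs waiting */
};

static void write_eq_db(struct eq *q, unsigned int consumed, bool arm)
{
	if (consumed == 0 && !arm)
		return;                       /* nothing to tell the hardware */
	printf("doorbell: consumed=%u arm=%d\n", consumed, arm);
}

static int process_eq(struct eq *q, bool rearm)
{
	unsigned int count = 0, consumed = 0;
	int expected = 0;

	/* only one context may drain the queue at a time */
	if (!atomic_compare_exchange_strong(&q->queue_claimed, &expected, 1))
		goto rearm_and_exit;

	while (q->pending) {
		q->pending--;                 /* "handle" and consume one EQE */
		consumed++;
		if (!(++count % q->max_proc_limit))
			break;                /* bound the work done per call */
		if (!(count % q->notify_interval)) {
			write_eq_db(q, consumed, false); /* partial credit, no arm */
			consumed = 0;
		}
	}
	atomic_store(&q->queue_claimed, 0);

rearm_and_exit:
	write_eq_db(q, consumed, rearm);      /* final credit, optionally re-arm */
	return count;
}

int main(void)
{
	struct eq q = { .notify_interval = 4, .max_proc_limit = 32, .pending = 10 };

	printf("processed %d entries\n", process_eq(&q, true));
	return 0;
}

Writing partial credits without arming keeps the hardware's free-entry count current during long bursts; the interrupt is only re-enabled by the final, armed doorbell write.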
....@@ -519,28 +557,16 @@
519557 static struct lpfc_cqe *
520558 lpfc_sli4_cq_get(struct lpfc_queue *q)
521559 {
522
- struct lpfc_hba *phba;
523560 struct lpfc_cqe *cqe;
524
- uint32_t idx;
525561
526562 /* sanity check on queue memory */
527563 if (unlikely(!q))
528564 return NULL;
529
- phba = q->phba;
530
- cqe = q->qe[q->hba_index].cqe;
565
+ cqe = lpfc_sli4_qe(q, q->host_index);
531566
532567 /* If the next CQE is not valid then we are done */
533568 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
534569 return NULL;
535
- /* If the host has not yet processed the next entry then we are done */
536
- idx = ((q->hba_index + 1) % q->entry_count);
537
- if (idx == q->host_index)
538
- return NULL;
539
-
540
- q->hba_index = idx;
541
- /* if the index wrapped around, toggle the valid bit */
542
- if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index)
543
- q->qe_valid = (q->qe_valid) ? 0 : 1;
544570
545571 /*
546572 * insert barrier for instruction interlock : data from the hardware
....@@ -554,113 +580,85 @@
554580 return cqe;
555581 }
556582
583
+static void
584
+__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
585
+ struct lpfc_cqe *cqe)
586
+{
587
+ if (!phba->sli4_hba.pc_sli4_params.cqav)
588
+ bf_set_le32(lpfc_cqe_valid, cqe, 0);
589
+
590
+ cq->host_index = ((cq->host_index + 1) % cq->entry_count);
591
+
592
+ /* if the index wrapped around, toggle the valid bit */
593
+ if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
594
+ cq->qe_valid = (cq->qe_valid) ? 0 : 1;
595
+}
596
+
557597 /**
558
- * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
598
+ * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
599
+ * @phba: the adapter with the CQ
559600 * @q: The Completion Queue that the host has completed processing for.
601
+ * @count: the number of elements that were consumed
560602 * @arm: Indicates whether the host wants to arms this CQ.
561603 *
562
- * This routine will mark all Completion queue entries on @q, from the last
563
- * known completed entry to the last entry that was processed, as completed
564
- * by clearing the valid bit for each completion queue entry. Then it will
565
- * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
566
- * The internal host index in the @q will be updated by this routine to indicate
567
- * that the host has finished processing the entries. The @arm parameter
568
- * indicates that the queue should be rearmed when ringing the doorbell.
569
- *
570
- * This function will return the number of CQEs that were released.
604
+ * This routine will notify the HBA, by ringing the doorbell, that the
605
+ * CQEs have been processed. The @arm parameter specifies whether the
606
+ * queue should be rearmed when ringing the doorbell.
571607 **/
572
-uint32_t
573
-lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
608
+void
609
+lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
610
+ uint32_t count, bool arm)
574611 {
575
- uint32_t released = 0;
576
- struct lpfc_hba *phba;
577
- struct lpfc_cqe *temp_qe;
578612 struct lpfc_register doorbell;
579613
580614 /* sanity check on queue memory */
581
- if (unlikely(!q))
582
- return 0;
583
- phba = q->phba;
584
-
585
- /* while there are valid entries */
586
- while (q->hba_index != q->host_index) {
587
- if (!phba->sli4_hba.pc_sli4_params.cqav) {
588
- temp_qe = q->qe[q->host_index].cqe;
589
- bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
590
- }
591
- released++;
592
- q->host_index = ((q->host_index + 1) % q->entry_count);
593
- }
594
- if (unlikely(released == 0 && !arm))
595
- return 0;
615
+ if (unlikely(!q || (count == 0 && !arm)))
616
+ return;
596617
597618 /* ring doorbell for number popped */
598619 doorbell.word0 = 0;
599620 if (arm)
600621 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
601
- bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
622
+ bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
602623 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
603624 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
604625 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
605626 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
606627 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
607
- return released;
608628 }
609629
610630 /**
611
- * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ
631
+ * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
632
+ * @phba: the adapter with the CQ
612633 * @q: The Completion Queue that the host has completed processing for.
634
+ * @count: the number of elements that were consumed
613635 * @arm: Indicates whether the host wants to arms this CQ.
614636 *
615
- * This routine will mark all Completion queue entries on @q, from the last
616
- * known completed entry to the last entry that was processed, as completed
617
- * by clearing the valid bit for each completion queue entry. Then it will
618
- * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
619
- * The internal host index in the @q will be updated by this routine to indicate
620
- * that the host has finished processing the entries. The @arm parameter
621
- * indicates that the queue should be rearmed when ringing the doorbell.
622
- *
623
- * This function will return the number of CQEs that were released.
637
+ * This routine will notify the HBA, by ringing the doorbell, that the
638
+ * CQEs have been processed. The @arm parameter specifies whether the
639
+ * queue should be rearmed when ringing the doorbell.
624640 **/
625
-uint32_t
626
-lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm)
641
+void
642
+lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
643
+ uint32_t count, bool arm)
627644 {
628
- uint32_t released = 0;
629
- struct lpfc_hba *phba;
630
- struct lpfc_cqe *temp_qe;
631645 struct lpfc_register doorbell;
632646
633647 /* sanity check on queue memory */
634
- if (unlikely(!q))
635
- return 0;
636
- phba = q->phba;
637
-
638
- /* while there are valid entries */
639
- while (q->hba_index != q->host_index) {
640
- if (!phba->sli4_hba.pc_sli4_params.cqav) {
641
- temp_qe = q->qe[q->host_index].cqe;
642
- bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
643
- }
644
- released++;
645
- q->host_index = ((q->host_index + 1) % q->entry_count);
646
- }
647
- if (unlikely(released == 0 && !arm))
648
- return 0;
648
+ if (unlikely(!q || (count == 0 && !arm)))
649
+ return;
649650
650651 /* ring doorbell for number popped */
651652 doorbell.word0 = 0;
652653 if (arm)
653654 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
654
- bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released);
655
+ bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
655656 bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
656657 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
657
- return released;
658658 }
659659
660
-/**
660
+/*
661661 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
662
- * @q: The Header Receive Queue to operate on.
663
- * @wqe: The Receive Queue Entry to put on the Receive queue.
664662 *
665663 * This routine will copy the contents of @wqe to the next available entry on
666664 * the @q. This function will then ring the Receive Queue Doorbell to signal the
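Note: the hunk above moves CQE consumption into __lpfc_sli4_consume_cqe(): the host-side index advances, and on parts with the CQAV capability the expected valid ("phase") bit flips each time the ring wraps instead of each entry being cleared. A sketch of just the phase-toggle variant, with illustrative names rather than the driver's struct lpfc_queue or bf_get_le32() accessors:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical completion-queue model using a phase ("valid") bit: an
 * entry belongs to the host only while its valid bit matches the queue's
 * current qe_valid phase; the phase flips on every wrap. */
struct cq {
	unsigned int host_index;
	unsigned int entry_count;
	unsigned int qe_valid;     /* phase the host currently expects */
	unsigned int *valid_bits;  /* stand-in for the valid bit in each CQE */
};

/* Returns true if a new entry was available, mirroring
 * lpfc_sli4_cq_get() followed by __lpfc_sli4_consume_cqe(). */
static bool cq_consume_one(struct cq *q)
{
	if (q->valid_bits[q->host_index] != q->qe_valid)
		return false;                       /* next CQE not valid yet */

	q->host_index = (q->host_index + 1) % q->entry_count;
	if (!q->host_index)                         /* wrapped: flip the phase */
		q->qe_valid = !q->qe_valid;
	return true;
}

int main(void)
{
	unsigned int bits[4] = { 1, 1, 1, 0 };      /* 3 entries posted by "HW" */
	struct cq q = { .entry_count = 4, .qe_valid = 1, .valid_bits = bits };
	unsigned int count = 0;

	while (cq_consume_one(&q))
		count++;
	printf("consumed %u CQEs, host_index=%u\n", count, q.host_index);
	return 0;
}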
....@@ -684,8 +682,8 @@
684682 return -ENOMEM;
685683 hq_put_index = hq->host_index;
686684 dq_put_index = dq->host_index;
687
- temp_hrqe = hq->qe[hq_put_index].rqe;
688
- temp_drqe = dq->qe[dq_put_index].rqe;
685
+ temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
686
+ temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
689687
690688 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
691689 return -EINVAL;
....@@ -703,15 +701,15 @@
703701 hq->RQ_buf_posted++;
704702
705703 /* Ring The Header Receive Queue Doorbell */
706
- if (!(hq->host_index % hq->entry_repost)) {
704
+ if (!(hq->host_index % hq->notify_interval)) {
707705 doorbell.word0 = 0;
708706 if (hq->db_format == LPFC_DB_RING_FORMAT) {
709707 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
710
- hq->entry_repost);
708
+ hq->notify_interval);
711709 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
712710 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
713711 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
714
- hq->entry_repost);
712
+ hq->notify_interval);
715713 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
716714 hq->host_index);
717715 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
....@@ -723,9 +721,8 @@
723721 return hq_put_index;
724722 }
725723
726
-/**
724
+/*
727725 * lpfc_sli4_rq_release - Updates internal hba index for RQ
728
- * @q: The Header Receive Queue to operate on.
729726 *
730727 * This routine will update the HBA index of a queue to reflect consumption of
731728 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
....@@ -924,10 +921,10 @@
924921 mod_timer(&phba->rrq_tmr, next_time);
925922 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
926923 list_del(&rrq->list);
927
- if (!rrq->send_rrq)
924
+ if (!rrq->send_rrq) {
928925 /* this call will free the rrq */
929
- lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
930
- else if (lpfc_send_rrq(phba, rrq)) {
926
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
927
+ } else if (lpfc_send_rrq(phba, rrq)) {
931928 /* if we send the rrq then the completion handler
932929 * will clear the bit in the xribitmap.
933930 */
....@@ -1009,23 +1006,22 @@
10091006 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
10101007 * @phba: Pointer to HBA context object.
10111008 * @ndlp: Targets nodelist pointer for this exchange.
1012
- * @xritag the xri in the bitmap to test.
1009
+ * @xritag: the xri in the bitmap to test.
10131010 *
1014
- * This function is called with hbalock held. This function
1015
- * returns 0 = rrq not active for this xri
1016
- * 1 = rrq is valid for this xri.
1011
+ * This function returns:
1012
+ * 0 = rrq not active for this xri
1013
+ * 1 = rrq is valid for this xri.
10171014 **/
10181015 int
10191016 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
10201017 uint16_t xritag)
10211018 {
1022
- lockdep_assert_held(&phba->hbalock);
10231019 if (!ndlp)
10241020 return 0;
10251021 if (!ndlp->active_rrqs_xri_bitmap)
10261022 return 0;
10271023 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1028
- return 1;
1024
+ return 1;
10291025 else
10301026 return 0;
10311027 }
....@@ -1081,7 +1077,7 @@
10811077 goto out;
10821078
10831079 spin_unlock_irqrestore(&phba->hbalock, iflags);
1084
- rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
1080
+ rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
10851081 if (!rrq) {
10861082 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10871083 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
....@@ -1120,12 +1116,13 @@
11201116 /**
11211117 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
11221118 * @phba: Pointer to HBA context object.
1123
- * @piocb: Pointer to the iocbq.
1119
+ * @piocbq: Pointer to the iocbq.
11241120 *
1125
- * This function is called with the ring lock held. This function
1126
- * gets a new driver sglq object from the sglq list. If the
1127
- * list is not empty then it is successful, it returns pointer to the newly
1128
- * allocated sglq object else it returns NULL.
1121
+ * The driver calls this function with either the nvme ls ring lock
1122
+ * or the fc els ring lock held depending on the iocb usage. This function
1123
+ * gets a new driver sglq object from the sglq list. If the list is not empty
1124
+ * then it is successful, it returns pointer to the newly allocated sglq
1125
+ * object else it returns NULL.
11291126 **/
11301127 static struct lpfc_sglq *
11311128 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
....@@ -1133,14 +1130,20 @@
11331130 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
11341131 struct lpfc_sglq *sglq = NULL;
11351132 struct lpfc_sglq *start_sglq = NULL;
1136
- struct lpfc_scsi_buf *lpfc_cmd;
1133
+ struct lpfc_io_buf *lpfc_cmd;
11371134 struct lpfc_nodelist *ndlp;
1135
+ struct lpfc_sli_ring *pring = NULL;
11381136 int found = 0;
11391137
1140
- lockdep_assert_held(&phba->hbalock);
1138
+ if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
1139
+ pring = phba->sli4_hba.nvmels_wq->pring;
1140
+ else
1141
+ pring = lpfc_phba_elsring(phba);
1142
+
1143
+ lockdep_assert_held(&pring->ring_lock);
11411144
11421145 if (piocbq->iocb_flag & LPFC_IO_FCP) {
1143
- lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
1146
+ lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
11441147 ndlp = lpfc_cmd->rdata->pnode;
11451148 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
11461149 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
....@@ -1189,7 +1192,7 @@
11891192 /**
11901193 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
11911194 * @phba: Pointer to HBA context object.
1192
- * @piocb: Pointer to the iocbq.
1195
+ * @piocbq: Pointer to the iocbq.
11931196 *
11941197 * This function is called with the sgl_list lock held. This function
11951198 * gets a new driver sglq object from the sglq list. If the
....@@ -1240,8 +1243,8 @@
12401243 * @phba: Pointer to HBA context object.
12411244 * @iocbq: Pointer to driver iocb object.
12421245 *
1243
- * This function is called with hbalock held to release driver
1244
- * iocb object to the iocb pool. The iotag in the iocb object
1246
+ * This function is called to release the driver iocb object
1247
+ * to the iocb pool. The iotag in the iocb object
12451248 * does not change for each use of the iocb object. This function
12461249 * clears all other fields of the iocb object when it is freed.
12471250 * The sqlq structure that holds the xritag and phys and virtual
....@@ -1251,7 +1254,8 @@
12511254 * this IO was aborted then the sglq entry it put on the
12521255 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
12531256 * IO has good status or fails for any other reason then the sglq
1254
- * entry is added to the free list (lpfc_els_sgl_list).
1257
+ * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
1258
+ * asserted held in the code path calling this routine.
12551259 **/
12561260 static void
12571261 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
....@@ -1260,8 +1264,6 @@
12601264 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
12611265 unsigned long iflag = 0;
12621266 struct lpfc_sli_ring *pring;
1263
-
1264
- lockdep_assert_held(&phba->hbalock);
12651267
12661268 if (iocbq->sli4_xritag == NO_XRI)
12671269 sglq = NULL;
....@@ -1325,17 +1327,16 @@
13251327 * @phba: Pointer to HBA context object.
13261328 * @iocbq: Pointer to driver iocb object.
13271329 *
1328
- * This function is called with hbalock held to release driver
1329
- * iocb object to the iocb pool. The iotag in the iocb object
1330
- * does not change for each use of the iocb object. This function
1331
- * clears all other fields of the iocb object when it is freed.
1330
+ * This function is called to release the driver iocb object to the
1331
+ * iocb pool. The iotag in the iocb object does not change for each
1332
+ * use of the iocb object. This function clears all other fields of
1333
+ * the iocb object when it is freed. The hbalock is asserted held in
1334
+ * the code path calling this routine.
13321335 **/
13331336 static void
13341337 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
13351338 {
13361339 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1337
-
1338
- lockdep_assert_held(&phba->hbalock);
13391340
13401341 /*
13411342 * Clean all volatile data fields, preserve iotag and node struct.
....@@ -1405,9 +1406,12 @@
14051406
14061407 while (!list_empty(iocblist)) {
14071408 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1408
- if (!piocb->iocb_cmpl)
1409
- lpfc_sli_release_iocbq(phba, piocb);
1410
- else {
1409
+ if (!piocb->iocb_cmpl) {
1410
+ if (piocb->iocb_flag & LPFC_IO_NVME)
1411
+ lpfc_nvme_cancel_iocb(phba, piocb);
1412
+ else
1413
+ lpfc_sli_release_iocbq(phba, piocb);
1414
+ } else {
14111415 piocb->iocb.ulpStatus = ulpstatus;
14121416 piocb->iocb.un.ulpWord[4] = ulpWord4;
14131417 (piocb->iocb_cmpl) (phba, piocb, piocb);
....@@ -1485,6 +1489,7 @@
14851489 case DSSCMD_IWRITE64_CX:
14861490 case DSSCMD_IREAD64_CR:
14871491 case DSSCMD_IREAD64_CX:
1492
+ case CMD_SEND_FRAME:
14881493 type = LPFC_SOL_IOCB;
14891494 break;
14901495 case CMD_ABORT_XRI_CN:
....@@ -1559,7 +1564,7 @@
15591564 lpfc_config_ring(phba, i, pmb);
15601565 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
15611566 if (rc != MBX_SUCCESS) {
1562
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1567
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15631568 "0446 Adapter failed to init (%d), "
15641569 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
15651570 "ring %d\n",
....@@ -1580,7 +1585,8 @@
15801585 * @pring: Pointer to driver SLI ring object.
15811586 * @piocb: Pointer to the driver iocb object.
15821587 *
1583
- * This function is called with hbalock held. The function adds the
1588
+ * The driver calls this function with the hbalock held for SLI3 ports or
1589
+ * the ring lock held for SLI4 ports. The function adds the
15841590 * new iocb to txcmplq of the given ring. This function always returns
15851591 * 0. If this function is called for ELS ring, this function checks if
15861592 * there is a vport associated with the ELS command. This function also
....@@ -1590,12 +1596,16 @@
15901596 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
15911597 struct lpfc_iocbq *piocb)
15921598 {
1593
- lockdep_assert_held(&phba->hbalock);
1599
+ if (phba->sli_rev == LPFC_SLI_REV4)
1600
+ lockdep_assert_held(&pring->ring_lock);
1601
+ else
1602
+ lockdep_assert_held(&phba->hbalock);
15941603
15951604 BUG_ON(!piocb);
15961605
15971606 list_add_tail(&piocb->list, &pring->txcmplq);
15981607 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1608
+ pring->txcmplq_cnt++;
15991609
16001610 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
16011611 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
....@@ -1663,7 +1673,7 @@
16631673 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
16641674
16651675 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1666
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1676
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16671677 "0315 Ring %d issue: portCmdGet %d "
16681678 "is bigger than cmd ring %d\n",
16691679 pring->ringno,
....@@ -1773,17 +1783,17 @@
17731783 * @nextiocb: Pointer to driver iocb object which need to be
17741784 * posted to firmware.
17751785 *
1776
- * This function is called with hbalock held to post a new iocb to
1777
- * the firmware. This function copies the new iocb to ring iocb slot and
1778
- * updates the ring pointers. It adds the new iocb to txcmplq if there is
1786
+ * This function is called to post a new iocb to the firmware. This
1787
+ * function copies the new iocb to ring iocb slot and updates the
1788
+ * ring pointers. It adds the new iocb to txcmplq if there is
17791789 * a completion call back for this iocb else the function will free the
1780
- * iocb object.
1790
+ * iocb object. The hbalock is asserted held in the code path calling
1791
+ * this routine.
17811792 **/
17821793 static void
17831794 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
17841795 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
17851796 {
1786
- lockdep_assert_held(&phba->hbalock);
17871797 /*
17881798 * Set up an iotag
17891799 */
....@@ -1949,8 +1959,7 @@
19491959 hbqp->local_hbqGetIdx = getidx;
19501960
19511961 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1952
- lpfc_printf_log(phba, KERN_ERR,
1953
- LOG_SLI | LOG_VPORT,
1962
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19541963 "1802 HBQ %d: local_hbqGetIdx "
19551964 "%u is > than hbqp->entry_count %u\n",
19561965 hbqno, hbqp->local_hbqGetIdx,
....@@ -2218,10 +2227,8 @@
22182227 lpfc_hbq_defs[qno]->init_count);
22192228 }
22202229
2221
-/**
2230
+/*
22222231 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2223
- * @phba: Pointer to HBA context object.
2224
- * @hbqno: HBQ number.
22252232 *
22262233 * This function removes the first hbq buffer on an hbq list and returns a
22272234 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
....@@ -2240,7 +2247,7 @@
22402247 /**
22412248 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
22422249 * @phba: Pointer to HBA context object.
2243
- * @hbqno: HBQ number.
2250
+ * @hrq: HBQ number.
22442251 *
22452252 * This function removes the first RQ buffer on an RQ buffer list and returns a
22462253 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
....@@ -2289,7 +2296,7 @@
22892296 }
22902297 }
22912298 spin_unlock_irq(&phba->hbalock);
2292
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2299
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
22932300 "1803 Bad hbq tag. Data: x%x x%x\n",
22942301 tag, phba->hbqs[tag >> 16].buffer_count);
22952302 return NULL;
....@@ -2435,6 +2442,20 @@
24352442 return;
24362443 }
24372444
2445
+static void
2446
+__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2447
+{
2448
+ unsigned long iflags;
2449
+
2450
+ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2451
+ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2452
+ spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
2453
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2454
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2455
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
2456
+ }
2457
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
2458
+}
24382459
24392460 /**
24402461 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
....@@ -2456,7 +2477,7 @@
24562477 uint16_t rpi, vpi;
24572478 int rc;
24582479
2459
- mp = (struct lpfc_dmabuf *) (pmb->context1);
2480
+ mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
24602481
24612482 if (mp) {
24622483 lpfc_mbuf_free(phba, mp->virt, mp->phys);
....@@ -2493,15 +2514,43 @@
24932514 }
24942515
24952516 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2496
- ndlp = (struct lpfc_nodelist *)pmb->context2;
2517
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
24972518 lpfc_nlp_put(ndlp);
2498
- pmb->context2 = NULL;
2519
+ pmb->ctx_buf = NULL;
2520
+ pmb->ctx_ndlp = NULL;
2521
+ }
2522
+
2523
+ if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2524
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2525
+
2526
+ /* Check to see if there are any deferred events to process */
2527
+ if (ndlp) {
2528
+ lpfc_printf_vlog(
2529
+ vport,
2530
+ KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2531
+ "1438 UNREG cmpl deferred mbox x%x "
2532
+ "on NPort x%x Data: x%x x%x %px\n",
2533
+ ndlp->nlp_rpi, ndlp->nlp_DID,
2534
+ ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2535
+
2536
+ if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2537
+ (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2538
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
2539
+ ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2540
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2541
+ } else {
2542
+ __lpfc_sli_rpi_release(vport, ndlp);
2543
+ }
2544
+ if (vport->load_flag & FC_UNLOADING)
2545
+ lpfc_nlp_put(ndlp);
2546
+ pmb->ctx_ndlp = NULL;
2547
+ }
24992548 }
25002549
25012550 /* Check security permission status on INIT_LINK mailbox command */
25022551 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
25032552 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2504
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2553
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
25052554 "2860 SLI authentication is required "
25062555 "for INIT_LINK but has not done yet\n");
25072556
....@@ -2529,21 +2578,46 @@
25292578 struct lpfc_vport *vport = pmb->vport;
25302579 struct lpfc_nodelist *ndlp;
25312580
2532
- ndlp = pmb->context1;
2581
+ ndlp = pmb->ctx_ndlp;
25332582 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
25342583 if (phba->sli_rev == LPFC_SLI_REV4 &&
25352584 (bf_get(lpfc_sli_intf_if_type,
25362585 &phba->sli4_hba.sli_intf) >=
25372586 LPFC_SLI_INTF_IF_TYPE_2)) {
25382587 if (ndlp) {
2539
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2540
- "0010 UNREG_LOGIN vpi:%x "
2541
- "rpi:%x DID:%x map:%x %p\n",
2542
- vport->vpi, ndlp->nlp_rpi,
2543
- ndlp->nlp_DID,
2544
- ndlp->nlp_usg_map, ndlp);
2588
+ lpfc_printf_vlog(
2589
+ vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2590
+ "0010 UNREG_LOGIN vpi:%x "
2591
+ "rpi:%x DID:%x defer x%x flg x%x "
2592
+ "map:%x %px\n",
2593
+ vport->vpi, ndlp->nlp_rpi,
2594
+ ndlp->nlp_DID, ndlp->nlp_defer_did,
2595
+ ndlp->nlp_flag,
2596
+ ndlp->nlp_usg_map, ndlp);
25452597 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
25462598 lpfc_nlp_put(ndlp);
2599
+
2600
+ /* Check to see if there are any deferred
2601
+ * events to process
2602
+ */
2603
+ if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2604
+ (ndlp->nlp_defer_did !=
2605
+ NLP_EVT_NOTHING_PENDING)) {
2606
+ lpfc_printf_vlog(
2607
+ vport, KERN_INFO, LOG_DISCOVERY,
2608
+ "4111 UNREG cmpl deferred "
2609
+ "clr x%x on "
2610
+ "NPort x%x Data: x%x x%px\n",
2611
+ ndlp->nlp_rpi, ndlp->nlp_DID,
2612
+ ndlp->nlp_defer_did, ndlp);
2613
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
2614
+ ndlp->nlp_defer_did =
2615
+ NLP_EVT_NOTHING_PENDING;
2616
+ lpfc_issue_els_plogi(
2617
+ vport, ndlp->nlp_DID, 0);
2618
+ } else {
2619
+ __lpfc_sli_rpi_release(vport, ndlp);
2620
+ }
25472621 }
25482622 }
25492623 }
....@@ -2612,10 +2686,11 @@
26122686 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
26132687 MBX_SHUTDOWN) {
26142688 /* Unknown mailbox command compl */
2615
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2689
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
26162690 "(%d):0323 Unknown Mailbox command "
26172691 "x%x (x%x/x%x) Cmpl\n",
2618
- pmb->vport ? pmb->vport->vpi : 0,
2692
+ pmb->vport ? pmb->vport->vpi :
2693
+ LPFC_VPORT_UNKNOWN,
26192694 pmbox->mbxCommand,
26202695 lpfc_sli_config_mbox_subsys_get(phba,
26212696 pmb),
....@@ -2636,7 +2711,8 @@
26362711 "(%d):0305 Mbox cmd cmpl "
26372712 "error - RETRYing Data: x%x "
26382713 "(x%x/x%x) x%x x%x x%x\n",
2639
- pmb->vport ? pmb->vport->vpi : 0,
2714
+ pmb->vport ? pmb->vport->vpi :
2715
+ LPFC_VPORT_UNKNOWN,
26402716 pmbox->mbxCommand,
26412717 lpfc_sli_config_mbox_subsys_get(phba,
26422718 pmb),
....@@ -2644,7 +2720,8 @@
26442720 pmb),
26452721 pmbox->mbxStatus,
26462722 pmbox->un.varWords[0],
2647
- pmb->vport->port_state);
2723
+ pmb->vport ? pmb->vport->port_state :
2724
+ LPFC_VPORT_UNKNOWN);
26482725 pmbox->mbxStatus = 0;
26492726 pmbox->mbxOwner = OWN_HOST;
26502727 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
....@@ -2655,7 +2732,7 @@
26552732
26562733 /* Mailbox cmd <cmd> Cmpl <cmpl> */
26572734 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2658
- "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2735
+ "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
26592736 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
26602737 "x%x x%x x%x\n",
26612738 pmb->vport ? pmb->vport->vpi : 0,
....@@ -2710,6 +2787,123 @@
27102787 }
27112788
27122789 /**
2790
+ * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
2791
+ * containing a NVME LS request.
2792
+ * @phba: pointer to lpfc hba data structure.
2793
+ * @piocb: pointer to the iocbq struct representing the sequence starting
2794
+ * frame.
2795
+ *
2796
+ * This routine initially validates the NVME LS, validates there is a login
2797
+ * with the port that sent the LS, and then calls the appropriate nvme host
2798
+ * or target LS request handler.
2799
+ **/
2800
+static void
2801
+lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2802
+{
2803
+ struct lpfc_nodelist *ndlp;
2804
+ struct lpfc_dmabuf *d_buf;
2805
+ struct hbq_dmabuf *nvmebuf;
2806
+ struct fc_frame_header *fc_hdr;
2807
+ struct lpfc_async_xchg_ctx *axchg = NULL;
2808
+ char *failwhy = NULL;
2809
+ uint32_t oxid, sid, did, fctl, size;
2810
+ int ret = 1;
2811
+
2812
+ d_buf = piocb->context2;
2813
+
2814
+ nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2815
+ fc_hdr = nvmebuf->hbuf.virt;
2816
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2817
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
2818
+ did = sli4_did_from_fc_hdr(fc_hdr);
2819
+ fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2820
+ fc_hdr->fh_f_ctl[1] << 8 |
2821
+ fc_hdr->fh_f_ctl[2]);
2822
+ size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2823
+
2824
+ lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
2825
+ oxid, size, sid);
2826
+
2827
+ if (phba->pport->load_flag & FC_UNLOADING) {
2828
+ failwhy = "Driver Unloading";
2829
+ } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2830
+ failwhy = "NVME FC4 Disabled";
2831
+ } else if (!phba->nvmet_support && !phba->pport->localport) {
2832
+ failwhy = "No Localport";
2833
+ } else if (phba->nvmet_support && !phba->targetport) {
2834
+ failwhy = "No Targetport";
2835
+ } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2836
+ failwhy = "Bad NVME LS R_CTL";
2837
+ } else if (unlikely((fctl & 0x00FF0000) !=
2838
+ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2839
+ failwhy = "Bad NVME LS F_CTL";
2840
+ } else {
2841
+ axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2842
+ if (!axchg)
2843
+ failwhy = "No CTX memory";
2844
+ }
2845
+
2846
+ if (unlikely(failwhy)) {
2847
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2848
+ "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2849
+ sid, oxid, failwhy);
2850
+ goto out_fail;
2851
+ }
2852
+
2853
+ /* validate the source of the LS is logged in */
2854
+ ndlp = lpfc_findnode_did(phba->pport, sid);
2855
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2856
+ ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2857
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2858
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2859
+ "6216 NVME Unsol rcv: No ndlp: "
2860
+ "NPort_ID x%x oxid x%x\n",
2861
+ sid, oxid);
2862
+ goto out_fail;
2863
+ }
2864
+
2865
+ axchg->phba = phba;
2866
+ axchg->ndlp = ndlp;
2867
+ axchg->size = size;
2868
+ axchg->oxid = oxid;
2869
+ axchg->sid = sid;
2870
+ axchg->wqeq = NULL;
2871
+ axchg->state = LPFC_NVME_STE_LS_RCV;
2872
+ axchg->entry_cnt = 1;
2873
+ axchg->rqb_buffer = (void *)nvmebuf;
2874
+ axchg->hdwq = &phba->sli4_hba.hdwq[0];
2875
+ axchg->payload = nvmebuf->dbuf.virt;
2876
+ INIT_LIST_HEAD(&axchg->list);
2877
+
2878
+ if (phba->nvmet_support)
2879
+ ret = lpfc_nvmet_handle_lsreq(phba, axchg);
2880
+ else
2881
+ ret = lpfc_nvme_handle_lsreq(phba, axchg);
2882
+
2883
+ /* if zero, LS was successfully handled. If non-zero, LS not handled */
2884
+ if (!ret)
2885
+ return;
2886
+
2887
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2888
+ "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
2889
+ "NVMe%s handler failed %d\n",
2890
+ did, sid, oxid,
2891
+ (phba->nvmet_support) ? "T" : "I", ret);
2892
+
2893
+out_fail:
2894
+
2895
+ /* recycle receive buffer */
2896
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2897
+
2898
+ /* If start of new exchange, abort it */
2899
+ if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
2900
+ ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
2901
+
2902
+ if (ret)
2903
+ kfree(axchg);
2904
+}
2905
+
2906
+/**
27132907 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
27142908 * @phba: Pointer to HBA context object.
27152909 * @pring: Pointer to driver SLI ring object.
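Note: lpfc_nvme_unsol_ls_handler() above rebuilds the 24-bit F_CTL field from the three bytes in the FC frame header and requires the top byte to carry exactly first-sequence, end-sequence and sequence-initiative before accepting the LS. A small sketch of that byte reassembly and mask check; the bit positions follow the usual FC framing layout but are stated here as assumptions, while the driver itself relies on the FC_FC_* constants from fc_fs.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions for the flags checked above. */
#define FIRST_SEQ (1u << 21)
#define END_SEQ   (1u << 19)
#define SEQ_INIT  (1u << 16)

static uint32_t fctl_from_bytes(const uint8_t f_ctl[3])
{
	return (uint32_t)f_ctl[0] << 16 | (uint32_t)f_ctl[1] << 8 | f_ctl[2];
}

int main(void)
{
	/* 0x29 in the top byte = FIRST_SEQ | END_SEQ | SEQ_INIT */
	uint8_t f_ctl[3] = { 0x29, 0x00, 0x00 };
	uint32_t fctl = fctl_from_bytes(f_ctl);

	if ((fctl & 0x00FF0000) == (FIRST_SEQ | END_SEQ | SEQ_INIT))
		puts("single-frame sequence with initiative: accept the LS");
	else
		puts("unexpected F_CTL: drop the LS");
	return 0;
}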
....@@ -2730,7 +2924,7 @@
27302924
27312925 switch (fch_type) {
27322926 case FC_TYPE_NVME:
2733
- lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2927
+ lpfc_nvme_unsol_ls_handler(phba, saveq);
27342928 return 1;
27352929 default:
27362930 break;
....@@ -2937,8 +3131,8 @@
29373131 *
29383132 * This function looks up the iocb_lookup table to get the command iocb
29393133 * corresponding to the given response iocb using the iotag of the
2940
- * response iocb. This function is called with the hbalock held
2941
- * for sli3 devices or the ring_lock for sli4 devices.
3134
+ * response iocb. The driver calls this function with the hbalock held
3135
+ * for SLI3 ports or the ring lock held for SLI4 ports.
29423136 * This function returns the command iocb object if it finds the command
29433137 * iocb else returns NULL.
29443138 **/
....@@ -2949,8 +3143,15 @@
29493143 {
29503144 struct lpfc_iocbq *cmd_iocb = NULL;
29513145 uint16_t iotag;
2952
- lockdep_assert_held(&phba->hbalock);
3146
+ spinlock_t *temp_lock = NULL;
3147
+ unsigned long iflag = 0;
29533148
3149
+ if (phba->sli_rev == LPFC_SLI_REV4)
3150
+ temp_lock = &pring->ring_lock;
3151
+ else
3152
+ temp_lock = &phba->hbalock;
3153
+
3154
+ spin_lock_irqsave(temp_lock, iflag);
29543155 iotag = prspiocb->iocb.ulpIoTag;
29553156
29563157 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
....@@ -2959,11 +3160,14 @@
29593160 /* remove from txcmpl queue list */
29603161 list_del_init(&cmd_iocb->list);
29613162 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3163
+ pring->txcmplq_cnt--;
3164
+ spin_unlock_irqrestore(temp_lock, iflag);
29623165 return cmd_iocb;
29633166 }
29643167 }
29653168
2966
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3169
+ spin_unlock_irqrestore(temp_lock, iflag);
3170
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
29673171 "0317 iotag x%x is out of "
29683172 "range: max iotag x%x wd0 x%x\n",
29693173 iotag, phba->sli.last_iotag,
....@@ -2978,8 +3182,8 @@
29783182 * @iotag: IOCB tag.
29793183 *
29803184 * This function looks up the iocb_lookup table to get the command iocb
2981
- * corresponding to the given iotag. This function is called with the
2982
- * hbalock held.
3185
+ * corresponding to the given iotag. The driver calls this function with
3186
+ * the ring lock held because this function is an SLI4 port only helper.
29833187 * This function returns the command iocb object if it finds the command
29843188 * iocb else returns NULL.
29853189 **/
....@@ -2988,19 +3192,29 @@
29883192 struct lpfc_sli_ring *pring, uint16_t iotag)
29893193 {
29903194 struct lpfc_iocbq *cmd_iocb = NULL;
3195
+ spinlock_t *temp_lock = NULL;
3196
+ unsigned long iflag = 0;
29913197
2992
- lockdep_assert_held(&phba->hbalock);
3198
+ if (phba->sli_rev == LPFC_SLI_REV4)
3199
+ temp_lock = &pring->ring_lock;
3200
+ else
3201
+ temp_lock = &phba->hbalock;
3202
+
3203
+ spin_lock_irqsave(temp_lock, iflag);
29933204 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
29943205 cmd_iocb = phba->sli.iocbq_lookup[iotag];
29953206 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
29963207 /* remove from txcmpl queue list */
29973208 list_del_init(&cmd_iocb->list);
29983209 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3210
+ pring->txcmplq_cnt--;
3211
+ spin_unlock_irqrestore(temp_lock, iflag);
29993212 return cmd_iocb;
30003213 }
30013214 }
30023215
3003
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3216
+ spin_unlock_irqrestore(temp_lock, iflag);
3217
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
30043218 "0372 iotag x%x lookup error: max iotag (x%x) "
30053219 "iocb_flag x%x\n",
30063220 iotag, phba->sli.last_iotag,
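Note: both lookup helpers in the hunks above now take the serializing lock themselves, choosing the per-ring lock on SLI4 ports and the adapter-wide hbalock on older ports, and decrement txcmplq_cnt as the command is unlinked. A pthread-based sketch of that lock-selection pattern, with illustrative names in place of the driver's spinlocks and txcmplq list:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical model: newer (rev >= 4) ports serialize the lookup with a
 * per-ring lock, older ports with the adapter-wide lock. */
struct ring { pthread_mutex_t ring_lock; int cmd_by_tag[16]; };
struct hba  { int sli_rev; pthread_mutex_t hbalock; };

static int lookup_cmd(struct hba *hba, struct ring *ring, int iotag)
{
	pthread_mutex_t *lock =
		(hba->sli_rev >= 4) ? &ring->ring_lock : &hba->hbalock;
	int cmd;

	pthread_mutex_lock(lock);
	cmd = ring->cmd_by_tag[iotag];   /* find the command for this tag */
	ring->cmd_by_tag[iotag] = 0;     /* "remove it from the txcmplq"  */
	pthread_mutex_unlock(lock);
	return cmd;
}

int main(void)
{
	struct hba hba = { .sli_rev = 4, .hbalock = PTHREAD_MUTEX_INITIALIZER };
	struct ring ring = { .ring_lock = PTHREAD_MUTEX_INITIALIZER };

	ring.cmd_by_tag[7] = 42;
	printf("tag 7 -> cmd %d\n", lookup_cmd(&hba, &ring, 7));
	return 0;
}

Moving the locking inside the helper lets callers such as the response-IOCB processing loops drop the hbalock around the lookup, which the later hunks in this patch do.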
....@@ -3033,17 +3247,7 @@
30333247 int rc = 1;
30343248 unsigned long iflag;
30353249
3036
- /* Based on the iotag field, get the cmd IOCB from the txcmplq */
3037
- if (phba->sli_rev == LPFC_SLI_REV4)
3038
- spin_lock_irqsave(&pring->ring_lock, iflag);
3039
- else
3040
- spin_lock_irqsave(&phba->hbalock, iflag);
30413250 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3042
- if (phba->sli_rev == LPFC_SLI_REV4)
3043
- spin_unlock_irqrestore(&pring->ring_lock, iflag);
3044
- else
3045
- spin_unlock_irqrestore(&phba->hbalock, iflag);
3046
-
30473251 if (cmdiocbp) {
30483252 if (cmdiocbp->iocb_cmpl) {
30493253 /*
....@@ -3186,7 +3390,7 @@
31863390 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
31873391 * rsp ring <portRspMax>
31883392 */
3189
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3393
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
31903394 "0312 Ring %d handler: portRspPut %d "
31913395 "is bigger than rsp ring %d\n",
31923396 pring->ringno, le32_to_cpu(pgp->rspPutInx),
....@@ -3208,7 +3412,7 @@
32083412
32093413 /**
32103414 * lpfc_poll_eratt - Error attention polling timer timeout handler
3211
- * @ptr: Pointer to address of HBA context object.
3415
+ * @t: Context to fetch pointer to address of HBA context object from.
32123416 *
32133417 * This function is invoked by the Error Attention polling timer when the
32143418 * timer times out. It will check the SLI Error Attention register for
....@@ -3374,8 +3578,10 @@
33743578 break;
33753579 }
33763580
3581
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
33773582 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
33783583 &rspiocbq);
3584
+ spin_lock_irqsave(&phba->hbalock, iflag);
33793585 if (unlikely(!cmdiocbq))
33803586 break;
33813587 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
....@@ -3403,7 +3609,7 @@
34033609 phba->brd_no, adaptermsg);
34043610 } else {
34053611 /* Unknown IOCB command */
3406
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3612
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
34073613 "0334 Unknown IOCB command "
34083614 "Data: x%x, x%x x%x x%x x%x\n",
34093615 type, irsp->ulpCommand,
....@@ -3569,9 +3775,12 @@
35693775
35703776 case LPFC_ABORT_IOCB:
35713777 cmdiocbp = NULL;
3572
- if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3778
+ if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3779
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
35733780 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
35743781 saveq);
3782
+ spin_lock_irqsave(&phba->hbalock, iflag);
3783
+ }
35753784 if (cmdiocbp) {
35763785 /* Call the specified completion routine */
35773786 if (cmdiocbp->iocb_cmpl) {
....@@ -3598,7 +3807,7 @@
35983807 phba->brd_no, adaptermsg);
35993808 } else {
36003809 /* Unknown IOCB command */
3601
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3810
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
36023811 "0335 Unknown IOCB "
36033812 "command Data: x%x "
36043813 "x%x x%x x%x\n",
....@@ -3678,7 +3887,7 @@
36783887 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
36793888 * rsp ring <portRspMax>
36803889 */
3681
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3890
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
36823891 "0303 Ring %d handler: portRspPut %d "
36833892 "is bigger than rsp ring %d\n",
36843893 pring->ringno, portRspPut, portRspMax);
....@@ -3887,36 +4096,8 @@
38874096 }
38884097
38894098 /**
3890
- * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
3891
- * @phba: Pointer to HBA context object.
3892
- * @pring: Pointer to driver SLI ring object.
3893
- *
3894
- * This function aborts all iocbs in the given ring and frees all the iocb
3895
- * objects in txq. This function issues an abort iocb for all the iocb commands
3896
- * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
3897
- * the return of this function. The caller is not required to hold any locks.
3898
- **/
3899
-void
3900
-lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3901
-{
3902
- LIST_HEAD(completions);
3903
- struct lpfc_iocbq *iocb, *next_iocb;
3904
-
3905
- if (pring->ringno == LPFC_ELS_RING)
3906
- lpfc_fabric_abort_hba(phba);
3907
-
3908
- spin_lock_irq(&phba->hbalock);
3909
- /* Next issue ABTS for everything on the txcmplq */
3910
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3911
- lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3912
- spin_unlock_irq(&phba->hbalock);
3913
-}
3914
-
3915
-
3916
-/**
39174099 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
39184100 * @phba: Pointer to HBA context object.
3919
- * @pring: Pointer to driver SLI ring object.
39204101 *
39214102 * This function aborts all iocbs in FCP rings and frees all the iocb
39224103 * objects in txq. This function issues an abort iocb for all the iocb commands
....@@ -3932,8 +4113,8 @@
39324113
39334114 /* Look on all the FCP Rings for the iotag */
39344115 if (phba->sli_rev >= LPFC_SLI_REV4) {
3935
- for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3936
- pring = phba->sli4_hba.fcp_wq[i]->pring;
4116
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
4117
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
39374118 lpfc_sli_abort_iocb_ring(phba, pring);
39384119 }
39394120 } else {
....@@ -3943,43 +4124,17 @@
39434124 }
39444125
39454126 /**
3946
- * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
4127
+ * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
39474128 * @phba: Pointer to HBA context object.
39484129 *
3949
- * This function aborts all wqes in NVME rings. This function issues an
3950
- * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3951
- * the txcmplq is not guaranteed to complete before the return of this
3952
- * function. The caller is not required to hold any locks.
3953
- **/
3954
-void
3955
-lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3956
-{
3957
- struct lpfc_sli_ring *pring;
3958
- uint32_t i;
3959
-
3960
- if (phba->sli_rev < LPFC_SLI_REV4)
3961
- return;
3962
-
3963
- /* Abort all IO on each NVME ring. */
3964
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3965
- pring = phba->sli4_hba.nvme_wq[i]->pring;
3966
- lpfc_sli_abort_wqe_ring(phba, pring);
3967
- }
3968
-}
3969
-
3970
-
3971
-/**
3972
- * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3973
- * @phba: Pointer to HBA context object.
3974
- *
3975
- * This function flushes all iocbs in the fcp ring and frees all the iocb
4130
+ * This function flushes all iocbs in the IO ring and frees all the iocb
39764131 * objects in txq and txcmplq. This function will not issue abort iocbs
39774132 * for all the iocb commands in txcmplq, they will just be returned with
39784133 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
39794134 * slot has been permanently disabled.
39804135 **/
39814136 void
3982
-lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
4137
+lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
39834138 {
39844139 LIST_HEAD(txq);
39854140 LIST_HEAD(txcmplq);
....@@ -3989,14 +4144,19 @@
39894144 struct lpfc_iocbq *piocb, *next_iocb;
39904145
39914146 spin_lock_irq(&phba->hbalock);
4147
+ if (phba->hba_flag & HBA_IOQ_FLUSH ||
4148
+ !phba->sli4_hba.hdwq) {
4149
+ spin_unlock_irq(&phba->hbalock);
4150
+ return;
4151
+ }
39924152 /* Indicate the I/O queues are flushed */
3993
- phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
4153
+ phba->hba_flag |= HBA_IOQ_FLUSH;
39944154 spin_unlock_irq(&phba->hbalock);
39954155
39964156 /* Look on all the FCP Rings for the iotag */
39974157 if (phba->sli_rev >= LPFC_SLI_REV4) {
3998
- for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3999
- pring = phba->sli4_hba.fcp_wq[i]->pring;
4158
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
4159
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
40004160
40014161 spin_lock_irq(&pring->ring_lock);
40024162 /* Retrieve everything on txq */
....@@ -4039,55 +4199,6 @@
40394199 IOERR_SLI_DOWN);
40404200 /* Flush the txcmpq */
40414201 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4042
- IOERR_SLI_DOWN);
4043
- }
4044
-}
4045
-
4046
-/**
4047
- * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
4048
- * @phba: Pointer to HBA context object.
4049
- *
4050
- * This function flushes all wqes in the nvme rings and frees all resources
4051
- * in the txcmplq. This function does not issue abort wqes for the IO
4052
- * commands in txcmplq, they will just be returned with
4053
- * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4054
- * slot has been permanently disabled.
4055
- **/
4056
-void
4057
-lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4058
-{
4059
- LIST_HEAD(txcmplq);
4060
- struct lpfc_sli_ring *pring;
4061
- uint32_t i;
4062
- struct lpfc_iocbq *piocb, *next_iocb;
4063
-
4064
- if (phba->sli_rev < LPFC_SLI_REV4)
4065
- return;
4066
-
4067
- /* Hint to other driver operations that a flush is in progress. */
4068
- spin_lock_irq(&phba->hbalock);
4069
- phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4070
- spin_unlock_irq(&phba->hbalock);
4071
-
4072
- /* Cycle through all NVME rings and complete each IO with
4073
- * a local driver reason code. This is a flush so no
4074
- * abort exchange to FW.
4075
- */
4076
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
4077
- pring = phba->sli4_hba.nvme_wq[i]->pring;
4078
-
4079
- spin_lock_irq(&pring->ring_lock);
4080
- list_for_each_entry_safe(piocb, next_iocb,
4081
- &pring->txcmplq, list)
4082
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4083
- /* Retrieve everything on the txcmplq */
4084
- list_splice_init(&pring->txcmplq, &txcmplq);
4085
- pring->txcmplq_cnt = 0;
4086
- spin_unlock_irq(&pring->ring_lock);
4087
-
4088
- /* Flush the txcmpq &&&PAE */
4089
- lpfc_sli_cancel_iocbs(phba, &txcmplq,
4090
- IOSTAT_LOCAL_REJECT,
40914202 IOERR_SLI_DOWN);
40924203 }
40934204 }
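The hunks above replace the separate FCP and NVME flush routines with a single lpfc_sli_flush_io_rings() that walks each hardware queue's io_wq->pring, splices txq and txcmplq onto local list heads under the ring lock, and cancels the detached iocbs with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN. A minimal sketch of that splice-then-cancel pattern, using illustrative names rather than the lpfc structures:

/*
 * Illustrative only: detach everything from a driver-owned queue under
 * its lock, then complete the detached entries with a local error code
 * without the lock held.  demo_req stands in for the driver's request
 * object; it is not an lpfc type.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
        struct list_head list;
        void (*done)(struct demo_req *req, int status);
};

static void demo_flush_queue(struct list_head *queue, spinlock_t *lock)
{
        LIST_HEAD(local);
        struct demo_req *req, *next;

        spin_lock_irq(lock);
        list_splice_init(queue, &local);        /* queue is now empty */
        spin_unlock_irq(lock);

        list_for_each_entry_safe(req, next, &local, list) {
                list_del(&req->list);
                req->done(req, -ENODEV);        /* local reject, no abort to FW */
        }
}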
....@@ -4147,7 +4258,7 @@
41474258
41484259 /* Check to see if any errors occurred during init */
41494260 if ((status & HS_FFERM) || (i >= 20)) {
4150
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4261
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
41514262 "2751 Adapter failed to restart, "
41524263 "status reg x%x, FW Data: A8 x%x AC x%x\n",
41534264 status,
....@@ -4369,7 +4480,7 @@
43694480 if (retval != MBX_SUCCESS) {
43704481 if (retval != MBX_BUSY)
43714482 mempool_free(pmb, phba->mbox_mem_pool);
4372
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4483
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
43734484 "2752 KILL_BOARD command failed retval %d\n",
43744485 retval);
43754486 spin_lock_irq(&phba->hbalock);
....@@ -4450,7 +4561,9 @@
44504561 }
44514562
44524563 /* Turn off parity checking and serr during the physical reset */
4453
- pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4564
+ if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4565
+ return -EIO;
4566
+
44544567 pci_write_config_word(phba->pcidev, PCI_COMMAND,
44554568 (cfg_value &
44564569 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
....@@ -4490,7 +4603,7 @@
44904603 * checking during resets the device. The caller is not required to hold
44914604 * any locks.
44924605 *
4493
- * This function returns 0 always.
4606
+ * This function returns 0 on success else returns negative error code.
44944607 **/
44954608 int
44964609 lpfc_sli4_brdreset(struct lpfc_hba *phba)
....@@ -4516,18 +4629,17 @@
45164629 phba->fcf.fcf_flag = 0;
45174630 spin_unlock_irq(&phba->hbalock);
45184631
4519
- /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4520
- if (phba->hba_flag & HBA_FW_DUMP_OP) {
4521
- phba->hba_flag &= ~HBA_FW_DUMP_OP;
4522
- return rc;
4523
- }
4524
-
45254632 /* Now physically reset the device */
45264633 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
45274634 "0389 Performing PCI function reset!\n");
45284635
45294636 /* Turn off parity checking and serr during the physical reset */
4530
- pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4637
+ if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4638
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4639
+ "3205 PCI read Config failed\n");
4640
+ return -EIO;
4641
+ }
4642
+
45314643 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
45324644 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
45334645
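Both reset paths above now check the return value of pci_read_config_word() before masking parity and SERR reporting for the duration of the physical reset, and bail out with -EIO if config space cannot be read. A small sketch of that read-check-mask sequence in a generic PCI driver context (not the lpfc code):

/* Illustrative helper, assuming an ordinary struct pci_dev as in any
 * PCI driver; the saved value would be restored after the reset.
 */
#include <linux/pci.h>

static int demo_mask_parity_serr(struct pci_dev *pdev, u16 *saved_cmd)
{
        u16 cmd;

        if (pci_read_config_word(pdev, PCI_COMMAND, &cmd))
                return -EIO;            /* config space read failed */

        *saved_cmd = cmd;
        pci_write_config_word(pdev, PCI_COMMAND,
                              cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
        return 0;
}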
....@@ -4642,8 +4754,10 @@
46424754 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
46434755
46444756 rc = lpfc_sli4_brdreset(phba);
4645
- if (rc)
4646
- return rc;
4757
+ if (rc) {
4758
+ phba->link_state = LPFC_HBA_ERROR;
4759
+ goto hba_down_queue;
4760
+ }
46474761
46484762 spin_lock_irq(&phba->hbalock);
46494763 phba->pport->stopped = 0;
....@@ -4658,6 +4772,7 @@
46584772 if (hba_aer_enabled)
46594773 pci_disable_pcie_error_reporting(phba->pcidev);
46604774
4775
+hba_down_queue:
46614776 lpfc_hba_down_post(phba);
46624777 lpfc_sli4_queue_destroy(phba);
46634778
....@@ -4711,7 +4826,7 @@
47114826 if (i++ >= 200) {
47124827 /* Adapter failed to init, timeout, status reg
47134828 <status> */
4714
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4829
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
47154830 "0436 Adapter failed to init, "
47164831 "timeout, status reg x%x, "
47174832 "FW Data: A8 x%x AC x%x\n", status,
....@@ -4726,7 +4841,7 @@
47264841 /* ERROR: During chipset initialization */
47274842 /* Adapter failed to init, chipset, status reg
47284843 <status> */
4729
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4844
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
47304845 "0437 Adapter failed to init, "
47314846 "chipset, status reg x%x, "
47324847 "FW Data: A8 x%x AC x%x\n", status,
....@@ -4757,7 +4872,7 @@
47574872 if (status & HS_FFERM) {
47584873 /* ERROR: During chipset initialization */
47594874 /* Adapter failed to init, chipset, status reg <status> */
4760
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4875
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
47614876 "0438 Adapter failed to init, chipset, "
47624877 "status reg x%x, "
47634878 "FW Data: A8 x%x AC x%x\n", status,
....@@ -4902,8 +5017,17 @@
49025017 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
49035018 {
49045019 phba->hbq_in_use = 1;
4905
- phba->hbqs[LPFC_ELS_HBQ].entry_count =
4906
- lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5020
+ /**
5021
+ * Specific case when the MDS diagnostics is enabled and supported.
5022
+ * The receive buffer count is truncated to manage the incoming
5023
+ * traffic.
5024
+ **/
5025
+ if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5026
+ phba->hbqs[LPFC_ELS_HBQ].entry_count =
5027
+ lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5028
+ else
5029
+ phba->hbqs[LPFC_ELS_HBQ].entry_count =
5030
+ lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
49075031 phba->hbq_count = 1;
49085032 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
49095033 /* Initially populate or replenish the HBQs */
....@@ -4971,7 +5095,7 @@
49715095 LPFC_SLI3_CRP_ENABLED |
49725096 LPFC_SLI3_DSS_ENABLED);
49735097 if (rc != MBX_SUCCESS) {
4974
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5098
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
49755099 "0442 Adapter failed to init, mbxCmd x%x "
49765100 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
49775101 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
....@@ -5009,23 +5133,6 @@
50095133
50105134 } else
50115135 phba->max_vpi = 0;
5012
- phba->fips_level = 0;
5013
- phba->fips_spec_rev = 0;
5014
- if (pmb->u.mb.un.varCfgPort.gdss) {
5015
- phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
5016
- phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
5017
- phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
5018
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5019
- "2850 Security Crypto Active. FIPS x%d "
5020
- "(Spec Rev: x%d)",
5021
- phba->fips_level, phba->fips_spec_rev);
5022
- }
5023
- if (pmb->u.mb.un.varCfgPort.sec_err) {
5024
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5025
- "2856 Config Port Security Crypto "
5026
- "Error: x%x ",
5027
- pmb->u.mb.un.varCfgPort.sec_err);
5028
- }
50295136 if (pmb->u.mb.un.varCfgPort.gerbm)
50305137 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
50315138 if (pmb->u.mb.un.varCfgPort.gcrp)
....@@ -5038,7 +5145,7 @@
50385145 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
50395146 phba->cfg_enable_bg = 0;
50405147 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5041
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5148
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
50425149 "0443 Adapter did not grant "
50435150 "BlockGuard\n");
50445151 }
....@@ -5077,7 +5184,7 @@
50775184 switch (phba->cfg_sli_mode) {
50785185 case 2:
50795186 if (phba->cfg_enable_npiv) {
5080
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5187
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
50815188 "1824 NPIV enabled: Override sli_mode "
50825189 "parameter (%d) to auto (0).\n",
50835190 phba->cfg_sli_mode);
....@@ -5089,7 +5196,7 @@
50895196 case 3:
50905197 break;
50915198 default:
5092
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5199
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
50935200 "1819 Unrecognized sli_mode parameter: %d.\n",
50945201 phba->cfg_sli_mode);
50955202
....@@ -5100,7 +5207,7 @@
51005207 rc = lpfc_sli_config_port(phba, mode);
51015208
51025209 if (rc && phba->cfg_sli_mode == 3)
5103
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5210
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
51045211 "1820 Unable to select SLI-3. "
51055212 "Not supported by adapter.\n");
51065213 if (rc && mode != 2)
....@@ -5194,7 +5301,7 @@
51945301
51955302 lpfc_sli_hba_setup_error:
51965303 phba->link_state = LPFC_HBA_ERROR;
5197
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5304
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
51985305 "0445 Firmware initialization failed\n");
51995306 return rc;
52005307 }
....@@ -5202,7 +5309,7 @@
52025309 /**
52035310 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
52045311 * @phba: Pointer to HBA context object.
5205
- * @mboxq: mailbox pointer.
5312
+ *
52065313 * This function issue a dump mailbox command to read config region
52075314 * 23 and parse the records in the region and populate driver
52085315 * data structure.
....@@ -5232,7 +5339,7 @@
52325339 goto out_free_mboxq;
52335340 }
52345341
5235
- mp = (struct lpfc_dmabuf *) mboxq->context1;
5342
+ mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
52365343 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
52375344
52385345 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
....@@ -5313,8 +5420,8 @@
53135420 * mailbox command.
53145421 */
53155422 dma_size = *vpd_size;
5316
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
5317
- &dmabuf->phys, GFP_KERNEL);
5423
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5424
+ &dmabuf->phys, GFP_KERNEL);
53185425 if (!dmabuf->virt) {
53195426 kfree(dmabuf);
53205427 return -ENOMEM;
....@@ -5358,7 +5465,7 @@
53585465 }
53595466
53605467 /**
5361
- * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5468
+ * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
53625469 * @phba: pointer to lpfc hba data structure.
53635470 *
53645471 * This routine retrieves SLI4 device physical port name this PCI function
....@@ -5366,42 +5473,32 @@
53665473 *
53675474 * Return codes
53685475 * 0 - successful
5369
- * otherwise - failed to retrieve physical port name
5476
+ * otherwise - failed to retrieve controller attributes
53705477 **/
53715478 static int
5372
-lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5479
+lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
53735480 {
53745481 LPFC_MBOXQ_t *mboxq;
53755482 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
53765483 struct lpfc_controller_attribute *cntl_attr;
5377
- struct lpfc_mbx_get_port_name *get_port_name;
53785484 void *virtaddr = NULL;
53795485 uint32_t alloclen, reqlen;
53805486 uint32_t shdr_status, shdr_add_status;
53815487 union lpfc_sli4_cfg_shdr *shdr;
5382
- char cport_name = 0;
53835488 int rc;
5384
-
5385
- /* We assume nothing at this point */
5386
- phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5387
- phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
53885489
53895490 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
53905491 if (!mboxq)
53915492 return -ENOMEM;
5392
- /* obtain link type and link number via READ_CONFIG */
5393
- phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5394
- lpfc_sli4_read_config(phba);
5395
- if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5396
- goto retrieve_ppname;
53975493
5398
- /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5494
+ /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
53995495 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
54005496 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
54015497 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
54025498 LPFC_SLI4_MBX_NEMBED);
5499
+
54035500 if (alloclen < reqlen) {
5404
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5501
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
54055502 "3084 Allocated DMA memory size (%d) is "
54065503 "less than the requested DMA memory size "
54075504 "(%d)\n", alloclen, reqlen);
....@@ -5425,16 +5522,69 @@
54255522 rc = -ENXIO;
54265523 goto out_free_mboxq;
54275524 }
5525
+
54285526 cntl_attr = &mbx_cntl_attr->cntl_attr;
54295527 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
54305528 phba->sli4_hba.lnk_info.lnk_tp =
54315529 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
54325530 phba->sli4_hba.lnk_info.lnk_no =
54335531 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5532
+
5533
+ memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5534
+ strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5535
+ sizeof(phba->BIOSVersion));
5536
+
54345537 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5435
- "3086 lnk_type:%d, lnk_numb:%d\n",
5538
+ "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
54365539 phba->sli4_hba.lnk_info.lnk_tp,
5437
- phba->sli4_hba.lnk_info.lnk_no);
5540
+ phba->sli4_hba.lnk_info.lnk_no,
5541
+ phba->BIOSVersion);
5542
+out_free_mboxq:
5543
+ if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5544
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
5545
+ else
5546
+ mempool_free(mboxq, phba->mbox_mem_pool);
5547
+ return rc;
5548
+}
5549
+
5550
+/**
5551
+ * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5552
+ * @phba: pointer to lpfc hba data structure.
5553
+ *
5554
+ * This routine retrieves SLI4 device physical port name this PCI function
5555
+ * is attached to.
5556
+ *
5557
+ * Return codes
5558
+ * 0 - successful
5559
+ * otherwise - failed to retrieve physical port name
5560
+ **/
5561
+static int
5562
+lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5563
+{
5564
+ LPFC_MBOXQ_t *mboxq;
5565
+ struct lpfc_mbx_get_port_name *get_port_name;
5566
+ uint32_t shdr_status, shdr_add_status;
5567
+ union lpfc_sli4_cfg_shdr *shdr;
5568
+ char cport_name = 0;
5569
+ int rc;
5570
+
5571
+ /* We assume nothing at this point */
5572
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5573
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5574
+
5575
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5576
+ if (!mboxq)
5577
+ return -ENOMEM;
5578
+ /* obtain link type and link number via READ_CONFIG */
5579
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5580
+ lpfc_sli4_read_config(phba);
5581
+ if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5582
+ goto retrieve_ppname;
5583
+
5584
+ /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5585
+ rc = lpfc_sli4_get_ctl_attr(phba);
5586
+ if (rc)
5587
+ goto out_free_mboxq;
54385588
54395589 retrieve_ppname:
54405590 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
....@@ -5494,12 +5644,10 @@
54945644 }
54955645
54965646 out_free_mboxq:
5497
- if (rc != MBX_TIMEOUT) {
5498
- if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5499
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
5500
- else
5501
- mempool_free(mboxq, phba->mbox_mem_pool);
5502
- }
5647
+ if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5648
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
5649
+ else
5650
+ mempool_free(mboxq, phba->mbox_mem_pool);
55035651 return rc;
55045652 }
55055653
....@@ -5515,41 +5663,40 @@
55155663 {
55165664 int qidx;
55175665 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5666
+ struct lpfc_sli4_hdw_queue *qp;
5667
+ struct lpfc_queue *eq;
55185668
5519
- sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
5520
- sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
5669
+ sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5670
+ sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
55215671 if (sli4_hba->nvmels_cq)
5522
- sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
5672
+ sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5673
+ LPFC_QUEUE_REARM);
5674
+
5675
+ if (sli4_hba->hdwq) {
5676
+ /* Loop thru all Hardware Queues */
5677
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5678
+ qp = &sli4_hba->hdwq[qidx];
5679
+ /* ARM the corresponding CQ */
5680
+ sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
55235681 LPFC_QUEUE_REARM);
5682
+ }
55245683
5525
- if (sli4_hba->fcp_cq)
5526
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
5527
- sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
5528
- LPFC_QUEUE_REARM);
5529
-
5530
- if (sli4_hba->nvme_cq)
5531
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
5532
- sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
5533
- LPFC_QUEUE_REARM);
5534
-
5535
- if (phba->cfg_fof)
5536
- sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM);
5537
-
5538
- if (sli4_hba->hba_eq)
5539
- for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
5540
- sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
5541
- LPFC_QUEUE_REARM);
5542
-
5543
- if (phba->nvmet_support) {
5544
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5545
- sli4_hba->sli4_cq_release(
5546
- sli4_hba->nvmet_cqset[qidx],
5547
- LPFC_QUEUE_REARM);
5684
+ /* Loop thru all IRQ vectors */
5685
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5686
+ eq = sli4_hba->hba_eq_hdl[qidx].eq;
5687
+ /* ARM the corresponding EQ */
5688
+ sli4_hba->sli4_write_eq_db(phba, eq,
5689
+ 0, LPFC_QUEUE_REARM);
55485690 }
55495691 }
55505692
5551
- if (phba->cfg_fof)
5552
- sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM);
5693
+ if (phba->nvmet_support) {
5694
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5695
+ sli4_hba->sli4_write_cq_db(phba,
5696
+ sli4_hba->nvmet_cqset[qidx], 0,
5697
+ LPFC_QUEUE_REARM);
5698
+ }
5699
+ }
55535700 }
55545701
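The rearm loop above uses the new doorbell callbacks: instead of the old per-queue sli4_cq_release()/sli4_eq_release() helpers, sli4_write_cq_db()/sli4_write_eq_db() take the HBA, the queue, an explicit count of consumed entries (0 here, since nothing was processed), and the rearm request. A sketch of that callback shape with assumed names, not the lpfc prototypes:

#include <linux/types.h>

struct demo_queue;

struct demo_hba {
        /* write the CQ doorbell: report entries consumed, optionally rearm */
        void (*write_cq_db)(struct demo_hba *hba, struct demo_queue *cq,
                            unsigned int consumed, bool rearm);
};

static void demo_rearm_idle_cq(struct demo_hba *hba, struct demo_queue *cq)
{
        /* nothing was processed, so report 0 consumed and just rearm */
        hba->write_cq_db(hba, cq, 0, true);
}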
55555702 /**
....@@ -5607,7 +5754,7 @@
56075754 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
56085755 if (bf_get(lpfc_mbox_hdr_status,
56095756 &rsrc_info->header.cfg_shdr.response)) {
5610
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5757
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
56115758 "2930 Failed to get resource extents "
56125759 "Status 0x%x Add'l Status 0x%x\n",
56135760 bf_get(lpfc_mbox_hdr_status,
....@@ -5696,10 +5843,10 @@
56965843 /**
56975844 * lpfc_sli4_cfg_post_extnts -
56985845 * @phba: Pointer to HBA context object.
5699
- * @extnt_cnt - number of available extents.
5700
- * @type - the extent type (rpi, xri, vfi, vpi).
5701
- * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5702
- * @mbox - pointer to the caller's allocated mailbox structure.
5846
+ * @extnt_cnt: number of available extents.
5847
+ * @type: the extent type (rpi, xri, vfi, vpi).
5848
+ * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5849
+ * @mbox: pointer to the caller's allocated mailbox structure.
57035850 *
57045851 * This function executes the extents allocation request. It also
57055852 * takes care of the amount of memory needed to allocate or get the
....@@ -5745,7 +5892,7 @@
57455892 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
57465893 req_len, *emb);
57475894 if (alloc_len < req_len) {
5748
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5895
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
57495896 "2982 Allocated DMA memory size (x%x) is "
57505897 "less than the requested DMA memory "
57515898 "size (x%x)\n", alloc_len, req_len);
....@@ -5801,7 +5948,7 @@
58015948 return -EIO;
58025949
58035950 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5804
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5951
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
58055952 "3009 No available Resource Extents "
58065953 "for resource type 0x%x: Count: 0x%x, "
58075954 "Size 0x%x\n", type, rsrc_cnt,
....@@ -5978,11 +6125,8 @@
59786125 list_add_tail(&rsrc_blks->list, ext_blk_list);
59796126 rsrc_start = rsrc_id;
59806127 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
5981
- phba->sli4_hba.scsi_xri_start = rsrc_start +
6128
+ phba->sli4_hba.io_xri_start = rsrc_start +
59826129 lpfc_sli4_get_iocb_cnt(phba);
5983
- phba->sli4_hba.nvme_xri_start =
5984
- phba->sli4_hba.scsi_xri_start +
5985
- phba->sli4_hba.scsi_xri_max;
59866130 }
59876131
59886132 while (rsrc_id < (rsrc_start + rsrc_size)) {
....@@ -6055,7 +6199,7 @@
60556199 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
60566200 if (bf_get(lpfc_mbox_hdr_status,
60576201 &dealloc_rsrc->header.cfg_shdr.response)) {
6058
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6202
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
60596203 "2919 Failed to release resource extents "
60606204 "for type %d - Status 0x%x Add'l Status 0x%x. "
60616205 "Resource memory not released.\n",
....@@ -6146,9 +6290,317 @@
61466290 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
61476291 mbox->u.mqe.un.set_feature.param_len = 8;
61486292 break;
6293
+ case LPFC_SET_DUAL_DUMP:
6294
+ bf_set(lpfc_mbx_set_feature_dd,
6295
+ &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6296
+ bf_set(lpfc_mbx_set_feature_ddquery,
6297
+ &mbox->u.mqe.un.set_feature, 0);
6298
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6299
+ mbox->u.mqe.un.set_feature.param_len = 4;
6300
+ break;
61496301 }
61506302
61516303 return;
6304
+}
6305
+
6306
+/**
6307
+ * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6308
+ * @phba: Pointer to HBA context object.
6309
+ *
6310
+ * Disable FW logging into host memory on the adapter. To
6311
+ * be done before reading logs from the host memory.
6312
+ **/
6313
+void
6314
+lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6315
+{
6316
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6317
+
6318
+ spin_lock_irq(&phba->hbalock);
6319
+ ras_fwlog->state = INACTIVE;
6320
+ spin_unlock_irq(&phba->hbalock);
6321
+
6322
+ /* Disable FW logging to host memory */
6323
+ writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6324
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6325
+
6326
+ /* Wait 10ms for firmware to stop using DMA buffer */
6327
+ usleep_range(10 * 1000, 20 * 1000);
6328
+}
6329
+
6330
+/**
6331
+ * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6332
+ * @phba: Pointer to HBA context object.
6333
+ *
6334
+ * This function is called to free memory allocated for RAS FW logging
6335
+ * support in the driver.
6336
+ **/
6337
+void
6338
+lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6339
+{
6340
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6341
+ struct lpfc_dmabuf *dmabuf, *next;
6342
+
6343
+ if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6344
+ list_for_each_entry_safe(dmabuf, next,
6345
+ &ras_fwlog->fwlog_buff_list,
6346
+ list) {
6347
+ list_del(&dmabuf->list);
6348
+ dma_free_coherent(&phba->pcidev->dev,
6349
+ LPFC_RAS_MAX_ENTRY_SIZE,
6350
+ dmabuf->virt, dmabuf->phys);
6351
+ kfree(dmabuf);
6352
+ }
6353
+ }
6354
+
6355
+ if (ras_fwlog->lwpd.virt) {
6356
+ dma_free_coherent(&phba->pcidev->dev,
6357
+ sizeof(uint32_t) * 2,
6358
+ ras_fwlog->lwpd.virt,
6359
+ ras_fwlog->lwpd.phys);
6360
+ ras_fwlog->lwpd.virt = NULL;
6361
+ }
6362
+
6363
+ spin_lock_irq(&phba->hbalock);
6364
+ ras_fwlog->state = INACTIVE;
6365
+ spin_unlock_irq(&phba->hbalock);
6366
+}
6367
+
6368
+/**
6369
+ * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6370
+ * @phba: Pointer to HBA context object.
6371
+ * @fwlog_buff_count: Count of buffers to be created.
6372
+ *
6373
+ * This routine allocates DMA memory for the Log Write Position Data [LWPD]
6374
+ * and for the buffers posted to the adapter for FW log updates.
6375
+ * The buffer count is derived from the module parameter ras_fwlog_buffsize.
6376
+ * The size of each buffer posted to FW is 64K.
6377
+ **/
6378
+
6379
+static int
6380
+lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6381
+ uint32_t fwlog_buff_count)
6382
+{
6383
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6384
+ struct lpfc_dmabuf *dmabuf;
6385
+ int rc = 0, i = 0;
6386
+
6387
+ /* Initialize List */
6388
+ INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6389
+
6390
+ /* Allocate memory for the LWPD */
6391
+ ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6392
+ sizeof(uint32_t) * 2,
6393
+ &ras_fwlog->lwpd.phys,
6394
+ GFP_KERNEL);
6395
+ if (!ras_fwlog->lwpd.virt) {
6396
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6397
+ "6185 LWPD Memory Alloc Failed\n");
6398
+
6399
+ return -ENOMEM;
6400
+ }
6401
+
6402
+ ras_fwlog->fw_buffcount = fwlog_buff_count;
6403
+ for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6404
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6405
+ GFP_KERNEL);
6406
+ if (!dmabuf) {
6407
+ rc = -ENOMEM;
6408
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6409
+ "6186 Memory Alloc failed FW logging");
6410
+ goto free_mem;
6411
+ }
6412
+
6413
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6414
+ LPFC_RAS_MAX_ENTRY_SIZE,
6415
+ &dmabuf->phys, GFP_KERNEL);
6416
+ if (!dmabuf->virt) {
6417
+ kfree(dmabuf);
6418
+ rc = -ENOMEM;
6419
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6420
+ "6187 DMA Alloc Failed FW logging");
6421
+ goto free_mem;
6422
+ }
6423
+ dmabuf->buffer_tag = i;
6424
+ list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6425
+ }
6426
+
6427
+free_mem:
6428
+ if (rc)
6429
+ lpfc_sli4_ras_dma_free(phba);
6430
+
6431
+ return rc;
6432
+}
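lpfc_sli4_ras_dma_alloc() above follows a common pattern: build a list of coherent DMA buffers and, if any allocation fails, unwind everything already linked in (here via lpfc_sli4_ras_dma_free()). A generic, self-contained sketch of that pattern with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_buf {                       /* stands in for struct lpfc_dmabuf */
        struct list_head list;
        void *virt;
        dma_addr_t phys;
};

static int demo_alloc_pool(struct device *dev, struct list_head *pool,
                           unsigned int count, size_t size)
{
        struct demo_buf *buf, *tmp;
        unsigned int i;

        for (i = 0; i < count; i++) {
                buf = kzalloc(sizeof(*buf), GFP_KERNEL);
                if (!buf)
                        goto unwind;
                buf->virt = dma_alloc_coherent(dev, size, &buf->phys,
                                               GFP_KERNEL);
                if (!buf->virt) {
                        kfree(buf);
                        goto unwind;
                }
                list_add_tail(&buf->list, pool);
        }
        return 0;

unwind:                                 /* free whatever was already built */
        list_for_each_entry_safe(buf, tmp, pool, list) {
                list_del(&buf->list);
                dma_free_coherent(dev, size, buf->virt, buf->phys);
                kfree(buf);
        }
        return -ENOMEM;
}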
6433
+
6434
+/**
6435
+ * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6436
+ * @phba: pointer to lpfc hba data structure.
6437
+ * @pmb: pointer to the driver internal queue element for mailbox command.
6438
+ *
6439
+ * Completion handler for driver's RAS MBX command to the device.
6440
+ **/
6441
+static void
6442
+lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6443
+{
6444
+ MAILBOX_t *mb;
6445
+ union lpfc_sli4_cfg_shdr *shdr;
6446
+ uint32_t shdr_status, shdr_add_status;
6447
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6448
+
6449
+ mb = &pmb->u.mb;
6450
+
6451
+ shdr = (union lpfc_sli4_cfg_shdr *)
6452
+ &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6453
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6454
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6455
+
6456
+ if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6457
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6458
+ "6188 FW LOG mailbox "
6459
+ "completed with status x%x add_status x%x,"
6460
+ " mbx status x%x\n",
6461
+ shdr_status, shdr_add_status, mb->mbxStatus);
6462
+
6463
+ ras_fwlog->ras_hwsupport = false;
6464
+ goto disable_ras;
6465
+ }
6466
+
6467
+ spin_lock_irq(&phba->hbalock);
6468
+ ras_fwlog->state = ACTIVE;
6469
+ spin_unlock_irq(&phba->hbalock);
6470
+ mempool_free(pmb, phba->mbox_mem_pool);
6471
+
6472
+ return;
6473
+
6474
+disable_ras:
6475
+ /* Free RAS DMA memory */
6476
+ lpfc_sli4_ras_dma_free(phba);
6477
+ mempool_free(pmb, phba->mbox_mem_pool);
6478
+}
6479
+
6480
+/**
6481
+ * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6482
+ * @phba: pointer to lpfc hba data structure.
6483
+ * @fwlog_level: Logging verbosity level.
6484
+ * @fwlog_enable: Enable/Disable logging.
6485
+ *
6486
+ * Initialize memory and post mailbox command to enable FW logging in host
6487
+ * memory.
6488
+ **/
6489
+int
6490
+lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6491
+ uint32_t fwlog_level,
6492
+ uint32_t fwlog_enable)
6493
+{
6494
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6495
+ struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6496
+ struct lpfc_dmabuf *dmabuf;
6497
+ LPFC_MBOXQ_t *mbox;
6498
+ uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6499
+ int rc = 0;
6500
+
6501
+ spin_lock_irq(&phba->hbalock);
6502
+ ras_fwlog->state = INACTIVE;
6503
+ spin_unlock_irq(&phba->hbalock);
6504
+
6505
+ fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6506
+ phba->cfg_ras_fwlog_buffsize);
6507
+ fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6508
+
6509
+ /*
6510
+ * If re-enabling FW logging support, use the earlier allocated
6511
+ * DMA buffers while posting the MBX command.
6512
+ **/
6513
+ if (!ras_fwlog->lwpd.virt) {
6514
+ rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6515
+ if (rc) {
6516
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6517
+ "6189 FW Log Memory Allocation Failed");
6518
+ return rc;
6519
+ }
6520
+ }
6521
+
6522
+ /* Setup Mailbox command */
6523
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6524
+ if (!mbox) {
6525
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6526
+ "6190 RAS MBX Alloc Failed");
6527
+ rc = -ENOMEM;
6528
+ goto mem_free;
6529
+ }
6530
+
6531
+ ras_fwlog->fw_loglevel = fwlog_level;
6532
+ len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6533
+ sizeof(struct lpfc_sli4_cfg_mhdr));
6534
+
6535
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6536
+ LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6537
+ len, LPFC_SLI4_MBX_EMBED);
6538
+
6539
+ mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6540
+ bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6541
+ fwlog_enable);
6542
+ bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6543
+ ras_fwlog->fw_loglevel);
6544
+ bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6545
+ ras_fwlog->fw_buffcount);
6546
+ bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6547
+ LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6548
+
6549
+ /* Update DMA buffer address */
6550
+ list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6551
+ memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6552
+
6553
+ mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6554
+ putPaddrLow(dmabuf->phys);
6555
+
6556
+ mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6557
+ putPaddrHigh(dmabuf->phys);
6558
+ }
6559
+
6560
+ /* Update LPWD address */
6561
+ mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6562
+ mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6563
+
6564
+ spin_lock_irq(&phba->hbalock);
6565
+ ras_fwlog->state = REG_INPROGRESS;
6566
+ spin_unlock_irq(&phba->hbalock);
6567
+ mbox->vport = phba->pport;
6568
+ mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6569
+
6570
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6571
+
6572
+ if (rc == MBX_NOT_FINISHED) {
6573
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6574
+ "6191 FW-Log Mailbox failed. "
6575
+ "status %d mbxStatus : x%x", rc,
6576
+ bf_get(lpfc_mqe_status, &mbox->u.mqe));
6577
+ mempool_free(mbox, phba->mbox_mem_pool);
6578
+ rc = -EIO;
6579
+ goto mem_free;
6580
+ } else
6581
+ rc = 0;
6582
+mem_free:
6583
+ if (rc)
6584
+ lpfc_sli4_ras_dma_free(phba);
6585
+
6586
+ return rc;
6587
+}
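A worked example of the buffer sizing computed in lpfc_sli4_ras_fwlog_init() above. The 64K per-buffer size comes from the comment earlier in this change; the 256K minimum post size used below is an assumption for illustration, since that constant's value is not visible in this hunk:

#define DEMO_RAS_MIN_BUFF_POST_SIZE     (256 * 1024)    /* assumed value */
#define DEMO_RAS_MAX_ENTRY_SIZE         (64 * 1024)     /* per the doc text above */

static unsigned int demo_ras_buff_count(unsigned int cfg_ras_fwlog_buffsize)
{
        unsigned int total = DEMO_RAS_MIN_BUFF_POST_SIZE * cfg_ras_fwlog_buffsize;

        /* e.g. cfg_ras_fwlog_buffsize == 1 -> 256K total -> 4 buffers of 64K */
        return total / DEMO_RAS_MAX_ENTRY_SIZE;
}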
6588
+
6589
+/**
6590
+ * lpfc_sli4_ras_setup - Check if RAS is supported on the adapter
6591
+ * @phba: Pointer to HBA context object.
6592
+ *
6593
+ * Check if RAS is supported on the adapter and initialize it.
6594
+ **/
6595
+void
6596
+lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6597
+{
6598
+ /* Check RAS FW Log needs to be enabled or not */
6599
+ if (lpfc_check_fwlog_support(phba))
6600
+ return;
6601
+
6602
+ lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6603
+ LPFC_RAS_ENABLE_LOGGING);
61526604 }
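Taken together, the RAS routines above drive a small state machine on ras_fwlog->state under phba->hbalock: INACTIVE until the SET_DIAG_LOG_OPTION mailbox is posted, REG_INPROGRESS while it is outstanding, and ACTIVE once lpfc_sli4_ras_mbox_cmpl() sees a successful completion (any failure or teardown drops back to INACTIVE). A sketch of the assumed state values; the real enum is defined elsewhere in the driver:

enum demo_ras_state {
        DEMO_RAS_INACTIVE,              /* logging disabled or torn down */
        DEMO_RAS_REG_INPROGRESS,        /* SET_DIAG_LOG_OPTION mailbox outstanding */
        DEMO_RAS_ACTIVE,                /* firmware is writing logs to host memory */
};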
61536605
61546606 /**
....@@ -6254,7 +6706,7 @@
62546706 /* RPIs. */
62556707 count = phba->sli4_hba.max_cfg_param.max_rpi;
62566708 if (count <= 0) {
6257
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6709
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
62586710 "3279 Invalid provisioning of "
62596711 "rpi:%d\n", count);
62606712 rc = -EINVAL;
....@@ -6282,7 +6734,7 @@
62826734 /* VPIs. */
62836735 count = phba->sli4_hba.max_cfg_param.max_vpi;
62846736 if (count <= 0) {
6285
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6737
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
62866738 "3280 Invalid provisioning of "
62876739 "vpi:%d\n", count);
62886740 rc = -EINVAL;
....@@ -6309,7 +6761,7 @@
63096761 /* XRIs. */
63106762 count = phba->sli4_hba.max_cfg_param.max_xri;
63116763 if (count <= 0) {
6312
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6764
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
63136765 "3281 Invalid provisioning of "
63146766 "xri:%d\n", count);
63156767 rc = -EINVAL;
....@@ -6338,7 +6790,7 @@
63386790 /* VFIs. */
63396791 count = phba->sli4_hba.max_cfg_param.max_vfi;
63406792 if (count <= 0) {
6341
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6793
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
63426794 "3282 Invalid provisioning of "
63436795 "vfi:%d\n", count);
63446796 rc = -EINVAL;
....@@ -6432,7 +6884,7 @@
64326884 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
64336885 * @phba: Pointer to HBA context object.
64346886 * @type: The resource extent type.
6435
- * @extnt_count: buffer to hold port extent count response
6887
+ * @extnt_cnt: buffer to hold port extent count response
64366888 * @extnt_size: buffer to hold port extent size response.
64376889 *
64386890 * This function calls the port to read the host allocated extents
....@@ -6516,7 +6968,7 @@
65166968 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
65176969 req_len, emb);
65186970 if (alloc_len < req_len) {
6519
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6971
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
65206972 "2983 Allocated DMA memory size (x%x) is "
65216973 "less than the requested DMA memory "
65226974 "size (x%x)\n", alloc_len, req_len);
....@@ -6559,7 +7011,7 @@
65597011 }
65607012
65617013 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6562
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
7014
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
65637015 "2984 Failed to read allocated resources "
65647016 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
65657017 type,
....@@ -6576,7 +7028,6 @@
65767028 /**
65777029 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
65787030 * @phba: pointer to lpfc hba data structure.
6579
- * @pring: Pointer to driver SLI ring object.
65807031 * @sgl_list: linked link of sgl buffers to post
65817032 * @cnt: number of linked list buffers
65827033 *
....@@ -6714,7 +7165,7 @@
67147165 spin_unlock(&phba->sli4_hba.sgl_list_lock);
67157166 spin_unlock_irq(&phba->hbalock);
67167167 } else {
6717
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7168
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
67187169 "3161 Failure to post sgl to port.\n");
67197170 return -EIO;
67207171 }
....@@ -6723,7 +7174,39 @@
67237174 return total_cnt;
67247175 }
67257176
6726
-void
7177
+/**
7178
+ * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7179
+ * @phba: pointer to lpfc hba data structure.
7180
+ *
7181
+ * This routine walks the list of nvme buffers that have been allocated and
7182
+ * reposts them to the port by using SGL block post. This is needed after a
7183
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7184
+ * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7185
+ * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7186
+ *
7187
+ * Returns: 0 = success, non-zero failure.
7188
+ **/
7189
+static int
7190
+lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7191
+{
7192
+ LIST_HEAD(post_nblist);
7193
+ int num_posted, rc = 0;
7194
+
7195
+ /* get all NVME buffers need to repost to a local list */
7196
+ lpfc_io_buf_flush(phba, &post_nblist);
7197
+
7198
+ /* post the list of nvme buffer sgls to port if available */
7199
+ if (!list_empty(&post_nblist)) {
7200
+ num_posted = lpfc_sli4_post_io_sgl_list(
7201
+ phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7202
+ /* failed to post any nvme buffer, return error */
7203
+ if (num_posted == 0)
7204
+ rc = -EIO;
7205
+ }
7206
+ return rc;
7207
+}
7208
+
7209
+static void
67277210 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
67287211 {
67297212 uint32_t len;
....@@ -6785,7 +7268,7 @@
67857268 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
67867269 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
67877270 if (rc < 0) {
6788
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7271
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
67897272 "6421 Cannot post to HRQ %d: %x %x %x "
67907273 "DRQ %x %x\n",
67917274 hrq->queue_id,
....@@ -6806,6 +7289,68 @@
68067289 }
68077290
68087291 /**
7292
+ * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7293
+ * @phba: pointer to lpfc hba data structure.
7294
+ *
7295
+ * This routine initializes the per-cq idle_stat to dynamically dictate
7296
+ * polling decisions.
7297
+ *
7298
+ * Return codes:
7299
+ * None
7300
+ **/
7301
+static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7302
+{
7303
+ int i;
7304
+ struct lpfc_sli4_hdw_queue *hdwq;
7305
+ struct lpfc_queue *cq;
7306
+ struct lpfc_idle_stat *idle_stat;
7307
+ u64 wall;
7308
+
7309
+ for_each_present_cpu(i) {
7310
+ hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7311
+ cq = hdwq->io_cq;
7312
+
7313
+ /* Skip if we've already handled this cq's primary CPU */
7314
+ if (cq->chann != i)
7315
+ continue;
7316
+
7317
+ idle_stat = &phba->sli4_hba.idle_stat[i];
7318
+
7319
+ idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7320
+ idle_stat->prev_wall = wall;
7321
+
7322
+ if (phba->nvmet_support)
7323
+ cq->poll_mode = LPFC_QUEUE_WORK;
7324
+ else
7325
+ cq->poll_mode = LPFC_IRQ_POLL;
7326
+ }
7327
+
7328
+ if (!phba->nvmet_support)
7329
+ schedule_delayed_work(&phba->idle_stat_delay_work,
7330
+ msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7331
+}
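lpfc_init_idle_stat_hb() above only seeds prev_idle/prev_wall from get_cpu_idle_time() and schedules idle_stat_delay_work; the code that turns later samples into a poll-mode decision is outside this hunk. Purely as an illustration, two such samples can be reduced to an idle percentage like this (assumed arithmetic, not the driver's actual policy):

#include <linux/cpufreq.h>      /* get_cpu_idle_time() */
#include <linux/math64.h>       /* div64_u64() */

static unsigned int demo_idle_percent(unsigned int cpu,
                                      u64 *prev_idle, u64 *prev_wall)
{
        u64 wall, idle, diff_idle, diff_wall;

        idle = get_cpu_idle_time(cpu, &wall, 1);
        diff_idle = idle - *prev_idle;
        diff_wall = wall - *prev_wall;
        *prev_idle = idle;
        *prev_wall = wall;

        if (!diff_wall)
                return 100;     /* no wall time elapsed; treat as idle */
        return (unsigned int)div64_u64(diff_idle * 100, diff_wall);
}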
7332
+
7333
+static void lpfc_sli4_dip(struct lpfc_hba *phba)
7334
+{
7335
+ uint32_t if_type;
7336
+
7337
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7338
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7339
+ if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7340
+ struct lpfc_register reg_data;
7341
+
7342
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7343
+ &reg_data.word0))
7344
+ return;
7345
+
7346
+ if (bf_get(lpfc_sliport_status_dip, &reg_data))
7347
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7348
+ "2904 Firmware Dump Image Present"
7349
+ " on Adapter");
7350
+ }
7351
+}
7352
+
7353
+/**
68097354 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
68107355 * @phba: Pointer to HBA context object.
68117356 *
....@@ -6817,7 +7362,7 @@
68177362 int
68187363 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
68197364 {
6820
- int rc, i, cnt;
7365
+ int rc, i, cnt, len, dd;
68217366 LPFC_MBOXQ_t *mboxq;
68227367 struct lpfc_mqe *mqe;
68237368 uint8_t *vpd;
....@@ -6827,6 +7372,7 @@
68277372 struct lpfc_vport *vport = phba->pport;
68287373 struct lpfc_dmabuf *mp;
68297374 struct lpfc_rqb *rqbp;
7375
+ u32 flg;
68307376
68317377 /* Perform a PCI function reset to start from clean */
68327378 rc = lpfc_pci_function_reset(phba);
....@@ -6840,8 +7386,20 @@
68407386 else {
68417387 spin_lock_irq(&phba->hbalock);
68427388 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7389
+ flg = phba->sli.sli_flag;
68437390 spin_unlock_irq(&phba->hbalock);
7391
+ /* Allow a little time after setting SLI_ACTIVE for any polled
7392
+ * MBX commands to complete via BSG.
7393
+ */
7394
+ for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
7395
+ msleep(20);
7396
+ spin_lock_irq(&phba->hbalock);
7397
+ flg = phba->sli.sli_flag;
7398
+ spin_unlock_irq(&phba->hbalock);
7399
+ }
68447400 }
7401
+
7402
+ lpfc_sli4_dip(phba);
68457403
68467404 /*
68477405 * Allocate a single mailbox container for initializing the
....@@ -6880,10 +7438,10 @@
68807438 else
68817439 phba->hba_flag &= ~HBA_FIP_SUPPORT;
68827440
6883
- phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
7441
+ phba->hba_flag &= ~HBA_IOQ_FLUSH;
68847442
68857443 if (phba->sli_rev != LPFC_SLI_REV4) {
6886
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7444
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
68877445 "0376 READ_REV Error. SLI Level %d "
68887446 "FCoE enabled %d\n",
68897447 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
....@@ -6912,6 +7470,12 @@
69127470 "3080 Successful retrieving SLI4 device "
69137471 "physical port name: %s.\n", phba->Port);
69147472
7473
+ rc = lpfc_sli4_get_ctl_attr(phba);
7474
+ if (!rc)
7475
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7476
+ "8351 Successful retrieving SLI4 device "
7477
+ "CTL ATTR\n");
7478
+
69157479 /*
69167480 * Evaluate the read rev and vpd data. Populate the driver
69177481 * state with the results. If this routine fails, the failure
....@@ -6919,7 +7483,7 @@
69197483 */
69207484 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
69217485 if (unlikely(!rc)) {
6922
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7486
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
69237487 "0377 Error %d parsing vpd. "
69247488 "Using defaults.\n", rc);
69257489 rc = 0;
....@@ -6964,15 +7528,6 @@
69647528 phba->vpd.rev.opFwName,
69657529 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
69667530 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6967
-
6968
- /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
6969
- rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6970
- if (phba->pport->cfg_lun_queue_depth > rc) {
6971
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6972
- "3362 LUN queue depth changed from %d to %d\n",
6973
- phba->pport->cfg_lun_queue_depth, rc);
6974
- phba->pport->cfg_lun_queue_depth = rc;
6975
- }
69767531
69777532 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
69787533 LPFC_SLI_INTF_IF_TYPE_0) {
....@@ -7062,13 +7617,30 @@
70627617 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
70637618 spin_unlock_irq(&phba->hbalock);
70647619
7620
+ /* Always try to enable dual dump feature if we can */
7621
+ lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7622
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7623
+ dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7624
+ if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7625
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7626
+ "6448 Dual Dump is enabled\n");
7627
+ else
7628
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7629
+ "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7630
+ "rc:x%x dd:x%x\n",
7631
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7632
+ lpfc_sli_config_mbox_subsys_get(
7633
+ phba, mboxq),
7634
+ lpfc_sli_config_mbox_opcode_get(
7635
+ phba, mboxq),
7636
+ rc, dd);
70657637 /*
70667638 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
70677639 * calls depends on these resources to complete port setup.
70687640 */
70697641 rc = lpfc_sli4_alloc_resource_identifiers(phba);
70707642 if (rc) {
7071
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7643
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
70727644 "2920 Failed to alloc Resource IDs "
70737645 "rc = x%x\n", rc);
70747646 goto out_free_mbox;
....@@ -7093,7 +7665,7 @@
70937665
70947666 mboxq->vport = vport;
70957667 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7096
- mp = (struct lpfc_dmabuf *) mboxq->context1;
7668
+ mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
70977669 if (rc == MBX_SUCCESS) {
70987670 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
70997671 rc = 0;
....@@ -7105,9 +7677,9 @@
71057677 */
71067678 lpfc_mbuf_free(phba, mp->virt, mp->phys);
71077679 kfree(mp);
7108
- mboxq->context1 = NULL;
7680
+ mboxq->ctx_buf = NULL;
71097681 if (unlikely(rc)) {
7110
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7682
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
71117683 "0382 READ_SPARAM command failed "
71127684 "status %d, mbxStatus x%x\n",
71137685 rc, bf_get(lpfc_mqe_status, mqe));
....@@ -7125,7 +7697,7 @@
71257697 /* Create all the SLI4 queues */
71267698 rc = lpfc_sli4_queue_create(phba);
71277699 if (rc) {
7128
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7700
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
71297701 "3089 Failed to allocate queues\n");
71307702 rc = -ENODEV;
71317703 goto out_free_mbox;
....@@ -7133,7 +7705,7 @@
71337705 /* Set up all the queues to the device */
71347706 rc = lpfc_sli4_queue_setup(phba);
71357707 if (unlikely(rc)) {
7136
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7708
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
71377709 "0381 Error %d during queue setup.\n ", rc);
71387710 goto out_stop_timers;
71397711 }
....@@ -7144,7 +7716,7 @@
71447716 /* update host els xri-sgl sizes and mappings */
71457717 rc = lpfc_sli4_els_sgl_update(phba);
71467718 if (unlikely(rc)) {
7147
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7719
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
71487720 "1400 Failed to update xri-sgl size and "
71497721 "mapping: %d\n", rc);
71507722 goto out_destroy_queue;
....@@ -7154,7 +7726,7 @@
71547726 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
71557727 phba->sli4_hba.els_xri_cnt);
71567728 if (unlikely(rc < 0)) {
7157
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7729
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
71587730 "0582 Error %d during els sgl post "
71597731 "operation\n", rc);
71607732 rc = -ENODEV;
....@@ -7166,7 +7738,7 @@
71667738 /* update host nvmet xri-sgl sizes and mappings */
71677739 rc = lpfc_sli4_nvmet_sgl_update(phba);
71687740 if (unlikely(rc)) {
7169
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7741
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
71707742 "6308 Failed to update nvmet-sgl size "
71717743 "and mapping: %d\n", rc);
71727744 goto out_destroy_queue;
....@@ -7178,7 +7750,7 @@
71787750 &phba->sli4_hba.lpfc_nvmet_sgl_list,
71797751 phba->sli4_hba.nvmet_xri_cnt);
71807752 if (unlikely(rc < 0)) {
7181
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7753
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
71827754 "3117 Error %d during nvmet "
71837755 "sgl post\n", rc);
71847756 rc = -ENODEV;
....@@ -7186,39 +7758,46 @@
71867758 }
71877759 phba->sli4_hba.nvmet_xri_cnt = rc;
71887760
7189
- cnt = phba->cfg_iocb_cnt * 1024;
7190
- /* We need 1 iocbq for every SGL, for IO processing */
7191
- cnt += phba->sli4_hba.nvmet_xri_cnt;
7761
+ /* We allocate an iocbq for every receive context SGL.
7762
+ * The additional allocation is for abort and ls handling.
7763
+ */
7764
+ cnt = phba->sli4_hba.nvmet_xri_cnt +
7765
+ phba->sli4_hba.max_cfg_param.max_xri;
71927766 } else {
7193
- /* update host scsi xri-sgl sizes and mappings */
7194
- rc = lpfc_sli4_scsi_sgl_update(phba);
7767
+ /* update host common xri-sgl sizes and mappings */
7768
+ rc = lpfc_sli4_io_sgl_update(phba);
71957769 if (unlikely(rc)) {
7196
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7197
- "6309 Failed to update scsi-sgl size "
7198
- "and mapping: %d\n", rc);
7199
- goto out_destroy_queue;
7200
- }
7201
-
7202
- /* update host nvme xri-sgl sizes and mappings */
7203
- rc = lpfc_sli4_nvme_sgl_update(phba);
7204
- if (unlikely(rc)) {
7205
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7770
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
72067771 "6082 Failed to update nvme-sgl size "
72077772 "and mapping: %d\n", rc);
72087773 goto out_destroy_queue;
72097774 }
72107775
7211
- cnt = phba->cfg_iocb_cnt * 1024;
7776
+ /* register the allocated common sgl pool to the port */
7777
+ rc = lpfc_sli4_repost_io_sgl_list(phba);
7778
+ if (unlikely(rc)) {
7779
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7780
+ "6116 Error %d during nvme sgl post "
7781
+ "operation\n", rc);
7782
+ /* Some NVME buffers were moved to abort nvme list */
7783
+ /* A pci function reset will repost them */
7784
+ rc = -ENODEV;
7785
+ goto out_destroy_queue;
7786
+ }
7787
+ /* Each lpfc_io_buf job structure has an iocbq element.
7788
+ * This cnt provides for abort, els, ct and ls requests.
7789
+ */
7790
+ cnt = phba->sli4_hba.max_cfg_param.max_xri;
72127791 }
72137792
72147793 if (!phba->sli.iocbq_lookup) {
72157794 /* Initialize and populate the iocb list per host */
72167795 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7217
- "2821 initialize iocb list %d total %d\n",
7218
- phba->cfg_iocb_cnt, cnt);
7796
+ "2821 initialize iocb list with %d entries\n",
7797
+ cnt);
72197798 rc = lpfc_init_iocb_list(phba, cnt);
72207799 if (rc) {
7221
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7800
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
72227801 "1413 Failed to init iocb list.\n");
72237802 goto out_destroy_queue;
72247803 }
....@@ -7244,40 +7823,10 @@
72447823 }
72457824 }
72467825
7247
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
7248
- /* register the allocated scsi sgl pool to the port */
7249
- rc = lpfc_sli4_repost_scsi_sgl_list(phba);
7250
- if (unlikely(rc)) {
7251
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7252
- "0383 Error %d during scsi sgl post "
7253
- "operation\n", rc);
7254
- /* Some Scsi buffers were moved to abort scsi list */
7255
- /* A pci function reset will repost them */
7256
- rc = -ENODEV;
7257
- goto out_destroy_queue;
7258
- }
7259
- }
7260
-
7261
- if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
7262
- (phba->nvmet_support == 0)) {
7263
-
7264
- /* register the allocated nvme sgl pool to the port */
7265
- rc = lpfc_repost_nvme_sgl_list(phba);
7266
- if (unlikely(rc)) {
7267
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7268
- "6116 Error %d during nvme sgl post "
7269
- "operation\n", rc);
7270
- /* Some NVME buffers were moved to abort nvme list */
7271
- /* A pci function reset will repost them */
7272
- rc = -ENODEV;
7273
- goto out_destroy_queue;
7274
- }
7275
- }
7276
-
72777826 /* Post the rpi header region to the device. */
72787827 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
72797828 if (unlikely(rc)) {
7280
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7829
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
72817830 "0393 Error %d during rpi post operation\n",
72827831 rc);
72837832 rc = -ENODEV;
....@@ -7323,11 +7872,24 @@
73237872 lpfc_sli_read_link_ste(phba);
73247873 }
73257874
7326
- /* Arm the CQs and then EQs on device */
7327
- lpfc_sli4_arm_cqeq_intr(phba);
7875
+ /* Don't post more new bufs if repost already recovered
7876
+ * the nvme sgls.
7877
+ */
7878
+ if (phba->nvmet_support == 0) {
7879
+ if (phba->sli4_hba.io_xri_cnt == 0) {
7880
+ len = lpfc_new_io_buf(
7881
+ phba, phba->sli4_hba.io_xri_max);
7882
+ if (len == 0) {
7883
+ rc = -ENOMEM;
7884
+ goto out_unset_queue;
7885
+ }
73287886
7329
- /* Indicate device interrupt mode */
7330
- phba->sli4_hba.intr_enable = 1;
7887
+ if (phba->cfg_xri_rebalancing)
7888
+ lpfc_create_multixri_pools(phba);
7889
+ }
7890
+ } else {
7891
+ phba->cfg_xri_rebalancing = 0;
7892
+ }
73317893
73327894 /* Allow asynchronous mailbox command to go through */
73337895 spin_lock_irq(&phba->hbalock);
....@@ -7350,6 +7912,14 @@
73507912 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
73517913 phba->hb_outstanding = 0;
73527914 phba->last_completion_time = jiffies;
7915
+
7916
+ /* start eq_delay heartbeat */
7917
+ if (phba->cfg_auto_imax)
7918
+ queue_delayed_work(phba->wq, &phba->eq_delay_work,
7919
+ msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7920
+
7921
+ /* start per phba idle_stat_delay heartbeat */
7922
+ lpfc_init_idle_stat_hb(phba);
73537923
73547924 /* Start error attention (ERATT) polling timer */
73557925 mod_timer(&phba->eratt_poll,
....@@ -7380,29 +7950,49 @@
73807950 */
73817951 spin_lock_irq(&phba->hbalock);
73827952 phba->link_state = LPFC_LINK_DOWN;
7953
+
7954
+ /* Check if physical ports are trunked */
7955
+ if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7956
+ phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7957
+ if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7958
+ phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7959
+ if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7960
+ phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7961
+ if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7962
+ phba->trunk_link.link3.state = LPFC_LINK_DOWN;
73837963 spin_unlock_irq(&phba->hbalock);
7964
+
7965
+ /* Arm the CQs and then EQs on device */
7966
+ lpfc_sli4_arm_cqeq_intr(phba);
7967
+
7968
+ /* Indicate device interrupt mode */
7969
+ phba->sli4_hba.intr_enable = 1;
7970
+
73847971 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
73857972 (phba->hba_flag & LINK_DISABLED)) {
7386
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7973
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
73877974 "3103 Adapter Link is disabled.\n");
73887975 lpfc_down_link(phba, mboxq);
73897976 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
73907977 if (rc != MBX_SUCCESS) {
7391
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7978
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
73927979 "3104 Adapter failed to issue "
73937980 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7394
- goto out_unset_queue;
7981
+ goto out_io_buff_free;
73957982 }
73967983 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
73977984 /* don't perform init_link on SLI4 FC port loopback test */
73987985 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
73997986 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
74007987 if (rc)
7401
- goto out_unset_queue;
7988
+ goto out_io_buff_free;
74027989 }
74037990 }
74047991 mempool_free(mboxq, phba->mbox_mem_pool);
74057992 return rc;
7993
+out_io_buff_free:
7994
+ /* Free allocated IO Buffers */
7995
+ lpfc_io_free(phba);
74067996 out_unset_queue:
74077997 /* Unset all the queues set up in this routine when error out */
74087998 lpfc_sli4_queue_unset(phba);
....@@ -7419,7 +8009,7 @@
74198009
74208010 /**
74218011 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7422
- * @ptr: context object - pointer to hba structure.
8012
+ * @t: Context to fetch pointer to hba structure from.
74238013 *
74248014 * This is the callback function for mailbox timer. The mailbox
74258015 * timer is armed when a new mailbox command is issued and the timer
....@@ -7473,8 +8063,9 @@
74738063 mcq = phba->sli4_hba.mbx_cq;
74748064 idx = mcq->hba_index;
74758065 qe_valid = mcq->qe_valid;
7476
- while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
7477
- mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
8066
+ while (bf_get_le32(lpfc_cqe_valid,
8067
+ (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8068
+ mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
74788069 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
74798070 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
74808071 pending_completions = true;
....@@ -7503,27 +8094,28 @@
75038094 * and will process all the completions associated with the eq for the
75048095 * mailbox completion queue.
75058096 **/
7506
-bool
8097
+static bool
75078098 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
75088099 {
75098100 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
75108101 uint32_t eqidx;
75118102 struct lpfc_queue *fpeq = NULL;
7512
- struct lpfc_eqe *eqe;
8103
+ struct lpfc_queue *eq;
75138104 bool mbox_pending;
75148105
75158106 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
75168107 return false;
75178108
7518
- /* Find the eq associated with the mcq */
7519
-
7520
- if (sli4_hba->hba_eq)
7521
- for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
7522
- if (sli4_hba->hba_eq[eqidx]->queue_id ==
7523
- sli4_hba->mbx_cq->assoc_qid) {
7524
- fpeq = sli4_hba->hba_eq[eqidx];
8109
+ /* Find the EQ associated with the mbox CQ */
8110
+ if (sli4_hba->hdwq) {
8111
+ for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8112
+ eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8113
+ if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8114
+ fpeq = eq;
75258115 break;
75268116 }
8117
+ }
8118
+ }
75278119 if (!fpeq)
75288120 return false;
75298121
....@@ -7543,14 +8135,11 @@
75438135 */
75448136
75458137 if (mbox_pending)
7546
- while ((eqe = lpfc_sli4_eq_get(fpeq))) {
7547
- lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
7548
- fpeq->EQ_processed++;
7549
- }
7550
-
7551
- /* Always clear and re-arm the EQ */
7552
-
7553
- sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
8138
+ /* process and rearm the EQ */
8139
+ lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8140
+ else
8141
+ /* Always clear and re-arm the EQ */
8142
+ sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
75548143
75558144 return mbox_pending;
75568145
....@@ -7594,8 +8183,8 @@
75948183 }
75958184
75968185 /* Mbox cmd <mbxCommand> timeout */
7597
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7598
- "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
8186
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8187
+ "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
75998188 mb->mbxCommand,
76008189 phba->pport->port_state,
76018190 phba->sli.sli_flag,
....@@ -7616,7 +8205,7 @@
76168205
76178206 lpfc_sli_abort_fcp_rings(phba);
76188207
7619
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8208
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
76208209 "0345 Resetting board due to mailbox timeout\n");
76218210
76228211 /* Reset the HBA device */
....@@ -7714,7 +8303,7 @@
77148303 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
77158304
77168305 /* Mbox command <mbxCommand> cannot issue */
7717
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8306
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
77188307 "(%d):0311 Mailbox command x%x cannot "
77198308 "issue Data: x%x x%x\n",
77208309 pmbox->vport ? pmbox->vport->vpi : 0,
....@@ -7726,7 +8315,7 @@
77268315 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
77278316 !(hc_copy & HC_MBINT_ENA)) {
77288317 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7729
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8318
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
77308319 "(%d):2528 Mailbox command x%x cannot "
77318320 "issue Data: x%x x%x\n",
77328321 pmbox->vport ? pmbox->vport->vpi : 0,
....@@ -7745,7 +8334,7 @@
77458334 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
77468335
77478336 /* Mbox command <mbxCommand> cannot issue */
7748
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8337
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
77498338 "(%d):2529 Mailbox command x%x "
77508339 "cannot issue Data: x%x x%x\n",
77518340 pmbox->vport ? pmbox->vport->vpi : 0,
....@@ -7757,7 +8346,7 @@
77578346 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
77588347 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
77598348 /* Mbox command <mbxCommand> cannot issue */
7760
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8349
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
77618350 "(%d):2530 Mailbox command x%x "
77628351 "cannot issue Data: x%x x%x\n",
77638352 pmbox->vport ? pmbox->vport->vpi : 0,
....@@ -7810,7 +8399,7 @@
78108399 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
78118400 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
78128401 /* Mbox command <mbxCommand> cannot issue */
7813
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8402
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
78148403 "(%d):2531 Mailbox command x%x "
78158404 "cannot issue Data: x%x x%x\n",
78168405 pmbox->vport ? pmbox->vport->vpi : 0,
....@@ -7865,10 +8454,10 @@
78658454 }
78668455
78678456 /* Copy the mailbox extension data */
7868
- if (pmbox->in_ext_byte_len && pmbox->context2) {
7869
- lpfc_sli_pcimem_bcopy(pmbox->context2,
7870
- (uint8_t *)phba->mbox_ext,
7871
- pmbox->in_ext_byte_len);
8457
+ if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8458
+ lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8459
+ (uint8_t *)phba->mbox_ext,
8460
+ pmbox->in_ext_byte_len);
78728461 }
78738462 /* Copy command data to host SLIM area */
78748463 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
....@@ -7879,10 +8468,10 @@
78798468 = MAILBOX_HBA_EXT_OFFSET;
78808469
78818470 /* Copy the mailbox extension data */
7882
- if (pmbox->in_ext_byte_len && pmbox->context2)
8471
+ if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
78838472 lpfc_memcpy_to_slim(phba->MBslimaddr +
78848473 MAILBOX_HBA_EXT_OFFSET,
7885
- pmbox->context2, pmbox->in_ext_byte_len);
8474
+ pmbox->ctx_buf, pmbox->in_ext_byte_len);
78868475
78878476 if (mbx->mbxCommand == MBX_CONFIG_PORT)
78888477 /* copy command data into host mbox for cmpl */
....@@ -8005,9 +8594,9 @@
80058594 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
80068595 MAILBOX_CMD_SIZE);
80078596 /* Copy the mailbox extension data */
8008
- if (pmbox->out_ext_byte_len && pmbox->context2) {
8597
+ if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
80098598 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8010
- pmbox->context2,
8599
+ pmbox->ctx_buf,
80118600 pmbox->out_ext_byte_len);
80128601 }
80138602 } else {
....@@ -8015,8 +8604,9 @@
80158604 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
80168605 MAILBOX_CMD_SIZE);
80178606 /* Copy the mailbox extension data */
8018
- if (pmbox->out_ext_byte_len && pmbox->context2) {
8019
- lpfc_memcpy_from_slim(pmbox->context2,
8607
+ if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8608
+ lpfc_memcpy_from_slim(
8609
+ pmbox->ctx_buf,
80208610 phba->MBslimaddr +
80218611 MAILBOX_HBA_EXT_OFFSET,
80228612 pmbox->out_ext_byte_len);
....@@ -8127,7 +8717,7 @@
81278717 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
81288718 spin_unlock_irq(&phba->hbalock);
81298719
8130
- /* wake up worker thread to post asynchronlous mailbox command */
8720
+ /* wake up worker thread to post asynchronous mailbox command */
81318721 lpfc_worker_wake_up(phba);
81328722 }
81338723
....@@ -8156,7 +8746,7 @@
81568746 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
81578747 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
81588748 if (!db_ready)
8159
- msleep(2);
8749
+ mdelay(2);
81608750
81618751 if (time_after(jiffies, timeout))
81628752 return MBXERR_ERROR;
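
Switching from msleep() to mdelay() keeps the bootstrap-mailbox poll usable in contexts that must not sleep, while the loop still gives up once a jiffies deadline passes. A generic, hedged sketch of that bounded busy-poll; read_ready_bit() is only a placeholder for the doorbell register read:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

static int poll_until_ready(bool (*read_ready_bit)(void), unsigned int tmo_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);

        for (;;) {
                if (read_ready_bit())
                        return 0;               /* hardware signalled ready */
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT;      /* bounded wait expired */
                mdelay(2);                      /* busy-wait; safe in atomic context */
        }
}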
....@@ -8200,7 +8790,7 @@
82008790 spin_lock_irqsave(&phba->hbalock, iflag);
82018791 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
82028792 spin_unlock_irqrestore(&phba->hbalock, iflag);
8203
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8793
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
82048794 "(%d):2532 Mailbox command x%x (x%x/x%x) "
82058795 "cannot issue Data: x%x x%x\n",
82068796 mboxq->vport ? mboxq->vport->vpi : 0,
....@@ -8219,7 +8809,6 @@
82198809 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
82208810 if (rc)
82218811 goto exit;
8222
-
82238812 /*
82248813 * Initialize the bootstrap memory region to avoid stale data areas
82258814 * in the mailbox post. Then copy the caller's mailbox contents to
....@@ -8300,7 +8889,7 @@
83008889 /**
83018890 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
83028891 * @phba: Pointer to HBA context object.
8303
- * @pmbox: Pointer to mailbox object.
8892
+ * @mboxq: Pointer to mailbox object.
83048893 * @flag: Flag indicating how the mailbox need to be processed.
83058894 *
83068895 * This function is called by discovery code and HBA management code to submit
....@@ -8322,7 +8911,7 @@
83228911
83238912 rc = lpfc_mbox_dev_check(phba);
83248913 if (unlikely(rc)) {
8325
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8914
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
83268915 "(%d):2544 Mailbox command x%x (x%x/x%x) "
83278916 "cannot issue Data: x%x x%x\n",
83288917 mboxq->vport ? mboxq->vport->vpi : 0,
....@@ -8344,7 +8933,7 @@
83448933 "(%d):2541 Mailbox command x%x "
83458934 "(x%x/x%x) failure: "
83468935 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8347
- "Data: x%x x%x\n,",
8936
+ "Data: x%x x%x\n",
83488937 mboxq->vport ? mboxq->vport->vpi : 0,
83498938 mboxq->u.mb.mbxCommand,
83508939 lpfc_sli_config_mbox_subsys_get(phba,
....@@ -8378,7 +8967,7 @@
83788967 "(%d):2597 Sync Mailbox command "
83798968 "x%x (x%x/x%x) failure: "
83808969 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8381
- "Data: x%x x%x\n,",
8970
+ "Data: x%x x%x\n",
83828971 mboxq->vport ? mboxq->vport->vpi : 0,
83838972 mboxq->u.mb.mbxCommand,
83848973 lpfc_sli_config_mbox_subsys_get(phba,
....@@ -8396,10 +8985,10 @@
83968985 return rc;
83978986 }
83988987
8399
- /* Now, interrupt mode asynchrous mailbox command */
8988
+ /* Now, interrupt mode asynchronous mailbox command */
84008989 rc = lpfc_mbox_cmd_check(phba, mboxq);
84018990 if (rc) {
8402
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8991
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
84038992 "(%d):2543 Mailbox command x%x (x%x/x%x) "
84048993 "cannot issue Data: x%x x%x\n",
84058994 mboxq->vport ? mboxq->vport->vpi : 0,
....@@ -8467,7 +9056,7 @@
84679056 }
84689057 if (unlikely(phba->sli.mbox_active)) {
84699058 spin_unlock_irqrestore(&phba->hbalock, iflags);
8470
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
9059
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
84719060 "0384 There is pending active mailbox cmd\n");
84729061 return MBX_NOT_FINISHED;
84739062 }
....@@ -8528,7 +9117,7 @@
85289117 /* Post the mailbox command to the port */
85299118 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
85309119 if (rc != MBX_SUCCESS) {
8531
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
9120
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
85329121 "(%d):2533 Mailbox command x%x (x%x/x%x) "
85339122 "cannot issue Data: x%x x%x\n",
85349123 mboxq->vport ? mboxq->vport->vpi : 0,
....@@ -8604,7 +9193,7 @@
86049193 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
86059194 break;
86069195 default:
8607
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9196
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
86089197 "1420 Invalid HBA PCI-device group: 0x%x\n",
86099198 dev_grp);
86109199 return -ENODEV;
....@@ -8619,7 +9208,8 @@
86199208 * @pring: Pointer to driver SLI ring object.
86209209 * @piocb: Pointer to address of newly added command iocb.
86219210 *
8622
- * This function is called with hbalock held to add a command
9211
+ * This function is called with hbalock held for SLI3 ports or
9212
+ * the ring lock held for SLI4 ports to add a command
86239213 * iocb to the txq when SLI layer cannot submit the command iocb
86249214 * to the ring.
86259215 **/
....@@ -8627,7 +9217,10 @@
86279217 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
86289218 struct lpfc_iocbq *piocb)
86299219 {
8630
- lockdep_assert_held(&phba->hbalock);
9220
+ if (phba->sli_rev == LPFC_SLI_REV4)
9221
+ lockdep_assert_held(&pring->ring_lock);
9222
+ else
9223
+ lockdep_assert_held(&phba->hbalock);
86319224 /* Insert the caller's iocb in the txq tail for later processing. */
86329225 list_add_tail(&piocb->list, &pring->txq);
86339226 }
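
The reworked assertion documents, and with lockdep enabled enforces, which lock the caller must already hold: the per-ring lock on SLI4, the adapter-wide hbalock on SLI3. A stripped-down sketch of the same caller-held-lock contract with hypothetical types:

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/list.h>

struct demo_ring {
        spinlock_t ring_lock;
        struct list_head txq;
};

struct demo_hba {
        spinlock_t hbalock;
        int sli_rev;                            /* 3 or 4 */
};

static void demo_ringtx_put(struct demo_hba *hba, struct demo_ring *ring,
                            struct list_head *entry)
{
        /* Assert the lock the caller is required to hold for this revision. */
        if (hba->sli_rev == 4)
                lockdep_assert_held(&ring->ring_lock);
        else
                lockdep_assert_held(&hba->hbalock);

        list_add_tail(entry, &ring->txq);
}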
....@@ -8701,8 +9294,7 @@
87019294 if (piocb->iocb_cmpl && (!piocb->vport) &&
87029295 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
87039296 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8704
- lpfc_printf_log(phba, KERN_ERR,
8705
- LOG_SLI | LOG_VPORT,
9297
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
87069298 "1807 IOCB x%x failed. No vport\n",
87079299 piocb->iocb.ulpCommand);
87089300 dump_stack();
....@@ -8755,7 +9347,7 @@
87559347 */
87569348 if (piocb->iocb_cmpl)
87579349 piocb->iocb_cmpl = NULL;
8758
- /*FALLTHROUGH*/
9350
+ fallthrough;
87599351 case CMD_CREATE_XRI_CR:
87609352 case CMD_CLOSE_XRI_CN:
87619353 case CMD_CLOSE_XRI_CX:
....@@ -8803,7 +9395,7 @@
88039395 /**
88049396 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
88059397 * @phba: Pointer to HBA context object.
8806
- * @piocb: Pointer to command iocb.
9398
+ * @piocbq: Pointer to command iocb.
88079399 * @sglq: Pointer to the scatter gather queue object.
88089400 *
88099401 * This routine converts the bpl or bde that is in the IOCB
....@@ -8911,7 +9503,7 @@
89119503 /**
89129504 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
89139505 * @phba: Pointer to HBA context object.
8914
- * @piocb: Pointer to command iocb.
9506
+ * @iocbq: Pointer to command iocb.
89159507 * @wqe: Pointer to the work queue entry.
89169508 *
89179509 * This routine converts the iocb command to its Work Queue Entry
....@@ -8956,11 +9548,9 @@
89569548 memset(wqe, 0, sizeof(union lpfc_wqe128));
89579549 /* Some of the fields are in the right position already */
89589550 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8959
- if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
8960
- /* The ct field has moved so reset */
8961
- wqe->generic.wqe_com.word7 = 0;
8962
- wqe->generic.wqe_com.word10 = 0;
8963
- }
9551
+ /* The ct field has moved so reset */
9552
+ wqe->generic.wqe_com.word7 = 0;
9553
+ wqe->generic.wqe_com.word10 = 0;
89649554
89659555 abort_tag = (uint32_t) iocbq->iotag;
89669556 xritag = iocbq->sli4_xritag;
....@@ -8999,7 +9589,7 @@
89999589 else
90009590 ndlp = (struct lpfc_nodelist *)iocbq->context1;
90019591 if (!iocbq->iocb.ulpLe) {
9002
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9592
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
90039593 "2007 Only Limited Edition cmd Format"
90049594 " supported 0x%x\n",
90059595 iocbq->iocb.ulpCommand);
....@@ -9030,6 +9620,8 @@
90309620 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
90319621 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
90329622 *pcmd == ELS_CMD_SCR ||
9623
+ *pcmd == ELS_CMD_RDF ||
9624
+ *pcmd == ELS_CMD_RSCN_XMT ||
90339625 *pcmd == ELS_CMD_FDISC ||
90349626 *pcmd == ELS_CMD_LOGO ||
90359627 *pcmd == ELS_CMD_PLOGI)) {
....@@ -9069,6 +9661,7 @@
90699661 cmnd = CMD_XMIT_SEQUENCE64_CR;
90709662 if (phba->link_flag & LS_LOOPBACK_MODE)
90719663 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9664
+ fallthrough;
90729665 case CMD_XMIT_SEQUENCE64_CR:
90739666 /* word3 iocb=io_tag32 wqe=reserved */
90749667 wqe->xmit_sequence.rsvd3 = 0;
....@@ -9137,7 +9730,7 @@
91379730 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
91389731
91399732 if (phba->fcp_embed_io) {
9140
- struct lpfc_scsi_buf *lpfc_cmd;
9733
+ struct lpfc_io_buf *lpfc_cmd;
91419734 struct sli4_sge *sgl;
91429735 struct fcp_cmnd *fcp_cmnd;
91439736 uint32_t *ptr;
....@@ -9145,7 +9738,7 @@
91459738 /* 128 byte wqe support here */
91469739
91479740 lpfc_cmd = iocbq->context1;
9148
- sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9741
+ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
91499742 fcp_cmnd = lpfc_cmd->fcp_cmnd;
91509743
91519744 /* Word 0-2 - FCP_CMND */
....@@ -9201,7 +9794,7 @@
92019794 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
92029795
92039796 if (phba->fcp_embed_io) {
9204
- struct lpfc_scsi_buf *lpfc_cmd;
9797
+ struct lpfc_io_buf *lpfc_cmd;
92059798 struct sli4_sge *sgl;
92069799 struct fcp_cmnd *fcp_cmnd;
92079800 uint32_t *ptr;
....@@ -9209,7 +9802,7 @@
92099802 /* 128 byte wqe support here */
92109803
92119804 lpfc_cmd = iocbq->context1;
9212
- sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9805
+ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
92139806 fcp_cmnd = lpfc_cmd->fcp_cmnd;
92149807
92159808 /* Word 0-2 - FCP_CMND */
....@@ -9258,7 +9851,7 @@
92589851 /* Note, word 10 is already initialized to 0 */
92599852
92609853 if (phba->fcp_embed_io) {
9261
- struct lpfc_scsi_buf *lpfc_cmd;
9854
+ struct lpfc_io_buf *lpfc_cmd;
92629855 struct sli4_sge *sgl;
92639856 struct fcp_cmnd *fcp_cmnd;
92649857 uint32_t *ptr;
....@@ -9266,7 +9859,7 @@
92669859 /* 128 byte wqe support here */
92679860
92689861 lpfc_cmd = iocbq->context1;
9269
- sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
9862
+ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
92709863 fcp_cmnd = lpfc_cmd->fcp_cmnd;
92719864
92729865 /* Word 0-2 - FCP_CMND */
....@@ -9304,7 +9897,7 @@
93049897 /* word6 context tag copied in memcpy */
93059898 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
93069899 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9307
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9900
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
93089901 "2015 Invalid CT %x command 0x%x\n",
93099902 ct, iocbq->iocb.ulpCommand);
93109903 return IOCB_ERROR;
....@@ -9417,7 +10010,7 @@
941710010 * we re-construct this WQE here based on information in
941810011 * iocbq from scratch.
941910012 */
9420
- memset(wqe, 0, sizeof(union lpfc_wqe));
10013
+ memset(wqe, 0, sizeof(*wqe));
942110014 /* OX_ID is invariable to who sent ABTS to CT exchange */
942210015 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
942310016 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
....@@ -9464,6 +10057,15 @@
946410057
946510058 break;
946610059 case CMD_SEND_FRAME:
10060
+ bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10061
+ bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
10062
+ bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
10063
+ bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10064
+ bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10065
+ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10066
+ bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10067
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10068
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
946710069 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
946810070 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
946910071 return 0;
....@@ -9474,7 +10076,7 @@
947410076 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
947510077 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
947610078 default:
9477
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10079
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
947810080 "2014 Invalid command 0x%x\n",
947910081 iocbq->iocb.ulpCommand);
948010082 return IOCB_ERROR;
....@@ -9509,7 +10111,7 @@
950910111 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
951010112 * an iocb command to an HBA with SLI-4 interface spec.
951110113 *
9512
- * This function is called with hbalock held. The function will return success
10114
+ * This function is called with ringlock held. The function will return success
951310115 * after it successfully submit the iocb to firmware or after adding to the
951410116 * txq.
951510117 **/
....@@ -9525,10 +10127,7 @@
952510127 /* Get the WQ */
952610128 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
952710129 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9528
- if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
9529
- wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
9530
- else
9531
- wq = phba->sli4_hba.oas_wq;
10130
+ wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
953210131 } else {
953310132 wq = phba->sli4_hba.els_wq;
953410133 }
....@@ -9540,7 +10139,7 @@
954010139 * The WQE can be either 64 or 128 bytes,
954110140 */
954210141
9543
- lockdep_assert_held(&phba->hbalock);
10142
+ lockdep_assert_held(&pring->ring_lock);
954410143
954510144 if (piocb->sli4_xritag == NO_XRI) {
954610145 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
....@@ -9598,7 +10197,7 @@
959810197 return 0;
959910198 }
960010199
9601
-/**
10200
+/*
960210201 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
960310202 *
960410203 * This routine wraps the actual lockless version for issuing IOCB function
....@@ -9639,7 +10238,7 @@
963910238 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
964010239 break;
964110240 default:
9642
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10241
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
964310242 "1419 Invalid HBA PCI-device group: 0x%x\n",
964410243 dev_grp);
964510244 return -ENODEV;
....@@ -9662,29 +10261,20 @@
966210261 struct lpfc_sli_ring *
966310262 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
966410263 {
10264
+ struct lpfc_io_buf *lpfc_cmd;
10265
+
966510266 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
9666
- if (!(phba->cfg_fof) ||
9667
- (!(piocb->iocb_flag & LPFC_IO_FOF))) {
9668
- if (unlikely(!phba->sli4_hba.fcp_wq))
9669
- return NULL;
9670
- /*
9671
- * for abort iocb hba_wqidx should already
9672
- * be setup based on what work queue we used.
9673
- */
9674
- if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9675
- piocb->hba_wqidx =
9676
- lpfc_sli4_scmd_to_wqidx_distr(phba,
9677
- piocb->context1);
9678
- piocb->hba_wqidx = piocb->hba_wqidx %
9679
- phba->cfg_fcp_io_channel;
9680
- }
9681
- return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
9682
- } else {
9683
- if (unlikely(!phba->sli4_hba.oas_wq))
9684
- return NULL;
9685
- piocb->hba_wqidx = 0;
9686
- return phba->sli4_hba.oas_wq->pring;
10267
+ if (unlikely(!phba->sli4_hba.hdwq))
10268
+ return NULL;
10269
+ /*
10270
+ * for abort iocb hba_wqidx should already
10271
+ * be setup based on what work queue we used.
10272
+ */
10273
+ if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10274
+ lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10275
+ piocb->hba_wqidx = lpfc_cmd->hdwq_no;
968710276 }
10277
+ return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
968810278 } else {
968910279 if (unlikely(!phba->sli4_hba.els_wq))
969010280 return NULL;
....@@ -9696,7 +10286,7 @@
969610286 /**
969710287 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
969810288 * @phba: Pointer to HBA context object.
9699
- * @pring: Pointer to driver SLI ring object.
10289
+ * @ring_number: Ring number
970010290 * @piocb: Pointer to command iocb.
970110291 * @flag: Flag indicating if this command can be put into txq.
970210292 *
....@@ -9710,14 +10300,14 @@
971010300 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
971110301 struct lpfc_iocbq *piocb, uint32_t flag)
971210302 {
9713
- struct lpfc_hba_eq_hdl *hba_eq_hdl;
971410303 struct lpfc_sli_ring *pring;
9715
- struct lpfc_queue *fpeq;
9716
- struct lpfc_eqe *eqe;
10304
+ struct lpfc_queue *eq;
971710305 unsigned long iflags;
9718
- int rc, idx;
10306
+ int rc;
971910307
972010308 if (phba->sli_rev == LPFC_SLI_REV4) {
10309
+ eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10310
+
972110311 pring = lpfc_sli4_calc_ring(phba, piocb);
972210312 if (unlikely(pring == NULL))
972310313 return IOCB_ERROR;
....@@ -9726,33 +10316,7 @@
972610316 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
972710317 spin_unlock_irqrestore(&pring->ring_lock, iflags);
972810318
9729
- if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
9730
- idx = piocb->hba_wqidx;
9731
- hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
9732
-
9733
- if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
9734
-
9735
- /* Get associated EQ with this index */
9736
- fpeq = phba->sli4_hba.hba_eq[idx];
9737
-
9738
- /* Turn off interrupts from this EQ */
9739
- phba->sli4_hba.sli4_eq_clr_intr(fpeq);
9740
-
9741
- /*
9742
- * Process all the events on FCP EQ
9743
- */
9744
- while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9745
- lpfc_sli4_hba_handle_eqe(phba,
9746
- eqe, idx);
9747
- fpeq->EQ_processed++;
9748
- }
9749
-
9750
- /* Always clear and re-arm the EQ */
9751
- phba->sli4_hba.sli4_eq_release(fpeq,
9752
- LPFC_QUEUE_REARM);
9753
- }
9754
- atomic_inc(&hba_eq_hdl->hba_eq_in_use);
9755
- }
10319
+ lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
975610320 } else {
975710321 /* For now, SLI2/3 will still use hbalock */
975810322 spin_lock_irqsave(&phba->hbalock, iflags);
....@@ -9806,6 +10370,32 @@
980610370 pring->prt[0].type = phba->cfg_multi_ring_type;
980710371 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
980810372 return 0;
10373
+}
10374
+
10375
+static void
10376
+lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
10377
+ struct lpfc_nodelist *ndlp)
10378
+{
10379
+ unsigned long iflags;
10380
+ struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
10381
+
10382
+ spin_lock_irqsave(&phba->hbalock, iflags);
10383
+ if (!list_empty(&evtp->evt_listp)) {
10384
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
10385
+ return;
10386
+ }
10387
+
10388
+ /* Incrementing the reference count until the queued work is done. */
10389
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
10390
+ if (!evtp->evt_arg1) {
10391
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
10392
+ return;
10393
+ }
10394
+ evtp->evt = LPFC_EVT_RECOVER_PORT;
10395
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
10396
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
10397
+
10398
+ lpfc_worker_wake_up(phba);
980910399 }
981010400
981110401 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
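
lpfc_sli_post_recovery_event(), added in the hunk above, follows a common deferral shape: skip the event if it is already queued, take a reference so the object survives until the worker runs, then publish it and wake the worker. A hedged, generic sketch of that shape with invented structures (the list head is assumed to have been set up with INIT_LIST_HEAD()):

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/wait.h>

struct demo_node {
        struct kref kref;
        struct list_head recovery_evt;          /* empty when not queued */
};

struct demo_hba {
        spinlock_t hbalock;
        struct list_head work_list;
        wait_queue_head_t work_waitq;
};

static void demo_post_recovery_event(struct demo_hba *hba,
                                     struct demo_node *ndlp)
{
        unsigned long flags;

        spin_lock_irqsave(&hba->hbalock, flags);
        if (!list_empty(&ndlp->recovery_evt)) {         /* already queued */
                spin_unlock_irqrestore(&hba->hbalock, flags);
                return;
        }
        kref_get(&ndlp->kref);                          /* pin until the worker is done */
        list_add_tail(&ndlp->recovery_evt, &hba->work_list);
        spin_unlock_irqrestore(&hba->hbalock, flags);

        wake_up(&hba->work_waitq);                      /* kick the worker thread */
}

Holding the reference across the hand-off is what lets the worker complete the event safely even if the originator drops its own reference first.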
....@@ -9871,7 +10461,6 @@
987110461 struct lpfc_nodelist *ndlp,
987210462 struct sli4_wcqe_xri_aborted *axri)
987310463 {
9874
- struct lpfc_vport *vport;
987510464 uint32_t ext_status = 0;
987610465
987710466 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
....@@ -9881,7 +10470,6 @@
988110470 return;
988210471 }
988310472
9884
- vport = ndlp->vport;
988510473 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
988610474 "3116 Port generated FCP XRI ABORT event on "
988710475 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
....@@ -9898,7 +10486,7 @@
989810486 ext_status = axri->parameter & IOERR_PARAM_MASK;
989910487 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
990010488 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
9901
- lpfc_sli_abts_recover_port(vport, ndlp);
10489
+ lpfc_sli_post_recovery_event(phba, ndlp);
990210490 }
990310491
990410492 /**
....@@ -9934,13 +10522,13 @@
993410522 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
993510523 if (evt_code == ASYNC_TEMP_WARN) {
993610524 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
9937
- lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10525
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
993810526 "0347 Adapter is very hot, please take "
993910527 "corrective action. temperature : %d Celsius\n",
994010528 (uint32_t) icmd->ulpContext);
994110529 } else {
994210530 temp_event_data.event_code = LPFC_NORMAL_TEMP;
9943
- lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10531
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
994410532 "0340 Adapter temperature is OK now. "
994510533 "temperature : %d Celsius\n",
994610534 (uint32_t) icmd->ulpContext);
....@@ -9957,7 +10545,7 @@
995710545 break;
995810546 default:
995910547 iocb_w = (uint32_t *) icmd;
9960
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10548
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
996110549 "0346 Ring %d handler: unexpected ASYNC_STATUS"
996210550 " evt_code 0x%x\n"
996310551 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
....@@ -10167,19 +10755,11 @@
1016710755 INIT_LIST_HEAD(&psli->mboxq);
1016810756 INIT_LIST_HEAD(&psli->mboxq_cmpl);
1016910757 /* Initialize list headers for txq and txcmplq as double linked lists */
10170
- for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
10171
- pring = phba->sli4_hba.fcp_wq[i]->pring;
10758
+ for (i = 0; i < phba->cfg_hdw_queue; i++) {
10759
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
1017210760 pring->flag = 0;
1017310761 pring->ringno = LPFC_FCP_RING;
10174
- INIT_LIST_HEAD(&pring->txq);
10175
- INIT_LIST_HEAD(&pring->txcmplq);
10176
- INIT_LIST_HEAD(&pring->iocb_continueq);
10177
- spin_lock_init(&pring->ring_lock);
10178
- }
10179
- for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
10180
- pring = phba->sli4_hba.nvme_wq[i]->pring;
10181
- pring->flag = 0;
10182
- pring->ringno = LPFC_FCP_RING;
10762
+ pring->txcmplq_cnt = 0;
1018310763 INIT_LIST_HEAD(&pring->txq);
1018410764 INIT_LIST_HEAD(&pring->txcmplq);
1018510765 INIT_LIST_HEAD(&pring->iocb_continueq);
....@@ -10188,25 +10768,17 @@
1018810768 pring = phba->sli4_hba.els_wq->pring;
1018910769 pring->flag = 0;
1019010770 pring->ringno = LPFC_ELS_RING;
10771
+ pring->txcmplq_cnt = 0;
1019110772 INIT_LIST_HEAD(&pring->txq);
1019210773 INIT_LIST_HEAD(&pring->txcmplq);
1019310774 INIT_LIST_HEAD(&pring->iocb_continueq);
1019410775 spin_lock_init(&pring->ring_lock);
1019510776
10196
- if (phba->cfg_nvme_io_channel) {
10777
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1019710778 pring = phba->sli4_hba.nvmels_wq->pring;
1019810779 pring->flag = 0;
1019910780 pring->ringno = LPFC_ELS_RING;
10200
- INIT_LIST_HEAD(&pring->txq);
10201
- INIT_LIST_HEAD(&pring->txcmplq);
10202
- INIT_LIST_HEAD(&pring->iocb_continueq);
10203
- spin_lock_init(&pring->ring_lock);
10204
- }
10205
-
10206
- if (phba->cfg_fof) {
10207
- pring = phba->sli4_hba.oas_wq->pring;
10208
- pring->flag = 0;
10209
- pring->ringno = LPFC_FCP_RING;
10781
+ pring->txcmplq_cnt = 0;
1021010782 INIT_LIST_HEAD(&pring->txq);
1021110783 INIT_LIST_HEAD(&pring->txcmplq);
1021210784 INIT_LIST_HEAD(&pring->iocb_continueq);
....@@ -10279,8 +10851,12 @@
1027910851 LPFC_MBOXQ_t *pmb;
1028010852 unsigned long iflag;
1028110853
10854
+ /* Disable softirqs, including timers from obtaining phba->hbalock */
10855
+ local_bh_disable();
10856
+
1028210857 /* Flush all the mailbox commands in the mbox system */
1028310858 spin_lock_irqsave(&phba->hbalock, iflag);
10859
+
1028410860 /* The pending mailbox command queue */
1028510861 list_splice_init(&phba->sli.mboxq, &completions);
1028610862 /* The outstanding active mailbox command */
....@@ -10292,6 +10868,9 @@
1029210868 /* The completed mailbox command queue */
1029310869 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
1029410870 spin_unlock_irqrestore(&phba->hbalock, iflag);
10871
+
10872
+ /* Enable softirqs again, done with phba->hbalock */
10873
+ local_bh_enable();
1029510874
1029610875 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
1029710876 while (!list_empty(&completions)) {
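
The local_bh_disable()/local_bh_enable() pair added around hbalock keeps softirq and timer callbacks that also take the same lock off this CPU while the mailbox lists are stolen onto a private head. A generic sketch of that pattern, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_hba {
        spinlock_t hbalock;
        struct list_head mboxq;
};

static void demo_flush_mbox(struct demo_hba *hba, struct list_head *done)
{
        unsigned long flags;

        local_bh_disable();                     /* block softirqs on this CPU */

        spin_lock_irqsave(&hba->hbalock, flags);
        list_splice_init(&hba->mboxq, done);    /* steal all pending entries */
        spin_unlock_irqrestore(&hba->hbalock, flags);

        local_bh_enable();                      /* softirqs may run again */

        /* The caller completes everything on "done" without holding hbalock. */
}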
....@@ -10376,14 +10955,14 @@
1037610955 set_bit(LPFC_DATA_READY, &phba->data_flags);
1037710956 }
1037810957 prev_pring_flag = pring->flag;
10379
- spin_lock_irq(&pring->ring_lock);
10958
+ spin_lock(&pring->ring_lock);
1038010959 list_for_each_entry_safe(iocb, next_iocb,
1038110960 &pring->txq, list) {
1038210961 if (iocb->vport != vport)
1038310962 continue;
1038410963 list_move_tail(&iocb->list, &completions);
1038510964 }
10386
- spin_unlock_irq(&pring->ring_lock);
10965
+ spin_unlock(&pring->ring_lock);
1038710966 list_for_each_entry_safe(iocb, next_iocb,
1038810967 &pring->txcmplq, list) {
1038910968 if (iocb->vport != vport)
....@@ -10432,6 +11011,9 @@
1043211011
1043311012 lpfc_hba_down_prep(phba);
1043411013
11014
+ /* Disable softirqs, including timers from obtaining phba->hbalock */
11015
+ local_bh_disable();
11016
+
1043511017 lpfc_fabric_abort_hba(phba);
1043611018
1043711019 spin_lock_irqsave(&phba->hbalock, flags);
....@@ -10456,9 +11038,9 @@
1045611038 pring = qp->pring;
1045711039 if (!pring)
1045811040 continue;
10459
- spin_lock_irq(&pring->ring_lock);
11041
+ spin_lock(&pring->ring_lock);
1046011042 list_splice_init(&pring->txq, &completions);
10461
- spin_unlock_irq(&pring->ring_lock);
11043
+ spin_unlock(&pring->ring_lock);
1046211044 if (pring == phba->sli4_hba.els_wq->pring) {
1046311045 pring->flag |= LPFC_DEFERRED_RING_EVENT;
1046411046 /* Set the lpfc data pending flag */
....@@ -10484,6 +11066,9 @@
1048411066 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1048511067 kfree(buf_ptr);
1048611068 }
11069
+
11070
+ /* Enable softirqs again, done with phba->hbalock */
11071
+ local_bh_enable();
1048711072
1048811073 /* Return any active mbox cmds */
1048911074 del_timer_sync(&psli->mbox_tmo);
....@@ -10634,9 +11219,9 @@
1063411219 }
1063511220
1063611221 spin_unlock_irq(&phba->hbalock);
10637
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11222
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1063811223 "0402 Cannot find virtual addr for buffer tag on "
10639
- "ring %d Data x%lx x%p x%p x%x\n",
11224
+ "ring %d Data x%lx x%px x%px x%x\n",
1064011225 pring->ringno, (unsigned long) tag,
1064111226 slp->next, slp->prev, pring->postbufq_cnt);
1064211227
....@@ -10678,9 +11263,9 @@
1067811263 }
1067911264
1068011265 spin_unlock_irq(&phba->hbalock);
10681
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11266
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1068211267 "0410 Cannot find virtual addr for mapped buf on "
10683
- "ring %d Data x%llx x%p x%p x%x\n",
11268
+ "ring %d Data x%llx x%px x%px x%x\n",
1068411269 pring->ringno, (unsigned long long)phys,
1068511270 slp->next, slp->prev, pring->postbufq_cnt);
1068611271 return NULL;
....@@ -10735,7 +11320,7 @@
1073511320 abort_iocb = phba->sli.iocbq_lookup[abort_context];
1073611321
1073711322 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
10738
- "0327 Cannot abort els iocb %p "
11323
+ "0327 Cannot abort els iocb x%px "
1073911324 "with tag %x context %x, abort status %x, "
1074011325 "abort code %x\n",
1074111326 abort_iocb, abort_iotag, abort_context,
....@@ -10789,6 +11374,7 @@
1078911374 * request, this function issues abort out unconditionally. This function is
1079011375 * called with hbalock held. The function returns 0 when it fails due to
1079111376 * memory allocation failure or when the command iocb is an abort request.
11377
+ * The hbalock is asserted held in the code path calling this routine.
1079211378 **/
1079311379 static int
1079411380 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
....@@ -10801,8 +11387,6 @@
1080111387 int retval;
1080211388 unsigned long iflags;
1080311389 struct lpfc_nodelist *ndlp;
10804
-
10805
- lockdep_assert_held(&phba->hbalock);
1080611390
1080711391 /*
1080811392 * There are certain command types we don't want to abort. And we
....@@ -10956,100 +11540,6 @@
1095611540 }
1095711541
1095811542 /**
10959
- * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
10960
- * @phba: Pointer to HBA context object.
10961
- * @pring: Pointer to driver SLI ring object.
10962
- * @cmdiocb: Pointer to driver command iocb object.
10963
- *
10964
- * This function issues an abort iocb for the provided command iocb down to
10965
- * the port. Other than the case the outstanding command iocb is an abort
10966
- * request, this function issues abort out unconditionally. This function is
10967
- * called with hbalock held. The function returns 0 when it fails due to
10968
- * memory allocation failure or when the command iocb is an abort request.
10969
- **/
10970
-static int
10971
-lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10972
- struct lpfc_iocbq *cmdiocb)
10973
-{
10974
- struct lpfc_vport *vport = cmdiocb->vport;
10975
- struct lpfc_iocbq *abtsiocbp;
10976
- union lpfc_wqe128 *abts_wqe;
10977
- int retval;
10978
-
10979
- /*
10980
- * There are certain command types we don't want to abort. And we
10981
- * don't want to abort commands that are already in the process of
10982
- * being aborted.
10983
- */
10984
- if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10985
- cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
10986
- (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10987
- return 0;
10988
-
10989
- /* issue ABTS for this io based on iotag */
10990
- abtsiocbp = __lpfc_sli_get_iocbq(phba);
10991
- if (abtsiocbp == NULL)
10992
- return 0;
10993
-
10994
- /* This signals the response to set the correct status
10995
- * before calling the completion handler
10996
- */
10997
- cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10998
-
10999
- /* Complete prepping the abort wqe and issue to the FW. */
11000
- abts_wqe = &abtsiocbp->wqe;
11001
-
11002
- /* Clear any stale WQE contents */
11003
- memset(abts_wqe, 0, sizeof(union lpfc_wqe));
11004
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
11005
-
11006
- /* word 7 */
11007
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
11008
- bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
11009
- cmdiocb->iocb.ulpClass);
11010
-
11011
- /* word 8 - tell the FW to abort the IO associated with this
11012
- * outstanding exchange ID.
11013
- */
11014
- abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
11015
-
11016
- /* word 9 - this is the iotag for the abts_wqe completion. */
11017
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
11018
- abtsiocbp->iotag);
11019
-
11020
- /* word 10 */
11021
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
11022
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
11023
-
11024
- /* word 11 */
11025
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11026
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
11027
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
11028
-
11029
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11030
- abtsiocbp->iocb_flag |= LPFC_IO_NVME;
11031
- abtsiocbp->vport = vport;
11032
- abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
11033
- retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
11034
- if (retval) {
11035
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11036
- "6147 Failed abts issue_wqe with status x%x "
11037
- "for oxid x%x\n",
11038
- retval, cmdiocb->sli4_xritag);
11039
- lpfc_sli_release_iocbq(phba, abtsiocbp);
11040
- return retval;
11041
- }
11042
-
11043
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
11044
- "6148 Drv Abort NVME Request Issued for "
11045
- "ox_id x%x on reqtag x%x\n",
11046
- cmdiocb->sli4_xritag,
11047
- abtsiocbp->iotag);
11048
-
11049
- return retval;
11050
-}
11051
-
11052
-/**
1105311543 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
1105411544 * @phba: pointer to lpfc HBA data structure.
1105511545 *
....@@ -11105,17 +11595,24 @@
1110511595 uint16_t tgt_id, uint64_t lun_id,
1110611596 lpfc_ctx_cmd ctx_cmd)
1110711597 {
11108
- struct lpfc_scsi_buf *lpfc_cmd;
11598
+ struct lpfc_io_buf *lpfc_cmd;
11599
+ IOCB_t *icmd = NULL;
1110911600 int rc = 1;
1111011601
1111111602 if (iocbq->vport != vport)
1111211603 return rc;
1111311604
11114
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11115
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11605
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11606
+ !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
11607
+ iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
1111611608 return rc;
1111711609
11118
- lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11610
+ icmd = &iocbq->iocb;
11611
+ if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11612
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN)
11613
+ return rc;
11614
+
11615
+ lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
1111911616
1112011617 if (lpfc_cmd->pCmd == NULL)
1112111618 return rc;
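
The validation helper now screens out abort/close commands first and then recovers the surrounding lpfc_io_buf from the embedded iocb with container_of(). The pointer arithmetic behind that lookup, shown with stand-in types rather than the driver's real definitions:

#include <linux/kernel.h>
#include <linux/list.h>

struct demo_iocbq {
        struct list_head list;
        unsigned int iocb_flag;
};

struct demo_io_buf {
        void *pCmd;                             /* upper-layer command, if any */
        struct demo_iocbq cur_iocbq;            /* embedded member */
};

/* Given a pointer to the embedded iocb, return the buffer that contains it. */
static struct demo_io_buf *demo_iocbq_to_buf(struct demo_iocbq *iocbq)
{
        return container_of(iocbq, struct demo_io_buf, cur_iocbq);
}

container_of() is purely compile-time offset arithmetic, so the lookup is valid only for iocbs that really are embedded in a demo_io_buf, which is why the caller filters out the non-FCP cases first.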
....@@ -11244,7 +11741,7 @@
1124411741 int i;
1124511742
1124611743 /* all I/Os are in process of being flushed */
11247
- if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
11744
+ if (phba->hba_flag & HBA_IOQ_FLUSH)
1124811745 return errcnt;
1124911746
1125011747 for (i = 1; i <= phba->sli.last_iotag; i++) {
....@@ -11321,7 +11818,7 @@
1132111818 * @pring: Pointer to driver SLI ring object.
1132211819 * @tgt_id: SCSI ID of the target.
1132311820 * @lun_id: LUN ID of the scsi device.
11324
- * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11821
+ * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
1132511822 *
1132611823 * This function sends an abort command for every SCSI command
1132711824 * associated with the given virtual port pending on the ring
....@@ -11342,19 +11839,19 @@
1134211839 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
1134311840 {
1134411841 struct lpfc_hba *phba = vport->phba;
11345
- struct lpfc_scsi_buf *lpfc_cmd;
11842
+ struct lpfc_io_buf *lpfc_cmd;
1134611843 struct lpfc_iocbq *abtsiocbq;
1134711844 struct lpfc_nodelist *ndlp;
1134811845 struct lpfc_iocbq *iocbq;
1134911846 IOCB_t *icmd;
1135011847 int sum, i, ret_val;
1135111848 unsigned long iflags;
11352
- struct lpfc_sli_ring *pring_s4;
11849
+ struct lpfc_sli_ring *pring_s4 = NULL;
1135311850
1135411851 spin_lock_irqsave(&phba->hbalock, iflags);
1135511852
1135611853 /* all I/Os are in process of being flushed */
11357
- if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
11854
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
1135811855 spin_unlock_irqrestore(&phba->hbalock, iflags);
1135911856 return 0;
1136011857 }
....@@ -11367,17 +11864,46 @@
1136711864 cmd) != 0)
1136811865 continue;
1136911866
11867
+ /* Guard against IO completion being called at same time */
11868
+ lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11869
+ spin_lock(&lpfc_cmd->buf_lock);
11870
+
11871
+ if (!lpfc_cmd->pCmd) {
11872
+ spin_unlock(&lpfc_cmd->buf_lock);
11873
+ continue;
11874
+ }
11875
+
11876
+ if (phba->sli_rev == LPFC_SLI_REV4) {
11877
+ pring_s4 =
11878
+ phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
11879
+ if (!pring_s4) {
11880
+ spin_unlock(&lpfc_cmd->buf_lock);
11881
+ continue;
11882
+ }
11883
+ /* Note: both hbalock and ring_lock must be set here */
11884
+ spin_lock(&pring_s4->ring_lock);
11885
+ }
11886
+
1137011887 /*
1137111888 * If the iocbq is already being aborted, don't take a second
1137211889 * action, but do count it.
1137311890 */
11374
- if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11891
+ if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11892
+ !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11893
+ if (phba->sli_rev == LPFC_SLI_REV4)
11894
+ spin_unlock(&pring_s4->ring_lock);
11895
+ spin_unlock(&lpfc_cmd->buf_lock);
1137511896 continue;
11897
+ }
1137611898
1137711899 /* issue ABTS for this IOCB based on iotag */
1137811900 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11379
- if (abtsiocbq == NULL)
11901
+ if (!abtsiocbq) {
11902
+ if (phba->sli_rev == LPFC_SLI_REV4)
11903
+ spin_unlock(&pring_s4->ring_lock);
11904
+ spin_unlock(&lpfc_cmd->buf_lock);
1138011905 continue;
11906
+ }
1138111907
1138211908 icmd = &iocbq->iocb;
1138311909 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
....@@ -11398,7 +11924,6 @@
1139811924 if (iocbq->iocb_flag & LPFC_IO_FOF)
1139911925 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
1140011926
11401
- lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
1140211927 ndlp = lpfc_cmd->rdata->pnode;
1140311928
1140411929 if (lpfc_is_link_up(phba) &&
....@@ -11417,11 +11942,6 @@
1141711942 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
1141811943
1141911944 if (phba->sli_rev == LPFC_SLI_REV4) {
11420
- pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq);
11421
- if (!pring_s4)
11422
- continue;
11423
- /* Note: both hbalock and ring_lock must be set here */
11424
- spin_lock(&pring_s4->ring_lock);
1142511945 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
1142611946 abtsiocbq, 0);
1142711947 spin_unlock(&pring_s4->ring_lock);
....@@ -11430,6 +11950,7 @@
1143011950 abtsiocbq, 0);
1143111951 }
1143211952
11953
+ spin_unlock(&lpfc_cmd->buf_lock);
1143311954
1143411955 if (ret_val == IOCB_ERROR)
1143511956 __lpfc_sli_release_iocbq(phba, abtsiocbq);
....@@ -11464,7 +11985,7 @@
1146411985 {
1146511986 wait_queue_head_t *pdone_q;
1146611987 unsigned long iflags;
11467
- struct lpfc_scsi_buf *lpfc_cmd;
11988
+ struct lpfc_io_buf *lpfc_cmd;
1146811989
1146911990 spin_lock_irqsave(&phba->hbalock, iflags);
1147011991 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
....@@ -11493,9 +12014,12 @@
1149312014 /* Set the exchange busy flag for task management commands */
1149412015 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
1149512016 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11496
- lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
12017
+ lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
1149712018 cur_iocbq);
11498
- lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
12019
+ if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
12020
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12021
+ else
12022
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
1149912023 }
1150012024
1150112025 pdone_q = cmdiocbq->context_un.wait_queue;
....@@ -11534,7 +12058,7 @@
1153412058 /**
1153512059 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
1153612060 * @phba: Pointer to HBA context object..
11537
- * @pring: Pointer to sli ring.
12061
+ * @ring_number: Ring number
1153812062 * @piocb: Pointer to command iocb.
1153912063 * @prspiocbq: Pointer to response iocb.
1154012064 * @timeout: Timeout in number of seconds.
....@@ -11640,12 +12164,12 @@
1164012164 * completed. Not that it completed successfully.
1164112165 * */
1164212166 } else if (timeleft == 0) {
11643
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12167
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1164412168 "0338 IOCB wait timeout error - no "
1164512169 "wake response Data x%x\n", timeout);
1164612170 retval = IOCB_TIMEDOUT;
1164712171 } else {
11648
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12172
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1164912173 "0330 IOCB wake NOT set, "
1165012174 "Data x%x x%lx\n",
1165112175 timeout, (timeleft / jiffies));
....@@ -11754,6 +12278,7 @@
1175412278 /**
1175512279 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
1175612280 * @phba: Pointer to HBA context.
12281
+ * @mbx_action: Mailbox shutdown options.
1175712282 *
1175812283 * This function is called to shutdown the driver's mailbox sub-system.
1175912284 * It first marks the mailbox sub-system is in a block state to prevent
....@@ -11780,6 +12305,9 @@
1178012305 }
1178112306 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
1178212307
12308
+ /* Disable softirqs, including timers from obtaining phba->hbalock */
12309
+ local_bh_disable();
12310
+
1178312311 spin_lock_irq(&phba->hbalock);
1178412312 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
1178512313
....@@ -11793,6 +12321,9 @@
1179312321 1000) + jiffies;
1179412322 spin_unlock_irq(&phba->hbalock);
1179512323
12324
+ /* Enable softirqs again, done with phba->hbalock */
12325
+ local_bh_enable();
12326
+
1179612327 while (phba->sli.mbox_active) {
1179712328 /* Check active mailbox complete status every 2ms */
1179812329 msleep(2);
....@@ -11802,8 +12333,12 @@
1180212333 */
1180312334 break;
1180412335 }
11805
- } else
12336
+ } else {
1180612337 spin_unlock_irq(&phba->hbalock);
12338
+
12339
+ /* Enable softirqs again, done with phba->hbalock */
12340
+ local_bh_enable();
12341
+ }
1180712342
1180812343 lpfc_sli_mbox_sys_flush(phba);
1180912344 }
....@@ -11878,6 +12413,7 @@
1187812413 uint32_t uerr_sta_hi, uerr_sta_lo;
1187912414 uint32_t if_type, portsmphr;
1188012415 struct lpfc_register portstat_reg;
12416
+ u32 logmask;
1188112417
1188212418 /*
1188312419 * For now, use the SLI4 device internal unrecoverable error
....@@ -11897,7 +12433,7 @@
1189712433 }
1189812434 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
1189912435 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
11900
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12436
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1190112437 "1423 HBA Unrecoverable error: "
1190212438 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
1190312439 "ue_mask_lo_reg=0x%x, "
....@@ -11928,7 +12464,12 @@
1192812464 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1192912465 phba->work_status[1] =
1193012466 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
11931
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12467
+ logmask = LOG_TRACE_EVENT;
12468
+ if (phba->work_status[0] ==
12469
+ SLIPORT_ERR1_REG_ERR_CODE_2 &&
12470
+ phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
12471
+ logmask = LOG_SLI;
12472
+ lpfc_printf_log(phba, KERN_ERR, logmask,
1193212473 "2885 Port Status Event: "
1193312474 "port status reg 0x%x, "
1193412475 "port smphr reg 0x%x, "
....@@ -11944,7 +12485,7 @@
1194412485 break;
1194512486 case LPFC_SLI_INTF_IF_TYPE_1:
1194612487 default:
11947
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12488
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1194812489 "2886 HBA Error Attention on unsupported "
1194912490 "if type %d.", if_type);
1195012491 return 1;
....@@ -12008,7 +12549,7 @@
1200812549 ha_copy = lpfc_sli4_eratt_read(phba);
1200912550 break;
1201012551 default:
12011
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12552
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1201212553 "0299 Invalid SLI revision (%d)\n",
1201312554 phba->sli_rev);
1201412555 ha_copy = 0;
....@@ -12241,8 +12782,7 @@
1224112782 * Stray Mailbox Interrupt, mbxCommand <cmd>
1224212783 * mbxStatus <status>
1224312784 */
12244
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12245
- LOG_SLI,
12785
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1224612786 "(%d):0304 Stray Mailbox "
1224712787 "Interrupt mbxCommand x%x "
1224812788 "mbxStatus x%x\n",
....@@ -12260,10 +12800,10 @@
1226012800 lpfc_sli_pcimem_bcopy(mbox, pmbox,
1226112801 MAILBOX_CMD_SIZE);
1226212802 if (pmb->out_ext_byte_len &&
12263
- pmb->context2)
12803
+ pmb->ctx_buf)
1226412804 lpfc_sli_pcimem_bcopy(
1226512805 phba->mbox_ext,
12266
- pmb->context2,
12806
+ pmb->ctx_buf,
1226712807 pmb->out_ext_byte_len);
1226812808 }
1226912809 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
....@@ -12278,9 +12818,9 @@
1227812818
1227912819 if (!pmbox->mbxStatus) {
1228012820 mp = (struct lpfc_dmabuf *)
12281
- (pmb->context1);
12821
+ (pmb->ctx_buf);
1228212822 ndlp = (struct lpfc_nodelist *)
12283
- pmb->context2;
12823
+ pmb->ctx_ndlp;
1228412824
1228512825 /* Reg_LOGIN of dflt RPI was
1228612826 * successful. new lets get
....@@ -12293,8 +12833,8 @@
1229312833 pmb);
1229412834 pmb->mbox_cmpl =
1229512835 lpfc_mbx_cmpl_dflt_rpi;
12296
- pmb->context1 = mp;
12297
- pmb->context2 = ndlp;
12836
+ pmb->ctx_buf = mp;
12837
+ pmb->ctx_ndlp = ndlp;
1229812838 pmb->vport = vport;
1229912839 rc = lpfc_sli_issue_mbox(phba,
1230012840 pmb,
....@@ -12302,7 +12842,7 @@
1230212842 if (rc != MBX_BUSY)
1230312843 lpfc_printf_log(phba,
1230412844 KERN_ERR,
12305
- LOG_MBOX | LOG_SLI,
12845
+ LOG_TRACE_EVENT,
1230612846 "0350 rc should have"
1230712847 "been MBX_BUSY\n");
1230812848 if (rc != MBX_NOT_FINISHED)
....@@ -12331,8 +12871,9 @@
1233112871 MBX_NOWAIT);
1233212872 } while (rc == MBX_NOT_FINISHED);
1233312873 if (rc != MBX_SUCCESS)
12334
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12335
- LOG_SLI, "0349 rc should be "
12874
+ lpfc_printf_log(phba, KERN_ERR,
12875
+ LOG_TRACE_EVENT,
12876
+ "0349 rc should be "
1233612877 "MBX_SUCCESS\n");
1233712878 }
1233812879
....@@ -12557,35 +13098,6 @@
1255713098 } /* lpfc_sli_intr_handler */
1255813099
1255913100 /**
12560
- * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
12561
- * @phba: pointer to lpfc hba data structure.
12562
- *
12563
- * This routine is invoked by the worker thread to process all the pending
12564
- * SLI4 FCP abort XRI events.
12565
- **/
12566
-void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12567
-{
12568
- struct lpfc_cq_event *cq_event;
12569
-
12570
- /* First, declare the fcp xri abort event has been handled */
12571
- spin_lock_irq(&phba->hbalock);
12572
- phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
12573
- spin_unlock_irq(&phba->hbalock);
12574
- /* Now, handle all the fcp xri abort events */
12575
- while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
12576
- /* Get the first event from the head of the event queue */
12577
- spin_lock_irq(&phba->hbalock);
12578
- list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
12579
- cq_event, struct lpfc_cq_event, list);
12580
- spin_unlock_irq(&phba->hbalock);
12581
- /* Notify aborted XRI for FCP work queue */
12582
- lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12583
- /* Free the event processed back to the free pool */
12584
- lpfc_sli4_cq_event_release(phba, cq_event);
12585
- }
12586
-}
12587
-
12588
-/**
1258913101 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
1259013102 * @phba: pointer to lpfc hba data structure.
1259113103 *
....@@ -12595,23 +13107,30 @@
1259513107 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
1259613108 {
1259713109 struct lpfc_cq_event *cq_event;
13110
+ unsigned long iflags;
1259813111
1259913112 /* First, declare the els xri abort event has been handled */
12600
- spin_lock_irq(&phba->hbalock);
13113
+ spin_lock_irqsave(&phba->hbalock, iflags);
1260113114 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12602
- spin_unlock_irq(&phba->hbalock);
13115
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
13116
+
1260313117 /* Now, handle all the els xri abort events */
13118
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
1260413119 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
1260513120 /* Get the first event from the head of the event queue */
12606
- spin_lock_irq(&phba->hbalock);
1260713121 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
1260813122 cq_event, struct lpfc_cq_event, list);
12609
- spin_unlock_irq(&phba->hbalock);
13123
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13124
+ iflags);
1261013125 /* Notify aborted XRI for ELS work queue */
1261113126 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13127
+
1261213128 /* Free the event processed back to the free pool */
1261313129 lpfc_sli4_cq_event_release(phba, cq_event);
13130
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13131
+ iflags);
1261413132 }
13133
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
1261513134 }
1261613135
1261713136 /**
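
The reworked ELS XRI-abort handler above drains its work queue under a dedicated list lock, dropping the lock around each callout and re-taking it before looking for more entries. A generic sketch of that drain loop, with made-up names:

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_event {
        struct list_head list;
};

static void demo_drain_events(spinlock_t *lock, struct list_head *queue)
{
        struct demo_event *evt;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        while (!list_empty(queue)) {
                evt = list_first_entry(queue, struct demo_event, list);
                list_del_init(&evt->list);
                spin_unlock_irqrestore(lock, flags);

                /* process evt here without the list lock held */

                spin_lock_irqsave(lock, flags);
        }
        spin_unlock_irqrestore(lock, flags);
}

Keeping the lock held only across the list manipulation is what allows the per-event processing to take other locks or sleep-free slow paths without risking deadlock against producers of the queue.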
....@@ -12733,7 +13252,7 @@
1273313252 /**
1273413253 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
1273513254 * @phba: Pointer to HBA context object.
12736
- * @wcqe: Pointer to work-queue completion queue entry.
13255
+ * @irspiocbq: Pointer to work-queue completion queue entry.
1273713256 *
1273813257 * This routine handles an ELS work-queue completion event and construct
1273913258 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
....@@ -12755,13 +13274,11 @@
1275513274 return NULL;
1275613275
1275713276 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
12758
- spin_lock_irqsave(&pring->ring_lock, iflags);
1275913277 pring->stats.iocb_event++;
1276013278 /* Look up the ELS command IOCB and create pseudo response IOCB */
1276113279 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
1276213280 bf_get(lpfc_wcqe_c_request_tag, wcqe));
1276313281 if (unlikely(!cmdiocbq)) {
12764
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
1276513282 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1276613283 "0386 ELS complete with no corresponding "
1276713284 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
....@@ -12771,6 +13288,7 @@
1277113288 return NULL;
1277213289 }
1277313290
13291
+ spin_lock_irqsave(&pring->ring_lock, iflags);
1277413292 /* Put the iocb back on the txcmplq */
1277513293 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
1277613294 spin_unlock_irqrestore(&pring->ring_lock, iflags);
....@@ -12789,7 +13307,7 @@
1278913307 /* Allocate a new internal CQ_EVENT entry */
1279013308 cq_event = lpfc_sli4_cq_event_alloc(phba);
1279113309 if (!cq_event) {
12792
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13310
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1279313311 "0602 Failed to alloc CQ_EVENT entry\n");
1279413312 return NULL;
1279513313 }
....@@ -12800,11 +13318,11 @@
1280013318 }
1280113319
1280213320 /**
12803
- * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
13321
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
1280413322 * @phba: Pointer to HBA context object.
12805
- * @cqe: Pointer to mailbox completion queue entry.
13323
+ * @mcqe: Pointer to mailbox completion queue entry.
1280613324 *
12807
- * This routine process a mailbox completion queue entry with asynchrous
13325
+ * This routine process a mailbox completion queue entry with asynchronous
1280813326 * event.
1280913327 *
1281013328 * Return: true if work posted to worker thread, otherwise false.
....@@ -12823,9 +13341,13 @@
1282313341 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
1282413342 if (!cq_event)
1282513343 return false;
12826
- spin_lock_irqsave(&phba->hbalock, iflags);
13344
+
13345
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
1282713346 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13347
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
13348
+
1282813349 /* Set the async event flag */
13350
+ spin_lock_irqsave(&phba->hbalock, iflags);
1282913351 phba->hba_flag |= ASYNC_EVENT;
1283013352 spin_unlock_irqrestore(&phba->hbalock, iflags);
1283113353
....@@ -12835,7 +13357,7 @@
1283513357 /**
1283613358 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
1283713359 * @phba: Pointer to HBA context object.
12838
- * @cqe: Pointer to mailbox completion queue entry.
13360
+ * @mcqe: Pointer to mailbox completion queue entry.
1283913361 *
1284013362 * This routine processes a mailbox completion queue entry with a mailbox
1284113363 * completion event.
....@@ -12864,7 +13386,7 @@
1286413386 spin_lock_irqsave(&phba->hbalock, iflags);
1286513387 pmb = phba->sli.mbox_active;
1286613388 if (unlikely(!pmb)) {
12867
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13389
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1286813390 "1832 No pending MBOX command to handle\n");
1286913391 spin_unlock_irqrestore(&phba->hbalock, iflags);
1287013392 goto out_no_mqe_complete;
....@@ -12900,21 +13422,22 @@
1290013422 mcqe_status,
1290113423 pmbox->un.varWords[0], 0);
1290213424 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
12903
- mp = (struct lpfc_dmabuf *)(pmb->context1);
12904
- ndlp = (struct lpfc_nodelist *)pmb->context2;
13425
+ mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13426
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
1290513427 /* Reg_LOGIN of dflt RPI was successful. Now lets get
1290613428 * RID of the PPI using the same mbox buffer.
1290713429 */
1290813430 lpfc_unreg_login(phba, vport->vpi,
1290913431 pmbox->un.varWords[0], pmb);
1291013432 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
12911
- pmb->context1 = mp;
12912
- pmb->context2 = ndlp;
13433
+ pmb->ctx_buf = mp;
13434
+ pmb->ctx_ndlp = ndlp;
1291313435 pmb->vport = vport;
1291413436 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1291513437 if (rc != MBX_BUSY)
12916
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12917
- LOG_SLI, "0385 rc should "
13438
+ lpfc_printf_log(phba, KERN_ERR,
13439
+ LOG_TRACE_EVENT,
13440
+ "0385 rc should "
1291813441 "have been MBX_BUSY\n");
1291913442 if (rc != MBX_NOT_FINISHED)
1292013443 goto send_current_mbox;
....@@ -12955,19 +13478,23 @@
1295513478 /**
1295613479 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
1295713480 * @phba: Pointer to HBA context object.
13481
+ * @cq: Pointer to associated CQ
1295813482 * @cqe: Pointer to mailbox completion queue entry.
1295913483 *
1296013484 * This routine processes a mailbox completion queue entry; it invokes the
12961
- * proper mailbox complete handling or asynchrous event handling routine
13485
+ * proper mailbox complete handling or asynchronous event handling routine
1296213486 * according to the MCQE's async bit.
1296313487 *
1296413488 * Return: true if work posted to worker thread, otherwise false.
1296513489 **/
1296613490 static bool
12967
-lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
13491
+lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13492
+ struct lpfc_cqe *cqe)
1296813493 {
1296913494 struct lpfc_mcqe mcqe;
1297013495 bool workposted;
13496
+
13497
+ cq->CQ_mbox++;
1297113498
1297213499 /* Copy the mailbox MCQE and convert endian order as needed */
1297313500 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
....@@ -12999,7 +13526,6 @@
1299913526 struct lpfc_sli_ring *pring = cq->pring;
1300013527 int txq_cnt = 0;
1300113528 int txcmplq_cnt = 0;
13002
- int fcp_txcmplq_cnt = 0;
1300313529
1300413530 /* Check for response status */
1300513531 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
....@@ -13019,11 +13545,10 @@
1301913545 txq_cnt++;
1302013546 if (!list_empty(&pring->txcmplq))
1302113547 txcmplq_cnt++;
13022
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13548
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1302313549 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13024
- "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
13550
+ "els_txcmplq_cnt=%d\n",
1302513551 txq_cnt, phba->iocb_cnt,
13026
- fcp_txcmplq_cnt,
1302713552 txcmplq_cnt);
1302813553 return false;
1302913554 }
....@@ -13086,44 +13611,35 @@
1308613611 unsigned long iflags;
1308713612
1308813613 switch (cq->subtype) {
13089
- case LPFC_FCP:
13090
- cq_event = lpfc_cq_event_setup(
13091
- phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13092
- if (!cq_event)
13093
- return false;
13094
- spin_lock_irqsave(&phba->hbalock, iflags);
13095
- list_add_tail(&cq_event->list,
13096
- &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
13097
- /* Set the fcp xri abort event flag */
13098
- phba->hba_flag |= FCP_XRI_ABORT_EVENT;
13099
- spin_unlock_irqrestore(&phba->hbalock, iflags);
13100
- workposted = true;
13614
+ case LPFC_IO:
13615
+ lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13616
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13617
+ /* Notify aborted XRI for NVME work queue */
13618
+ if (phba->nvmet_support)
13619
+ lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13620
+ }
13621
+ workposted = false;
1310113622 break;
1310213623 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
1310313624 case LPFC_ELS:
13104
- cq_event = lpfc_cq_event_setup(
13105
- phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13106
- if (!cq_event)
13107
- return false;
13108
- spin_lock_irqsave(&phba->hbalock, iflags);
13625
+ cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
13626
+ if (!cq_event) {
13627
+ workposted = false;
13628
+ break;
13629
+ }
13630
+ cq_event->hdwq = cq->hdwq;
13631
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13632
+ iflags);
1310913633 list_add_tail(&cq_event->list,
1311013634 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
1311113635 /* Set the els xri abort event flag */
1311213636 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13113
- spin_unlock_irqrestore(&phba->hbalock, iflags);
13637
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13638
+ iflags);
1311413639 workposted = true;
1311513640 break;
13116
- case LPFC_NVME:
13117
- /* Notify aborted XRI for NVME work queue */
13118
- if (phba->nvmet_support)
13119
- lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13120
- else
13121
- lpfc_sli4_nvme_xri_aborted(phba, wcqe);
13122
-
13123
- workposted = false;
13124
- break;
1312513641 default:
13126
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13642
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1312713643 "0603 Invalid CQ subtype %d: "
1312813644 "%08x %08x %08x %08x\n",
1312913645 cq->subtype, wcqe->word0, wcqe->parameter,
....@@ -13133,6 +13649,8 @@
1313313649 }
1313413650 return workposted;
1313513651 }
13652
+
13653
+#define FC_RCTL_MDS_DIAGS 0xF4
1313613654
1313713655 /**
1313813656 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
....@@ -13169,8 +13687,9 @@
1316913687 status = bf_get(lpfc_rcqe_status, rcqe);
1317013688 switch (status) {
1317113689 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13172
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13690
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1317313691 "2537 Receive Frame Truncated!!\n");
13692
+ fallthrough;
1317413693 case FC_STATUS_RQ_SUCCESS:
1317513694 spin_lock_irqsave(&phba->hbalock, iflags);
1317613695 lpfc_sli4_rq_release(hrq, drq);
....@@ -13184,10 +13703,21 @@
1318413703 hrq->RQ_buf_posted--;
1318513704 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
1318613705
13187
- /* If a NVME LS event (type 0x28), treat it as Fast path */
1318813706 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
1318913707
13190
- /* save off the frame for the word thread to process */
13708
+ if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13709
+ fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13710
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
13711
+ /* Handle MDS Loopback frames */
13712
+ if (!(phba->pport->load_flag & FC_UNLOADING))
13713
+ lpfc_sli4_handle_mds_loopback(phba->pport,
13714
+ dma_buf);
13715
+ else
13716
+ lpfc_in_buf_free(phba, &dma_buf->dbuf);
13717
+ break;
13718
+ }
13719
+
13720
+ /* save off the frame for the work thread to process */
1319113721 list_add_tail(&dma_buf->cq_event.list,
1319213722 &phba->sli4_hba.sp_queue_event);
1319313723 /* Frame received */
....@@ -13198,7 +13728,7 @@
1319813728 case FC_STATUS_INSUFF_BUF_FRM_DISC:
1319913729 if (phba->nvmet_support) {
1320013730 tgtp = phba->targetport->private;
13201
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13731
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1320213732 "6402 RQE Error x%x, posted %d err_cnt "
1320313733 "%d: %x %x %x\n",
1320413734 status, hrq->RQ_buf_posted,
....@@ -13207,7 +13737,7 @@
1320713737 atomic_read(&tgtp->rcv_fcp_cmd_out),
1320813738 atomic_read(&tgtp->xmt_fcp_release));
1320913739 }
13210
- /* fallthrough */
13740
+ fallthrough;
1321113741
1321213742 case FC_STATUS_INSUFF_BUF_NEED_BUF:
1321313743 hrq->RQ_no_posted_buf++;
....@@ -13226,7 +13756,7 @@
1322613756 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
1322713757 * @phba: Pointer to HBA context object.
1322813758 * @cq: Pointer to the completion queue.
13229
- * @wcqe: Pointer to a completion queue entry.
13759
+ * @cqe: Pointer to a completion queue entry.
1323013760 *
1323113761 * This routine processes a slow-path work-queue or receive-queue completion queue
1323213762 * entry.
....@@ -13270,7 +13800,7 @@
1327013800 (struct lpfc_rcqe *)&cqevt);
1327113801 break;
1327213802 default:
13273
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13803
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1327413804 "0388 Not a valid WCQE code: x%x\n",
1327513805 bf_get(lpfc_cqe_code, &cqevt));
1327613806 break;
....@@ -13282,6 +13812,7 @@
1328213812 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
1328313813 * @phba: Pointer to HBA context object.
1328413814 * @eqe: Pointer to fast-path event queue entry.
13815
+ * @speq: Pointer to slow-path event queue.
1328513816 *
1328613817 * This routine processes an event queue entry from the slow-path event queue.
1328713818 * It will check the MajorCode and MinorCode to determine if this is for a
....@@ -13297,6 +13828,7 @@
1329713828 {
1329813829 struct lpfc_queue *cq = NULL, *childq;
1329913830 uint16_t cqid;
13831
+ int ret = 0;
1330013832
1330113833 /* Get the reference to the corresponding CQ */
1330213834 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
....@@ -13309,7 +13841,7 @@
1330913841 }
1331013842 if (unlikely(!cq)) {
1331113843 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13312
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13844
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1331313845 "0365 Slow-path CQ identifier "
1331413846 "(%d) does not exist\n", cqid);
1331513847 return;
....@@ -13318,88 +13850,203 @@
1331813850 /* Save EQ associated with this CQ */
1331913851 cq->assoc_qp = speq;
1332013852
13321
- if (!queue_work(phba->wq, &cq->spwork))
13322
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13323
- "0390 Cannot schedule soft IRQ "
13853
+ if (is_kdump_kernel())
13854
+ ret = queue_work(phba->wq, &cq->spwork);
13855
+ else
13856
+ ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
13857
+
13858
+ if (!ret)
13859
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13860
+ "0390 Cannot schedule queue work "
1332413861 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13325
- cqid, cq->queue_id, smp_processor_id());
13862
+ cqid, cq->queue_id, raw_smp_processor_id());
13863
+}
13864
+
13865
+/**
13866
+ * __lpfc_sli4_process_cq - Process elements of a CQ
13867
+ * @phba: Pointer to HBA context object.
13868
+ * @cq: Pointer to CQ to be processed
13869
+ * @handler: Routine to process each cqe
13870
+ * @delay: Pointer to usdelay to set in case of rescheduling of the handler
13871
+ * @poll_mode: Polling mode we were called from
13872
+ *
13873
+ * This routine processes completion queue entries in a CQ. While a valid
13874
+ * queue element is found, the handler is called. During processing checks
13875
+ * are made for periodic doorbell writes to let the hardware know of
13876
+ * element consumption.
13877
+ *
13878
+ * If the max limit on cqes to process is hit, or there are no more valid
13879
+ * entries, the loop stops. If we processed a sufficient number of elements,
13880
+ * meaning there is sufficient load, rather than rearming and generating
13881
+ * another interrupt, a cq rescheduling delay will be set. A delay of 0
13882
+ * indicates no rescheduling.
13883
+ *
13884
+ * Returns True if work scheduled, False otherwise.
13885
+ **/
13886
+static bool
13887
+__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13888
+ bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13889
+ struct lpfc_cqe *), unsigned long *delay,
13890
+ enum lpfc_poll_mode poll_mode)
13891
+{
13892
+ struct lpfc_cqe *cqe;
13893
+ bool workposted = false;
13894
+ int count = 0, consumed = 0;
13895
+ bool arm = true;
13896
+
13897
+ /* default - no reschedule */
13898
+ *delay = 0;
13899
+
13900
+ if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13901
+ goto rearm_and_exit;
13902
+
13903
+ /* Process all the entries to the CQ */
13904
+ cq->q_flag = 0;
13905
+ cqe = lpfc_sli4_cq_get(cq);
13906
+ while (cqe) {
13907
+ workposted |= handler(phba, cq, cqe);
13908
+ __lpfc_sli4_consume_cqe(phba, cq, cqe);
13909
+
13910
+ consumed++;
13911
+ if (!(++count % cq->max_proc_limit))
13912
+ break;
13913
+
13914
+ if (!(count % cq->notify_interval)) {
13915
+ phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13916
+ LPFC_QUEUE_NOARM);
13917
+ consumed = 0;
13918
+ cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
13919
+ }
13920
+
13921
+ if (count == LPFC_NVMET_CQ_NOTIFY)
13922
+ cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13923
+
13924
+ cqe = lpfc_sli4_cq_get(cq);
13925
+ }
13926
+ if (count >= phba->cfg_cq_poll_threshold) {
13927
+ *delay = 1;
13928
+ arm = false;
13929
+ }
13930
+
13931
+ /* Note: complete the irq_poll softirq before rearming CQ */
13932
+ if (poll_mode == LPFC_IRQ_POLL)
13933
+ irq_poll_complete(&cq->iop);
13934
+
13935
+ /* Track the max number of CQEs processed in 1 EQ */
13936
+ if (count > cq->CQ_max_cqe)
13937
+ cq->CQ_max_cqe = count;
13938
+
13939
+ cq->assoc_qp->EQ_cqe_cnt += count;
13940
+
13941
+ /* Catch the no cq entry condition */
13942
+ if (unlikely(count == 0))
13943
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13944
+ "0369 No entry from completion queue "
13945
+ "qid=%d\n", cq->queue_id);
13946
+
13947
+ xchg(&cq->queue_claimed, 0);
13948
+
13949
+rearm_and_exit:
13950
+ phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13951
+ arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
13952
+
13953
+ return workposted;
1332613954 }
1332713955
1332813956 /**
1332913957 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
13330
- * @phba: Pointer to HBA context object.
13958
+ * @cq: pointer to CQ to process
1333113959 *
13332
- * This routine process a event queue entry from the slow-path event queue.
13333
- * It will check the MajorCode and MinorCode to determine this is for a
13334
- * completion event on a completion queue, if not, an error shall be logged
13335
- * and just return. Otherwise, it will get to the corresponding completion
13336
- * queue and process all the entries on that completion queue, rearm the
13337
- * completion queue, and then return.
13960
+ * This routine calls the cq processing routine with a handler specific
13961
+ * to the type of queue bound to it.
1333813962 *
13963
+ * The CQ routine returns two values: the first is the calling status,
13964
+ * which indicates whether work was queued to the background discovery
13965
+ * thread. If true, the routine should wakeup the discovery thread;
13966
+ * the second is the delay parameter. If non-zero, rather than rearming
13967
+ * the CQ and yet another interrupt, the CQ handler should be queued so
13968
+ * that it is processed in a subsequent polling action. The value of
13969
+ * the delay indicates when to reschedule it.
1333913970 **/
1334013971 static void
13341
-lpfc_sli4_sp_process_cq(struct work_struct *work)
13972
+__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
1334213973 {
13343
- struct lpfc_queue *cq =
13344
- container_of(work, struct lpfc_queue, spwork);
1334513974 struct lpfc_hba *phba = cq->phba;
13346
- struct lpfc_cqe *cqe;
13975
+ unsigned long delay;
1334713976 bool workposted = false;
13348
- int ccount = 0;
13977
+ int ret = 0;
1334913978
13350
- /* Process all the entries to the CQ */
13979
+ /* Process and rearm the CQ */
1335113980 switch (cq->type) {
1335213981 case LPFC_MCQ:
13353
- while ((cqe = lpfc_sli4_cq_get(cq))) {
13354
- workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
13355
- if (!(++ccount % cq->entry_repost))
13356
- break;
13357
- cq->CQ_mbox++;
13358
- }
13982
+ workposted |= __lpfc_sli4_process_cq(phba, cq,
13983
+ lpfc_sli4_sp_handle_mcqe,
13984
+ &delay, LPFC_QUEUE_WORK);
1335913985 break;
1336013986 case LPFC_WCQ:
13361
- while ((cqe = lpfc_sli4_cq_get(cq))) {
13362
- if (cq->subtype == LPFC_FCP ||
13363
- cq->subtype == LPFC_NVME) {
13364
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13365
- if (phba->ktime_on)
13366
- cq->isr_timestamp = ktime_get_ns();
13367
- else
13368
- cq->isr_timestamp = 0;
13369
-#endif
13370
- workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
13371
- cqe);
13372
- } else {
13373
- workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
13374
- cqe);
13375
- }
13376
- if (!(++ccount % cq->entry_repost))
13377
- break;
13378
- }
13379
-
13380
- /* Track the max number of CQEs processed in 1 EQ */
13381
- if (ccount > cq->CQ_max_cqe)
13382
- cq->CQ_max_cqe = ccount;
13987
+ if (cq->subtype == LPFC_IO)
13988
+ workposted |= __lpfc_sli4_process_cq(phba, cq,
13989
+ lpfc_sli4_fp_handle_cqe,
13990
+ &delay, LPFC_QUEUE_WORK);
13991
+ else
13992
+ workposted |= __lpfc_sli4_process_cq(phba, cq,
13993
+ lpfc_sli4_sp_handle_cqe,
13994
+ &delay, LPFC_QUEUE_WORK);
1338313995 break;
1338413996 default:
13385
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13997
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1338613998 "0370 Invalid completion queue type (%d)\n",
1338713999 cq->type);
1338814000 return;
1338914001 }
1339014002
13391
- /* Catch the no cq entry condition, log an error */
13392
- if (unlikely(ccount == 0))
13393
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13394
- "0371 No entry from the CQ: identifier "
13395
- "(x%x), type (%d)\n", cq->queue_id, cq->type);
13396
-
13397
- /* In any case, flash and re-arm the RCQ */
13398
- phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
14003
+ if (delay) {
14004
+ if (is_kdump_kernel())
14005
+ ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14006
+ delay);
14007
+ else
14008
+ ret = queue_delayed_work_on(cq->chann, phba->wq,
14009
+ &cq->sched_spwork, delay);
14010
+ if (!ret)
14011
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14012
+ "0394 Cannot schedule queue work "
14013
+ "for cqid=%d on CPU %d\n",
14014
+ cq->queue_id, cq->chann);
14015
+ }
1339914016
1340014017 /* wake up worker thread if there are works to be done */
1340114018 if (workposted)
1340214019 lpfc_worker_wake_up(phba);
14020
+}
14021
+
14022
+/**
14023
+ * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14024
+ * interrupt
14025
+ * @work: pointer to work element
14026
+ *
14027
+ * translates from the work handler and calls the slow-path handler.
14028
+ **/
14029
+static void
14030
+lpfc_sli4_sp_process_cq(struct work_struct *work)
14031
+{
14032
+ struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14033
+
14034
+ __lpfc_sli4_sp_process_cq(cq);
14035
+}
14036
+
14037
+/**
14038
+ * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14039
+ * @work: pointer to work element
14040
+ *
14041
+ * translates from the work handler and calls the slow-path handler.
14042
+ **/
14043
+static void
14044
+lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14045
+{
14046
+ struct lpfc_queue *cq = container_of(to_delayed_work(work),
14047
+ struct lpfc_queue, sched_spwork);
14048
+
14049
+ __lpfc_sli4_sp_process_cq(cq);
1340314050 }
1340414051
1340514052 /**
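
__lpfc_sli4_process_cq() above drains a CQ in bounded batches: every notify_interval entries it writes a NOARM doorbell to return consumed credits, it stops after max_proc_limit entries, and when the batch reached cfg_cq_poll_threshold it reports a non-zero delay so the caller reschedules the handler instead of re-arming the CQ. The stand-alone sketch below mirrors only that control flow; the structure fields, the write_cq_db() helper and the numbers in main() are illustrative assumptions, not the driver's actual interfaces.

    #include <stdbool.h>

    /* Illustrative stand-ins for the queue bookkeeping used above. */
    struct cq_sim {
        int pending;          /* entries currently waiting             */
        int notify_interval;  /* NOARM doorbell every N entries        */
        int max_proc_limit;   /* stop the batch after this many        */
        int poll_threshold;   /* >= this -> reschedule instead of arm  */
    };

    static void write_cq_db(struct cq_sim *cq, int consumed, bool arm)
    {
        /* placeholder for the SLI4 doorbell write: return 'consumed'
         * credits and optionally re-arm the completion queue interrupt. */
        (void)cq; (void)consumed; (void)arm;
    }

    /* Returns the delay to reschedule with; 0 means the CQ was re-armed. */
    static unsigned long process_cq(struct cq_sim *cq)
    {
        int count = 0, consumed = 0;
        bool arm = true;
        unsigned long delay = 0;

        while (cq->pending > 0 && count < cq->max_proc_limit) {
            cq->pending--;                        /* "handle" one CQE     */
            consumed++;
            count++;
            if (count % cq->notify_interval == 0) {
                write_cq_db(cq, consumed, false); /* periodic NOARM       */
                consumed = 0;
            }
        }

        if (count >= cq->poll_threshold) {
            delay = 1;                            /* let the caller re-poll */
            arm = false;
        }

        write_cq_db(cq, consumed, arm);           /* final doorbell        */
        return delay;
    }

    int main(void)
    {
        struct cq_sim cq = { .pending = 300, .notify_interval = 16,
                             .max_proc_limit = 256, .poll_threshold = 64 };
        while (process_cq(&cq))
            ;                                     /* re-poll until drained */
        return 0;
    }
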
....@@ -13431,9 +14078,9 @@
1343114078 IOERR_NO_RESOURCES))
1343214079 phba->lpfc_rampdown_queue_depth(phba);
1343314080
13434
- /* Log the error status */
14081
+ /* Log the cmpl status */
1343514082 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13436
- "0373 FCP CQE error: status=x%x: "
14083
+ "0373 FCP CQE cmpl: status=x%x: "
1343714084 "CQE: %08x %08x %08x %08x\n",
1343814085 bf_get(lpfc_wcqe_c_status, wcqe),
1343914086 wcqe->word0, wcqe->total_data_placed,
....@@ -13443,9 +14090,9 @@
1344314090 /* Look up the FCP command IOCB and create pseudo response IOCB */
1344414091 spin_lock_irqsave(&pring->ring_lock, iflags);
1344514092 pring->stats.iocb_event++;
14093
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
1344614094 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
1344714095 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13448
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
1344914096 if (unlikely(!cmdiocbq)) {
1345014097 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1345114098 "0374 FCP complete with no corresponding "
....@@ -13527,6 +14174,7 @@
1352714174 /**
1352814175 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
1352914176 * @phba: Pointer to HBA context object.
14177
+ * @cq: Pointer to completion queue.
1353014178 * @rcqe: Pointer to receive-queue completion queue entry.
1353114179 *
1353214180 * This routine processes a receive-queue completion queue entry.
....@@ -13571,9 +14219,9 @@
1357114219 status = bf_get(lpfc_rcqe_status, rcqe);
1357214220 switch (status) {
1357314221 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13574
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14222
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1357514223 "6126 Receive Frame Truncated!!\n");
13576
- /* Drop thru */
14224
+ fallthrough;
1357714225 case FC_STATUS_RQ_SUCCESS:
1357814226 spin_lock_irqsave(&phba->hbalock, iflags);
1357914227 lpfc_sli4_rq_release(hrq, drq);
....@@ -13590,8 +14238,8 @@
1359014238
1359114239 /* Just some basic sanity checks on FCP Command frame */
1359214240 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13593
- fc_hdr->fh_f_ctl[1] << 8 |
13594
- fc_hdr->fh_f_ctl[2]);
14241
+ fc_hdr->fh_f_ctl[1] << 8 |
14242
+ fc_hdr->fh_f_ctl[2]);
1359514243 if (((fctl &
1359614244 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
1359714245 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
....@@ -13599,19 +14247,19 @@
1359914247 goto drop;
1360014248
1360114249 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13602
- dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14250
+ dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
1360314251 lpfc_nvmet_unsol_fcp_event(
13604
- phba, idx, dma_buf,
13605
- cq->isr_timestamp);
14252
+ phba, idx, dma_buf, cq->isr_timestamp,
14253
+ cq->q_flag & HBA_NVMET_CQ_NOTIFY);
1360614254 return false;
1360714255 }
1360814256 drop:
13609
- lpfc_in_buf_free(phba, &dma_buf->dbuf);
14257
+ lpfc_rq_buf_free(phba, &dma_buf->hbuf);
1361014258 break;
1361114259 case FC_STATUS_INSUFF_BUF_FRM_DISC:
1361214260 if (phba->nvmet_support) {
1361314261 tgtp = phba->targetport->private;
13614
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
14262
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1361514263 "6401 RQE Error x%x, posted %d err_cnt "
1361614264 "%d: %x %x %x\n",
1361714265 status, hrq->RQ_buf_posted,
....@@ -13620,7 +14268,7 @@
1362014268 atomic_read(&tgtp->rcv_fcp_cmd_out),
1362114269 atomic_read(&tgtp->xmt_fcp_release));
1362214270 }
13623
- /* fallthrough */
14271
+ fallthrough;
1362414272
1362514273 case FC_STATUS_INSUFF_BUF_NEED_BUF:
1362614274 hrq->RQ_no_posted_buf++;
....@@ -13633,13 +14281,16 @@
1363314281
1363414282 /**
1363514283 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14284
+ * @phba: adapter with cq
1363614285 * @cq: Pointer to the completion queue.
13637
- * @eqe: Pointer to fast-path completion queue entry.
14286
+ * @cqe: Pointer to fast-path completion queue entry.
1363814287 *
1363914288 * This routine processes a fast-path work queue completion entry from the fast-path
1364014289 * event queue for FCP command response completion.
14290
+ *
14291
+ * Return: true if work posted to worker thread, otherwise false.
1364114292 **/
13642
-static int
14293
+static bool
1364314294 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
1364414295 struct lpfc_cqe *cqe)
1364514296 {
....@@ -13656,10 +14307,7 @@
1365614307 cq->CQ_wq++;
1365714308 /* Process the WQ complete event */
1365814309 phba->last_completion_time = jiffies;
13659
- if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
13660
- lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13661
- (struct lpfc_wcqe_complete *)&wcqe);
13662
- if (cq->subtype == LPFC_NVME_LS)
14310
+ if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
1366314311 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
1366414312 (struct lpfc_wcqe_complete *)&wcqe);
1366514313 break;
....@@ -13685,7 +14333,7 @@
1368514333 }
1368614334 break;
1368714335 default:
13688
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14336
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1368914337 "0144 Not a valid CQE code: x%x\n",
1369014338 bf_get(lpfc_wcqe_c_code, &wcqe));
1369114339 break;
....@@ -13694,8 +14342,47 @@
1369414342 }
1369514343
1369614344 /**
14345
+ * lpfc_sli4_sched_cq_work - Schedules cq work
14346
+ * @phba: Pointer to HBA context object.
14347
+ * @cq: Pointer to CQ
14348
+ * @cqid: CQ ID
14349
+ *
14350
+ * This routine checks the poll mode of the CQ corresponding to
14351
+ * cq->chann, then either schedules a softirq or queue_work to complete
14352
+ * cq work.
14353
+ *
14354
+ * queue_work path is taken if in NVMET mode, or if poll_mode is in
14355
+ * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
14356
+ *
14357
+ **/
14358
+static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
14359
+ struct lpfc_queue *cq, uint16_t cqid)
14360
+{
14361
+ int ret = 0;
14362
+
14363
+ switch (cq->poll_mode) {
14364
+ case LPFC_IRQ_POLL:
14365
+ irq_poll_sched(&cq->iop);
14366
+ break;
14367
+ case LPFC_QUEUE_WORK:
14368
+ default:
14369
+ if (is_kdump_kernel())
14370
+ ret = queue_work(phba->wq, &cq->irqwork);
14371
+ else
14372
+ ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
14373
+ if (!ret)
14374
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14375
+ "0383 Cannot schedule queue work "
14376
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14377
+ cqid, cq->queue_id,
14378
+ raw_smp_processor_id());
14379
+ }
14380
+}
14381
+
14382
+/**
1369714383 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
1369814384 * @phba: Pointer to HBA context object.
14385
+ * @eq: Pointer to the queue structure.
1369914386 * @eqe: Pointer to fast-path event queue entry.
1370014387 *
1370114388 * This routine processes an event queue entry from the fast-path event queue.
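
lpfc_sli4_sched_cq_work() in the hunk above chooses how a CQ gets serviced: irq_poll_sched() when the CQ is in LPFC_IRQ_POLL mode, otherwise a workqueue item pinned to cq->chann via queue_work_on(), with plain queue_work() as the fallback in a kdump kernel. The sketch below shows just that dispatch decision; the helper names and printf-based stubs are hypothetical, not the driver's API.

    #include <stdbool.h>
    #include <stdio.h>

    enum poll_mode { QUEUE_WORK, IRQ_POLL };

    /* Placeholders for the three scheduling paths used above. */
    static void sched_softirq(int cqid)          { printf("irq_poll cq %d\n", cqid); }
    static bool queue_on_any_cpu(int cqid)       { printf("queue_work cq %d\n", cqid); return true; }
    static bool queue_on_cpu(int cpu, int cqid)  { printf("queue_work_on cpu%d cq %d\n", cpu, cqid); return true; }

    static void sched_cq_work(enum poll_mode mode, bool kdump, int chann, int cqid)
    {
        bool ok;

        switch (mode) {
        case IRQ_POLL:
            sched_softirq(cqid);                  /* like irq_poll_sched()    */
            break;
        case QUEUE_WORK:
        default:
            if (kdump)                            /* single-CPU kdump kernel  */
                ok = queue_on_any_cpu(cqid);
            else                                  /* pin to the CQ's channel  */
                ok = queue_on_cpu(chann, cqid);
            if (!ok)
                fprintf(stderr, "cannot schedule work for cqid=%d\n", cqid);
            break;
        }
    }

    int main(void)
    {
        sched_cq_work(QUEUE_WORK, false, 3, 42);
        sched_cq_work(IRQ_POLL, false, 3, 43);
        return 0;
    }
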
....@@ -13706,14 +14393,15 @@
1370614393 * completion queue, and then return.
1370714394 **/
1370814395 static void
13709
-lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13710
- uint32_t qidx)
14396
+lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14397
+ struct lpfc_eqe *eqe)
1371114398 {
1371214399 struct lpfc_queue *cq = NULL;
14400
+ uint32_t qidx = eq->hdwq;
1371314401 uint16_t cqid, id;
1371414402
1371514403 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13716
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14404
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1371714405 "0366 Not a valid completion "
1371814406 "event: majorcode=x%x, minorcode=x%x\n",
1371914407 bf_get_le32(lpfc_eqe_major_code, eqe),
....@@ -13724,6 +14412,14 @@
1372414412 /* Get the reference to the corresponding CQ */
1372514413 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
1372614414
14415
+ /* Use the fast lookup method first */
14416
+ if (cqid <= phba->sli4_hba.cq_max) {
14417
+ cq = phba->sli4_hba.cq_lookup[cqid];
14418
+ if (cq)
14419
+ goto work_cq;
14420
+ }
14421
+
14422
+ /* Next check for NVMET completion */
1372714423 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
1372814424 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
1372914425 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
....@@ -13731,20 +14427,6 @@
1373114427 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
1373214428 goto process_cq;
1373314429 }
13734
- }
13735
-
13736
- if (phba->sli4_hba.nvme_cq_map &&
13737
- (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
13738
- /* Process NVME / NVMET command completion */
13739
- cq = phba->sli4_hba.nvme_cq[qidx];
13740
- goto process_cq;
13741
- }
13742
-
13743
- if (phba->sli4_hba.fcp_cq_map &&
13744
- (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
13745
- /* Process FCP command completion */
13746
- cq = phba->sli4_hba.fcp_cq[qidx];
13747
- goto process_cq;
1374814430 }
1374914431
1375014432 if (phba->sli4_hba.nvmels_cq &&
....@@ -13755,243 +14437,106 @@
1375514437
1375614438 /* Otherwise this is a Slow path event */
1375714439 if (cq == NULL) {
13758
- lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
14440
+ lpfc_sli4_sp_handle_eqe(phba, eqe,
14441
+ phba->sli4_hba.hdwq[qidx].hba_eq);
1375914442 return;
1376014443 }
1376114444
1376214445 process_cq:
1376314446 if (unlikely(cqid != cq->queue_id)) {
13764
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14447
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1376514448 "0368 Miss-matched fast-path completion "
1376614449 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
1376714450 cqid, cq->queue_id);
1376814451 return;
1376914452 }
1377014453
13771
- /* Save EQ associated with this CQ */
13772
- cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
13773
-
13774
- if (!queue_work(phba->wq, &cq->irqwork))
13775
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13776
- "0363 Cannot schedule soft IRQ "
13777
- "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13778
- cqid, cq->queue_id, smp_processor_id());
14454
+work_cq:
14455
+#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14456
+ if (phba->ktime_on)
14457
+ cq->isr_timestamp = ktime_get_ns();
14458
+ else
14459
+ cq->isr_timestamp = 0;
14460
+#endif
14461
+ lpfc_sli4_sched_cq_work(phba, cq, cqid);
1377914462 }
1378014463
1378114464 /**
13782
- * lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
13783
- * @phba: Pointer to HBA context object.
13784
- * @eqe: Pointer to fast-path event queue entry.
14465
+ * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
14466
+ * @cq: Pointer to CQ to be processed
14467
+ * @poll_mode: Enum lpfc_poll_state to determine poll mode
1378514468 *
13786
- * This routine process a event queue entry from the fast-path event queue.
13787
- * It will check the MajorCode and MinorCode to determine this is for a
13788
- * completion event on a completion queue, if not, an error shall be logged
13789
- * and just return. Otherwise, it will get to the corresponding completion
13790
- * queue and process all the entries on the completion queue, rearm the
13791
- * completion queue, and then return.
14469
+ * This routine calls the cq processing routine with the handler for
14470
+ * fast path CQEs.
14471
+ *
14472
+ * The CQ routine returns two values: the first is the calling status,
14473
+ * which indicates whether work was queued to the background discovery
14474
+ * thread. If true, the routine should wakeup the discovery thread;
14475
+ * the second is the delay parameter. If non-zero, rather than rearming
14476
+ * the CQ and yet another interrupt, the CQ handler should be queued so
14477
+ * that it is processed in a subsequent polling action. The value of
14478
+ * the delay indicates when to reschedule it.
1379214479 **/
1379314480 static void
13794
-lpfc_sli4_hba_process_cq(struct work_struct *work)
14481
+__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
14482
+ enum lpfc_poll_mode poll_mode)
1379514483 {
13796
- struct lpfc_queue *cq =
13797
- container_of(work, struct lpfc_queue, irqwork);
1379814484 struct lpfc_hba *phba = cq->phba;
13799
- struct lpfc_cqe *cqe;
14485
+ unsigned long delay;
1380014486 bool workposted = false;
13801
- int ccount = 0;
14487
+ int ret = 0;
1380214488
13803
- /* Process all the entries to the CQ */
13804
- while ((cqe = lpfc_sli4_cq_get(cq))) {
13805
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13806
- if (phba->ktime_on)
13807
- cq->isr_timestamp = ktime_get_ns();
14489
+ /* process and rearm the CQ */
14490
+ workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14491
+ &delay, poll_mode);
14492
+
14493
+ if (delay) {
14494
+ if (is_kdump_kernel())
14495
+ ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
14496
+ delay);
1380814497 else
13809
- cq->isr_timestamp = 0;
13810
-#endif
13811
- workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13812
- if (!(++ccount % cq->entry_repost))
13813
- break;
14498
+ ret = queue_delayed_work_on(cq->chann, phba->wq,
14499
+ &cq->sched_irqwork, delay);
14500
+ if (!ret)
14501
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14502
+ "0367 Cannot schedule queue work "
14503
+ "for cqid=%d on CPU %d\n",
14504
+ cq->queue_id, cq->chann);
1381414505 }
13815
-
13816
- /* Track the max number of CQEs processed in 1 EQ */
13817
- if (ccount > cq->CQ_max_cqe)
13818
- cq->CQ_max_cqe = ccount;
13819
- cq->assoc_qp->EQ_cqe_cnt += ccount;
13820
-
13821
- /* Catch the no cq entry condition */
13822
- if (unlikely(ccount == 0))
13823
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13824
- "0369 No entry from fast-path completion "
13825
- "queue fcpcqid=%d\n", cq->queue_id);
13826
-
13827
- /* In any case, flash and re-arm the CQ */
13828
- phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
1382914506
1383014507 /* wake up worker thread if there are works to be done */
1383114508 if (workposted)
1383214509 lpfc_worker_wake_up(phba);
1383314510 }
1383414511
13835
-static void
13836
-lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
13837
-{
13838
- struct lpfc_eqe *eqe;
13839
-
13840
- /* walk all the EQ entries and drop on the floor */
13841
- while ((eqe = lpfc_sli4_eq_get(eq)))
13842
- ;
13843
-
13844
- /* Clear and re-arm the EQ */
13845
- phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
13846
-}
13847
-
13848
-
1384914512 /**
13850
- * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
13851
- * entry
13852
- * @phba: Pointer to HBA context object.
13853
- * @eqe: Pointer to fast-path event queue entry.
14513
+ * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14514
+ * interrupt
14515
+ * @work: pointer to work element
1385414516 *
13855
- * This routine process a event queue entry from the Flash Optimized Fabric
13856
- * event queue. It will check the MajorCode and MinorCode to determine this
13857
- * is for a completion event on a completion queue, if not, an error shall be
13858
- * logged and just return. Otherwise, it will get to the corresponding
13859
- * completion queue and process all the entries on the completion queue, rearm
13860
- * the completion queue, and then return.
14517
+ * translates from the work handler and calls the fast-path handler.
1386114518 **/
1386214519 static void
13863
-lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
14520
+lpfc_sli4_hba_process_cq(struct work_struct *work)
1386414521 {
13865
- struct lpfc_queue *cq;
13866
- uint16_t cqid;
14522
+ struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
1386714523
13868
- if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13869
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13870
- "9147 Not a valid completion "
13871
- "event: majorcode=x%x, minorcode=x%x\n",
13872
- bf_get_le32(lpfc_eqe_major_code, eqe),
13873
- bf_get_le32(lpfc_eqe_minor_code, eqe));
13874
- return;
13875
- }
13876
-
13877
- /* Get the reference to the corresponding CQ */
13878
- cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13879
-
13880
- /* Next check for OAS */
13881
- cq = phba->sli4_hba.oas_cq;
13882
- if (unlikely(!cq)) {
13883
- if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13884
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13885
- "9148 OAS completion queue "
13886
- "does not exist\n");
13887
- return;
13888
- }
13889
-
13890
- if (unlikely(cqid != cq->queue_id)) {
13891
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13892
- "9149 Miss-matched fast-path compl "
13893
- "queue id: eqcqid=%d, fcpcqid=%d\n",
13894
- cqid, cq->queue_id);
13895
- return;
13896
- }
13897
-
13898
- /* Save EQ associated with this CQ */
13899
- cq->assoc_qp = phba->sli4_hba.fof_eq;
13900
-
13901
- /* CQ work will be processed on CPU affinitized to this IRQ */
13902
- if (!queue_work(phba->wq, &cq->irqwork))
13903
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13904
- "0367 Cannot schedule soft IRQ "
13905
- "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13906
- cqid, cq->queue_id, smp_processor_id());
14524
+ __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
1390714525 }
1390814526
1390914527 /**
13910
- * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
13911
- * @irq: Interrupt number.
13912
- * @dev_id: The device context pointer.
14528
+ * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14529
+ * @work: pointer to work element
1391314530 *
13914
- * This function is directly called from the PCI layer as an interrupt
13915
- * service routine when device with SLI-4 interface spec is enabled with
13916
- * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
13917
- * IOCB ring event in the HBA. However, when the device is enabled with either
13918
- * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13919
- * device-level interrupt handler. When the PCI slot is in error recovery
13920
- * or the HBA is undergoing initialization, the interrupt handler will not
13921
- * process the interrupt. The Flash Optimized Fabric ring event are handled in
13922
- * the intrrupt context. This function is called without any lock held.
13923
- * It gets the hbalock to access and update SLI data structures. Note that,
13924
- * the EQ to CQ are one-to-one map such that the EQ index is
13925
- * equal to that of CQ index.
13926
- *
13927
- * This function returns IRQ_HANDLED when interrupt is handled else it
13928
- * returns IRQ_NONE.
14531
+ * translates from the work handler and calls the fast-path handler.
1392914532 **/
13930
-irqreturn_t
13931
-lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
14533
+static void
14534
+lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
1393214535 {
13933
- struct lpfc_hba *phba;
13934
- struct lpfc_hba_eq_hdl *hba_eq_hdl;
13935
- struct lpfc_queue *eq;
13936
- struct lpfc_eqe *eqe;
13937
- unsigned long iflag;
13938
- int ecount = 0;
14536
+ struct lpfc_queue *cq = container_of(to_delayed_work(work),
14537
+ struct lpfc_queue, sched_irqwork);
1393914538
13940
- /* Get the driver's phba structure from the dev_id */
13941
- hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13942
- phba = hba_eq_hdl->phba;
13943
-
13944
- if (unlikely(!phba))
13945
- return IRQ_NONE;
13946
-
13947
- /* Get to the EQ struct associated with this vector */
13948
- eq = phba->sli4_hba.fof_eq;
13949
- if (unlikely(!eq))
13950
- return IRQ_NONE;
13951
-
13952
- /* Check device state for handling interrupt */
13953
- if (unlikely(lpfc_intr_state_check(phba))) {
13954
- /* Check again for link_state with lock held */
13955
- spin_lock_irqsave(&phba->hbalock, iflag);
13956
- if (phba->link_state < LPFC_LINK_DOWN)
13957
- /* Flush, clear interrupt, and rearm the EQ */
13958
- lpfc_sli4_eq_flush(phba, eq);
13959
- spin_unlock_irqrestore(&phba->hbalock, iflag);
13960
- return IRQ_NONE;
13961
- }
13962
-
13963
- /*
13964
- * Process all the event on FCP fast-path EQ
13965
- */
13966
- while ((eqe = lpfc_sli4_eq_get(eq))) {
13967
- lpfc_sli4_fof_handle_eqe(phba, eqe);
13968
- if (!(++ecount % eq->entry_repost))
13969
- break;
13970
- eq->EQ_processed++;
13971
- }
13972
-
13973
- /* Track the max number of EQEs processed in 1 intr */
13974
- if (ecount > eq->EQ_max_eqe)
13975
- eq->EQ_max_eqe = ecount;
13976
-
13977
-
13978
- if (unlikely(ecount == 0)) {
13979
- eq->EQ_no_entry++;
13980
-
13981
- if (phba->intr_type == MSIX)
13982
- /* MSI-X treated interrupt served as no EQ share INT */
13983
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13984
- "9145 MSI-X interrupt with no EQE\n");
13985
- else {
13986
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13987
- "9146 ISR interrupt with no EQE\n");
13988
- /* Non MSI-X treated on interrupt as EQ share INT */
13989
- return IRQ_NONE;
13990
- }
13991
- }
13992
- /* Always clear and re-arm the fast-path EQ */
13993
- phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
13994
- return IRQ_HANDLED;
14539
+ __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
1399514540 }
1399614541
1399714542 /**
....@@ -14026,10 +14571,10 @@
1402614571 struct lpfc_hba *phba;
1402714572 struct lpfc_hba_eq_hdl *hba_eq_hdl;
1402814573 struct lpfc_queue *fpeq;
14029
- struct lpfc_eqe *eqe;
1403014574 unsigned long iflag;
1403114575 int ecount = 0;
1403214576 int hba_eqidx;
14577
+ struct lpfc_eq_intr_info *eqi;
1403314578
1403414579 /* Get the driver's phba structure from the dev_id */
1403514580 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
....@@ -14038,22 +14583,13 @@
1403814583
1403914584 if (unlikely(!phba))
1404014585 return IRQ_NONE;
14041
- if (unlikely(!phba->sli4_hba.hba_eq))
14586
+ if (unlikely(!phba->sli4_hba.hdwq))
1404214587 return IRQ_NONE;
1404314588
1404414589 /* Get to the EQ struct associated with this vector */
14045
- fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
14590
+ fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
1404614591 if (unlikely(!fpeq))
1404714592 return IRQ_NONE;
14048
-
14049
- if (lpfc_fcp_look_ahead) {
14050
- if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
14051
- phba->sli4_hba.sli4_eq_clr_intr(fpeq);
14052
- else {
14053
- atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14054
- return IRQ_NONE;
14055
- }
14056
- }
1405714593
1405814594 /* Check device state for handling interrupt */
1405914595 if (unlikely(lpfc_intr_state_check(phba))) {
....@@ -14061,38 +14597,28 @@
1406114597 spin_lock_irqsave(&phba->hbalock, iflag);
1406214598 if (phba->link_state < LPFC_LINK_DOWN)
1406314599 /* Flush, clear interrupt, and rearm the EQ */
14064
- lpfc_sli4_eq_flush(phba, fpeq);
14600
+ lpfc_sli4_eqcq_flush(phba, fpeq);
1406514601 spin_unlock_irqrestore(&phba->hbalock, iflag);
14066
- if (lpfc_fcp_look_ahead)
14067
- atomic_inc(&hba_eq_hdl->hba_eq_in_use);
1406814602 return IRQ_NONE;
1406914603 }
1407014604
14071
- /*
14072
- * Process all the event on FCP fast-path EQ
14073
- */
14074
- while ((eqe = lpfc_sli4_eq_get(fpeq))) {
14075
- lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
14076
- if (!(++ecount % fpeq->entry_repost))
14077
- break;
14078
- fpeq->EQ_processed++;
14079
- }
14605
+ eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
14606
+ eqi->icnt++;
1408014607
14081
- /* Track the max number of EQEs processed in 1 intr */
14082
- if (ecount > fpeq->EQ_max_eqe)
14083
- fpeq->EQ_max_eqe = ecount;
14608
+ fpeq->last_cpu = raw_smp_processor_id();
1408414609
14085
- /* Always clear and re-arm the fast-path EQ */
14086
- phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
14610
+ if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
14611
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
14612
+ phba->cfg_auto_imax &&
14613
+ fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14614
+ phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14615
+ lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14616
+
14617
+ /* process and rearm the EQ */
14618
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
1408714619
1408814620 if (unlikely(ecount == 0)) {
1408914621 fpeq->EQ_no_entry++;
14090
-
14091
- if (lpfc_fcp_look_ahead) {
14092
- atomic_inc(&hba_eq_hdl->hba_eq_in_use);
14093
- return IRQ_NONE;
14094
- }
14095
-
1409614622 if (phba->intr_type == MSIX)
1409714623 /* MSI-X treated interrupt served as no EQ share INT */
1409814624 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
....@@ -14101,9 +14627,6 @@
1410114627 /* Non MSI-X treated on interrupt as EQ share INT */
1410214628 return IRQ_NONE;
1410314629 }
14104
-
14105
- if (lpfc_fcp_look_ahead)
14106
- atomic_inc(&hba_eq_hdl->hba_eq_in_use);
1410714630
1410814631 return IRQ_HANDLED;
1410914632 } /* lpfc_sli4_fp_intr_handler */
....@@ -14142,15 +14665,8 @@
1414214665 /*
1414314666 * Invoke fast-path host attention interrupt handling as appropriate.
1414414667 */
14145
- for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
14668
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
1414614669 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14147
- &phba->sli4_hba.hba_eq_hdl[qidx]);
14148
- if (hba_irq_rc == IRQ_HANDLED)
14149
- hba_handled |= true;
14150
- }
14151
-
14152
- if (phba->cfg_fof) {
14153
- hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
1415414670 &phba->sli4_hba.hba_eq_hdl[qidx]);
1415514671 if (hba_irq_rc == IRQ_HANDLED)
1415614672 hba_handled |= true;
....@@ -14158,6 +14674,145 @@
1415814674
1415914675 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
1416014676 } /* lpfc_sli4_intr_handler */
14677
+
14678
+void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14679
+{
14680
+ struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14681
+ struct lpfc_queue *eq;
14682
+ int i = 0;
14683
+
14684
+ rcu_read_lock();
14685
+
14686
+ list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14687
+ i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14688
+ if (!list_empty(&phba->poll_list))
14689
+ mod_timer(&phba->cpuhp_poll_timer,
14690
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14691
+
14692
+ rcu_read_unlock();
14693
+}
14694
+
14695
+inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14696
+{
14697
+ struct lpfc_hba *phba = eq->phba;
14698
+ int i = 0;
14699
+
14700
+ /*
14701
+ * Unlocking an irq is one of the entry points to check
14702
+ * for re-schedule, but we are good for io submission
14703
+ * path as midlayer does a get_cpu to glue us in. Flush
14704
+ * out the invalidate queue so we can see the updated
14705
+ * value for flag.
14706
+ */
14707
+ smp_rmb();
14708
+
14709
+ if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14710
+ /* We will not likely get the completion for the caller
14711
+ * during this iteration but I guess that's fine.
14712
+ * Future io's coming on this eq should be able to
14713
+ * pick it up. As for the case of single io's, they
14714
+ * will be handled through a sched from polling timer
14715
+ * function which is currently triggered every 1msec.
14716
+ */
14717
+ i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14718
+
14719
+ return i;
14720
+}
14721
+
14722
+static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14723
+{
14724
+ struct lpfc_hba *phba = eq->phba;
14725
+
14726
+ /* kickstart slowpath processing if needed */
14727
+ if (list_empty(&phba->poll_list))
14728
+ mod_timer(&phba->cpuhp_poll_timer,
14729
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14730
+
14731
+ list_add_rcu(&eq->_poll_list, &phba->poll_list);
14732
+ synchronize_rcu();
14733
+}
14734
+
14735
+static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
14736
+{
14737
+ struct lpfc_hba *phba = eq->phba;
14738
+
14739
+ /* Disable slowpath processing for this eq. Kick start the eq
14740
+ * by RE-ARMING the eq's ASAP
14741
+ */
14742
+ list_del_rcu(&eq->_poll_list);
14743
+ synchronize_rcu();
14744
+
14745
+ if (list_empty(&phba->poll_list))
14746
+ del_timer_sync(&phba->cpuhp_poll_timer);
14747
+}
14748
+
14749
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
14750
+{
14751
+ struct lpfc_queue *eq, *next;
14752
+
14753
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
14754
+ list_del(&eq->_poll_list);
14755
+
14756
+ INIT_LIST_HEAD(&phba->poll_list);
14757
+ synchronize_rcu();
14758
+}
14759
+
14760
+static inline void
14761
+__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
14762
+{
14763
+ if (mode == eq->mode)
14764
+ return;
14765
+ /*
14766
+ * currently this function is only called during a hotplug
14767
+ * event and the cpu on which this function is executing
14768
+ * is going offline. By now the hotplug has instructed
14769
+ * the scheduler to remove this cpu from cpu active mask.
14770
+ * So we don't need to worry about being put aside by the
14771
+ * scheduler for a high priority process. Yes, interrupts
14772
+ * could come but they are known to retire ASAP.
14773
+ */
14774
+
14775
+ /* Disable polling in the fastpath */
14776
+ WRITE_ONCE(eq->mode, mode);
14777
+ /* flush out the store buffer */
14778
+ smp_wmb();
14779
+
14780
+ /*
14781
+ * Add this eq to the polling list and start polling. For
14782
+ * a grace period both interrupt handler and poller will
14783
+ * try to process the eq _but_ that's fine. We have a
14784
+ * synchronization mechanism in place (queue_claimed) to
14785
+ * deal with it. This is just a draining phase for the
14786
+ * interrupt handler (not eq's) as we have guaranteed, via a
14787
+ * barrier, that all the CPUs have seen the new CQ_POLLED
14788
+ * state, which effectively disables the REARMING of
14789
+ * the EQ. The whole idea is that eq's die off eventually as
14790
+ * we are not rearming EQ's anymore.
14791
+ */
14792
+ mode ? lpfc_sli4_add_to_poll_list(eq) :
14793
+ lpfc_sli4_remove_from_poll_list(eq);
14794
+}
14795
+
14796
+void lpfc_sli4_start_polling(struct lpfc_queue *eq)
14797
+{
14798
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
14799
+}
14800
+
14801
+void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
14802
+{
14803
+ struct lpfc_hba *phba = eq->phba;
14804
+
14805
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
14806
+
14807
+ /* Kick start for the pending io's in h/w.
14808
+ * Once we switch back to interrupt processing on an eq,
14809
+ * the io path completion will only arm eq's when it
14810
+ * receives a completion. But since eq's are in a disarmed
14811
+ * state, they don't receive completions. This
14812
+ * creates a deadlock scenario.
14813
+ */
14814
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
14815
+}
1416114816
1416214817 /**
1416314818 * lpfc_sli4_queue_free - free a queue structure and associated memory
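
The added block above lets an EQ move between interrupt service and timer-driven polling: lpfc_sli4_add_to_poll_list() starts the heartbeat timer when the RCU-protected poll list becomes non-empty, lpfc_sli4_poll_hbtimer() walks that list and polls each EQ, and the brief window where both the interrupt handler and the poller may touch an EQ is tolerated via queue_claimed. A small user-space sketch of the list-plus-heartbeat shape follows; it uses a plain mutex instead of RCU and hypothetical names throughout.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct eq_sim {
        struct eq_sim *next;
        int id;
        bool polled_mode;
    };

    static struct eq_sim *poll_list;      /* EQs currently owned by the poller */
    static bool timer_running;            /* stands in for cpuhp_poll_timer    */
    static pthread_mutex_t poll_lock = PTHREAD_MUTEX_INITIALIZER;

    static void poll_one_eq(struct eq_sim *eq)
    {
        /* placeholder for processing the EQ without re-arming it */
        printf("polled eq %d\n", eq->id);
    }

    /* Heartbeat: walk the list, poll every EQ, keep ticking while non-empty. */
    static void poll_heartbeat(void)
    {
        pthread_mutex_lock(&poll_lock);
        for (struct eq_sim *eq = poll_list; eq; eq = eq->next)
            poll_one_eq(eq);
        timer_running = (poll_list != NULL);
        pthread_mutex_unlock(&poll_lock);
    }

    /* Switch one EQ to polled mode: add it to the list, start the heartbeat. */
    static void start_polling(struct eq_sim *eq)
    {
        pthread_mutex_lock(&poll_lock);
        eq->polled_mode = true;           /* interrupt path stops re-arming   */
        eq->next = poll_list;
        poll_list = eq;
        timer_running = true;             /* like mod_timer(cpuhp_poll_timer) */
        pthread_mutex_unlock(&poll_lock);
    }

    int main(void)
    {
        static struct eq_sim eq = { .id = 7 };
        start_polling(&eq);
        poll_heartbeat();
        return 0;
    }
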
....@@ -14175,6 +14830,9 @@
1417514830 if (!queue)
1417614831 return;
1417714832
14833
+ if (!list_empty(&queue->wq_list))
14834
+ list_del(&queue->wq_list);
14835
+
1417814836 while (!list_empty(&queue->page_list)) {
1417914837 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
1418014838 list);
....@@ -14187,8 +14845,8 @@
1418714845 kfree(queue->rqbp);
1418814846 }
1418914847
14190
- if (!list_empty(&queue->wq_list))
14191
- list_del(&queue->wq_list);
14848
+ if (!list_empty(&queue->cpu_list))
14849
+ list_del(&queue->cpu_list);
1419214850
1419314851 kfree(queue);
1419414852 return;
....@@ -14199,7 +14857,8 @@
1419914857 * @phba: The HBA that this queue is being created on.
1420014858 * @page_size: The size of a queue page
1420114859 * @entry_size: The size of each queue entry for this queue.
14202
- * @entry count: The number of entries that this queue will handle.
14860
+ * @entry_count: The number of entries that this queue will handle.
14861
+ * @cpu: The cpu that will primarily utilize this queue.
1420314862 *
1420414863 * This function allocates a queue structure and the DMAable memory used for
1420514864 * the host resident queue. This function must be called before creating the
....@@ -14207,68 +14866,69 @@
1420714866 **/
1420814867 struct lpfc_queue *
1420914868 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14210
- uint32_t entry_size, uint32_t entry_count)
14869
+ uint32_t entry_size, uint32_t entry_count, int cpu)
1421114870 {
1421214871 struct lpfc_queue *queue;
1421314872 struct lpfc_dmabuf *dmabuf;
14214
- int x, total_qe_count;
14215
- void *dma_pointer;
1421614873 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14874
+ uint16_t x, pgcnt;
1421714875
1421814876 if (!phba->sli4_hba.pc_sli4_params.supported)
1421914877 hw_page_size = page_size;
1422014878
14221
- queue = kzalloc(sizeof(struct lpfc_queue) +
14222
- (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
14223
- if (!queue)
14224
- return NULL;
14225
- queue->page_count = (ALIGN(entry_size * entry_count,
14226
- hw_page_size))/hw_page_size;
14879
+ pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
1422714880
1422814881 /* If needed, Adjust page count to match the max the adapter supports */
14229
- if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
14230
- (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
14231
- queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
14882
+ if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14883
+ pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14884
+
14885
+ queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14886
+ GFP_KERNEL, cpu_to_node(cpu));
14887
+ if (!queue)
14888
+ return NULL;
1423214889
1423314890 INIT_LIST_HEAD(&queue->list);
14891
+ INIT_LIST_HEAD(&queue->_poll_list);
1423414892 INIT_LIST_HEAD(&queue->wq_list);
1423514893 INIT_LIST_HEAD(&queue->wqfull_list);
1423614894 INIT_LIST_HEAD(&queue->page_list);
1423714895 INIT_LIST_HEAD(&queue->child_list);
14896
+ INIT_LIST_HEAD(&queue->cpu_list);
1423814897
1423914898 /* Set queue parameters now. If the system cannot provide memory
1424014899 * resources, the free routine needs to know what was allocated.
1424114900 */
14901
+ queue->page_count = pgcnt;
14902
+ queue->q_pgs = (void **)&queue[1];
14903
+ queue->entry_cnt_per_pg = hw_page_size / entry_size;
1424214904 queue->entry_size = entry_size;
1424314905 queue->entry_count = entry_count;
1424414906 queue->page_size = hw_page_size;
1424514907 queue->phba = phba;
1424614908
14247
- for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
14248
- dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
14909
+ for (x = 0; x < queue->page_count; x++) {
14910
+ dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14911
+ dev_to_node(&phba->pcidev->dev));
1424914912 if (!dmabuf)
1425014913 goto out_fail;
14251
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
14252
- hw_page_size, &dmabuf->phys,
14253
- GFP_KERNEL);
14914
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14915
+ hw_page_size, &dmabuf->phys,
14916
+ GFP_KERNEL);
1425414917 if (!dmabuf->virt) {
1425514918 kfree(dmabuf);
1425614919 goto out_fail;
1425714920 }
1425814921 dmabuf->buffer_tag = x;
1425914922 list_add_tail(&dmabuf->list, &queue->page_list);
14260
- /* initialize queue's entry array */
14261
- dma_pointer = dmabuf->virt;
14262
- for (; total_qe_count < entry_count &&
14263
- dma_pointer < (hw_page_size + dmabuf->virt);
14264
- total_qe_count++, dma_pointer += entry_size) {
14265
- queue->qe[total_qe_count].address = dma_pointer;
14266
- }
14923
+ /* use lpfc_sli4_qe to index a particular entry in this page */
14924
+ queue->q_pgs[x] = dmabuf->virt;
1426714925 }
1426814926 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
1426914927 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14928
+ INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14929
+ INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
1427014930
14271
- /* entry_repost will be set during q creation */
14931
+ /* notify_interval will be set during q creation */
1427214932
1427314933 return queue;
1427414934 out_fail:
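
lpfc_sli4_queue_alloc() above now sizes the queue by rounding entry_size * entry_count up to whole hardware pages, keeps one virtual address per page in q_pgs[], and records entry_cnt_per_pg so an entry can be located as page = index / entries-per-page and byte offset = (index % entries-per-page) * entry_size, presumably the arithmetic behind the lpfc_sli4_qe() helper named in the comment. A worked sketch of that arithmetic with illustrative sizes follows; the real page size and entry size come from the SLI4 parameters, not these placeholder values.

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
        uint32_t entry_size  = 64;      /* e.g. one queue entry (assumed)   */
        uint32_t entry_count = 512;     /* entries requested                */
        uint32_t page_size   = 4096;    /* hardware page size (assumed)     */

        uint32_t pgcnt = ALIGN_UP(entry_size * entry_count, page_size) / page_size;
        uint32_t entry_cnt_per_pg = page_size / entry_size;

        /* locate entry 'idx' by page number and byte offset within it */
        uint32_t idx  = 200;
        uint32_t page = idx / entry_cnt_per_pg;
        uint32_t off  = (idx % entry_cnt_per_pg) * entry_size;

        printf("pages=%u entries/page=%u -> entry %u is page %u offset %u\n",
               pgcnt, entry_cnt_per_pg, idx, page, off);
        return 0;
    }
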
....@@ -14305,43 +14965,75 @@
1430514965 }
1430614966
1430714967 /**
14308
- * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
14309
- * @phba: HBA structure that indicates port to create a queue on.
14310
- * @startq: The starting FCP EQ to modify
14968
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14969
+ * @phba: HBA structure that EQs are on.
14970
+ * @startq: The starting EQ index to modify
14971
+ * @numq: The number of EQs (consecutive indexes) to modify
14972
+ * @usdelay: amount of delay
1431114973 *
14312
- * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
14313
- * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
14314
- * updated in one mailbox command.
14974
+ * This function revises the EQ delay on 1 or more EQs. The EQ delay
14975
+ * is set either by writing to a register (if supported by the SLI Port)
14976
+ * or by mailbox command. The mailbox command allows several EQs to be
14977
+ * updated at once.
1431514978 *
14316
- * The @phba struct is used to send mailbox command to HBA. The @startq
14317
- * is used to get the starting FCP EQ to change.
14318
- * This function is asynchronous and will wait for the mailbox
14319
- * command to finish before continuing.
14979
+ * The @phba struct is used to send a mailbox command to HBA. The @startq
14980
+ * is used to get the starting EQ index to change. The @numq value is
14981
+ * used to specify how many consecutive EQ indexes, starting at EQ index,
14982
+ * are to be changed. This function is asynchronous and will wait for any
14983
+ * mailbox commands to finish before returning.
1432014984 *
14321
- * On success this function will return a zero. If unable to allocate enough
14322
- * memory this function will return -ENOMEM. If the queue create mailbox command
14323
- * fails this function will return -ENXIO.
14985
+ * On success this function will return a zero. If unable to allocate
14986
+ * enough memory this function will return -ENOMEM. If a mailbox command
14987
+ * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
14988
+ * have had their delay multiplier changed.
1432414989 **/
14325
-int
14990
+void
1432614991 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14327
- uint32_t numq, uint32_t imax)
14992
+ uint32_t numq, uint32_t usdelay)
1432814993 {
1432914994 struct lpfc_mbx_modify_eq_delay *eq_delay;
1433014995 LPFC_MBOXQ_t *mbox;
1433114996 struct lpfc_queue *eq;
14332
- int cnt, rc, length, status = 0;
14997
+ int cnt = 0, rc, length;
1433314998 uint32_t shdr_status, shdr_add_status;
14334
- uint32_t result, val;
14999
+ uint32_t dmult;
1433515000 int qidx;
1433615001 union lpfc_sli4_cfg_shdr *shdr;
14337
- uint16_t dmult;
1433815002
14339
- if (startq >= phba->io_channel_irqs)
14340
- return 0;
15003
+ if (startq >= phba->cfg_irq_chann)
15004
+ return;
15005
+
15006
+ if (usdelay > 0xFFFF) {
15007
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15008
+ "6429 usdelay %d too large. Scaled down to "
15009
+ "0xFFFF.\n", usdelay);
15010
+ usdelay = 0xFFFF;
15011
+ }
15012
+
15013
+ /* set values by EQ_DELAY register if supported */
15014
+ if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15015
+ for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15016
+ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15017
+ if (!eq)
15018
+ continue;
15019
+
15020
+ lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15021
+
15022
+ if (++cnt >= numq)
15023
+ break;
15024
+ }
15025
+ return;
15026
+ }
15027
+
15028
+ /* Otherwise, set values by mailbox cmd */
1434115029
1434215030 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14343
- if (!mbox)
14344
- return -ENOMEM;
15031
+ if (!mbox) {
15032
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15033
+ "6428 Failed allocating mailbox cmd buffer."
15034
+ " EQ delay was not set.\n");
15035
+ return;
15036
+ }
1434515037 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
1434615038 sizeof(struct lpfc_sli4_cfg_mhdr));
1434715039 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
....@@ -14350,65 +15042,42 @@
1435015042 eq_delay = &mbox->u.mqe.un.eq_delay;
1435115043
1435215044 /* Calculate delay multiplier from maximum interrupt per second */
14353
- result = imax / phba->io_channel_irqs;
14354
- if (result > LPFC_DMULT_CONST || result == 0)
14355
- dmult = 0;
14356
- else
14357
- dmult = LPFC_DMULT_CONST/result - 1;
15045
+ dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15046
+ if (dmult)
15047
+ dmult--;
1435815048 if (dmult > LPFC_DMULT_MAX)
1435915049 dmult = LPFC_DMULT_MAX;
1436015050
14361
- cnt = 0;
14362
- for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
14363
- eq = phba->sli4_hba.hba_eq[qidx];
15051
+ for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15052
+ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
1436415053 if (!eq)
1436515054 continue;
14366
- eq->q_mode = imax;
15055
+ eq->q_mode = usdelay;
1436715056 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
1436815057 eq_delay->u.request.eq[cnt].phase = 0;
1436915058 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14370
- cnt++;
1437115059
14372
- /* q_mode is only used for auto_imax */
14373
- if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14374
- /* Use EQ Delay Register method for q_mode */
14375
-
14376
- /* Convert for EQ Delay register */
14377
- val = phba->cfg_fcp_imax;
14378
- if (val) {
14379
- /* First, interrupts per sec per EQ */
14380
- val = phba->cfg_fcp_imax /
14381
- phba->io_channel_irqs;
14382
-
14383
- /* us delay between each interrupt */
14384
- val = LPFC_SEC_TO_USEC / val;
14385
- }
14386
- eq->q_mode = val;
14387
- } else {
14388
- eq->q_mode = imax;
14389
- }
14390
-
14391
- if (cnt >= numq)
15060
+ if (++cnt >= numq)
1439215061 break;
1439315062 }
1439415063 eq_delay->u.request.num_eq = cnt;
1439515064
1439615065 mbox->vport = phba->pport;
1439715066 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14398
- mbox->context1 = NULL;
15067
+ mbox->ctx_buf = NULL;
15068
+ mbox->ctx_ndlp = NULL;
1439915069 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1440015070 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
1440115071 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1440215072 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1440315073 if (shdr_status || shdr_add_status || rc) {
14404
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15074
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1440515075 "2512 MODIFY_EQ_DELAY mailbox failed with "
1440615076 "status x%x add_status x%x, mbx status x%x\n",
1440715077 shdr_status, shdr_add_status, rc);
14408
- status = -ENXIO;
1440915078 }
1441015079 mempool_free(mbox, phba->mbox_mem_pool);
14411
- return status;
15080
+ return;
1441215081 }
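For the conversion above, usdelay is scaled by LPFC_DMULT_CONST over LPFC_SEC_TO_USEC, decremented when non-zero, and clamped to LPFC_DMULT_MAX before being written into each eq_delay entry. A self-contained restatement of that math is sketched below; it is an illustration, not driver code, and assumes LPFC_SEC_TO_USEC is 1000000. If LPFC_DMULT_CONST still carries its long-standing value of 651042, a 16 microsecond request works out to 16 * 651042 / 1000000 = 10, which the decrement stores as 9.

/* Illustration only: microsecond delay to EQ delay multiplier (assumed constants). */
static uint32_t eq_usdelay_to_dmult(uint32_t usdelay)
{
	uint32_t dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;

	if (dmult)
		dmult--;		/* mirror the driver's decrement of non-zero values */
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;	/* clamp to the register maximum */
	return dmult;
}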
1441315082
1441415083 /**
....@@ -14479,12 +15148,14 @@
1447915148 dmult);
1448015149 switch (eq->entry_count) {
1448115150 default:
14482
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15151
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1448315152 "0360 Unsupported EQ count. (%d)\n",
1448415153 eq->entry_count);
14485
- if (eq->entry_count < 256)
14486
- return -EINVAL;
14487
- /* otherwise default to smallest count (drop through) */
15154
+ if (eq->entry_count < 256) {
15155
+ status = -EINVAL;
15156
+ goto out;
15157
+ }
15158
+ fallthrough; /* otherwise default to smallest count */
1448815159 case 256:
1448915160 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
1449015161 LPFC_EQ_CNT_256);
....@@ -14515,12 +15186,13 @@
1451515186 }
1451615187 mbox->vport = phba->pport;
1451715188 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14518
- mbox->context1 = NULL;
15189
+ mbox->ctx_buf = NULL;
15190
+ mbox->ctx_ndlp = NULL;
1451915191 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1452015192 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1452115193 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1452215194 if (shdr_status || shdr_add_status || rc) {
14523
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15195
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1452415196 "2500 EQ_CREATE mailbox failed with "
1452515197 "status x%x add_status x%x, mbx status x%x\n",
1452615198 shdr_status, shdr_add_status, rc);
....@@ -14532,11 +15204,20 @@
1453215204 if (eq->queue_id == 0xFFFF)
1453315205 status = -ENXIO;
1453415206 eq->host_index = 0;
14535
- eq->hba_index = 0;
14536
- eq->entry_repost = LPFC_EQ_REPOST;
14537
-
15207
+ eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15208
+ eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15209
+out:
1453815210 mempool_free(mbox, phba->mbox_mem_pool);
1453915211 return status;
15212
+}
15213
+
15214
+static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
15215
+{
15216
+ struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15217
+
15218
+ __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
15219
+
15220
+ return 1;
1454015221 }
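lpfc_cq_poll_hdler() above is only the softirq half of the irq_poll pattern: the hard-interrupt path is expected to defer CQ processing with irq_poll_sched(), after which the generic linux/irq_poll.h core invokes the registered handler with a budget. A hedged sketch of that hand-off, using the stock irq_poll API rather than the driver's actual fast path, is:

/* Sketch only: defer CQE processing from hard-irq context to the poll handler. */
static void cq_defer_to_irq_poll(struct lpfc_queue *cq)
{
	/* schedules the handler registered with irq_poll_init() for this CQ */
	irq_poll_sched(&cq->iop);
}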
1454115222
1454215223 /**
....@@ -14544,6 +15225,8 @@
1454415225 * @phba: HBA structure that indicates port to create a queue on.
1454515226 * @cq: The queue structure to use to create the completion queue.
1454615227 * @eq: The event queue to bind this completion queue to.
15228
+ * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15229
+ * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
1454715230 *
1454815231 * This function creates a completion queue, as detailed in @wq, on a port,
1454915232 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
....@@ -14570,13 +15253,10 @@
1457015253 int rc, length, status = 0;
1457115254 uint32_t shdr_status, shdr_add_status;
1457215255 union lpfc_sli4_cfg_shdr *shdr;
14573
- uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
1457415256
1457515257 /* sanity check on queue memory */
1457615258 if (!cq || !eq)
1457715259 return -ENODEV;
14578
- if (!phba->sli4_hba.pc_sli4_params.supported)
14579
- hw_page_size = cq->page_size;
1458015260
1458115261 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1458215262 if (!mbox)
....@@ -14617,9 +15297,9 @@
1461715297 LPFC_CQ_CNT_WORD7);
1461815298 break;
1461915299 }
14620
- /* Fall Thru */
15300
+ fallthrough;
1462115301 default:
14622
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15302
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1462315303 "0361 Unsupported CQ count: "
1462415304 "entry cnt %d sz %d pg cnt %d\n",
1462515305 cq->entry_count, cq->entry_size,
....@@ -14628,7 +15308,7 @@
1462815308 status = -EINVAL;
1462915309 goto out;
1463015310 }
14631
- /* otherwise default to smallest count (drop through) */
15311
+ fallthrough; /* otherwise default to smallest count */
1463215312 case 256:
1463315313 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
1463415314 LPFC_CQ_CNT_256);
....@@ -14655,7 +15335,7 @@
1465515335 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1465615336 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1465715337 if (shdr_status || shdr_add_status || rc) {
14658
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15338
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1465915339 "2501 CQ_CREATE mailbox failed with "
1466015340 "status x%x add_status x%x, mbx status x%x\n",
1466115341 shdr_status, shdr_add_status, rc);
....@@ -14674,10 +15354,15 @@
1467415354 cq->subtype = subtype;
1467515355 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
1467615356 cq->assoc_qid = eq->queue_id;
15357
+ cq->assoc_qp = eq;
1467715358 cq->host_index = 0;
14678
- cq->hba_index = 0;
14679
- cq->entry_repost = LPFC_CQ_REPOST;
15359
+ cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15360
+ cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
1468015361
15362
+ if (cq->queue_id > phba->sli4_hba.cq_max)
15363
+ phba->sli4_hba.cq_max = cq->queue_id;
15364
+
15365
+ irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
1468115366 out:
1468215367 mempool_free(mbox, phba->mbox_mem_pool);
1468315368 return status;
....@@ -14687,7 +15372,9 @@
1468715372 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
1468815373 * @phba: HBA structure that indicates port to create a queue on.
1468915374 * @cqp: The queue structure array to use to create the completion queues.
14690
- * @eqp: The event queue array to bind these completion queues to.
15375
+ * @hdwq: The hardware queue array with the EQ to bind completion queues to.
15376
+ * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15377
+ * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
1469115378 *
1469215379 * This function creates a set of completion queues to support MRQ
1469315380 * as detailed in @cqp, on a port,
....@@ -14707,7 +15394,8 @@
1470715394 **/
1470815395 int
1470915396 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14710
- struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
15397
+ struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15398
+ uint32_t subtype)
1471115399 {
1471215400 struct lpfc_queue *cq;
1471315401 struct lpfc_queue *eq;
....@@ -14722,7 +15410,7 @@
1472215410
1472315411 /* sanity check on queue memory */
1472415412 numcq = phba->cfg_nvmet_mrq;
14725
- if (!cqp || !eqp || !numcq)
15413
+ if (!cqp || !hdwq || !numcq)
1472615414 return -ENODEV;
1472715415
1472815416 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
....@@ -14736,7 +15424,7 @@
1473615424 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
1473715425 LPFC_SLI4_MBX_NEMBED);
1473815426 if (alloclen < length) {
14739
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15427
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1474015428 "3098 Allocated DMA memory size (%d) is "
1474115429 "less than the requested DMA memory size "
1474215430 "(%d)\n", alloclen, length);
....@@ -14749,7 +15437,7 @@
1474915437
1475015438 for (idx = 0; idx < numcq; idx++) {
1475115439 cq = cqp[idx];
14752
- eq = eqp[idx];
15440
+ eq = hdwq[idx].hba_eq;
1475315441 if (!cq || !eq) {
1475415442 status = -ENOMEM;
1475515443 goto out;
....@@ -14788,16 +15476,16 @@
1478815476 LPFC_CQ_CNT_WORD7);
1478915477 break;
1479015478 }
14791
- /* Fall Thru */
15479
+ fallthrough;
1479215480 default:
14793
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15481
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1479415482 "3118 Bad CQ count. (%d)\n",
1479515483 cq->entry_count);
1479615484 if (cq->entry_count < 256) {
1479715485 status = -EINVAL;
1479815486 goto out;
1479915487 }
14800
- /* otherwise default to smallest (drop thru) */
15488
+ fallthrough; /* otherwise default to smallest */
1480115489 case 256:
1480215490 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
1480315491 &cq_set->u.request, LPFC_CQ_CNT_256);
....@@ -14882,9 +15570,11 @@
1488215570 cq->type = type;
1488315571 cq->subtype = subtype;
1488415572 cq->assoc_qid = eq->queue_id;
15573
+ cq->assoc_qp = eq;
1488515574 cq->host_index = 0;
14886
- cq->hba_index = 0;
14887
- cq->entry_repost = LPFC_CQ_REPOST;
15575
+ cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15576
+ cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15577
+ cq->entry_count);
1488815578 cq->chann = idx;
1488915579
1489015580 rc = 0;
....@@ -14906,7 +15596,7 @@
1490615596 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1490715597 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1490815598 if (shdr_status || shdr_add_status || rc) {
14909
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15599
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1491015600 "3119 CQ_CREATE_SET mailbox failed with "
1491115601 "status x%x add_status x%x, mbx status x%x\n",
1491215602 shdr_status, shdr_add_status, rc);
....@@ -14922,6 +15612,8 @@
1492215612 for (idx = 0; idx < numcq; idx++) {
1492315613 cq = cqp[idx];
1492415614 cq->queue_id = rc + idx;
15615
+ if (cq->queue_id > phba->sli4_hba.cq_max)
15616
+ phba->sli4_hba.cq_max = cq->queue_id;
1492515617 }
1492615618
1492715619 out:
....@@ -15062,14 +15754,14 @@
1506215754 cq->queue_id);
1506315755 switch (mq->entry_count) {
1506415756 default:
15065
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15757
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1506615758 "0362 Unsupported MQ count. (%d)\n",
1506715759 mq->entry_count);
1506815760 if (mq->entry_count < 16) {
1506915761 status = -EINVAL;
1507015762 goto out;
1507115763 }
15072
- /* otherwise default to smallest count (drop through) */
15764
+ fallthrough; /* otherwise default to smallest count */
1507315765 case 16:
1507415766 bf_set(lpfc_mq_context_ring_size,
1507515767 &mq_create_ext->u.request.context,
....@@ -15118,7 +15810,7 @@
1511815810 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1511915811 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1512015812 if (shdr_status || shdr_add_status || rc) {
15121
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15813
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1512215814 "2502 MQ_CREATE mailbox failed with "
1512315815 "status x%x add_status x%x, mbx status x%x\n",
1512415816 shdr_status, shdr_add_status, rc);
....@@ -15134,7 +15826,6 @@
1513415826 mq->subtype = subtype;
1513515827 mq->host_index = 0;
1513615828 mq->hba_index = 0;
15137
- mq->entry_repost = LPFC_MQ_REPOST;
1513815829
1513915830 /* link the mq onto the parent cq child list */
1514015831 list_add_tail(&mq->list, &cq->child_list);
....@@ -15182,8 +15873,10 @@
1518215873 uint16_t pci_barset;
1518315874 uint8_t dpp_barset;
1518415875 uint32_t dpp_offset;
15185
- unsigned long pg_addr;
1518615876 uint8_t wq_create_version;
15877
+#ifdef CONFIG_X86
15878
+ unsigned long pg_addr;
15879
+#endif
1518715880
1518815881 /* sanity check on queue memory */
1518915882 if (!wq || !cq)
....@@ -15268,7 +15961,7 @@
1526815961 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1526915962 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1527015963 if (shdr_status || shdr_add_status || rc) {
15271
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15964
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1527215965 "2503 WQ_CREATE mailbox failed with "
1527315966 "status x%x add_status x%x, mbx status x%x\n",
1527415967 shdr_status, shdr_add_status, rc);
....@@ -15295,7 +15988,7 @@
1529515988 &wq_create->u.response);
1529615989 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
1529715990 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15298
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15991
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1529915992 "3265 WQ[%d] doorbell format "
1530015993 "not supported: x%x\n",
1530115994 wq->queue_id, wq->db_format);
....@@ -15307,7 +16000,7 @@
1530716000 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
1530816001 pci_barset);
1530916002 if (!bar_memmap_p) {
15310
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16003
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1531116004 "3263 WQ[%d] failed to memmap "
1531216005 "pci barset:x%x\n",
1531316006 wq->queue_id, pci_barset);
....@@ -15317,7 +16010,7 @@
1531716010 db_offset = wq_create->u.response.doorbell_offset;
1531816011 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
1531916012 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15320
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16013
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1532116014 "3252 WQ[%d] doorbell offset "
1532216015 "not supported: x%x\n",
1532316016 wq->queue_id, db_offset);
....@@ -15341,7 +16034,7 @@
1534116034 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
1534216035 pci_barset);
1534316036 if (!bar_memmap_p) {
15344
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16037
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1534516038 "3267 WQ[%d] failed to memmap "
1534616039 "pci barset:x%x\n",
1534716040 wq->queue_id, pci_barset);
....@@ -15357,7 +16050,7 @@
1535716050 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
1535816051 dpp_barset);
1535916052 if (!bar_memmap_p) {
15360
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16053
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1536116054 "3268 WQ[%d] failed to memmap "
1536216055 "pci barset:x%x\n",
1536316056 wq->queue_id, dpp_barset);
....@@ -15373,9 +16066,9 @@
1537316066 wq->queue_id, pci_barset, db_offset,
1537416067 wq->dpp_id, dpp_barset, dpp_offset);
1537516068
16069
+#ifdef CONFIG_X86
1537616070 /* Enable combined writes for DPP aperture */
1537716071 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15378
-#ifdef CONFIG_X86
1537916072 rc = set_memory_wc(pg_addr, 1);
1538016073 if (rc) {
1538116074 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
....@@ -15400,7 +16093,7 @@
1540016093 wq->subtype = subtype;
1540116094 wq->host_index = 0;
1540216095 wq->hba_index = 0;
15403
- wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
16096
+ wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
1540416097
1540516098 /* link the wq onto the parent cq child list */
1540616099 list_add_tail(&wq->list, &cq->child_list);
....@@ -15415,6 +16108,7 @@
1541516108 * @hrq: The queue structure to use to create the header receive queue.
1541616109 * @drq: The queue structure to use to create the data receive queue.
1541716110 * @cq: The completion queue to bind this work queue to.
16111
+ * @subtype: The subtype of the work queue indicating its functionality.
1541816112 *
1541916113 * This function creates a receive buffer queue pair, as detailed in @hrq and
1542016114 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
....@@ -15481,14 +16175,14 @@
1548116175 } else {
1548216176 switch (hrq->entry_count) {
1548316177 default:
15484
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16178
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1548516179 "2535 Unsupported RQ count. (%d)\n",
1548616180 hrq->entry_count);
1548716181 if (hrq->entry_count < 512) {
1548816182 status = -EINVAL;
1548916183 goto out;
1549016184 }
15491
- /* otherwise default to smallest count (drop through) */
16185
+ fallthrough; /* otherwise default to smallest count */
1549216186 case 512:
1549316187 bf_set(lpfc_rq_context_rqe_count,
1549416188 &rq_create->u.request.context,
....@@ -15532,7 +16226,7 @@
1553216226 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1553316227 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1553416228 if (shdr_status || shdr_add_status || rc) {
15535
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16229
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1553616230 "2504 RQ_CREATE mailbox failed with "
1553716231 "status x%x add_status x%x, mbx status x%x\n",
1553816232 shdr_status, shdr_add_status, rc);
....@@ -15550,7 +16244,7 @@
1555016244 &rq_create->u.response);
1555116245 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
1555216246 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15553
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16247
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1555416248 "3262 RQ [%d] doorbell format not "
1555516249 "supported: x%x\n", hrq->queue_id,
1555616250 hrq->db_format);
....@@ -15562,7 +16256,7 @@
1556216256 &rq_create->u.response);
1556316257 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
1556416258 if (!bar_memmap_p) {
15565
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16259
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1556616260 "3269 RQ[%d] failed to memmap pci "
1556716261 "barset:x%x\n", hrq->queue_id,
1556816262 pci_barset);
....@@ -15573,7 +16267,7 @@
1557316267 db_offset = rq_create->u.response.doorbell_offset;
1557416268 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
1557516269 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15576
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16270
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1557716271 "3270 RQ[%d] doorbell offset not "
1557816272 "supported: x%x\n", hrq->queue_id,
1557916273 db_offset);
....@@ -15594,7 +16288,7 @@
1559416288 hrq->subtype = subtype;
1559516289 hrq->host_index = 0;
1559616290 hrq->hba_index = 0;
15597
- hrq->entry_repost = LPFC_RQ_REPOST;
16291
+ hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
1559816292
1559916293 /* now create the data queue */
1560016294 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
....@@ -15618,14 +16312,14 @@
1561816312 } else {
1561916313 switch (drq->entry_count) {
1562016314 default:
15621
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16315
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1562216316 "2536 Unsupported RQ count. (%d)\n",
1562316317 drq->entry_count);
1562416318 if (drq->entry_count < 512) {
1562516319 status = -EINVAL;
1562616320 goto out;
1562716321 }
15628
- /* otherwise default to smallest count (drop through) */
16322
+ fallthrough; /* otherwise default to smallest count */
1562916323 case 512:
1563016324 bf_set(lpfc_rq_context_rqe_count,
1563116325 &rq_create->u.request.context,
....@@ -15687,7 +16381,7 @@
1568716381 drq->subtype = subtype;
1568816382 drq->host_index = 0;
1568916383 drq->hba_index = 0;
15690
- drq->entry_repost = LPFC_RQ_REPOST;
16384
+ drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
1569116385
1569216386 /* link the header and data RQs onto the parent cq child list */
1569316387 list_add_tail(&hrq->list, &cq->child_list);
....@@ -15704,6 +16398,7 @@
1570416398 * @hrqp: The queue structure array to use to create the header receive queues.
1570516399 * @drqp: The queue structure array to use to create the data receive queues.
1570616400 * @cqp: The completion queue array to bind these receive queues to.
16401
+ * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
1570716402 *
1570816403 * This function creates a receive buffer queue pair, as detailed in @hrq and
1570916404 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
....@@ -15755,7 +16450,7 @@
1575516450 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
1575616451 LPFC_SLI4_MBX_NEMBED);
1575716452 if (alloclen < length) {
15758
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16453
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1575916454 "3099 Allocated DMA memory size (%d) is "
1576016455 "less than the requested DMA memory size "
1576116456 "(%d)\n", alloclen, length);
....@@ -15845,7 +16540,7 @@
1584516540 hrq->subtype = subtype;
1584616541 hrq->host_index = 0;
1584716542 hrq->hba_index = 0;
15848
- hrq->entry_repost = LPFC_RQ_REPOST;
16543
+ hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
1584916544
1585016545 drq->db_format = LPFC_DB_RING_FORMAT;
1585116546 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
....@@ -15854,7 +16549,7 @@
1585416549 drq->subtype = subtype;
1585516550 drq->host_index = 0;
1585616551 drq->hba_index = 0;
15857
- drq->entry_repost = LPFC_RQ_REPOST;
16552
+ drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
1585816553
1585916554 list_add_tail(&hrq->list, &cq->child_list);
1586016555 list_add_tail(&drq->list, &cq->child_list);
....@@ -15865,7 +16560,7 @@
1586516560 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1586616561 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1586716562 if (shdr_status || shdr_add_status || rc) {
15868
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16563
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1586916564 "3120 RQ_CREATE mailbox failed with "
1587016565 "status x%x add_status x%x, mbx status x%x\n",
1587116566 shdr_status, shdr_add_status, rc);
....@@ -15893,6 +16588,7 @@
1589316588
1589416589 /**
1589516590 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16591
+ * @phba: HBA structure that indicates port to destroy a queue on.
1589616592 * @eq: The queue structure associated with the queue to destroy.
1589716593 *
1589816594 * This function destroys a queue, as detailed in @eq by sending an mailbox
....@@ -15914,6 +16610,7 @@
1591416610 /* sanity check on queue memory */
1591516611 if (!eq)
1591616612 return -ENODEV;
16613
+
1591716614 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
1591816615 if (!mbox)
1591916616 return -ENOMEM;
....@@ -15934,7 +16631,7 @@
1593416631 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1593516632 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1593616633 if (shdr_status || shdr_add_status || rc) {
15937
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16634
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1593816635 "2505 EQ_DESTROY mailbox failed with "
1593916636 "status x%x add_status x%x, mbx status x%x\n",
1594016637 shdr_status, shdr_add_status, rc);
....@@ -15949,6 +16646,7 @@
1594916646
1595016647 /**
1595116648 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16649
+ * @phba: HBA structure that indicates port to destroy a queue on.
1595216650 * @cq: The queue structure associated with the queue to destroy.
1595316651 *
1595416652 * This function destroys a queue, as detailed in @cq by sending an mailbox
....@@ -15989,7 +16687,7 @@
1598916687 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1599016688 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1599116689 if (shdr_status || shdr_add_status || rc) {
15992
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16690
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1599316691 "2506 CQ_DESTROY mailbox failed with "
1599416692 "status x%x add_status x%x, mbx status x%x\n",
1599516693 shdr_status, shdr_add_status, rc);
....@@ -16003,7 +16701,8 @@
1600316701
1600416702 /**
1600516703 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16006
- * @qm: The queue structure associated with the queue to destroy.
16704
+ * @phba: HBA structure that indicates port to destroy a queue on.
16705
+ * @mq: The queue structure associated with the queue to destroy.
1600716706 *
1600816707 * This function destroys a queue, as detailed in @mq by sending an mailbox
1600916708 * command, specific to the type of queue, to the HBA.
....@@ -16043,7 +16742,7 @@
1604316742 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1604416743 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1604516744 if (shdr_status || shdr_add_status || rc) {
16046
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16745
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1604716746 "2507 MQ_DESTROY mailbox failed with "
1604816747 "status x%x add_status x%x, mbx status x%x\n",
1604916748 shdr_status, shdr_add_status, rc);
....@@ -16057,6 +16756,7 @@
1605716756
1605816757 /**
1605916758 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16759
+ * @phba: HBA structure that indicates port to destroy a queue on.
1606016760 * @wq: The queue structure associated with the queue to destroy.
1606116761 *
1606216762 * This function destroys a queue, as detailed in @wq by sending an mailbox
....@@ -16096,7 +16796,7 @@
1609616796 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1609716797 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1609816798 if (shdr_status || shdr_add_status || rc) {
16099
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16799
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1610016800 "2508 WQ_DESTROY mailbox failed with "
1610116801 "status x%x add_status x%x, mbx status x%x\n",
1610216802 shdr_status, shdr_add_status, rc);
....@@ -16112,7 +16812,9 @@
1611216812
1611316813 /**
1611416814 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16115
- * @rq: The queue structure associated with the queue to destroy.
16815
+ * @phba: HBA structure that indicates port to destroy a queue on.
16816
+ * @hrq: The queue structure associated with the queue to destroy.
16817
+ * @drq: The queue structure associated with the queue to destroy.
1611616818 *
1611716819 * This function destroys a queue, as detailed in @rq by sending an mailbox
1611816820 * command, specific to the type of queue, to the HBA.
....@@ -16153,12 +16855,11 @@
1615316855 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1615416856 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1615516857 if (shdr_status || shdr_add_status || rc) {
16156
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16858
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1615716859 "2509 RQ_DESTROY mailbox failed with "
1615816860 "status x%x add_status x%x, mbx status x%x\n",
1615916861 shdr_status, shdr_add_status, rc);
16160
- if (rc != MBX_TIMEOUT)
16161
- mempool_free(mbox, hrq->phba->mbox_mem_pool);
16862
+ mempool_free(mbox, hrq->phba->mbox_mem_pool);
1616216863 return -ENXIO;
1616316864 }
1616416865 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
....@@ -16169,7 +16870,7 @@
1616916870 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1617016871 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1617116872 if (shdr_status || shdr_add_status || rc) {
16172
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16873
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1617316874 "2510 RQ_DESTROY mailbox failed with "
1617416875 "status x%x add_status x%x, mbx status x%x\n",
1617516876 shdr_status, shdr_add_status, rc);
....@@ -16217,7 +16918,7 @@
1621716918 union lpfc_sli4_cfg_shdr *shdr;
1621816919
1621916920 if (xritag == NO_XRI) {
16220
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16921
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1622116922 "0364 Invalid param:\n");
1622216923 return -EINVAL;
1622316924 }
....@@ -16255,10 +16956,12 @@
1625516956 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
1625616957 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1625716958 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16258
- if (rc != MBX_TIMEOUT)
16959
+ if (!phba->sli4_hba.intr_enable)
16960
+ mempool_free(mbox, phba->mbox_mem_pool);
16961
+ else if (rc != MBX_TIMEOUT)
1625916962 mempool_free(mbox, phba->mbox_mem_pool);
1626016963 if (shdr_status || shdr_add_status || rc) {
16261
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16964
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1626216965 "2511 POST_SGL mailbox failed with "
1626316966 "status x%x add_status x%x, mbx status x%x\n",
1626416967 shdr_status, shdr_add_status, rc);
....@@ -16305,6 +17008,7 @@
1630517008 /**
1630617009 * lpfc_sli4_free_xri - Release an xri for reuse.
1630717010 * @phba: pointer to lpfc hba data structure.
17011
+ * @xri: xri to release.
1630817012 *
1630917013 * This routine is invoked to release an xri to the pool of
1631017014 * available rpis maintained by the driver.
....@@ -16320,6 +17024,7 @@
1632017024 /**
1632117025 * lpfc_sli4_free_xri - Release an xri for reuse.
1632217026 * @phba: pointer to lpfc hba data structure.
17027
+ * @xri: xri to release.
1632317028 *
1632417029 * This routine is invoked to release an xri to the pool of
1632517030 * available rpis maintained by the driver.
....@@ -16362,7 +17067,7 @@
1636217067 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
1636317068 * @phba: pointer to lpfc hba data structure.
1636417069 * @post_sgl_list: pointer to els sgl entry list.
16365
- * @count: number of els sgl entries on the list.
17070
+ * @post_cnt: number of els sgl entries on the list.
1636617071 *
1636717072 * This routine is invoked to post a block of driver's sgl pages to the
1636817073 * HBA using non-embedded mailbox command. No Lock is held. This routine
....@@ -16389,7 +17094,7 @@
1638917094 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
1639017095 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
1639117096 if (reqlen > SLI4_PAGE_SIZE) {
16392
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17097
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1639317098 "2559 Block sgl registration required DMA "
1639417099 "size (%d) greater than a page\n", reqlen);
1639517100 return -ENOMEM;
....@@ -16405,7 +17110,7 @@
1640517110 LPFC_SLI4_MBX_NEMBED);
1640617111
1640717112 if (alloclen < reqlen) {
16408
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17113
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1640917114 "0285 Allocated DMA memory size (%d) is "
1641017115 "less than the requested DMA memory "
1641117116 "size (%d)\n", alloclen, reqlen);
....@@ -16450,10 +17155,12 @@
1645017155 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
1645117156 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1645217157 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16453
- if (rc != MBX_TIMEOUT)
17158
+ if (!phba->sli4_hba.intr_enable)
17159
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
17160
+ else if (rc != MBX_TIMEOUT)
1645417161 lpfc_sli4_mbox_cmd_free(phba, mbox);
1645517162 if (shdr_status || shdr_add_status || rc) {
16456
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17163
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1645717164 "2513 POST_SGL_BLOCK mailbox command failed "
1645817165 "status x%x add_status x%x mbx status x%x\n",
1645917166 shdr_status, shdr_add_status, rc);
....@@ -16463,22 +17170,21 @@
1646317170 }
1646417171
1646517172 /**
16466
- * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
17173
+ * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
1646717174 * @phba: pointer to lpfc hba data structure.
16468
- * @sblist: pointer to scsi buffer list.
17175
+ * @nblist: pointer to nvme buffer list.
1646917176 * @count: number of scsi buffers on the list.
1647017177 *
1647117178 * This routine is invoked to post a block of @count scsi sgl pages from a
16472
- * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
17179
+ * SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
1647317180 * No Lock is held.
1647417181 *
1647517182 **/
16476
-int
16477
-lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
16478
- struct list_head *sblist,
16479
- int count)
17183
+static int
17184
+lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17185
+ int count)
1648017186 {
16481
- struct lpfc_scsi_buf *psb;
17187
+ struct lpfc_io_buf *lpfc_ncmd;
1648217188 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
1648317189 struct sgl_page_pairs *sgl_pg_pairs;
1648417190 void *viraddr;
....@@ -16496,25 +17202,25 @@
1649617202 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
1649717203 if (reqlen > SLI4_PAGE_SIZE) {
1649817204 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16499
- "0217 Block sgl registration required DMA "
17205
+ "6118 Block sgl registration required DMA "
1650017206 "size (%d) greater than a page\n", reqlen);
1650117207 return -ENOMEM;
1650217208 }
1650317209 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1650417210 if (!mbox) {
16505
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16506
- "0283 Failed to allocate mbox cmd memory\n");
17211
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17212
+ "6119 Failed to allocate mbox cmd memory\n");
1650717213 return -ENOMEM;
1650817214 }
1650917215
1651017216 /* Allocate DMA memory and set up the non-embedded mailbox command */
1651117217 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16512
- LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16513
- LPFC_SLI4_MBX_NEMBED);
17218
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17219
+ reqlen, LPFC_SLI4_MBX_NEMBED);
1651417220
1651517221 if (alloclen < reqlen) {
16516
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16517
- "2561 Allocated DMA memory size (%d) is "
17222
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17223
+ "6120 Allocated DMA memory size (%d) is "
1651817224 "less than the requested DMA memory "
1651917225 "size (%d)\n", alloclen, reqlen);
1652017226 lpfc_sli4_mbox_cmd_free(phba, mbox);
....@@ -16529,14 +17235,15 @@
1652917235 sgl_pg_pairs = &sgl->sgl_pg_pairs;
1653017236
1653117237 pg_pairs = 0;
16532
- list_for_each_entry(psb, sblist, list) {
17238
+ list_for_each_entry(lpfc_ncmd, nblist, list) {
1653317239 /* Set up the sge entry */
1653417240 sgl_pg_pairs->sgl_pg0_addr_lo =
16535
- cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
17241
+ cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
1653617242 sgl_pg_pairs->sgl_pg0_addr_hi =
16537
- cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
17243
+ cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
1653817244 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16539
- pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
17245
+ pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17246
+ SGL_PAGE_SIZE;
1654017247 else
1654117248 pdma_phys_bpl1 = 0;
1654217249 sgl_pg_pairs->sgl_pg1_addr_lo =
....@@ -16545,7 +17252,7 @@
1654517252 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
1654617253 /* Keep the first xritag on the list */
1654717254 if (pg_pairs == 0)
16548
- xritag_start = psb->cur_iocbq.sli4_xritag;
17255
+ xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
1654917256 sgl_pg_pairs++;
1655017257 pg_pairs++;
1655117258 }
....@@ -16554,25 +17261,156 @@
1655417261 /* Perform endian conversion if necessary */
1655517262 sgl->word0 = cpu_to_le32(sgl->word0);
1655617263
16557
- if (!phba->sli4_hba.intr_enable)
17264
+ if (!phba->sli4_hba.intr_enable) {
1655817265 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16559
- else {
17266
+ } else {
1656017267 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
1656117268 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
1656217269 }
16563
- shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17270
+ shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
1656417271 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1656517272 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16566
- if (rc != MBX_TIMEOUT)
17273
+ if (!phba->sli4_hba.intr_enable)
17274
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
17275
+ else if (rc != MBX_TIMEOUT)
1656717276 lpfc_sli4_mbox_cmd_free(phba, mbox);
1656817277 if (shdr_status || shdr_add_status || rc) {
16569
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16570
- "2564 POST_SGL_BLOCK mailbox command failed "
17278
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17279
+ "6125 POST_SGL_BLOCK mailbox command failed "
1657117280 "status x%x add_status x%x mbx status x%x\n",
1657217281 shdr_status, shdr_add_status, rc);
1657317282 rc = -ENXIO;
1657417283 }
1657517284 return rc;
17285
+}
17286
+
17287
+/**
17288
+ * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
17289
+ * @phba: pointer to lpfc hba data structure.
17290
+ * @post_nblist: pointer to the nvme buffer list.
17291
+ * @sb_count: number of nvme buffers.
17292
+ *
17293
+ * This routine walks a list of nvme buffers that was passed in. It attempts
17294
+ * to construct blocks of nvme buffer sgls which contains contiguous xris and
17295
+ * uses the non-embedded SGL block post mailbox commands to post to the port.
17296
+ * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
17297
+ * embedded SGL post mailbox command for posting. The @post_nblist passed in
17298
+ * must be local list, thus no lock is needed when manipulate the list.
17299
+ *
17300
+ * Returns: 0 = failure, non-zero number of successfully posted buffers.
17301
+ **/
17302
+int
17303
+lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17304
+ struct list_head *post_nblist, int sb_count)
17305
+{
17306
+ struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17307
+ int status, sgl_size;
17308
+ int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17309
+ dma_addr_t pdma_phys_sgl1;
17310
+ int last_xritag = NO_XRI;
17311
+ int cur_xritag;
17312
+ LIST_HEAD(prep_nblist);
17313
+ LIST_HEAD(blck_nblist);
17314
+ LIST_HEAD(nvme_nblist);
17315
+
17316
+ /* sanity check */
17317
+ if (sb_count <= 0)
17318
+ return -EINVAL;
17319
+
17320
+ sgl_size = phba->cfg_sg_dma_buf_size;
17321
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17322
+ list_del_init(&lpfc_ncmd->list);
17323
+ block_cnt++;
17324
+ if ((last_xritag != NO_XRI) &&
17325
+ (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17326
+ /* a hole in xri block, form a sgl posting block */
17327
+ list_splice_init(&prep_nblist, &blck_nblist);
17328
+ post_cnt = block_cnt - 1;
17329
+ /* prepare list for next posting block */
17330
+ list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17331
+ block_cnt = 1;
17332
+ } else {
17333
+ /* prepare list for next posting block */
17334
+ list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17335
+ /* enough sgls for non-embed sgl mbox command */
17336
+ if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17337
+ list_splice_init(&prep_nblist, &blck_nblist);
17338
+ post_cnt = block_cnt;
17339
+ block_cnt = 0;
17340
+ }
17341
+ }
17342
+ num_posting++;
17343
+ last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17344
+
17345
+ /* end of repost sgl list condition for NVME buffers */
17346
+ if (num_posting == sb_count) {
17347
+ if (post_cnt == 0) {
17348
+ /* last sgl posting block */
17349
+ list_splice_init(&prep_nblist, &blck_nblist);
17350
+ post_cnt = block_cnt;
17351
+ } else if (block_cnt == 1) {
17352
+ /* last single sgl with non-contiguous xri */
17353
+ if (sgl_size > SGL_PAGE_SIZE)
17354
+ pdma_phys_sgl1 =
17355
+ lpfc_ncmd->dma_phys_sgl +
17356
+ SGL_PAGE_SIZE;
17357
+ else
17358
+ pdma_phys_sgl1 = 0;
17359
+ cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17360
+ status = lpfc_sli4_post_sgl(
17361
+ phba, lpfc_ncmd->dma_phys_sgl,
17362
+ pdma_phys_sgl1, cur_xritag);
17363
+ if (status) {
17364
+ /* Post error. Buffer unavailable. */
17365
+ lpfc_ncmd->flags |=
17366
+ LPFC_SBUF_NOT_POSTED;
17367
+ } else {
17368
+ /* Post success. Buffer available. */
17369
+ lpfc_ncmd->flags &=
17370
+ ~LPFC_SBUF_NOT_POSTED;
17371
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
17372
+ num_posted++;
17373
+ }
17374
+ /* success, put on NVME buffer sgl list */
17375
+ list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17376
+ }
17377
+ }
17378
+
17379
+ /* continue until a nembed page worth of sgls */
17380
+ if (post_cnt == 0)
17381
+ continue;
17382
+
17383
+ /* post block of NVME buffer list sgls */
17384
+ status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17385
+ post_cnt);
17386
+
17387
+ /* don't reset xritag due to hole in xri block */
17388
+ if (block_cnt == 0)
17389
+ last_xritag = NO_XRI;
17390
+
17391
+ /* reset NVME buffer post count for next round of posting */
17392
+ post_cnt = 0;
17393
+
17394
+ /* put NVME buffers with posted sgls on the NVME buffer sgl list */
17395
+ while (!list_empty(&blck_nblist)) {
17396
+ list_remove_head(&blck_nblist, lpfc_ncmd,
17397
+ struct lpfc_io_buf, list);
17398
+ if (status) {
17399
+ /* Post error. Mark buffer unavailable. */
17400
+ lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17401
+ } else {
17402
+ /* Post success, Mark buffer available. */
17403
+ lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17404
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
17405
+ num_posted++;
17406
+ }
17407
+ list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17408
+ }
17409
+ }
17410
+ /* Push NVME buffers with sgl posted to the available list */
17411
+ lpfc_io_buf_replenish(phba, &nvme_nblist);
17412
+
17413
+ return num_posted;
1657617414 }
1657717415
1657817416 /**
....@@ -16607,7 +17445,6 @@
1660717445 case FC_RCTL_ELS_REP: /* extended link services reply */
1660817446 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
1660917447 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
16610
- case FC_RCTL_BA_NOP: /* basic link service NOP */
1661117448 case FC_RCTL_BA_ABTS: /* basic link service abort */
1661217449 case FC_RCTL_BA_RMC: /* remove connection */
1661317450 case FC_RCTL_BA_ACC: /* basic accept */
....@@ -16628,11 +17465,10 @@
1662817465 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
1662917466 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
1663017467 return lpfc_fc_frame_check(phba, fc_hdr);
17468
+ case FC_RCTL_BA_NOP: /* basic link service NOP */
1663117469 default:
1663217470 goto drop;
1663317471 }
16634
-
16635
-#define FC_TYPE_VENDOR_UNIQUE 0xFF
1663617472
1663717473 switch (fc_hdr->fh_type) {
1663817474 case FC_TYPE_BLS:
....@@ -16640,7 +17476,6 @@
1664017476 case FC_TYPE_FCP:
1664117477 case FC_TYPE_CT:
1664217478 case FC_TYPE_NVME:
16643
- case FC_TYPE_VENDOR_UNIQUE:
1664417479 break;
1664517480 case FC_TYPE_IP:
1664617481 case FC_TYPE_ILS:
....@@ -16687,6 +17522,7 @@
1668717522 * @phba: Pointer to the HBA structure to search for the vport on
1668817523 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
1668917524 * @fcfi: The FC Fabric ID that the frame came from
17525
+ * @did: Destination ID to match against
1669017526 *
1669117527 * This function searches the @phba for a vport that matches the content of the
1669217528 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
....@@ -16824,6 +17660,7 @@
1682417660
1682517661 /**
1682617662 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17663
+ * @vport: pointer to a virtual port
1682717664 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
1682817665 *
1682917666 * This function searches through the existing incomplete sequences that have
....@@ -17024,7 +17861,7 @@
1702417861
1702517862 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
1702617863 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17027
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17864
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1702817865 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
1702917866 rsp_iocbq->iocb.ulpStatus,
1703017867 rsp_iocbq->iocb.un.ulpWord[4]);
....@@ -17053,8 +17890,9 @@
1705317890
1705417891 /**
1705517892 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17056
- * @phba: Pointer to HBA context object.
17893
+ * @vport: pointer to a virtual port.
1705717894 * @fc_hdr: pointer to a FC frame header.
17895
+ * @aborted: was the partially assembled receive sequence successfully aborted
1705817896 *
1705917897 * This function sends a basic response to a previous unsol sequence abort
1706017898 * event after aborting the sequence handling.
....@@ -17123,7 +17961,6 @@
1712317961 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1712417962 ctiocb->context1 = lpfc_nlp_get(ndlp);
1712517963
17126
- ctiocb->iocb_cmpl = NULL;
1712717964 ctiocb->vport = phba->pport;
1712817965 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
1712917966 ctiocb->sli4_lxritag = NO_XRI;
....@@ -17187,7 +18024,7 @@
1718718024
1718818025 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1718918026 if (rc == IOCB_ERROR) {
17190
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
18027
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1719118028 "2925 Failed to issue CT ABTS RSP x%x on "
1719218029 "xri x%x, Data x%x\n",
1719318030 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
....@@ -17207,7 +18044,7 @@
1720718044 * receive sequence is only partially assembed by the driver, it shall abort
1720818045 * the partially assembled frames for the sequence. Otherwise, if the
1720918046 * unsolicited receive sequence has been completely assembled and passed to
17210
- * the Upper Layer Protocol (UPL), it then mark the per oxid status for the
18047
+ * the Upper Layer Protocol (ULP), it then marks the per oxid status for the
1721118048 * unsolicited sequence has been aborted. After that, it will issue a basic
1721218049 * accept to accept the abort.
1721318050 **/
....@@ -17294,7 +18131,7 @@
1729418131 /**
1729518132 * lpfc_prep_seq - Prep sequence for ULP processing
1729618133 * @vport: Pointer to the vport on which this sequence was received
17297
- * @dmabuf: pointer to a dmabuf that describes the FC sequence
18134
+ * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
1729818135 *
1729918136 * This function takes a sequence, described by a list of frames, and creates
1730018137 * a list of iocbq structures to describe the sequence. This iocbq list will be
....@@ -17437,7 +18274,7 @@
1743718274 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
1743818275 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
1743918276 if (!iocbq) {
17440
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18277
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1744118278 "2707 Ring %d handler: Failed to allocate "
1744218279 "iocb Rctl x%x Type x%x received\n",
1744318280 LPFC_ELS_RING,
....@@ -17447,12 +18284,14 @@
1744718284 if (!lpfc_complete_unsol_iocb(phba,
1744818285 phba->sli4_hba.els_wq->pring,
1744918286 iocbq, fc_hdr->fh_r_ctl,
17450
- fc_hdr->fh_type))
17451
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18287
+ fc_hdr->fh_type)) {
18288
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1745218289 "2540 Ring %d handler: unexpected Rctl "
1745318290 "x%x Type x%x received\n",
1745418291 LPFC_ELS_RING,
1745518292 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18293
+ lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
18294
+ }
1745618295
1745718296 /* Free iocb created in lpfc_prep_seq */
1745818297 list_for_each_entry_safe(curr_iocb, next_iocb,
....@@ -17473,6 +18312,7 @@
1747318312 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
1747418313 kfree(pcmd);
1747518314 lpfc_sli_release_iocbq(phba, cmdiocb);
18315
+ lpfc_drain_txq(phba);
1747618316 }
1747718317
1747818318 static void
....@@ -17486,14 +18326,23 @@
1748618326 struct lpfc_dmabuf *pcmd = NULL;
1748718327 uint32_t frame_len;
1748818328 int rc;
18329
+ unsigned long iflags;
1748918330
1749018331 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
1749118332 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
1749218333
1749318334 /* Send the received frame back */
1749418335 iocbq = lpfc_sli_get_iocbq(phba);
17495
- if (!iocbq)
17496
- goto exit;
18336
+ if (!iocbq) {
18337
+ /* Queue cq event and wakeup worker thread to process it */
18338
+ spin_lock_irqsave(&phba->hbalock, iflags);
18339
+ list_add_tail(&dmabuf->cq_event.list,
18340
+ &phba->sli4_hba.sp_queue_event);
18341
+ phba->hba_flag |= HBA_SP_QUEUE_EVT;
18342
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
18343
+ lpfc_worker_wake_up(phba);
18344
+ return;
18345
+ }
1749718346
1749818347 /* Allocate buffer for command payload */
1749918348 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
....@@ -17557,6 +18406,7 @@
1755718406 /**
1755818407 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
1755918408 * @phba: Pointer to HBA context object.
18409
+ * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
1756018410 *
1756118411 * This function is called with no lock held. This function processes all
1756218412 * the received buffers and gives it to upper layers when a received buffer
....@@ -17578,6 +18428,17 @@
1757818428 /* Process each received buffer */
1757918429 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
1758018430
18431
+ if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18432
+ fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18433
+ vport = phba->pport;
18434
+ /* Handle MDS Loopback frames */
18435
+ if (!(phba->pport->load_flag & FC_UNLOADING))
18436
+ lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18437
+ else
18438
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
18439
+ return;
18440
+ }
18441
+
1758118442 /* check to see if this a valid type of frame */
1758218443 if (lpfc_fc_frame_check(phba, fc_hdr)) {
1758318444 lpfc_in_buf_free(phba, &dmabuf->dbuf);
....@@ -17594,6 +18455,10 @@
1759418455
1759518456 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
1759618457 vport = phba->pport;
18458
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18459
+ "2023 MDS Loopback %d bytes\n",
18460
+ bf_get(lpfc_rcqe_length,
18461
+ &dmabuf->cq_event.cqe.rcqe_cmpl));
1759718462 /* Handle MDS Loopback frames */
1759818463 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
1759918464 return;
....@@ -17691,7 +18556,7 @@
1769118556
1769218557 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
1769318558 if (rc != MBX_SUCCESS) {
17694
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18559
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1769518560 "2008 Error %d posting all rpi "
1769618561 "headers\n", rc);
1769718562 rc = -EIO;
....@@ -17737,7 +18602,7 @@
1773718602 /* The port is notified of the header region via a mailbox command. */
1773818603 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1773918604 if (!mboxq) {
17740
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18605
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1774118606 "2001 Unable to allocate memory for issuing "
1774218607 "SLI_CONFIG_SPECIAL mailbox command\n");
1774318608 return -ENOMEM;
....@@ -17764,10 +18629,9 @@
1776418629 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
1776518630 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1776618631 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17767
- if (rc != MBX_TIMEOUT)
17768
- mempool_free(mboxq, phba->mbox_mem_pool);
18632
+ mempool_free(mboxq, phba->mbox_mem_pool);
1776918633 if (shdr_status || shdr_add_status || rc) {
17770
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18634
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1777118635 "2514 POST_RPI_HDR mailbox failed with "
1777218636 "status x%x add_status x%x, mbx status x%x\n",
1777318637 shdr_status, shdr_add_status, rc);
....@@ -17822,8 +18686,9 @@
1782218686 phba->sli4_hba.max_cfg_param.rpi_used++;
1782318687 phba->sli4_hba.rpi_count++;
1782418688 }
17825
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
17826
- "0001 rpi:%x max:%x lim:%x\n",
18689
+ lpfc_printf_log(phba, KERN_INFO,
18690
+ LOG_NODE | LOG_DISCOVERY,
18691
+ "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
1782718692 (int) rpi, max_rpi, rpi_limit);
1782818693
1782918694 /*
....@@ -17856,7 +18721,7 @@
1785618721 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
1785718722 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
1785818723 if (!rpi_hdr) {
17859
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18724
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1786018725 "2002 Error Could not grow rpi "
1786118726 "count\n");
1786218727 } else {
....@@ -17872,6 +18737,7 @@
1787218737 /**
1787318738 * lpfc_sli4_free_rpi - Release an rpi for reuse.
1787418739 * @phba: pointer to lpfc hba data structure.
18740
+ * @rpi: rpi to free
1787518741 *
1787618742 * This routine is invoked to release an rpi to the pool of
1787718743 * available rpis maintained by the driver.
....@@ -17889,12 +18755,18 @@
1788918755 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
1789018756 phba->sli4_hba.rpi_count--;
1789118757 phba->sli4_hba.max_cfg_param.rpi_used--;
18758
+ } else {
18759
+ lpfc_printf_log(phba, KERN_INFO,
18760
+ LOG_NODE | LOG_DISCOVERY,
18761
+ "2016 rpi %x not inuse\n",
18762
+ rpi);
1789218763 }
1789318764 }
1789418765
1789518766 /**
1789618767 * lpfc_sli4_free_rpi - Release an rpi for reuse.
1789718768 * @phba: pointer to lpfc hba data structure.
18769
+ * @rpi: rpi to free
1789818770 *
1789918771 * This routine is invoked to release an rpi to the pool of
1790018772 * available rpis maintained by the driver.
....@@ -17924,7 +18796,9 @@
1792418796
1792518797 /**
1792618798 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
17927
- * @phba: pointer to lpfc hba data structure.
18799
+ * @ndlp: pointer to lpfc nodelist data structure.
18800
+ * @cmpl: completion call-back.
18801
+ * @arg: data to load as MBox 'caller buffer information'
1792818802 *
1792918803 * This routine is invoked to remove the memory region that
1793018804 * provided rpi via a bitmask.
....@@ -17946,14 +18820,14 @@
1794618820 lpfc_resume_rpi(mboxq, ndlp);
1794718821 if (cmpl) {
1794818822 mboxq->mbox_cmpl = cmpl;
17949
- mboxq->context1 = arg;
17950
- mboxq->context2 = ndlp;
18823
+ mboxq->ctx_buf = arg;
18824
+ mboxq->ctx_ndlp = ndlp;
1795118825 } else
1795218826 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1795318827 mboxq->vport = ndlp->vport;
1795418828 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
1795518829 if (rc == MBX_NOT_FINISHED) {
17956
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18830
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1795718831 "2010 Resume RPI Mailbox failed "
1795818832 "status %d, mbxStatus x%x\n", rc,
1795918833 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
....@@ -17988,7 +18862,7 @@
1798818862 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
1798918863 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
1799018864 if (rc != MBX_SUCCESS) {
17991
- lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
18865
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1799218866 "2022 INIT VPI Mailbox failed "
1799318867 "status %d, mbxStatus x%x\n", rc,
1799418868 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
....@@ -18024,7 +18898,7 @@
1802418898
1802518899 if ((shdr_status || shdr_add_status) &&
1802618900 (shdr_status != STATUS_FCF_IN_USE))
18027
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18901
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1802818902 "2558 ADD_FCF_RECORD mailbox failed with "
1802918903 "status x%x add_status x%x\n",
1803018904 shdr_status, shdr_add_status);
....@@ -18054,7 +18928,7 @@
1805418928
1805518929 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1805618930 if (!mboxq) {
18057
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18931
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1805818932 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
1805918933 return -ENOMEM;
1806018934 }
....@@ -18067,7 +18941,7 @@
1806718941 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
1806818942 req_len, LPFC_SLI4_MBX_NEMBED);
1806918943 if (alloc_len < req_len) {
18070
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18944
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1807118945 "2523 Allocated DMA memory size (x%x) is "
1807218946 "less than the requested DMA memory "
1807318947 "size (x%x)\n", alloc_len, req_len);
....@@ -18100,7 +18974,7 @@
1810018974 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
1810118975 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
1810218976 if (rc == MBX_NOT_FINISHED) {
18103
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18977
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1810418978 "2515 ADD_FCF_RECORD mailbox failed with "
1810518979 "status 0x%x\n", rc);
1810618980 lpfc_sli4_mbox_cmd_free(phba, mboxq);
....@@ -18173,7 +19047,7 @@
1817319047 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
1817419048 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1817519049 if (!mboxq) {
18176
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19050
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1817719051 "2000 Failed to allocate mbox for "
1817819052 "READ_FCF cmd\n");
1817919053 error = -ENOMEM;
....@@ -18308,7 +19182,7 @@
1830819182
1830919183 /**
1831019184 * lpfc_check_next_fcf_pri_level
18311
- * phba pointer to the lpfc_hba struct for this port.
19185
+ * @phba: pointer to the lpfc_hba struct for this port.
1831219186 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
1831319187 * routine when the rr_bmask is empty. The FCF indices are put into the
1831419188 * rr_bmask based on their priority level. Starting from the highest priority
....@@ -18473,6 +19347,7 @@
1847319347 /**
1847419348 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
1847519349 * @phba: pointer to lpfc hba data structure.
19350
+ * @fcf_index: index into the FCF table to 'set'
1847619351 *
1847719352 * This routine sets the FCF record index in to the eligible bmask for
1847819353 * roundrobin failover search. It checks to make sure that the index
....@@ -18505,6 +19380,7 @@
1850519380 /**
1850619381 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
1850719382 * @phba: pointer to lpfc hba data structure.
19383
+ * @fcf_index: index into the FCF table to 'clear'
1850819384 *
1850919385 * This routine clears the FCF record index from the eligible bmask for
1851019386 * roundrobin failover search. It checks to make sure that the index
....@@ -18542,6 +19418,7 @@
1854219418 /**
1854319419 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
1854419420 * @phba: pointer to lpfc hba data structure.
19421
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
1854519422 *
1854619423 * This routine is the completion routine for the rediscover FCF table mailbox
1854719424 * command. If the mailbox command returned failure, it will try to stop the
....@@ -18616,7 +19493,7 @@
1861619493
1861719494 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1861819495 if (!mbox) {
18619
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19496
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1862019497 "2745 Failed to allocate mbox for "
1862119498 "requesting FCF rediscover.\n");
1862219499 return -ENOMEM;
....@@ -18684,14 +19561,14 @@
1868419561 LPFC_MBOXQ_t *pmb = NULL;
1868519562 MAILBOX_t *mb;
1868619563 uint32_t offset = 0;
18687
- int rc;
19564
+ int i, rc;
1868819565
1868919566 if (!rgn23_data)
1869019567 return 0;
1869119568
1869219569 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1869319570 if (!pmb) {
18694
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19571
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1869519572 "2600 failed to allocate mailbox memory\n");
1869619573 return 0;
1869719574 }
....@@ -18714,14 +19591,14 @@
1871419591 */
1871519592 if (mb->un.varDmp.word_cnt == 0)
1871619593 break;
18717
- if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
18718
- mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
1871919594
19595
+ i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
19596
+ if (offset + i > DMP_RGN23_SIZE)
19597
+ i = DMP_RGN23_SIZE - offset;
1872019598 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
18721
- rgn23_data + offset,
18722
- mb->un.varDmp.word_cnt);
18723
- offset += mb->un.varDmp.word_cnt;
18724
- } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19599
+ rgn23_data + offset, i);
19600
+ offset += i;
19601
+ } while (offset < DMP_RGN23_SIZE);
1872519602
1872619603 mempool_free(pmb, phba->mbox_mem_pool);
1872719604 return offset;
....@@ -18750,7 +19627,7 @@
1875019627
1875119628 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1875219629 if (!mboxq) {
18753
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19630
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1875419631 "3105 failed to allocate mailbox memory\n");
1875519632 return 0;
1875619633 }
....@@ -18758,7 +19635,7 @@
1875819635 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
1875919636 goto out;
1876019637 mqe = &mboxq->u.mqe;
18761
- mp = (struct lpfc_dmabuf *) mboxq->context1;
19638
+ mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
1876219639 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
1876319640 if (rc)
1876419641 goto out;
....@@ -18814,7 +19691,7 @@
1881419691
1881519692 /* Check the region signature first */
1881619693 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
18817
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19694
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1881819695 "2619 Config region 23 has bad signature\n");
1881919696 goto out;
1882019697 }
....@@ -18822,7 +19699,7 @@
1882219699
1882319700 /* Check the data structure version */
1882419701 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
18825
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19702
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1882619703 "2620 Config region 23 has bad version\n");
1882719704 goto out;
1882819705 }
....@@ -18903,11 +19780,11 @@
1890319780 struct lpfc_mbx_wr_object *wr_object;
1890419781 LPFC_MBOXQ_t *mbox;
1890519782 int rc = 0, i = 0;
18906
- uint32_t shdr_status, shdr_add_status;
19783
+ uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
1890719784 uint32_t mbox_tmo;
18908
- union lpfc_sli4_cfg_shdr *shdr;
1890919785 struct lpfc_dmabuf *dmabuf;
1891019786 uint32_t written = 0;
19787
+ bool check_change_status = false;
1891119788
1891219789 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1891319790 if (!mbox)
....@@ -18935,6 +19812,8 @@
1893519812 (size - written);
1893619813 written += (size - written);
1893719814 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19815
+ bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19816
+ check_change_status = true;
1893819817 } else {
1893919818 wr_object->u.request.bde[i].tus.f.bdeSize =
1894019819 SLI4_PAGE_SIZE;
....@@ -18951,13 +19830,55 @@
1895119830 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
1895219831 }
1895319832 /* The IOCTL status is embedded in the mailbox subheader. */
18954
- shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
18955
- shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18956
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18957
- if (rc != MBX_TIMEOUT)
19833
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
19834
+ &wr_object->header.cfg_shdr.response);
19835
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19836
+ &wr_object->header.cfg_shdr.response);
19837
+ if (check_change_status) {
19838
+ shdr_change_status = bf_get(lpfc_wr_object_change_status,
19839
+ &wr_object->u.response);
19840
+
19841
+ if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
19842
+ shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
19843
+ shdr_csf = bf_get(lpfc_wr_object_csf,
19844
+ &wr_object->u.response);
19845
+ if (shdr_csf)
19846
+ shdr_change_status =
19847
+ LPFC_CHANGE_STATUS_PCI_RESET;
19848
+ }
19849
+
19850
+ switch (shdr_change_status) {
19851
+ case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19852
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19853
+ "3198 Firmware write complete: System "
19854
+ "reboot required to instantiate\n");
19855
+ break;
19856
+ case (LPFC_CHANGE_STATUS_FW_RESET):
19857
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19858
+ "3199 Firmware write complete: Firmware"
19859
+ " reset required to instantiate\n");
19860
+ break;
19861
+ case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19862
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19863
+ "3200 Firmware write complete: Port "
19864
+ "Migration or PCI Reset required to "
19865
+ "instantiate\n");
19866
+ break;
19867
+ case (LPFC_CHANGE_STATUS_PCI_RESET):
19868
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19869
+ "3201 Firmware write complete: PCI "
19870
+ "Reset required to instantiate\n");
19871
+ break;
19872
+ default:
19873
+ break;
19874
+ }
19875
+ }
19876
+ if (!phba->sli4_hba.intr_enable)
19877
+ mempool_free(mbox, phba->mbox_mem_pool);
19878
+ else if (rc != MBX_TIMEOUT)
1895819879 mempool_free(mbox, phba->mbox_mem_pool);
1895919880 if (shdr_status || shdr_add_status || rc) {
18960
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19881
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1896119882 "3025 Write Object mailbox failed with "
1896219883 "status x%x add_status x%x, mbx status x%x\n",
1896319884 shdr_status, shdr_add_status, rc);
....@@ -19009,7 +19930,7 @@
1900919930 (mb->u.mb.mbxCommand == MBX_REG_VPI))
1901019931 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1901119932 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19012
- act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
19933
+ act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
1901319934 /* Put reference count for delayed processing */
1901419935 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
1901519936 /* Unregister the RPI when mailbox complete */
....@@ -19034,7 +19955,7 @@
1903419955
1903519956 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1903619957 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19037
- ndlp = (struct lpfc_nodelist *)mb->context2;
19958
+ ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
1903819959 /* Unregister the RPI when mailbox complete */
1903919960 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
1904019961 restart_loop = 1;
....@@ -19054,13 +19975,14 @@
1905419975 while (!list_empty(&mbox_cmd_list)) {
1905519976 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
1905619977 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19057
- mp = (struct lpfc_dmabuf *) (mb->context1);
19978
+ mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
1905819979 if (mp) {
1905919980 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
1906019981 kfree(mp);
1906119982 }
19062
- ndlp = (struct lpfc_nodelist *) mb->context2;
19063
- mb->context2 = NULL;
19983
+ mb->ctx_buf = NULL;
19984
+ ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19985
+ mb->ctx_ndlp = NULL;
1906419986 if (ndlp) {
1906519987 spin_lock(shost->host_lock);
1906619988 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
....@@ -19106,7 +20028,7 @@
1910620028
1910720029 if (phba->link_flag & LS_MDS_LOOPBACK) {
1910820030 /* MDS WQE are posted only to first WQ*/
19109
- wq = phba->sli4_hba.fcp_wq[0];
20031
+ wq = phba->sli4_hba.hdwq[0].io_wq;
1911020032 if (unlikely(!wq))
1911120033 return 0;
1911220034 pring = wq->pring;
....@@ -19136,7 +20058,7 @@
1913620058 piocbq = lpfc_sli_ringtx_get(phba, pring);
1913720059 if (!piocbq) {
1913820060 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19139
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
20061
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1914020062 "2823 txq empty and txq_cnt is %d\n ",
1914120063 txq_cnt);
1914220064 break;
....@@ -19165,7 +20087,7 @@
1916520087
1916620088 if (fail_msg) {
1916720089 /* Failed means we can't issue and need to cancel */
19168
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
20090
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1916920091 "2822 IOCB failed %s iotag 0x%x "
1917020092 "xri 0x%x\n",
1917120093 fail_msg,
....@@ -19186,7 +20108,7 @@
1918620108 /**
1918720109 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
1918820110 * @phba: Pointer to HBA context object.
19189
- * @pwqe: Pointer to command WQE.
20111
+ * @pwqeq: Pointer to command WQE.
1919020112 * @sglq: Pointer to the scatter gather queue object.
1919120113 *
1919220114 * This routine converts the bpl or bde that is in the WQE
....@@ -19311,15 +20233,15 @@
1931120233 /**
1931220234 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
1931320235 * @phba: Pointer to HBA context object.
19314
- * @ring_number: Base sli ring number
20236
+ * @qp: Pointer to HDW queue.
1931520237 * @pwqe: Pointer to command WQE.
1931620238 **/
1931720239 int
19318
-lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
20240
+lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
1931920241 struct lpfc_iocbq *pwqe)
1932020242 {
1932120243 union lpfc_wqe128 *wqe = &pwqe->wqe;
19322
- struct lpfc_nvmet_rcv_ctx *ctxp;
20244
+ struct lpfc_async_xchg_ctx *ctxp;
1932320245 struct lpfc_queue *wq;
1932420246 struct lpfc_sglq *sglq;
1932520247 struct lpfc_sli_ring *pring;
....@@ -19329,7 +20251,8 @@
1932920251 /* NVME_LS and NVME_LS ABTS requests. */
1933020252 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
1933120253 pring = phba->sli4_hba.nvmels_wq->pring;
19332
- spin_lock_irqsave(&pring->ring_lock, iflags);
20254
+ lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20255
+ qp, wq_access);
1933320256 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
1933420257 if (!sglq) {
1933520258 spin_unlock_irqrestore(&pring->ring_lock, iflags);
....@@ -19351,18 +20274,21 @@
1935120274
1935220275 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
1935320276 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20277
+
20278
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
1935420279 return 0;
1935520280 }
1935620281
1935720282 /* NVME_FCREQ and NVME_ABTS requests */
1935820283 if (pwqe->iocb_flag & LPFC_IO_NVME) {
1935920284 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19360
- pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
20285
+ wq = qp->io_wq;
20286
+ pring = wq->pring;
1936120287
19362
- spin_lock_irqsave(&pring->ring_lock, iflags);
19363
- wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
19364
- bf_set(wqe_cqid, &wqe->generic.wqe_com,
19365
- phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
20288
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20289
+
20290
+ lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20291
+ qp, wq_access);
1936620292 ret = lpfc_sli4_wq_put(wq, wqe);
1936720293 if (ret) {
1936820294 spin_unlock_irqrestore(&pring->ring_lock, iflags);
....@@ -19370,15 +20296,17 @@
1937020296 }
1937120297 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
1937220298 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20299
+
20300
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
1937320301 return 0;
1937420302 }
1937520303
1937620304 /* NVMET requests */
1937720305 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
1937820306 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
19379
- pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
20307
+ wq = qp->io_wq;
20308
+ pring = wq->pring;
1938020309
19381
- spin_lock_irqsave(&pring->ring_lock, iflags);
1938220310 ctxp = pwqe->context2;
1938320311 sglq = ctxp->ctxbuf->sglq;
1938420312 if (pwqe->sli4_xritag == NO_XRI) {
....@@ -19387,9 +20315,10 @@
1938720315 }
1938820316 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
1938920317 pwqe->sli4_xritag);
19390
- wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
19391
- bf_set(wqe_cqid, &wqe->generic.wqe_com,
19392
- phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
20318
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20319
+
20320
+ lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20321
+ qp, wq_access);
1939320322 ret = lpfc_sli4_wq_put(wq, wqe);
1939420323 if (ret) {
1939520324 spin_unlock_irqrestore(&pring->ring_lock, iflags);
....@@ -19397,7 +20326,945 @@
1939720326 }
1939820327 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
1939920328 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20329
+
20330
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
1940020331 return 0;
1940120332 }
1940220333 return WQE_ERROR;
1940320334 }
20335
+
20336
+#ifdef LPFC_MXP_STAT
20337
+/**
20338
+ * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
20339
+ * @phba: pointer to lpfc hba data structure.
20340
+ * @hwqid: belong to which HWQ.
20341
+ *
20342
+ * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
20343
+ * 15 seconds after a test case starts running.
20344
+ *
20345
+ * The user should call lpfc_debugfs_multixripools_write before running a test
20346
+ * case to clear stat_snapshot_taken. Then the user starts a test case. While
20347
+ * the test case is running, stat_snapshot_taken is incremented by 1 every time
20348
+ * this routine is called from the heartbeat timer. When stat_snapshot_taken is
20349
+ * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
20350
+ **/
20351
+void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20352
+{
20353
+ struct lpfc_sli4_hdw_queue *qp;
20354
+ struct lpfc_multixri_pool *multixri_pool;
20355
+ struct lpfc_pvt_pool *pvt_pool;
20356
+ struct lpfc_pbl_pool *pbl_pool;
20357
+ u32 txcmplq_cnt;
20358
+
20359
+ qp = &phba->sli4_hba.hdwq[hwqid];
20360
+ multixri_pool = qp->p_multixri_pool;
20361
+ if (!multixri_pool)
20362
+ return;
20363
+
20364
+ if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20365
+ pvt_pool = &qp->p_multixri_pool->pvt_pool;
20366
+ pbl_pool = &qp->p_multixri_pool->pbl_pool;
20367
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20368
+
20369
+ multixri_pool->stat_pbl_count = pbl_pool->count;
20370
+ multixri_pool->stat_pvt_count = pvt_pool->count;
20371
+ multixri_pool->stat_busy_count = txcmplq_cnt;
20372
+ }
20373
+
20374
+ multixri_pool->stat_snapshot_taken++;
20375
+}
20376
+#endif
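Editor's note: the kernel-doc above describes a sampling cadence rather than an algorithm: a counter is bumped on every heartbeat tick, and the pool counters are latched only on the tick where the counter reaches LPFC_MXP_SNAPSHOT_TAKEN. The following is a standalone sketch of that cadence only; the struct, function, and SNAPSHOT_TICK placeholder are illustrative and are not part of the lpfc driver.

#include <stdio.h>

#define SNAPSHOT_TICK 3 /* placeholder; the driver's threshold is LPFC_MXP_SNAPSHOT_TAKEN */

struct mxp_stat_model {
	unsigned int snapshot_taken;   /* ticks since the user cleared it */
	unsigned int pbl, pvt, busy;   /* counters latched at the snapshot tick */
};

/* Called once per heartbeat interval, mirroring the cadence of lpfc_snapshot_mxp(). */
static void heartbeat_tick(struct mxp_stat_model *s, unsigned int pbl_now,
			   unsigned int pvt_now, unsigned int busy_now)
{
	if (s->snapshot_taken == SNAPSHOT_TICK) {
		s->pbl = pbl_now;
		s->pvt = pvt_now;
		s->busy = busy_now;
	}
	s->snapshot_taken++;
}

int main(void)
{
	struct mxp_stat_model s = { 0 };
	unsigned int t;

	for (t = 0; t < 10; t++)   /* simulate successive heartbeat firings */
		heartbeat_tick(&s, 100 - t, t, 2 * t);
	printf("latched: pbl=%u pvt=%u busy=%u\n", s.pbl, s.pvt, s.busy);
	return 0;
}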
20377
+
20378
+/**
20379
+ * lpfc_adjust_pvt_pool_count - Adjust private pool count
20380
+ * @phba: pointer to lpfc hba data structure.
20381
+ * @hwqid: belong to which HWQ.
20382
+ *
20383
+ * This routine moves some XRIs from private to public pool when private pool
20384
+ * is not busy.
20385
+ **/
20386
+void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20387
+{
20388
+ struct lpfc_multixri_pool *multixri_pool;
20389
+ u32 io_req_count;
20390
+ u32 prev_io_req_count;
20391
+
20392
+ multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20393
+ if (!multixri_pool)
20394
+ return;
20395
+ io_req_count = multixri_pool->io_req_count;
20396
+ prev_io_req_count = multixri_pool->prev_io_req_count;
20397
+
20398
+ if (prev_io_req_count != io_req_count) {
20399
+ /* Private pool is busy */
20400
+ multixri_pool->prev_io_req_count = io_req_count;
20401
+ } else {
20402
+ /* Private pool is not busy.
20403
+ * Move XRIs from private to public pool.
20404
+ */
20405
+ lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20406
+ }
20407
+}
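Editor's note: the idleness test above amounts to comparing the running request counter against the value remembered at the previous heartbeat. A minimal restatement of that check, with hypothetical names (not driver API):

/* Returns nonzero when no new requests arrived since the previous check;
 * in that case the caller would publish the private XRIs, as above. */
static int pvt_pool_is_idle(unsigned int *prev_io_req_count,
			    unsigned int io_req_count)
{
	if (*prev_io_req_count != io_req_count) {
		*prev_io_req_count = io_req_count; /* remember the new level */
		return 0;                          /* busy since the last tick */
	}
	return 1;                                  /* idle */
}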
20408
+
20409
+/**
20410
+ * lpfc_adjust_high_watermark - Adjust high watermark
20411
+ * @phba: pointer to lpfc hba data structure.
20412
+ * @hwqid: belong to which HWQ.
20413
+ *
20414
+ * This routine sets the high watermark to the number of outstanding XRIs,
20415
+ * while making sure the new value stays between xri_limit/2 and xri_limit.
20416
+ **/
20417
+void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20418
+{
20419
+ u32 new_watermark;
20420
+ u32 watermark_max;
20421
+ u32 watermark_min;
20422
+ u32 xri_limit;
20423
+ u32 txcmplq_cnt;
20424
+ u32 abts_io_bufs;
20425
+ struct lpfc_multixri_pool *multixri_pool;
20426
+ struct lpfc_sli4_hdw_queue *qp;
20427
+
20428
+ qp = &phba->sli4_hba.hdwq[hwqid];
20429
+ multixri_pool = qp->p_multixri_pool;
20430
+ if (!multixri_pool)
20431
+ return;
20432
+ xri_limit = multixri_pool->xri_limit;
20433
+
20434
+ watermark_max = xri_limit;
20435
+ watermark_min = xri_limit / 2;
20436
+
20437
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20438
+ abts_io_bufs = qp->abts_scsi_io_bufs;
20439
+ abts_io_bufs += qp->abts_nvme_io_bufs;
20440
+
20441
+ new_watermark = txcmplq_cnt + abts_io_bufs;
20442
+ new_watermark = min(watermark_max, new_watermark);
20443
+ new_watermark = max(watermark_min, new_watermark);
20444
+ multixri_pool->pvt_pool.high_watermark = new_watermark;
20445
+
20446
+#ifdef LPFC_MXP_STAT
20447
+ multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20448
+ new_watermark);
20449
+#endif
20450
+}
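Editor's note: the update above is a clamp of the outstanding-I/O count (active commands plus aborted buffers) into the range [xri_limit/2, xri_limit]. A standalone restatement of just that arithmetic, with an illustrative helper name:

static unsigned int clamp_high_watermark(unsigned int txcmplq_cnt,
					 unsigned int abts_io_bufs,
					 unsigned int xri_limit)
{
	unsigned int wm = txcmplq_cnt + abts_io_bufs;

	if (wm > xri_limit)       /* never above the per-HWQ XRI limit */
		wm = xri_limit;
	if (wm < xri_limit / 2)   /* never below half of it */
		wm = xri_limit / 2;
	return wm;
}

For example, with xri_limit = 512 a lightly loaded queue (say 12 outstanding XRIs) gets the floor of 256, while a heavily loaded one saturates at 512.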
20451
+
20452
+/**
20453
+ * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20454
+ * @phba: pointer to lpfc hba data structure.
20455
+ * @hwqid: belong to which HWQ.
20456
+ *
20457
+ * This routine is called from the heartbeat timer when pvt_pool is idle.
20458
+ * All free XRIs are moved from the private to the public pool on hwqid in 2 steps.
20459
+ * The first step moves (all - low_watermark) XRIs.
20460
+ * The second step moves the rest of the XRIs.
20461
+ **/
20462
+void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20463
+{
20464
+ struct lpfc_pbl_pool *pbl_pool;
20465
+ struct lpfc_pvt_pool *pvt_pool;
20466
+ struct lpfc_sli4_hdw_queue *qp;
20467
+ struct lpfc_io_buf *lpfc_ncmd;
20468
+ struct lpfc_io_buf *lpfc_ncmd_next;
20469
+ unsigned long iflag;
20470
+ struct list_head tmp_list;
20471
+ u32 tmp_count;
20472
+
20473
+ qp = &phba->sli4_hba.hdwq[hwqid];
20474
+ pbl_pool = &qp->p_multixri_pool->pbl_pool;
20475
+ pvt_pool = &qp->p_multixri_pool->pvt_pool;
20476
+ tmp_count = 0;
20477
+
20478
+ lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20479
+ lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20480
+
20481
+ if (pvt_pool->count > pvt_pool->low_watermark) {
20482
+ /* Step 1: move (all - low_watermark) from pvt_pool
20483
+ * to pbl_pool
20484
+ */
20485
+
20486
+ /* Move low watermark of bufs from pvt_pool to tmp_list */
20487
+ INIT_LIST_HEAD(&tmp_list);
20488
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20489
+ &pvt_pool->list, list) {
20490
+ list_move_tail(&lpfc_ncmd->list, &tmp_list);
20491
+ tmp_count++;
20492
+ if (tmp_count >= pvt_pool->low_watermark)
20493
+ break;
20494
+ }
20495
+
20496
+ /* Move all bufs from pvt_pool to pbl_pool */
20497
+ list_splice_init(&pvt_pool->list, &pbl_pool->list);
20498
+
20499
+ /* Move all bufs from tmp_list to pvt_pool */
20500
+ list_splice(&tmp_list, &pvt_pool->list);
20501
+
20502
+ pbl_pool->count += (pvt_pool->count - tmp_count);
20503
+ pvt_pool->count = tmp_count;
20504
+ } else {
20505
+ /* Step 2: move the rest from pvt_pool to pbl_pool */
20506
+ list_splice_init(&pvt_pool->list, &pbl_pool->list);
20507
+ pbl_pool->count += pvt_pool->count;
20508
+ pvt_pool->count = 0;
20509
+ }
20510
+
20511
+ spin_unlock(&pvt_pool->lock);
20512
+ spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20513
+}
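Editor's note: at the level of counters, a single pass of the routine above keeps at most low_watermark XRIs private and publishes the surplus; a later pass publishes the remainder. A count-only model (no lists or locking), with hypothetical names:

static void publish_idle_xris(unsigned int *pvt_count, unsigned int *pbl_count,
			      unsigned int low_watermark)
{
	if (*pvt_count > low_watermark) {
		/* step 1: retain low_watermark XRIs, publish the surplus */
		*pbl_count += *pvt_count - low_watermark;
		*pvt_count = low_watermark;
	} else {
		/* step 2: publish whatever is left */
		*pbl_count += *pvt_count;
		*pvt_count = 0;
	}
}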
20514
+
20515
+/**
20516
+ * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20517
+ * @phba: pointer to lpfc hba data structure
20518
+ * @qp: pointer to HDW queue
20519
+ * @pbl_pool: specified public free XRI pool
20520
+ * @pvt_pool: specified private free XRI pool
20521
+ * @count: number of XRIs to move
20522
+ *
20523
+ * This routine tries to move some free common bufs from the specified pbl_pool
20524
+ * to the specified pvt_pool. It might move fewer than count XRIs if there are
20525
+ * not enough in the public pool.
20526
+ *
20527
+ * Return:
20528
+ * true - if XRIs are successfully moved from the specified pbl_pool to the
20529
+ * specified pvt_pool
20530
+ * false - if the specified pbl_pool is empty or locked by someone else
20531
+ **/
20532
+static bool
20533
+_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20534
+ struct lpfc_pbl_pool *pbl_pool,
20535
+ struct lpfc_pvt_pool *pvt_pool, u32 count)
20536
+{
20537
+ struct lpfc_io_buf *lpfc_ncmd;
20538
+ struct lpfc_io_buf *lpfc_ncmd_next;
20539
+ unsigned long iflag;
20540
+ int ret;
20541
+
20542
+ ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20543
+ if (ret) {
20544
+ if (pbl_pool->count) {
20545
+ /* Move a batch of XRIs from public to private pool */
20546
+ lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20547
+ list_for_each_entry_safe(lpfc_ncmd,
20548
+ lpfc_ncmd_next,
20549
+ &pbl_pool->list,
20550
+ list) {
20551
+ list_move_tail(&lpfc_ncmd->list,
20552
+ &pvt_pool->list);
20553
+ pvt_pool->count++;
20554
+ pbl_pool->count--;
20555
+ count--;
20556
+ if (count == 0)
20557
+ break;
20558
+ }
20559
+
20560
+ spin_unlock(&pvt_pool->lock);
20561
+ spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20562
+ return true;
20563
+ }
20564
+ spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20565
+ }
20566
+
20567
+ return false;
20568
+}
20569
+
20570
+/**
20571
+ * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20572
+ * @phba: pointer to lpfc hba data structure.
20573
+ * @hwqid: belong to which HWQ.
20574
+ * @count: number of XRIs to move
20575
+ *
20576
+ * This routine tries to find some free common bufs in one of the public pools
20577
+ * using a round-robin method. The search always starts from the local hwqid,
20578
+ * then the next HWQ found last time (rrb_next_hwqid). Once a public pool is
20579
+ * found, a batch of free common bufs is moved to the private pool on hwqid.
20580
+ * It might move fewer than count XRIs if there are not enough in the public pool.
20581
+ **/
20582
+void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20583
+{
20584
+ struct lpfc_multixri_pool *multixri_pool;
20585
+ struct lpfc_multixri_pool *next_multixri_pool;
20586
+ struct lpfc_pvt_pool *pvt_pool;
20587
+ struct lpfc_pbl_pool *pbl_pool;
20588
+ struct lpfc_sli4_hdw_queue *qp;
20589
+ u32 next_hwqid;
20590
+ u32 hwq_count;
20591
+ int ret;
20592
+
20593
+ qp = &phba->sli4_hba.hdwq[hwqid];
20594
+ multixri_pool = qp->p_multixri_pool;
20595
+ pvt_pool = &multixri_pool->pvt_pool;
20596
+ pbl_pool = &multixri_pool->pbl_pool;
20597
+
20598
+ /* Check if local pbl_pool is available */
20599
+ ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20600
+ if (ret) {
20601
+#ifdef LPFC_MXP_STAT
20602
+ multixri_pool->local_pbl_hit_count++;
20603
+#endif
20604
+ return;
20605
+ }
20606
+
20607
+ hwq_count = phba->cfg_hdw_queue;
20608
+
20609
+ /* Get the next hwqid which was found last time */
20610
+ next_hwqid = multixri_pool->rrb_next_hwqid;
20611
+
20612
+ do {
20613
+ /* Go to next hwq */
20614
+ next_hwqid = (next_hwqid + 1) % hwq_count;
20615
+
20616
+ next_multixri_pool =
20617
+ phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20618
+ pbl_pool = &next_multixri_pool->pbl_pool;
20619
+
20620
+ /* Check if the public free xri pool is available */
20621
+ ret = _lpfc_move_xri_pbl_to_pvt(
20622
+ phba, qp, pbl_pool, pvt_pool, count);
20623
+
20624
+ /* Exit while-loop if success or all hwqid are checked */
20625
+ } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20626
+
20627
+ /* Starting point for the next time */
20628
+ multixri_pool->rrb_next_hwqid = next_hwqid;
20629
+
20630
+ if (!ret) {
20631
+ /* stats: all public pools are empty*/
20632
+ multixri_pool->pbl_empty_count++;
20633
+ }
20634
+
20635
+#ifdef LPFC_MXP_STAT
20636
+ if (ret) {
20637
+ if (next_hwqid == hwqid)
20638
+ multixri_pool->local_pbl_hit_count++;
20639
+ else
20640
+ multixri_pool->other_pbl_hit_count++;
20641
+ }
20642
+#endif
20643
+}
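Editor's note: the search above is a plain round-robin walk: try the local public pool first, then advance a persistent cursor through the other HWQs until a pool yields buffers or a full lap has been made. A hedged sketch of that loop shape, where try_take() stands in for _lpfc_move_xri_pbl_to_pvt() and all names are illustrative:

/* Returns nonzero if some public pool yielded XRIs. *rrb_next persists
 * between calls so the next search resumes where this one stopped. */
static int refill_from_public_pools(unsigned int hwqid, unsigned int hwq_count,
				    unsigned int *rrb_next,
				    int (*try_take)(unsigned int from_hwq))
{
	unsigned int start = *rrb_next;
	unsigned int next = *rrb_next;
	int ok;

	if (try_take(hwqid))                     /* local pool first */
		return 1;

	do {
		next = (next + 1) % hwq_count;   /* advance the cursor */
		ok = try_take(next);
	} while (!ok && next != start);          /* stop after one full lap */

	*rrb_next = next;                        /* remember the stop point */
	return ok;
}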
20644
+
20645
+/**
20646
+ * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20647
+ * @phba: pointer to lpfc hba data structure.
20648
+ * @hwqid: belong to which HWQ.
20649
+ *
20650
+ * This routine gets a batch of XRIs from pbl_pool if pvt_pool is below the
20651
+ * low watermark.
20652
+ **/
20653
+void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20654
+{
20655
+ struct lpfc_multixri_pool *multixri_pool;
20656
+ struct lpfc_pvt_pool *pvt_pool;
20657
+
20658
+ multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20659
+ pvt_pool = &multixri_pool->pvt_pool;
20660
+
20661
+ if (pvt_pool->count < pvt_pool->low_watermark)
20662
+ lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20663
+}
20664
+
20665
+/**
20666
+ * lpfc_release_io_buf - Return one IO buf back to free pool
20667
+ * @phba: pointer to lpfc hba data structure.
20668
+ * @lpfc_ncmd: IO buf to be returned.
20669
+ * @qp: belong to which HWQ.
20670
+ *
20671
+ * This routine returns one IO buf back to free pool. If this is an urgent IO,
20672
+ * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
20673
+ * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
20674
+ * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
20675
+ * lpfc_io_buf_list_put.
20676
+ **/
20677
+void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20678
+ struct lpfc_sli4_hdw_queue *qp)
20679
+{
20680
+ unsigned long iflag;
20681
+ struct lpfc_pbl_pool *pbl_pool;
20682
+ struct lpfc_pvt_pool *pvt_pool;
20683
+ struct lpfc_epd_pool *epd_pool;
20684
+ u32 txcmplq_cnt;
20685
+ u32 xri_owned;
20686
+ u32 xri_limit;
20687
+ u32 abts_io_bufs;
20688
+
20689
+ /* MUST zero fields if buffer is reused by another protocol */
20690
+ lpfc_ncmd->nvmeCmd = NULL;
20691
+ lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20692
+ lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20693
+
20694
+ if (phba->cfg_xpsgl && !phba->nvmet_support &&
20695
+ !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20696
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20697
+
20698
+ if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20699
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20700
+
20701
+ if (phba->cfg_xri_rebalancing) {
20702
+ if (lpfc_ncmd->expedite) {
20703
+ /* Return to expedite pool */
20704
+ epd_pool = &phba->epd_pool;
20705
+ spin_lock_irqsave(&epd_pool->lock, iflag);
20706
+ list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20707
+ epd_pool->count++;
20708
+ spin_unlock_irqrestore(&epd_pool->lock, iflag);
20709
+ return;
20710
+ }
20711
+
20712
+ /* Avoid invalid access if an IO sneaks in and is being rejected
20713
+ * just _after_ xri pools are destroyed in lpfc_offline.
20714
+ * Nothing much can be done at this point.
20715
+ */
20716
+ if (!qp->p_multixri_pool)
20717
+ return;
20718
+
20719
+ pbl_pool = &qp->p_multixri_pool->pbl_pool;
20720
+ pvt_pool = &qp->p_multixri_pool->pvt_pool;
20721
+
20722
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20723
+ abts_io_bufs = qp->abts_scsi_io_bufs;
20724
+ abts_io_bufs += qp->abts_nvme_io_bufs;
20725
+
20726
+ xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20727
+ xri_limit = qp->p_multixri_pool->xri_limit;
20728
+
20729
+#ifdef LPFC_MXP_STAT
20730
+ if (xri_owned <= xri_limit)
20731
+ qp->p_multixri_pool->below_limit_count++;
20732
+ else
20733
+ qp->p_multixri_pool->above_limit_count++;
20734
+#endif
20735
+
20736
+ /* XRI goes to either public or private free xri pool
20737
+ * based on watermark and xri_limit
20738
+ */
20739
+ if ((pvt_pool->count < pvt_pool->low_watermark) ||
20740
+ (xri_owned < xri_limit &&
20741
+ pvt_pool->count < pvt_pool->high_watermark)) {
20742
+ lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20743
+ qp, free_pvt_pool);
20744
+ list_add_tail(&lpfc_ncmd->list,
20745
+ &pvt_pool->list);
20746
+ pvt_pool->count++;
20747
+ spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20748
+ } else {
20749
+ lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20750
+ qp, free_pub_pool);
20751
+ list_add_tail(&lpfc_ncmd->list,
20752
+ &pbl_pool->list);
20753
+ pbl_pool->count++;
20754
+ spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20755
+ }
20756
+ } else {
20757
+ lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20758
+ qp, free_xri);
20759
+ list_add_tail(&lpfc_ncmd->list,
20760
+ &qp->lpfc_io_buf_list_put);
20761
+ qp->put_io_bufs++;
20762
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20763
+ iflag);
20764
+ }
20765
+}
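Editor's note: the routing decision above can be read as a small decision table: expedite buffers go back to the expedite pool, and otherwise the watermarks decide between the private and public pools (or the plain put list when rebalancing is off). A compact restatement with an illustrative enum and helper (not part of the driver):

enum xri_dest { DEST_PUT_LIST, DEST_EXPEDITE, DEST_PVT_POOL, DEST_PBL_POOL };

static enum xri_dest pick_free_destination(int xri_rebalancing, int expedite,
					   unsigned int pvt_count,
					   unsigned int low_wm, unsigned int high_wm,
					   unsigned int xri_owned, unsigned int xri_limit)
{
	if (!xri_rebalancing)
		return DEST_PUT_LIST;      /* classic per-HWQ put list */
	if (expedite)
		return DEST_EXPEDITE;      /* urgent I/O buffers */
	if (pvt_count < low_wm ||
	    (xri_owned < xri_limit && pvt_count < high_wm))
		return DEST_PVT_POOL;      /* keep the XRI local */
	return DEST_PBL_POOL;              /* otherwise share it */
}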
20766
+
20767
+/**
20768
+ * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20769
+ * @phba: pointer to lpfc hba data structure.
20770
+ * @qp: pointer to HDW queue
20771
+ * @pvt_pool: pointer to private pool data structure.
20772
+ * @ndlp: pointer to lpfc nodelist data structure.
20773
+ *
20774
+ * This routine tries to get one free IO buf from private pool.
20775
+ *
20776
+ * Return:
20777
+ * pointer to one free IO buf - if private pool is not empty
20778
+ * NULL - if private pool is empty
20779
+ **/
20780
+static struct lpfc_io_buf *
20781
+lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20782
+ struct lpfc_sli4_hdw_queue *qp,
20783
+ struct lpfc_pvt_pool *pvt_pool,
20784
+ struct lpfc_nodelist *ndlp)
20785
+{
20786
+ struct lpfc_io_buf *lpfc_ncmd;
20787
+ struct lpfc_io_buf *lpfc_ncmd_next;
20788
+ unsigned long iflag;
20789
+
20790
+ lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20791
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20792
+ &pvt_pool->list, list) {
20793
+ if (lpfc_test_rrq_active(
20794
+ phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20795
+ continue;
20796
+ list_del(&lpfc_ncmd->list);
20797
+ pvt_pool->count--;
20798
+ spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20799
+ return lpfc_ncmd;
20800
+ }
20801
+ spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20802
+
20803
+ return NULL;
20804
+}
20805
+
20806
+/**
20807
+ * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20808
+ * @phba: pointer to lpfc hba data structure.
20809
+ *
20810
+ * This routine tries to get one free IO buf from expedite pool.
20811
+ *
20812
+ * Return:
20813
+ * pointer to one free IO buf - if expedite pool is not empty
20814
+ * NULL - if expedite pool is empty
20815
+ **/
20816
+static struct lpfc_io_buf *
20817
+lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20818
+{
20819
+ struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
20820
+ struct lpfc_io_buf *lpfc_ncmd_next;
20821
+ unsigned long iflag;
20822
+ struct lpfc_epd_pool *epd_pool;
20823
+
20824
+ epd_pool = &phba->epd_pool;
20825
+
20826
+ spin_lock_irqsave(&epd_pool->lock, iflag);
20827
+ if (epd_pool->count > 0) {
20828
+ list_for_each_entry_safe(iter, lpfc_ncmd_next,
20829
+ &epd_pool->list, list) {
20830
+ list_del(&iter->list);
20831
+ epd_pool->count--;
20832
+ lpfc_ncmd = iter;
20833
+ break;
20834
+ }
20835
+ }
20836
+ spin_unlock_irqrestore(&epd_pool->lock, iflag);
20837
+
20838
+ return lpfc_ncmd;
20839
+}
20840
+
20841
+/**
20842
+ * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
20843
+ * @phba: pointer to lpfc hba data structure.
20844
+ * @ndlp: pointer to lpfc nodelist data structure.
20845
+ * @hwqid: belong to which HWQ
20846
+ * @expedite: 1 means this request is urgent.
20847
+ *
20848
+ * This routine will do the following actions and then return a pointer to
20849
+ * one free IO buf.
20850
+ *
20851
+ * 1. If the private free xri pool is empty, move some XRIs from public to
20852
+ * private pool.
20853
+ * 2. Get one XRI from private free xri pool.
20854
+ * 3. If we fail to get one from pvt_pool and this is an expedite request,
20855
+ * get one free xri from expedite pool.
20856
+ *
20857
+ * Note: ndlp is only used on SCSI side for RRQ testing.
20858
+ * The caller should pass NULL for ndlp on NVME side.
20859
+ *
20860
+ * Return:
20861
+ * pointer to one free IO buf - if private pool is not empty
20862
+ * NULL - if private pool is empty
20863
+ **/
20864
+static struct lpfc_io_buf *
20865
+lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20866
+ struct lpfc_nodelist *ndlp,
20867
+ int hwqid, int expedite)
20868
+{
20869
+ struct lpfc_sli4_hdw_queue *qp;
20870
+ struct lpfc_multixri_pool *multixri_pool;
20871
+ struct lpfc_pvt_pool *pvt_pool;
20872
+ struct lpfc_io_buf *lpfc_ncmd;
20873
+
20874
+ qp = &phba->sli4_hba.hdwq[hwqid];
20875
+ lpfc_ncmd = NULL;
20876
+ multixri_pool = qp->p_multixri_pool;
20877
+ pvt_pool = &multixri_pool->pvt_pool;
20878
+ multixri_pool->io_req_count++;
20879
+
20880
+ /* If pvt_pool is empty, move some XRIs from public to private pool */
20881
+ if (pvt_pool->count == 0)
20882
+ lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20883
+
20884
+ /* Get one XRI from private free xri pool */
20885
+ lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20886
+
20887
+ if (lpfc_ncmd) {
20888
+ lpfc_ncmd->hdwq = qp;
20889
+ lpfc_ncmd->hdwq_no = hwqid;
20890
+ } else if (expedite) {
20891
+ /* If we fail to get one from pvt_pool and this is an expedite
20892
+ * request, get one free xri from expedite pool.
20893
+ */
20894
+ lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20895
+ }
20896
+
20897
+ return lpfc_ncmd;
20898
+}
20899
+
20900
+static inline struct lpfc_io_buf *
20901
+lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20902
+{
20903
+ struct lpfc_sli4_hdw_queue *qp;
20904
+ struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20905
+
20906
+ qp = &phba->sli4_hba.hdwq[idx];
20907
+ list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20908
+ &qp->lpfc_io_buf_list_get, list) {
20909
+ if (lpfc_test_rrq_active(phba, ndlp,
20910
+ lpfc_cmd->cur_iocbq.sli4_lxritag))
20911
+ continue;
20912
+
20913
+ if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20914
+ continue;
20915
+
20916
+ list_del_init(&lpfc_cmd->list);
20917
+ qp->get_io_bufs--;
20918
+ lpfc_cmd->hdwq = qp;
20919
+ lpfc_cmd->hdwq_no = idx;
20920
+ return lpfc_cmd;
20921
+ }
20922
+ return NULL;
20923
+}
20924
+
20925
+/**
20926
+ * lpfc_get_io_buf - Get one IO buffer from free pool
20927
+ * @phba: The HBA for which this call is being executed.
20928
+ * @ndlp: pointer to lpfc nodelist data structure.
20929
+ * @hwqid: belong to which HWQ
20930
+ * @expedite: 1 means this request is urgent.
20931
+ *
20932
+ * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
20933
+ * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
20934
+ * it removes an IO buffer from the head of the @hdwq io_buf_list and returns it to the caller.
20935
+ *
20936
+ * Note: ndlp is only used on SCSI side for RRQ testing.
20937
+ * The caller should pass NULL for ndlp on NVME side.
20938
+ *
20939
+ * Return codes:
20940
+ * NULL - Error
20941
+ * Pointer to lpfc_io_buf - Success
20942
+ **/
20943
+struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20944
+ struct lpfc_nodelist *ndlp,
20945
+ u32 hwqid, int expedite)
20946
+{
20947
+ struct lpfc_sli4_hdw_queue *qp;
20948
+ unsigned long iflag;
20949
+ struct lpfc_io_buf *lpfc_cmd;
20950
+
20951
+ qp = &phba->sli4_hba.hdwq[hwqid];
20952
+ lpfc_cmd = NULL;
20953
+
20954
+ if (phba->cfg_xri_rebalancing)
20955
+ lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20956
+ phba, ndlp, hwqid, expedite);
20957
+ else {
20958
+ lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20959
+ qp, alloc_xri_get);
20960
+ if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20961
+ lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20962
+ if (!lpfc_cmd) {
20963
+ lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20964
+ qp, alloc_xri_put);
20965
+ list_splice(&qp->lpfc_io_buf_list_put,
20966
+ &qp->lpfc_io_buf_list_get);
20967
+ qp->get_io_bufs += qp->put_io_bufs;
20968
+ INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20969
+ qp->put_io_bufs = 0;
20970
+ spin_unlock(&qp->io_buf_list_put_lock);
20971
+ if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20972
+ expedite)
20973
+ lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20974
+ }
20975
+ spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
20976
+ }
20977
+
20978
+ return lpfc_cmd;
20979
+}
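Editor's note: when rebalancing is off, the allocation path above works off two lists per HWQ: consume from the get list, and when it cannot satisfy the request, fold the put list into it and retry once, always leaving a small reserve for expedited I/O. A simplified count-only model of that refill; names are hypothetical and the reserve stands in for LPFC_NVME_EXPEDITE_XRICNT:

/* Returns 1 if a buffer was taken, 0 if the queue is genuinely exhausted. */
static int take_io_buf(unsigned int *get_cnt, unsigned int *put_cnt,
		       unsigned int reserve, int expedite)
{
	if ((*get_cnt > reserve || expedite) && *get_cnt) {
		(*get_cnt)--;
		return 1;
	}
	/* refill: fold the "put" side into the "get" side and try again */
	*get_cnt += *put_cnt;
	*put_cnt = 0;
	if ((*get_cnt > reserve || expedite) && *get_cnt) {
		(*get_cnt)--;
		return 1;
	}
	return 0;
}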
20980
+
20981
+/**
20982
+ * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
20983
+ * @phba: The HBA for which this call is being executed.
20984
+ * @lpfc_buf: IO buf structure to append the SGL chunk
20985
+ *
20986
+ * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
20987
+ * and will allocate an SGL chunk if the pool is empty.
20988
+ *
20989
+ * Return codes:
20990
+ * NULL - Error
20991
+ * Pointer to sli4_hybrid_sgl - Success
20992
+ **/
20993
+struct sli4_hybrid_sgl *
20994
+lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20995
+{
20996
+ struct sli4_hybrid_sgl *list_entry = NULL;
20997
+ struct sli4_hybrid_sgl *tmp = NULL;
20998
+ struct sli4_hybrid_sgl *allocated_sgl = NULL;
20999
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21000
+ struct list_head *buf_list = &hdwq->sgl_list;
21001
+ unsigned long iflags;
21002
+
21003
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21004
+
21005
+ if (likely(!list_empty(buf_list))) {
21006
+ /* break off 1 chunk from the sgl_list */
21007
+ list_for_each_entry_safe(list_entry, tmp,
21008
+ buf_list, list_node) {
21009
+ list_move_tail(&list_entry->list_node,
21010
+ &lpfc_buf->dma_sgl_xtra_list);
21011
+ break;
21012
+ }
21013
+ } else {
21014
+ /* allocate more */
21015
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21016
+ tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21017
+ cpu_to_node(hdwq->io_wq->chann));
21018
+ if (!tmp) {
21019
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21020
+ "8353 error kmalloc memory for HDWQ "
21021
+ "%d %s\n",
21022
+ lpfc_buf->hdwq_no, __func__);
21023
+ return NULL;
21024
+ }
21025
+
21026
+ tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
21027
+ GFP_ATOMIC, &tmp->dma_phys_sgl);
21028
+ if (!tmp->dma_sgl) {
21029
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21030
+ "8354 error pool_alloc memory for HDWQ "
21031
+ "%d %s\n",
21032
+ lpfc_buf->hdwq_no, __func__);
21033
+ kfree(tmp);
21034
+ return NULL;
21035
+ }
21036
+
21037
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21038
+ list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
21039
+ }
21040
+
21041
+ allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
21042
+ struct sli4_hybrid_sgl,
21043
+ list_node);
21044
+
21045
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21046
+
21047
+ return allocated_sgl;
21048
+}
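Editor's note: this helper and the CMD/RSP variant further below follow the same borrow-or-allocate pattern: take a chunk from the per-HWQ pool if one is cached, otherwise drop the lock, allocate a fresh chunk, and attach it to the I/O buffer. A generic standalone model of that pattern using malloc in place of the DMA pool; the names here are illustrative only:

#include <stdlib.h>

struct chunk { struct chunk *next; };

/* Fast path reuses a pooled chunk; slow path allocates a new one. The driver
 * does the same with a spinlock-protected list and dma_pool_alloc(). */
static struct chunk *borrow_or_alloc(struct chunk **pool)
{
	struct chunk *c = *pool;

	if (c) {
		*pool = c->next;
		return c;
	}
	return calloc(1, sizeof(*c));
}

/* Returning a chunk simply grows the pool for the next caller. */
static void give_back(struct chunk **pool, struct chunk *c)
{
	c->next = *pool;
	*pool = c;
}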
21049
+
21050
+/**
21051
+ * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
21052
+ * @phba: The HBA for which this call is being executed.
21053
+ * @lpfc_buf: IO buf structure with the SGL chunk
21054
+ *
21055
+ * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
21056
+ *
21057
+ * Return codes:
21058
+ * 0 - Success
21059
+ * -EINVAL - Error
21060
+ **/
21061
+int
21062
+lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21063
+{
21064
+ int rc = 0;
21065
+ struct sli4_hybrid_sgl *list_entry = NULL;
21066
+ struct sli4_hybrid_sgl *tmp = NULL;
21067
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21068
+ struct list_head *buf_list = &hdwq->sgl_list;
21069
+ unsigned long iflags;
21070
+
21071
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21072
+
21073
+ if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
21074
+ list_for_each_entry_safe(list_entry, tmp,
21075
+ &lpfc_buf->dma_sgl_xtra_list,
21076
+ list_node) {
21077
+ list_move_tail(&list_entry->list_node,
21078
+ buf_list);
21079
+ }
21080
+ } else {
21081
+ rc = -EINVAL;
21082
+ }
21083
+
21084
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21085
+ return rc;
21086
+}
21087
+
21088
+/**
21089
+ * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
21090
+ * @phba: phba object
21091
+ * @hdwq: hdwq to cleanup sgl buff resources on
21092
+ *
21093
+ * This routine frees all SGL chunks of hdwq SGL chunk pool.
21094
+ *
21095
+ * Return codes:
21096
+ * None
21097
+ **/
21098
+void
21099
+lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
21100
+ struct lpfc_sli4_hdw_queue *hdwq)
21101
+{
21102
+ struct list_head *buf_list = &hdwq->sgl_list;
21103
+ struct sli4_hybrid_sgl *list_entry = NULL;
21104
+ struct sli4_hybrid_sgl *tmp = NULL;
21105
+ unsigned long iflags;
21106
+
21107
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21108
+
21109
+ /* Free sgl pool */
21110
+ list_for_each_entry_safe(list_entry, tmp,
21111
+ buf_list, list_node) {
21112
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
21113
+ list_entry->dma_sgl,
21114
+ list_entry->dma_phys_sgl);
21115
+ list_del(&list_entry->list_node);
21116
+ kfree(list_entry);
21117
+ }
21118
+
21119
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21120
+}
21121
+
21122
+/**
21123
+ * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
21124
+ * @phba: The HBA for which this call is being executed.
21125
+ * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
21126
+ *
21127
+ * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
21128
+ * and will allocate a CMD/RSP buffer if the pool is empty.
21129
+ *
21130
+ * Return codes:
21131
+ * NULL - Error
21132
+ * Pointer to fcp_cmd_rsp_buf - Success
21133
+ **/
21134
+struct fcp_cmd_rsp_buf *
21135
+lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21136
+ struct lpfc_io_buf *lpfc_buf)
21137
+{
21138
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
21139
+ struct fcp_cmd_rsp_buf *tmp = NULL;
21140
+ struct fcp_cmd_rsp_buf *allocated_buf = NULL;
21141
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21142
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21143
+ unsigned long iflags;
21144
+
21145
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21146
+
21147
+ if (likely(!list_empty(buf_list))) {
21148
+ /* break off 1 chunk from the list */
21149
+ list_for_each_entry_safe(list_entry, tmp,
21150
+ buf_list,
21151
+ list_node) {
21152
+ list_move_tail(&list_entry->list_node,
21153
+ &lpfc_buf->dma_cmd_rsp_list);
21154
+ break;
21155
+ }
21156
+ } else {
21157
+ /* allocate more */
21158
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21159
+ tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21160
+ cpu_to_node(hdwq->io_wq->chann));
21161
+ if (!tmp) {
21162
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21163
+ "8355 error kmalloc memory for HDWQ "
21164
+ "%d %s\n",
21165
+ lpfc_buf->hdwq_no, __func__);
21166
+ return NULL;
21167
+ }
21168
+
21169
+ tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
21170
+ GFP_ATOMIC,
21171
+ &tmp->fcp_cmd_rsp_dma_handle);
21172
+
21173
+ if (!tmp->fcp_cmnd) {
21174
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21175
+ "8356 error pool_alloc memory for HDWQ "
21176
+ "%d %s\n",
21177
+ lpfc_buf->hdwq_no, __func__);
21178
+ kfree(tmp);
21179
+ return NULL;
21180
+ }
21181
+
21182
+ tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
21183
+ sizeof(struct fcp_cmnd));
21184
+
21185
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21186
+ list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
21187
+ }
21188
+
21189
+ allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
21190
+ struct fcp_cmd_rsp_buf,
21191
+ list_node);
21192
+
21193
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21194
+
21195
+ return allocated_buf;
21196
+}
21197
+
21198
+/**
21199
+ * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
21200
+ * @phba: The HBA for which this call is being executed.
21201
+ * @lpfc_buf: IO buf structure with the CMD/RSP buf
21202
+ *
21203
+ * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
21204
+ *
21205
+ * Return codes:
21206
+ * 0 - Success
21207
+ * -EINVAL - Error
21208
+ **/
21209
+int
21210
+lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21211
+ struct lpfc_io_buf *lpfc_buf)
21212
+{
21213
+ int rc = 0;
21214
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
21215
+ struct fcp_cmd_rsp_buf *tmp = NULL;
21216
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21217
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21218
+ unsigned long iflags;
21219
+
21220
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21221
+
21222
+ if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
21223
+ list_for_each_entry_safe(list_entry, tmp,
21224
+ &lpfc_buf->dma_cmd_rsp_list,
21225
+ list_node) {
21226
+ list_move_tail(&list_entry->list_node,
21227
+ buf_list);
21228
+ }
21229
+ } else {
21230
+ rc = -EINVAL;
21231
+ }
21232
+
21233
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21234
+ return rc;
21235
+}
21236
+
21237
+/**
21238
+ * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
21239
+ * @phba: phba object
21240
+ * @hdwq: hdwq to cleanup cmd rsp buff resources on
21241
+ *
21242
+ * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
21243
+ *
21244
+ * Return codes:
21245
+ * None
21246
+ **/
21247
+void
21248
+lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21249
+ struct lpfc_sli4_hdw_queue *hdwq)
21250
+{
21251
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21252
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
21253
+ struct fcp_cmd_rsp_buf *tmp = NULL;
21254
+ unsigned long iflags;
21255
+
21256
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21257
+
21258
+ /* Free cmd_rsp buf pool */
21259
+ list_for_each_entry_safe(list_entry, tmp,
21260
+ buf_list,
21261
+ list_node) {
21262
+ dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
21263
+ list_entry->fcp_cmnd,
21264
+ list_entry->fcp_cmd_rsp_dma_handle);
21265
+ list_del(&list_entry->list_node);
21266
+ kfree(list_entry);
21267
+ }
21268
+
21269
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21270
+}