| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * QLogic FCoE Offload Driver |
|---|
| 3 | 4 | * Copyright (c) 2016-2018 Cavium Inc. |
|---|
| 4 | | - * |
|---|
| 5 | | - * This software is available under the terms of the GNU General Public License |
|---|
| 6 | | - * (GPL) Version 2, available from the file COPYING in the main directory of |
|---|
| 7 | | - * this source tree. |
|---|
| 8 | 5 | */ |
|---|
| 9 | 6 | #include <linux/init.h> |
|---|
| 10 | 7 | #include <linux/kernel.h> |
|---|
| .. | .. |
|---|
| 16 | 13 | #include <linux/interrupt.h> |
|---|
| 17 | 14 | #include <linux/list.h> |
|---|
| 18 | 15 | #include <linux/kthread.h> |
|---|
| 16 | +#include <linux/phylink.h> |
|---|
| 19 | 17 | #include <scsi/libfc.h> |
|---|
| 20 | 18 | #include <scsi/scsi_host.h> |
|---|
| 21 | 19 | #include <scsi/fc_frame.h> |
|---|
| .. | .. |
|---|
| 30 | 28 | |
|---|
| 31 | 29 | static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id); |
|---|
| 32 | 30 | static void qedf_remove(struct pci_dev *pdev); |
|---|
| 31 | +static void qedf_shutdown(struct pci_dev *pdev); |
|---|
| 32 | +static void qedf_schedule_recovery_handler(void *dev); |
|---|
| 33 | +static void qedf_recovery_handler(struct work_struct *work); |
|---|
| 33 | 34 | |
|---|
| 34 | 35 | /* |
|---|
| 35 | 36 | * Driver module parameters. |
|---|
| .. | .. |
|---|
| 40 | 41 | "remote ports (default 60)"); |
|---|
| 41 | 42 | |
|---|
| 42 | 43 | uint qedf_debug = QEDF_LOG_INFO; |
|---|
| 43 | | -module_param_named(debug, qedf_debug, uint, S_IRUGO); |
|---|
| 44 | +module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR); |
|---|
| 44 | 45 | MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging" |
|---|
| 45 | 46 | " mask"); |
|---|
| 46 | 47 | |
|---|
| .. | .. |
|---|
| 104 | 105 | MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module " |
|---|
| 105 | 106 | "during probe (0-3: 0 more verbose)."); |
|---|
| 106 | 107 | |
|---|
| 108 | +static bool qedf_enable_recovery = true; |
|---|
| 109 | +module_param_named(enable_recovery, qedf_enable_recovery, |
|---|
| 110 | + bool, S_IRUGO | S_IWUSR); |
|---|
| 111 | +MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware " |
|---|
| 112 | + "interface level errors 0 = Disabled, 1 = Enabled (Default: 1)."); |
|---|
| 113 | + |
|---|
| 107 | 114 | struct workqueue_struct *qedf_io_wq; |
|---|
| 108 | 115 | |
|---|
| 109 | 116 | static struct fcoe_percpu_s qedf_global; |
|---|
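The two hunks above make the `debug` mask writable at runtime (S_IRUGO | S_IWUSR) and add a new `enable_recovery` boolean knob. A minimal sketch of the same module-parameter pattern, using hypothetical `example_*` variables rather than the driver's own:

```c
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Exposed as /sys/module/<module>/parameters/debug, root-writable (0644). */
static uint example_debug;
module_param_named(debug, example_debug, uint, 0644);
MODULE_PARM_DESC(debug, "Debug mask, adjustable at runtime");

/* Boolean knob with the same permissions. */
static bool example_enable_recovery = true;
module_param_named(enable_recovery, example_enable_recovery, bool, 0644);
MODULE_PARM_DESC(enable_recovery, "0 = disabled, 1 = enabled (default 1)");
```

Because the write bit is set, `echo 1 > /sys/module/<module>/parameters/debug` changes the mask without reloading the module.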
| .. | .. |
|---|
| 113 | 120 | |
|---|
| 114 | 121 | void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id) |
|---|
| 115 | 122 | { |
|---|
| 116 | | - qedf->vlan_id = vlan_id; |
|---|
| 117 | | - qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT; |
|---|
| 118 | | - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x " |
|---|
| 119 | | - "prio=%d.\n", vlan_id, qedf->prio); |
|---|
| 123 | + int vlan_id_tmp = 0; |
|---|
| 124 | + |
|---|
| 125 | + vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT); |
|---|
| 126 | + qedf->vlan_id = vlan_id_tmp; |
|---|
| 127 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
|---|
| 128 | + "Setting vlan_id=0x%04x prio=%d.\n", |
|---|
| 129 | + vlan_id_tmp, qedf->prio); |
|---|
| 120 | 130 | } |
|---|
| 121 | 131 | |
|---|
| 122 | 132 | /* Returns true if we have a valid vlan, false otherwise */ |
|---|
| 123 | 133 | static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf) |
|---|
| 124 | 134 | { |
|---|
| 125 | | - int rc; |
|---|
| 126 | | - |
|---|
| 127 | | - if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { |
|---|
| 128 | | - QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n"); |
|---|
| 129 | | - return false; |
|---|
| 130 | | - } |
|---|
| 131 | 135 | |
|---|
| 132 | 136 | while (qedf->fipvlan_retries--) { |
|---|
| 133 | | - if (qedf->vlan_id > 0) |
|---|
| 137 | + /* This is to catch if link goes down during fipvlan retries */ |
|---|
| 138 | + if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { |
|---|
| 139 | + QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n"); |
|---|
| 140 | + return false; |
|---|
| 141 | + } |
|---|
| 142 | + |
|---|
| 143 | + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { |
|---|
| 144 | + QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n"); |
|---|
| 145 | + return false; |
|---|
| 146 | + } |
|---|
| 147 | + |
|---|
| 148 | + if (qedf->vlan_id > 0) { |
|---|
| 149 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
|---|
| 150 | + "vlan = 0x%x already set, calling ctlr_link_up.\n", |
|---|
| 151 | + qedf->vlan_id); |
|---|
| 152 | + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) |
|---|
| 153 | + fcoe_ctlr_link_up(&qedf->ctlr); |
|---|
| 134 | 154 | return true; |
|---|
| 155 | + } |
|---|
| 156 | + |
|---|
| 135 | 157 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
|---|
| 136 | 158 | "Retry %d.\n", qedf->fipvlan_retries); |
|---|
| 137 | 159 | init_completion(&qedf->fipvlan_compl); |
|---|
| 138 | 160 | qedf_fcoe_send_vlan_req(qedf); |
|---|
| 139 | | - rc = wait_for_completion_timeout(&qedf->fipvlan_compl, |
|---|
| 140 | | - 1 * HZ); |
|---|
| 141 | | - if (rc > 0) { |
|---|
| 142 | | - fcoe_ctlr_link_up(&qedf->ctlr); |
|---|
| 143 | | - return true; |
|---|
| 144 | | - } |
|---|
| 161 | + wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ); |
|---|
| 145 | 162 | } |
|---|
| 146 | 163 | |
|---|
| 147 | 164 | return false; |
|---|
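The rewritten `qedf_set_vlan_id()` above folds the FCoE priority into the stored VLAN tag (`vlan_id | (qedf->prio << VLAN_PRIO_SHIFT)`), and the retry loop now re-checks link state and unload status on every pass. A small sketch of the 802.1Q bit layout being built there, PCP in bits 15:13 and VID in bits 11:0, with a hypothetical helper name:

```c
#include <linux/types.h>
#include <linux/if_vlan.h>	/* VLAN_PRIO_SHIFT (13), VLAN_VID_MASK (0x0fff) */

/* Hypothetical helper: combine a 12-bit VLAN ID with a 3-bit priority. */
static u16 example_build_vlan_tci(u16 vid, u8 prio)
{
	return (vid & VLAN_VID_MASK) | ((u16)prio << VLAN_PRIO_SHIFT);
}
```

For example, VID 1002 (0x3EA) with priority 3 gives 0x3EA | (3 << 13) = 0x63EA, which is the kind of value the `Setting vlan_id=0x%04x` message prints.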
| .. | .. |
|---|
| 153 | 170 | container_of(work, struct qedf_ctx, link_update.work); |
|---|
| 154 | 171 | int rc; |
|---|
| 155 | 172 | |
|---|
| 156 | | - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n"); |
|---|
| 173 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n", |
|---|
| 174 | + atomic_read(&qedf->link_state)); |
|---|
| 157 | 175 | |
|---|
| 158 | 176 | if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { |
|---|
| 159 | 177 | rc = qedf_initiate_fipvlan_req(qedf); |
|---|
| 160 | 178 | if (rc) |
|---|
| 161 | 179 | return; |
|---|
| 180 | + |
|---|
| 181 | + if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { |
|---|
| 182 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
|---|
| 183 | + "Link is down, resetting vlan_id.\n"); |
|---|
| 184 | + qedf->vlan_id = 0; |
|---|
| 185 | + return; |
|---|
| 186 | + } |
|---|
| 187 | + |
|---|
| 162 | 188 | /* |
|---|
| 163 | 189 | * If we get here then we never received a response to our |
|---|
| 164 | 190 | * fip vlan request so set the vlan_id to the default and |
|---|
| .. | .. |
|---|
| 185 | 211 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
|---|
| 186 | 212 | "Calling fcoe_ctlr_link_down().\n"); |
|---|
| 187 | 213 | fcoe_ctlr_link_down(&qedf->ctlr); |
|---|
| 188 | | - qedf_wait_for_upload(qedf); |
|---|
| 214 | + if (qedf_wait_for_upload(qedf) == false) |
|---|
| 215 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 216 | + "Could not upload all sessions.\n"); |
|---|
| 189 | 217 | /* Reset the number of FIP VLAN retries */ |
|---|
| 190 | 218 | qedf->fipvlan_retries = qedf_fipvlan_retries; |
|---|
| 191 | 219 | } |
|---|
| .. | .. |
|---|
| 263 | 291 | else if (fc_frame_payload_op(fp) == ELS_LS_ACC) { |
|---|
| 264 | 292 | /* Set the source MAC we will use for FCoE traffic */ |
|---|
| 265 | 293 | qedf_set_data_src_addr(qedf, fp); |
|---|
| 294 | + qedf->flogi_pending = 0; |
|---|
| 266 | 295 | } |
|---|
| 267 | 296 | |
|---|
| 268 | 297 | /* Complete flogi_compl so we can proceed to sending ADISCs */ |
|---|
| .. | .. |
|---|
| 288 | 317 | */ |
|---|
| 289 | 318 | if (resp == fc_lport_flogi_resp) { |
|---|
| 290 | 319 | qedf->flogi_cnt++; |
|---|
| 320 | + if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) { |
|---|
| 321 | + schedule_delayed_work(&qedf->stag_work, 2); |
|---|
| 322 | + return NULL; |
|---|
| 323 | + } |
|---|
| 324 | + qedf->flogi_pending++; |
|---|
| 291 | 325 | return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp, |
|---|
| 292 | 326 | arg, timeout); |
|---|
| 293 | 327 | } |
|---|
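The hunk above caps how long the driver keeps re-sending FLOGI: once `flogi_pending` reaches `QEDF_FLOGI_RETRY_CNT` it schedules the delayed `stag_work` recovery work and returns NULL instead of issuing another attempt, and the counter is cleared again when an LS_ACC arrives in the earlier hunk. A generic sketch of that escalate-after-N-failures shape, with hypothetical names and threshold:

```c
#include <linux/workqueue.h>

#define EXAMPLE_RETRY_LIMIT	3	/* hypothetical threshold */

struct example_ctx {
	unsigned int pending_retries;
	struct delayed_work recovery_work;	/* heavier recovery path */
};

/* Returns true if the caller should send another attempt. */
static bool example_retry_or_recover(struct example_ctx *ctx)
{
	if (ctx->pending_retries >= EXAMPLE_RETRY_LIMIT) {
		/* Stop fast retries and let the recovery handler run soon. */
		schedule_delayed_work(&ctx->recovery_work, 2);
		return false;
	}
	ctx->pending_retries++;
	return true;
}
```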
| .. | .. |
|---|
| 302 | 336 | |
|---|
| 303 | 337 | lport = qedf->lport; |
|---|
| 304 | 338 | |
|---|
| 305 | | - if (!lport->tt.elsct_send) |
|---|
| 339 | + if (!lport->tt.elsct_send) { |
|---|
| 340 | + QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n"); |
|---|
| 306 | 341 | return -EINVAL; |
|---|
| 342 | + } |
|---|
| 307 | 343 | |
|---|
| 308 | 344 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); |
|---|
| 309 | 345 | if (!fp) { |
|---|
| .. | .. |
|---|
| 321 | 357 | return 0; |
|---|
| 322 | 358 | } |
|---|
| 323 | 359 | |
|---|
| 324 | | -struct qedf_tmp_rdata_item { |
|---|
| 325 | | - struct fc_rport_priv *rdata; |
|---|
| 326 | | - struct list_head list; |
|---|
| 327 | | -}; |
|---|
| 328 | | - |
|---|
| 329 | 360 | /* |
|---|
| 330 | 361 | * This function is called if link_down_tmo is in use. If we get a link up and |
|---|
| 331 | 362 | * link_down_tmo has not expired then use just FLOGI/ADISC to recover our |
|---|
| .. | .. |
|---|
| 335 | 366 | { |
|---|
| 336 | 367 | struct qedf_ctx *qedf = |
|---|
| 337 | 368 | container_of(work, struct qedf_ctx, link_recovery.work); |
|---|
| 338 | | - struct qedf_rport *fcport; |
|---|
| 369 | + struct fc_lport *lport = qedf->lport; |
|---|
| 339 | 370 | struct fc_rport_priv *rdata; |
|---|
| 340 | | - struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item; |
|---|
| 341 | 371 | bool rc; |
|---|
| 342 | 372 | int retries = 30; |
|---|
| 343 | 373 | int rval, i; |
|---|
| .. | .. |
|---|
| 404 | 434 | * Call lport->tt.rport_login which will cause libfc to send an |
|---|
| 405 | 435 | * ADISC since the rport is in state ready. |
|---|
| 406 | 436 | */ |
|---|
| 407 | | - rcu_read_lock(); |
|---|
| 408 | | - list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { |
|---|
| 409 | | - rdata = fcport->rdata; |
|---|
| 410 | | - if (rdata == NULL) |
|---|
| 411 | | - continue; |
|---|
| 412 | | - rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item), |
|---|
| 413 | | - GFP_ATOMIC); |
|---|
| 414 | | - if (!rdata_item) |
|---|
| 415 | | - continue; |
|---|
| 437 | + mutex_lock(&lport->disc.disc_mutex); |
|---|
| 438 | + list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { |
|---|
| 416 | 439 | if (kref_get_unless_zero(&rdata->kref)) { |
|---|
| 417 | | - rdata_item->rdata = rdata; |
|---|
| 418 | | - list_add(&rdata_item->list, &rdata_login_list); |
|---|
| 419 | | - } else |
|---|
| 420 | | - kfree(rdata_item); |
|---|
| 440 | + fc_rport_login(rdata); |
|---|
| 441 | + kref_put(&rdata->kref, fc_rport_destroy); |
|---|
| 442 | + } |
|---|
| 421 | 443 | } |
|---|
| 422 | | - rcu_read_unlock(); |
|---|
| 423 | | - /* |
|---|
| 424 | | - * Do the fc_rport_login outside of the rcu lock so we don't take a |
|---|
| 425 | | - * mutex in an atomic context. |
|---|
| 426 | | - */ |
|---|
| 427 | | - list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list, |
|---|
| 428 | | - list) { |
|---|
| 429 | | - list_del(&rdata_item->list); |
|---|
| 430 | | - fc_rport_login(rdata_item->rdata); |
|---|
| 431 | | - kref_put(&rdata_item->rdata->kref, fc_rport_destroy); |
|---|
| 432 | | - kfree(rdata_item); |
|---|
| 433 | | - } |
|---|
| 444 | + mutex_unlock(&lport->disc.disc_mutex); |
|---|
| 434 | 445 | } |
|---|
| 435 | 446 | |
|---|
| 436 | 447 | static void qedf_update_link_speed(struct qedf_ctx *qedf, |
|---|
| 437 | 448 | struct qed_link_output *link) |
|---|
| 438 | 449 | { |
|---|
| 450 | + __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); |
|---|
| 439 | 451 | struct fc_lport *lport = qedf->lport; |
|---|
| 440 | 452 | |
|---|
| 441 | 453 | lport->link_speed = FC_PORTSPEED_UNKNOWN; |
|---|
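The link-recovery rework above removes the temporary `qedf_tmp_rdata_item` list entirely: instead of collecting rports under `rcu_read_lock()` and logging them in afterwards, it walks `lport->disc.rports` while holding `disc_mutex`, pins each entry with `kref_get_unless_zero()`, calls `fc_rport_login()`, and drops the reference immediately. A generic sketch of that pin-while-iterating pattern (the `example_*` types are hypothetical, not libfc's):

```c
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_node {
	struct kref kref;
	struct list_head peers;
};

static void example_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_node, kref));
}

static void example_visit_all(struct list_head *head, struct mutex *lock,
			      void (*visit)(struct example_node *))
{
	struct example_node *n;

	mutex_lock(lock);
	list_for_each_entry(n, head, peers) {
		/* Skip entries that are already on their way out. */
		if (!kref_get_unless_zero(&n->kref))
			continue;
		visit(n);		/* may sleep: we hold a mutex, not RCU */
		kref_put(&n->kref, example_release);
	}
	mutex_unlock(lock);
}
```

Holding the mutex rather than only `rcu_read_lock()` is what allows `fc_rport_login()` (which takes a mutex itself) to be called inline, making the deferred-list workaround unnecessary.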
| .. | .. |
|---|
| 458 | 470 | case 100000: |
|---|
| 459 | 471 | lport->link_speed = FC_PORTSPEED_100GBIT; |
|---|
| 460 | 472 | break; |
|---|
| 473 | + case 20000: |
|---|
| 474 | + lport->link_speed = FC_PORTSPEED_20GBIT; |
|---|
| 475 | + break; |
|---|
| 461 | 476 | default: |
|---|
| 462 | 477 | lport->link_speed = FC_PORTSPEED_UNKNOWN; |
|---|
| 463 | 478 | break; |
|---|
| .. | .. |
|---|
| 467 | 482 | * Set supported link speed by querying the supported |
|---|
| 468 | 483 | * capabilities of the link. |
|---|
| 469 | 484 | */ |
|---|
| 470 | | - if (link->supported_caps & SUPPORTED_10000baseKR_Full) |
|---|
| 485 | + |
|---|
| 486 | + phylink_zero(sup_caps); |
|---|
| 487 | + phylink_set(sup_caps, 10000baseT_Full); |
|---|
| 488 | + phylink_set(sup_caps, 10000baseKX4_Full); |
|---|
| 489 | + phylink_set(sup_caps, 10000baseR_FEC); |
|---|
| 490 | + phylink_set(sup_caps, 10000baseCR_Full); |
|---|
| 491 | + phylink_set(sup_caps, 10000baseSR_Full); |
|---|
| 492 | + phylink_set(sup_caps, 10000baseLR_Full); |
|---|
| 493 | + phylink_set(sup_caps, 10000baseLRM_Full); |
|---|
| 494 | + phylink_set(sup_caps, 10000baseKR_Full); |
|---|
| 495 | + |
|---|
| 496 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
|---|
| 471 | 497 | lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; |
|---|
| 472 | | - if (link->supported_caps & SUPPORTED_25000baseKR_Full) |
|---|
| 498 | + |
|---|
| 499 | + phylink_zero(sup_caps); |
|---|
| 500 | + phylink_set(sup_caps, 25000baseKR_Full); |
|---|
| 501 | + phylink_set(sup_caps, 25000baseCR_Full); |
|---|
| 502 | + phylink_set(sup_caps, 25000baseSR_Full); |
|---|
| 503 | + |
|---|
| 504 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
|---|
| 473 | 505 | lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; |
|---|
| 474 | | - if (link->supported_caps & SUPPORTED_40000baseLR4_Full) |
|---|
| 506 | + |
|---|
| 507 | + phylink_zero(sup_caps); |
|---|
| 508 | + phylink_set(sup_caps, 40000baseLR4_Full); |
|---|
| 509 | + phylink_set(sup_caps, 40000baseKR4_Full); |
|---|
| 510 | + phylink_set(sup_caps, 40000baseCR4_Full); |
|---|
| 511 | + phylink_set(sup_caps, 40000baseSR4_Full); |
|---|
| 512 | + |
|---|
| 513 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
|---|
| 475 | 514 | lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; |
|---|
| 476 | | - if (link->supported_caps & SUPPORTED_50000baseKR2_Full) |
|---|
| 515 | + |
|---|
| 516 | + phylink_zero(sup_caps); |
|---|
| 517 | + phylink_set(sup_caps, 50000baseKR2_Full); |
|---|
| 518 | + phylink_set(sup_caps, 50000baseCR2_Full); |
|---|
| 519 | + phylink_set(sup_caps, 50000baseSR2_Full); |
|---|
| 520 | + |
|---|
| 521 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
|---|
| 477 | 522 | lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; |
|---|
| 478 | | - if (link->supported_caps & SUPPORTED_100000baseKR4_Full) |
|---|
| 523 | + |
|---|
| 524 | + phylink_zero(sup_caps); |
|---|
| 525 | + phylink_set(sup_caps, 100000baseKR4_Full); |
|---|
| 526 | + phylink_set(sup_caps, 100000baseSR4_Full); |
|---|
| 527 | + phylink_set(sup_caps, 100000baseCR4_Full); |
|---|
| 528 | + phylink_set(sup_caps, 100000baseLR4_ER4_Full); |
|---|
| 529 | + |
|---|
| 530 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
|---|
| 479 | 531 | lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; |
|---|
| 480 | | - fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; |
|---|
| 532 | + |
|---|
| 533 | + phylink_zero(sup_caps); |
|---|
| 534 | + phylink_set(sup_caps, 20000baseKR2_Full); |
|---|
| 535 | + |
|---|
| 536 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
|---|
| 537 | + lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; |
|---|
| 538 | + |
|---|
| 539 | + if (lport->host && lport->host->shost_data) |
|---|
| 540 | + fc_host_supported_speeds(lport->host) = |
|---|
| 541 | + lport->link_supported_speeds; |
|---|
| 542 | +} |
|---|
| 543 | + |
|---|
| 544 | +static void qedf_bw_update(void *dev) |
|---|
| 545 | +{ |
|---|
| 546 | + struct qedf_ctx *qedf = (struct qedf_ctx *)dev; |
|---|
| 547 | + struct qed_link_output link; |
|---|
| 548 | + |
|---|
| 549 | + /* Get the latest status of the link */ |
|---|
| 550 | + qed_ops->common->get_link(qedf->cdev, &link); |
|---|
| 551 | + |
|---|
| 552 | + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { |
|---|
| 553 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 554 | + "Ignore link update, driver getting unload.\n"); |
|---|
| 555 | + return; |
|---|
| 556 | + } |
|---|
| 557 | + |
|---|
| 558 | + if (link.link_up) { |
|---|
| 559 | + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) |
|---|
| 560 | + qedf_update_link_speed(qedf, &link); |
|---|
| 561 | + else |
|---|
| 562 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 563 | + "Ignore bw update, link is down.\n"); |
|---|
| 564 | + |
|---|
| 565 | + } else { |
|---|
| 566 | + QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n"); |
|---|
| 567 | + } |
|---|
| 481 | 568 | } |
|---|
| 482 | 569 | |
|---|
| 483 | 570 | static void qedf_link_update(void *dev, struct qed_link_output *link) |
|---|
| 484 | 571 | { |
|---|
| 485 | 572 | struct qedf_ctx *qedf = (struct qedf_ctx *)dev; |
|---|
| 573 | + |
|---|
| 574 | + /* |
|---|
| 575 | + * Prevent race where we're removing the module and we get link update |
|---|
| 576 | + * for qed. |
|---|
| 577 | + */ |
|---|
| 578 | + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { |
|---|
| 579 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 580 | + "Ignore link update, driver getting unload.\n"); |
|---|
| 581 | + return; |
|---|
| 582 | + } |
|---|
| 486 | 583 | |
|---|
| 487 | 584 | if (link->link_up) { |
|---|
| 488 | 585 | if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { |
|---|
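`qedf_update_link_speed()` above moves from the legacy `SUPPORTED_*` flag bits to ethtool link-mode bitmaps, which is why `<linux/phylink.h>` is now included: a mask is declared with `__ETHTOOL_DECLARE_LINK_MODE_MASK()`, filled with `phylink_set()`, and tested against the reported capabilities with `linkmode_intersects()`. A stand-alone sketch of that test for a single speed class (the function name is hypothetical):

```c
#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/phylink.h>

/* Does the reported capability bitmap advertise any 10G link mode? */
static bool example_supports_10g(const unsigned long *supported_caps)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask);

	phylink_zero(mask);
	phylink_set(mask, 10000baseT_Full);
	phylink_set(mask, 10000baseKR_Full);
	phylink_set(mask, 10000baseSR_Full);

	return linkmode_intersects(supported_caps, mask);
}
```

The driver repeats this block once per `FC_PORTSPEED_*` class, folding all ethtool variants of a speed (KR, CR, SR, LR and so on) into one supported-speeds bit for the FC transport.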
| .. | .. |
|---|
| 563 | 660 | tmp_prio = get->operational.app_prio.fcoe; |
|---|
| 564 | 661 | if (qedf_default_prio > -1) |
|---|
| 565 | 662 | qedf->prio = qedf_default_prio; |
|---|
| 566 | | - else if (tmp_prio < 0 || tmp_prio > 7) { |
|---|
| 663 | + else if (tmp_prio > 7) { |
|---|
| 567 | 664 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
|---|
| 568 | 665 | "FIP/FCoE prio %d out of range, setting to %d.\n", |
|---|
| 569 | 666 | tmp_prio, QEDF_DEFAULT_PRIO); |
|---|
| .. | .. |
|---|
| 596 | 693 | static struct qed_fcoe_cb_ops qedf_cb_ops = { |
|---|
| 597 | 694 | { |
|---|
| 598 | 695 | .link_update = qedf_link_update, |
|---|
| 696 | + .bw_update = qedf_bw_update, |
|---|
| 697 | + .schedule_recovery_handler = qedf_schedule_recovery_handler, |
|---|
| 599 | 698 | .dcbx_aen = qedf_dcbx_handler, |
|---|
| 600 | 699 | .get_generic_tlv_data = qedf_get_generic_tlv_data, |
|---|
| 601 | 700 | .get_protocol_tlv_data = qedf_get_protocol_tlv_data, |
|---|
| 701 | + .schedule_hw_err_handler = qedf_schedule_hw_err_handler, |
|---|
| 602 | 702 | } |
|---|
| 603 | 703 | }; |
|---|
| 604 | 704 | |
|---|
| .. | .. |
|---|
| 615 | 715 | static int qedf_eh_abort(struct scsi_cmnd *sc_cmd) |
|---|
| 616 | 716 | { |
|---|
| 617 | 717 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); |
|---|
| 618 | | - struct fc_rport_libfc_priv *rp = rport->dd_data; |
|---|
| 619 | | - struct qedf_rport *fcport; |
|---|
| 620 | 718 | struct fc_lport *lport; |
|---|
| 621 | 719 | struct qedf_ctx *qedf; |
|---|
| 622 | 720 | struct qedf_ioreq *io_req; |
|---|
| 721 | + struct fc_rport_libfc_priv *rp = rport->dd_data; |
|---|
| 722 | + struct fc_rport_priv *rdata; |
|---|
| 723 | + struct qedf_rport *fcport = NULL; |
|---|
| 623 | 724 | int rc = FAILED; |
|---|
| 725 | + int wait_count = 100; |
|---|
| 726 | + int refcount = 0; |
|---|
| 624 | 727 | int rval; |
|---|
| 625 | | - |
|---|
| 626 | | - if (fc_remote_port_chkready(rport)) { |
|---|
| 627 | | - QEDF_ERR(NULL, "rport not ready\n"); |
|---|
| 628 | | - goto out; |
|---|
| 629 | | - } |
|---|
| 728 | + int got_ref = 0; |
|---|
| 630 | 729 | |
|---|
| 631 | 730 | lport = shost_priv(sc_cmd->device->host); |
|---|
| 632 | 731 | qedf = (struct qedf_ctx *)lport_priv(lport); |
|---|
| 633 | 732 | |
|---|
| 634 | | - if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { |
|---|
| 635 | | - QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n"); |
|---|
| 636 | | - goto out; |
|---|
| 637 | | - } |
|---|
| 638 | | - |
|---|
| 733 | + /* rport and tgt are allocated together, so tgt should be non-NULL */ |
|---|
| 639 | 734 | fcport = (struct qedf_rport *)&rp[1]; |
|---|
| 640 | | - |
|---|
| 641 | | - io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr; |
|---|
| 642 | | - if (!io_req) { |
|---|
| 643 | | - QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n"); |
|---|
| 735 | + rdata = fcport->rdata; |
|---|
| 736 | + if (!rdata || !kref_get_unless_zero(&rdata->kref)) { |
|---|
| 737 | + QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd); |
|---|
| 644 | 738 | rc = SUCCESS; |
|---|
| 645 | 739 | goto out; |
|---|
| 646 | 740 | } |
|---|
| 647 | 741 | |
|---|
| 648 | | - QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x " |
|---|
| 649 | | - "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx); |
|---|
| 742 | + |
|---|
| 743 | + io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr; |
|---|
| 744 | + if (!io_req) { |
|---|
| 745 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 746 | + "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n", |
|---|
| 747 | + sc_cmd, sc_cmd->cmnd[0], |
|---|
| 748 | + rdata->ids.port_id); |
|---|
| 749 | + rc = SUCCESS; |
|---|
| 750 | + goto drop_rdata_kref; |
|---|
| 751 | + } |
|---|
| 752 | + |
|---|
| 753 | + rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */ |
|---|
| 754 | + if (rval) |
|---|
| 755 | + got_ref = 1; |
|---|
| 756 | + |
|---|
| 757 | + /* If we got a valid io_req, confirm it belongs to this sc_cmd. */ |
|---|
| 758 | + if (!rval || io_req->sc_cmd != sc_cmd) { |
|---|
| 759 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 760 | + "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n", |
|---|
| 761 | + io_req->sc_cmd, sc_cmd, rdata->ids.port_id); |
|---|
| 762 | + |
|---|
| 763 | + goto drop_rdata_kref; |
|---|
| 764 | + } |
|---|
| 765 | + |
|---|
| 766 | + if (fc_remote_port_chkready(rport)) { |
|---|
| 767 | + refcount = kref_read(&io_req->refcount); |
|---|
| 768 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 769 | + "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n", |
|---|
| 770 | + io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0], |
|---|
| 771 | + refcount, rdata->ids.port_id); |
|---|
| 772 | + |
|---|
| 773 | + goto drop_rdata_kref; |
|---|
| 774 | + } |
|---|
| 775 | + |
|---|
| 776 | + rc = fc_block_scsi_eh(sc_cmd); |
|---|
| 777 | + if (rc) |
|---|
| 778 | + goto drop_rdata_kref; |
|---|
| 779 | + |
|---|
| 780 | + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { |
|---|
| 781 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 782 | + "Connection uploading, xid=0x%x., port_id=%06x\n", |
|---|
| 783 | + io_req->xid, rdata->ids.port_id); |
|---|
| 784 | + while (io_req->sc_cmd && (wait_count != 0)) { |
|---|
| 785 | + msleep(100); |
|---|
| 786 | + wait_count--; |
|---|
| 787 | + } |
|---|
| 788 | + if (wait_count) { |
|---|
| 789 | + QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n"); |
|---|
| 790 | + rc = SUCCESS; |
|---|
| 791 | + } else { |
|---|
| 792 | + QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n"); |
|---|
| 793 | + rc = FAILED; |
|---|
| 794 | + } |
|---|
| 795 | + goto drop_rdata_kref; |
|---|
| 796 | + } |
|---|
| 797 | + |
|---|
| 798 | + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { |
|---|
| 799 | + QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n"); |
|---|
| 800 | + goto drop_rdata_kref; |
|---|
| 801 | + } |
|---|
| 802 | + |
|---|
| 803 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 804 | + "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n", |
|---|
| 805 | + io_req, sc_cmd, io_req->xid, io_req->fp_idx, |
|---|
| 806 | + rdata->ids.port_id); |
|---|
| 650 | 807 | |
|---|
| 651 | 808 | if (qedf->stop_io_on_error) { |
|---|
| 652 | 809 | qedf_stop_all_io(qedf); |
|---|
| 653 | 810 | rc = SUCCESS; |
|---|
| 654 | | - goto out; |
|---|
| 811 | + goto drop_rdata_kref; |
|---|
| 655 | 812 | } |
|---|
| 656 | 813 | |
|---|
| 657 | 814 | init_completion(&io_req->abts_done); |
|---|
| 658 | 815 | rval = qedf_initiate_abts(io_req, true); |
|---|
| 659 | 816 | if (rval) { |
|---|
| 660 | 817 | QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); |
|---|
| 661 | | - goto out; |
|---|
| 818 | + /* |
|---|
| 819 | + * If we fail to queue the ABTS then return this command to |
|---|
| 820 | + * the SCSI layer as it will own and free the xid |
|---|
| 821 | + */ |
|---|
| 822 | + rc = SUCCESS; |
|---|
| 823 | + qedf_scsi_done(qedf, io_req, DID_ERROR); |
|---|
| 824 | + goto drop_rdata_kref; |
|---|
| 662 | 825 | } |
|---|
| 663 | 826 | |
|---|
| 664 | 827 | wait_for_completion(&io_req->abts_done); |
|---|
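The reworked `qedf_eh_abort()` above is mostly reference-count plumbing: it pins the `fc_rport_priv` first, then the `qedf_ioreq`, verifies that the request still belongs to this `scsi_cmnd`, and unwinds through the `drop_rdata_kref`/`out` labels so every reference taken is dropped exactly once on every exit path. A stripped-down sketch of that acquire, validate, unwind shape, using a hypothetical `example_req` type:

```c
#include <linux/kref.h>
#include <linux/slab.h>

struct example_req {
	struct kref refcount;
	void *owner;		/* the command this request was issued for */
};

static void example_req_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_req, refcount));
}

/* Abort @req only if it is still live and still belongs to @owner. */
static int example_abort(struct example_req *req, void *owner)
{
	int rc = -ENODEV;

	if (!req || !kref_get_unless_zero(&req->refcount))
		return rc;	/* request already completed and freed */

	if (req->owner != owner)
		goto drop_ref;	/* recycled for a different command */

	/* ... issue the abort and wait for its completion here ... */
	rc = 0;

drop_ref:
	kref_put(&req->refcount, example_req_release);
	return rc;
}
```

The same discipline is what the `got_ref` flag and the final `kref_put(..., qedf_release_cmd)` in the hunk provide for the real I/O request.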
| .. | .. |
|---|
| 684 | 847 | QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n", |
|---|
| 685 | 848 | io_req->xid); |
|---|
| 686 | 849 | |
|---|
| 850 | +drop_rdata_kref: |
|---|
| 851 | + kref_put(&rdata->kref, fc_rport_destroy); |
|---|
| 687 | 852 | out: |
|---|
| 853 | + if (got_ref) |
|---|
| 854 | + kref_put(&io_req->refcount, qedf_release_cmd); |
|---|
| 688 | 855 | return rc; |
|---|
| 689 | 856 | } |
|---|
| 690 | 857 | |
|---|
| 691 | 858 | static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd) |
|---|
| 692 | 859 | { |
|---|
| 693 | | - QEDF_ERR(NULL, "TARGET RESET Issued..."); |
|---|
| 860 | + QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...", |
|---|
| 861 | + sc_cmd->device->host->host_no, sc_cmd->device->id, |
|---|
| 862 | + sc_cmd->device->lun); |
|---|
| 694 | 863 | return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); |
|---|
| 695 | 864 | } |
|---|
| 696 | 865 | |
|---|
| 697 | 866 | static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd) |
|---|
| 698 | 867 | { |
|---|
| 699 | | - QEDF_ERR(NULL, "LUN RESET Issued...\n"); |
|---|
| 868 | + QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ", |
|---|
| 869 | + sc_cmd->device->host->host_no, sc_cmd->device->id, |
|---|
| 870 | + sc_cmd->device->lun); |
|---|
| 700 | 871 | return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); |
|---|
| 701 | 872 | } |
|---|
| 702 | 873 | |
|---|
| 703 | | -void qedf_wait_for_upload(struct qedf_ctx *qedf) |
|---|
| 874 | +bool qedf_wait_for_upload(struct qedf_ctx *qedf) |
|---|
| 704 | 875 | { |
|---|
| 705 | | - while (1) { |
|---|
| 876 | + struct qedf_rport *fcport = NULL; |
|---|
| 877 | + int wait_cnt = 120; |
|---|
| 878 | + |
|---|
| 879 | + while (wait_cnt--) { |
|---|
| 706 | 880 | if (atomic_read(&qedf->num_offloads)) |
|---|
| 707 | | - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
|---|
| 708 | | - "Waiting for all uploads to complete.\n"); |
|---|
| 881 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
|---|
| 882 | + "Waiting for all uploads to complete num_offloads = 0x%x.\n", |
|---|
| 883 | + atomic_read(&qedf->num_offloads)); |
|---|
| 709 | 884 | else |
|---|
| 710 | | - break; |
|---|
| 885 | + return true; |
|---|
| 711 | 886 | msleep(500); |
|---|
| 712 | 887 | } |
|---|
| 888 | + |
|---|
| 889 | + rcu_read_lock(); |
|---|
| 890 | + list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { |
|---|
| 891 | + if (fcport && test_bit(QEDF_RPORT_SESSION_READY, |
|---|
| 892 | + &fcport->flags)) { |
|---|
| 893 | + if (fcport->rdata) |
|---|
| 894 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 895 | + "Waiting for fcport %p portid=%06x.\n", |
|---|
| 896 | + fcport, fcport->rdata->ids.port_id); |
|---|
| 897 | + } else { |
|---|
| 898 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 899 | + "Waiting for fcport %p.\n", fcport); |
|---|
| 900 | + } |
|---|
| 901 | + } |
|---|
| 902 | + rcu_read_unlock(); |
|---|
| 903 | + return false; |
|---|
| 904 | + |
|---|
| 713 | 905 | } |
|---|
| 714 | 906 | |
|---|
| 715 | 907 | /* Performs soft reset of qedf_ctx by simulating a link down/up */ |
|---|
| 716 | | -static void qedf_ctx_soft_reset(struct fc_lport *lport) |
|---|
| 908 | +void qedf_ctx_soft_reset(struct fc_lport *lport) |
|---|
| 717 | 909 | { |
|---|
| 718 | 910 | struct qedf_ctx *qedf; |
|---|
| 911 | + struct qed_link_output if_link; |
|---|
| 719 | 912 | |
|---|
| 720 | 913 | if (lport->vport) { |
|---|
| 721 | 914 | QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n"); |
|---|
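`qedf_wait_for_upload()` above turns an unbounded poll loop into a bounded one: 120 passes of 500 ms (about a minute), returning true once `num_offloads` drains and false otherwise, so callers can log which sessions never uploaded. A minimal sketch of that bounded-poll idiom, with a hypothetical counter:

```c
#include <linux/atomic.h>
#include <linux/delay.h>

/* Poll until @pending drains to zero, or give up after @passes * 500 ms. */
static bool example_wait_for_drain(atomic_t *pending, int passes)
{
	while (passes--) {
		if (!atomic_read(pending))
			return true;
		msleep(500);
	}
	return false;	/* caller decides how loudly to complain */
}
```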
| .. | .. |
|---|
| 724 | 917 | |
|---|
| 725 | 918 | qedf = lport_priv(lport); |
|---|
| 726 | 919 | |
|---|
| 920 | + qedf->flogi_pending = 0; |
|---|
| 727 | 921 | /* For host reset, essentially do a soft link up/down */ |
|---|
| 728 | 922 | atomic_set(&qedf->link_state, QEDF_LINK_DOWN); |
|---|
| 923 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
|---|
| 924 | + "Queuing link down work.\n"); |
|---|
| 729 | 925 | queue_delayed_work(qedf->link_update_wq, &qedf->link_update, |
|---|
| 730 | 926 | 0); |
|---|
| 731 | | - qedf_wait_for_upload(qedf); |
|---|
| 927 | + |
|---|
| 928 | + if (qedf_wait_for_upload(qedf) == false) { |
|---|
| 929 | + QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); |
|---|
| 930 | + WARN_ON(atomic_read(&qedf->num_offloads)); |
|---|
| 931 | + } |
|---|
| 932 | + |
|---|
| 933 | + /* Before setting link up query physical link state */ |
|---|
| 934 | + qed_ops->common->get_link(qedf->cdev, &if_link); |
|---|
| 935 | + /* Bail if the physical link is not up */ |
|---|
| 936 | + if (!if_link.link_up) { |
|---|
| 937 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
|---|
| 938 | + "Physical link is not up.\n"); |
|---|
| 939 | + return; |
|---|
| 940 | + } |
|---|
| 941 | + /* Flush and wait to make sure link down is processed */ |
|---|
| 942 | + flush_delayed_work(&qedf->link_update); |
|---|
| 943 | + msleep(500); |
|---|
| 944 | + |
|---|
| 732 | 945 | atomic_set(&qedf->link_state, QEDF_LINK_UP); |
|---|
| 733 | 946 | qedf->vlan_id = 0; |
|---|
| 947 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
|---|
| 948 | + "Queue link up work.\n"); |
|---|
| 734 | 949 | queue_delayed_work(qedf->link_update_wq, &qedf->link_update, |
|---|
| 735 | 950 | 0); |
|---|
| 736 | 951 | } |
|---|
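The expanded `qedf_ctx_soft_reset()` above still simulates a link bounce, but it now bails out if the physical link is actually down and it flushes the link-update delayed work so the fake link-down has been fully processed before the fake link-up is queued. A small sketch of that queue-then-flush ordering (structure and names are hypothetical):

```c
#include <linux/workqueue.h>

struct example_ctx {
	struct workqueue_struct *wq;
	struct delayed_work link_update;
};

static void example_bounce_link(struct example_ctx *ctx)
{
	/* Queue the "link down" pass immediately. */
	queue_delayed_work(ctx->wq, &ctx->link_update, 0);

	/*
	 * Wait until that pass has actually run; otherwise the "link up"
	 * pass queued next could race with it or be coalesced away.
	 */
	flush_delayed_work(&ctx->link_update);

	/* Now queue the "link up" pass. */
	queue_delayed_work(ctx->wq, &ctx->link_update, 0);
}
```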
| .. | .. |
|---|
| 740 | 955 | { |
|---|
| 741 | 956 | struct fc_lport *lport; |
|---|
| 742 | 957 | struct qedf_ctx *qedf; |
|---|
| 743 | | - struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); |
|---|
| 744 | | - struct fc_rport_libfc_priv *rp = rport->dd_data; |
|---|
| 745 | | - struct qedf_rport *fcport = (struct qedf_rport *)&rp[1]; |
|---|
| 746 | | - int rval; |
|---|
| 747 | | - |
|---|
| 748 | | - rval = fc_remote_port_chkready(rport); |
|---|
| 749 | | - |
|---|
| 750 | | - if (rval) { |
|---|
| 751 | | - QEDF_ERR(NULL, "device_reset rport not ready\n"); |
|---|
| 752 | | - return FAILED; |
|---|
| 753 | | - } |
|---|
| 754 | | - |
|---|
| 755 | | - if (fcport == NULL) { |
|---|
| 756 | | - QEDF_ERR(NULL, "device_reset: rport is NULL\n"); |
|---|
| 757 | | - return FAILED; |
|---|
| 758 | | - } |
|---|
| 759 | 958 | |
|---|
| 760 | 959 | lport = shost_priv(sc_cmd->device->host); |
|---|
| 761 | 960 | qedf = lport_priv(lport); |
|---|
| .. | .. |
|---|
| 785 | 984 | .name = QEDF_MODULE_NAME, |
|---|
| 786 | 985 | .this_id = -1, |
|---|
| 787 | 986 | .cmd_per_lun = 32, |
|---|
| 788 | | - .use_clustering = ENABLE_CLUSTERING, |
|---|
| 789 | 987 | .max_sectors = 0xffff, |
|---|
| 790 | 988 | .queuecommand = qedf_queuecommand, |
|---|
| 791 | 989 | .shost_attrs = qedf_host_attrs, |
|---|
| .. | .. |
|---|
| 852 | 1050 | return rc; |
|---|
| 853 | 1051 | } |
|---|
| 854 | 1052 | |
|---|
| 855 | | -/** |
|---|
| 1053 | +/* |
|---|
| 856 | 1054 | * qedf_xmit - qedf FCoE frame transmit function |
|---|
| 857 | | - * |
|---|
| 858 | 1055 | */ |
|---|
| 859 | 1056 | static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) |
|---|
| 860 | 1057 | { |
|---|
| .. | .. |
|---|
| 908 | 1105 | "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id)); |
|---|
| 909 | 1106 | kfree_skb(skb); |
|---|
| 910 | 1107 | rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id)); |
|---|
| 911 | | - if (rdata) |
|---|
| 1108 | + if (rdata) { |
|---|
| 912 | 1109 | rdata->retries = lport->max_rport_retry_count; |
|---|
| 1110 | + kref_put(&rdata->kref, fc_rport_destroy); |
|---|
| 1111 | + } |
|---|
| 913 | 1112 | return -EINVAL; |
|---|
| 914 | 1113 | } |
|---|
| 915 | 1114 | /* End NPIV filtering */ |
|---|
| .. | .. |
|---|
| 969 | 1168 | return -ENOMEM; |
|---|
| 970 | 1169 | } |
|---|
| 971 | 1170 | frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; |
|---|
| 972 | | - cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; |
|---|
| 1171 | + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); |
|---|
| 973 | 1172 | } else { |
|---|
| 974 | 1173 | cp = skb_put(skb, tlen); |
|---|
| 975 | 1174 | } |
|---|
| .. | .. |
|---|
| 1032 | 1231 | if (qedf_dump_frames) |
|---|
| 1033 | 1232 | print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, |
|---|
| 1034 | 1233 | 1, skb->data, skb->len, false); |
|---|
| 1035 | | - qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); |
|---|
| 1234 | + rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); |
|---|
| 1235 | + if (rc) { |
|---|
| 1236 | + QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc); |
|---|
| 1237 | + kfree_skb(skb); |
|---|
| 1238 | + return rc; |
|---|
| 1239 | + } |
|---|
| 1036 | 1240 | |
|---|
| 1037 | 1241 | return 0; |
|---|
| 1038 | 1242 | } |
|---|
| .. | .. |
|---|
| 1051 | 1255 | sizeof(void *); |
|---|
| 1052 | 1256 | fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; |
|---|
| 1053 | 1257 | |
|---|
| 1054 | | - fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev, |
|---|
| 1055 | | - fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL); |
|---|
| 1258 | + fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, |
|---|
| 1259 | + &fcport->sq_dma, GFP_KERNEL); |
|---|
| 1056 | 1260 | if (!fcport->sq) { |
|---|
| 1057 | 1261 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); |
|---|
| 1058 | 1262 | rval = 1; |
|---|
| 1059 | 1263 | goto out; |
|---|
| 1060 | 1264 | } |
|---|
| 1061 | 1265 | |
|---|
| 1062 | | - fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev, |
|---|
| 1063 | | - fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); |
|---|
| 1266 | + fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, |
|---|
| 1267 | + fcport->sq_pbl_size, |
|---|
| 1268 | + &fcport->sq_pbl_dma, GFP_KERNEL); |
|---|
| 1064 | 1269 | if (!fcport->sq_pbl) { |
|---|
| 1065 | 1270 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); |
|---|
| 1066 | 1271 | rval = 1; |
|---|
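The allocation changes above are part of the tree-wide removal of `dma_zalloc_coherent()`: `dma_alloc_coherent()` already returns zeroed memory, so the calls are renamed and only the argument wrapping changes. A minimal sketch of the allocate/free pairing (sizes and field names are hypothetical):

```c
#include <linux/dma-mapping.h>

struct example_queue {
	void		*ring;
	dma_addr_t	ring_dma;
	size_t		ring_size;
};

static int example_alloc_ring(struct device *dev, struct example_queue *q)
{
	q->ring_size = 4096;
	/* The returned buffer is already zeroed; no memset() is needed. */
	q->ring = dma_alloc_coherent(dev, q->ring_size, &q->ring_dma,
				     GFP_KERNEL);
	return q->ring ? 0 : -ENOMEM;
}

static void example_free_ring(struct device *dev, struct example_queue *q)
{
	dma_free_coherent(dev, q->ring_size, q->ring, q->ring_dma);
}
```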
| .. | .. |
|---|
| 1137 | 1342 | ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr); |
|---|
| 1138 | 1343 | |
|---|
| 1139 | 1344 | conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size; |
|---|
| 1140 | | - conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20; |
|---|
| 1345 | + conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov; |
|---|
| 1141 | 1346 | conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */ |
|---|
| 1142 | 1347 | conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size; |
|---|
| 1143 | 1348 | |
|---|
| .. | .. |
|---|
| 1224 | 1429 | static void qedf_cleanup_fcport(struct qedf_ctx *qedf, |
|---|
| 1225 | 1430 | struct qedf_rport *fcport) |
|---|
| 1226 | 1431 | { |
|---|
| 1432 | + struct fc_rport_priv *rdata = fcport->rdata; |
|---|
| 1433 | + |
|---|
| 1227 | 1434 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n", |
|---|
| 1228 | 1435 | fcport->rdata->ids.port_id); |
|---|
| 1229 | 1436 | |
|---|
| .. | .. |
|---|
| 1235 | 1442 | qedf_free_sq(qedf, fcport); |
|---|
| 1236 | 1443 | fcport->rdata = NULL; |
|---|
| 1237 | 1444 | fcport->qedf = NULL; |
|---|
| 1445 | + kref_put(&rdata->kref, fc_rport_destroy); |
|---|
| 1238 | 1446 | } |
|---|
| 1239 | 1447 | |
|---|
| 1240 | | -/** |
|---|
| 1448 | +/* |
|---|
| 1241 | 1449 | * This event_callback is called after successful completion of libfc |
|---|
| 1242 | 1450 | * initiated target login. qedf can proceed with initiating the session |
|---|
| 1243 | 1451 | * establishment. |
|---|
| .. | .. |
|---|
| 1310 | 1518 | break; |
|---|
| 1311 | 1519 | } |
|---|
| 1312 | 1520 | |
|---|
| 1521 | + /* Initial reference held on entry, so this can't fail */ |
|---|
| 1522 | + kref_get(&rdata->kref); |
|---|
| 1313 | 1523 | fcport->rdata = rdata; |
|---|
| 1314 | 1524 | fcport->rport = rport; |
|---|
| 1315 | 1525 | |
|---|
| .. | .. |
|---|
| 1357 | 1567 | if (port_id == FC_FID_DIR_SERV) |
|---|
| 1358 | 1568 | break; |
|---|
| 1359 | 1569 | |
|---|
| 1570 | + if (rdata->spp_type != FC_TYPE_FCP) { |
|---|
| 1571 | + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
|---|
| 1572 | + "No action since spp type isn't FCP\n"); |
|---|
| 1573 | + break; |
|---|
| 1574 | + } |
|---|
| 1575 | + if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { |
|---|
| 1576 | + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
|---|
| 1577 | + "Not FCP target so no action\n"); |
|---|
| 1578 | + break; |
|---|
| 1579 | + } |
|---|
| 1580 | + |
|---|
| 1360 | 1581 | if (!rport) { |
|---|
| 1361 | 1582 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
|---|
| 1362 | 1583 | "port_id=%x - rport notcreated Yet!!\n", port_id); |
|---|
| .. | .. |
|---|
| 1369 | 1590 | */ |
|---|
| 1370 | 1591 | fcport = (struct qedf_rport *)&rp[1]; |
|---|
| 1371 | 1592 | |
|---|
| 1593 | + spin_lock_irqsave(&fcport->rport_lock, flags); |
|---|
| 1372 | 1594 | /* Only free this fcport if it is offloaded already */ |
|---|
| 1373 | | - if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { |
|---|
| 1374 | | - set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags); |
|---|
| 1595 | + if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) && |
|---|
| 1596 | + !test_bit(QEDF_RPORT_UPLOADING_CONNECTION, |
|---|
| 1597 | + &fcport->flags)) { |
|---|
| 1598 | + set_bit(QEDF_RPORT_UPLOADING_CONNECTION, |
|---|
| 1599 | + &fcport->flags); |
|---|
| 1600 | + spin_unlock_irqrestore(&fcport->rport_lock, flags); |
|---|
| 1375 | 1601 | qedf_cleanup_fcport(qedf, fcport); |
|---|
| 1376 | | - |
|---|
| 1377 | 1602 | /* |
|---|
| 1378 | 1603 | * Remove fcport to list of qedf_ctx list of offloaded |
|---|
| 1379 | 1604 | * ports |
|---|
| .. | .. |
|---|
| 1385 | 1610 | clear_bit(QEDF_RPORT_UPLOADING_CONNECTION, |
|---|
| 1386 | 1611 | &fcport->flags); |
|---|
| 1387 | 1612 | atomic_dec(&qedf->num_offloads); |
|---|
| 1613 | + } else { |
|---|
| 1614 | + spin_unlock_irqrestore(&fcport->rport_lock, flags); |
|---|
| 1388 | 1615 | } |
|---|
| 1389 | | - |
|---|
| 1390 | 1616 | break; |
|---|
| 1391 | 1617 | |
|---|
| 1392 | 1618 | case RPORT_EV_NONE: |
|---|
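In the rport-removal hunk above, the `QEDF_RPORT_UPLOADING_CONNECTION` test and set now happen together under `rport_lock`, so only one path can win the right to run `qedf_cleanup_fcport()` even if several teardown triggers race. A generic sketch of that claim-it-once guard (flag bits and structure are hypothetical):

```c
#include <linux/spinlock.h>
#include <linux/bitops.h>

#define EXAMPLE_SESSION_READY	0
#define EXAMPLE_TEARING_DOWN	1

struct example_session {
	spinlock_t	lock;
	unsigned long	flags;
};

/* Returns true for the single caller that should perform the teardown. */
static bool example_claim_teardown(struct example_session *s)
{
	unsigned long irq_flags;
	bool claimed = false;

	spin_lock_irqsave(&s->lock, irq_flags);
	if (test_bit(EXAMPLE_SESSION_READY, &s->flags) &&
	    !test_bit(EXAMPLE_TEARING_DOWN, &s->flags)) {
		set_bit(EXAMPLE_TEARING_DOWN, &s->flags);
		claimed = true;
	}
	spin_unlock_irqrestore(&s->lock, irq_flags);

	return claimed;
}
```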
| .. | .. |
|---|
| 1428 | 1654 | static void qedf_setup_fdmi(struct qedf_ctx *qedf) |
|---|
| 1429 | 1655 | { |
|---|
| 1430 | 1656 | struct fc_lport *lport = qedf->lport; |
|---|
| 1431 | | - struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host); |
|---|
| 1432 | 1657 | u8 buf[8]; |
|---|
| 1433 | | - int i, pos; |
|---|
| 1658 | + int pos; |
|---|
| 1659 | + uint32_t i; |
|---|
| 1434 | 1660 | |
|---|
| 1435 | 1661 | /* |
|---|
| 1436 | | - * fdmi_enabled needs to be set for libfc to execute FDMI registration. |
|---|
| 1662 | + * fdmi_enabled needs to be set for libfc |
|---|
| 1663 | + * to execute FDMI registration |
|---|
| 1437 | 1664 | */ |
|---|
| 1438 | 1665 | lport->fdmi_enabled = 1; |
|---|
| 1439 | 1666 | |
|---|
| .. | .. |
|---|
| 1449 | 1676 | for (i = 0; i < 8; i++) |
|---|
| 1450 | 1677 | pci_read_config_byte(qedf->pdev, pos + i, &buf[i]); |
|---|
| 1451 | 1678 | |
|---|
| 1452 | | - snprintf(fc_host->serial_number, |
|---|
| 1453 | | - sizeof(fc_host->serial_number), |
|---|
| 1679 | + snprintf(fc_host_serial_number(lport->host), |
|---|
| 1680 | + FC_SERIAL_NUMBER_SIZE, |
|---|
| 1454 | 1681 | "%02X%02X%02X%02X%02X%02X%02X%02X", |
|---|
| 1455 | 1682 | buf[7], buf[6], buf[5], buf[4], |
|---|
| 1456 | 1683 | buf[3], buf[2], buf[1], buf[0]); |
|---|
| 1457 | 1684 | } else |
|---|
| 1458 | | - snprintf(fc_host->serial_number, |
|---|
| 1459 | | - sizeof(fc_host->serial_number), "Unknown"); |
|---|
| 1685 | + snprintf(fc_host_serial_number(lport->host), |
|---|
| 1686 | + FC_SERIAL_NUMBER_SIZE, "Unknown"); |
|---|
| 1460 | 1687 | |
|---|
| 1461 | | - snprintf(fc_host->manufacturer, |
|---|
| 1462 | | - sizeof(fc_host->manufacturer), "%s", "Cavium Inc."); |
|---|
| 1688 | + snprintf(fc_host_manufacturer(lport->host), |
|---|
| 1689 | + FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc."); |
|---|
| 1463 | 1690 | |
|---|
| 1464 | | - snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000"); |
|---|
| 1691 | + if (qedf->pdev->device == QL45xxx) { |
|---|
| 1692 | + snprintf(fc_host_model(lport->host), |
|---|
| 1693 | + FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx"); |
|---|
| 1465 | 1694 | |
|---|
| 1466 | | - snprintf(fc_host->model_description, sizeof(fc_host->model_description), |
|---|
| 1467 | | - "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller" |
|---|
| 1468 | | - "(FCoE)"); |
|---|
| 1695 | + snprintf(fc_host_model_description(lport->host), |
|---|
| 1696 | + FC_SYMBOLIC_NAME_SIZE, "%s", |
|---|
| 1697 | + "Marvell FastLinQ QL45xxx FCoE Adapter"); |
|---|
| 1698 | + } |
|---|
| 1469 | 1699 | |
|---|
| 1470 | | - snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version), |
|---|
| 1471 | | - "Rev %d", qedf->pdev->revision); |
|---|
| 1700 | + if (qedf->pdev->device == QL41xxx) { |
|---|
| 1701 | + snprintf(fc_host_model(lport->host), |
|---|
| 1702 | + FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx"); |
|---|
| 1472 | 1703 | |
|---|
| 1473 | | - snprintf(fc_host->driver_version, sizeof(fc_host->driver_version), |
|---|
| 1474 | | - "%s", QEDF_VERSION); |
|---|
| 1704 | + snprintf(fc_host_model_description(lport->host), |
|---|
| 1705 | + FC_SYMBOLIC_NAME_SIZE, "%s", |
|---|
| 1706 | + "Marvell FastLinQ QL41xxx FCoE Adapter"); |
|---|
| 1707 | + } |
|---|
| 1475 | 1708 | |
|---|
| 1476 | | - snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version), |
|---|
| 1477 | | - "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION, |
|---|
| 1478 | | - FW_REVISION_VERSION, FW_ENGINEERING_VERSION); |
|---|
| 1709 | + snprintf(fc_host_hardware_version(lport->host), |
|---|
| 1710 | + FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision); |
|---|
| 1711 | + |
|---|
| 1712 | + snprintf(fc_host_driver_version(lport->host), |
|---|
| 1713 | + FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION); |
|---|
| 1714 | + |
|---|
| 1715 | + snprintf(fc_host_firmware_version(lport->host), |
|---|
| 1716 | + FC_VERSION_STRING_SIZE, "%d.%d.%d.%d", |
|---|
| 1717 | + FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, |
|---|
| 1718 | + FW_ENGINEERING_VERSION); |
|---|
| 1719 | + |
|---|
| 1479 | 1720 | } |
|---|
| 1480 | 1721 | |
|---|
| 1481 | 1722 | static int qedf_lport_setup(struct qedf_ctx *qedf) |
|---|
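The `qedf_setup_fdmi()` rewrite above stops dereferencing `struct fc_host_attrs` directly and instead goes through the `fc_host_*()` accessor macros from `<scsi/scsi_transport_fc.h>`, sized with the transport's own buffer constants (`FC_SERIAL_NUMBER_SIZE`, `FC_SYMBOLIC_NAME_SIZE`, `FC_VERSION_STRING_SIZE`), and picks the model string by PCI device ID. A short sketch of filling a few of those attributes (the vendor strings are placeholders):

```c
#include <linux/kernel.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

static void example_fill_fdmi(struct Scsi_Host *shost, u8 pci_rev)
{
	snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE,
		 "%s", "Example Vendor Inc.");
	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE,
		 "%s", "EX41xxx");
	snprintf(fc_host_hardware_version(shost), FC_VERSION_STRING_SIZE,
		 "Rev %d", pci_rev);
}
```

Using the accessors keeps the driver insulated from the layout of `fc_host_attrs` and guarantees the buffers are the sizes FDMI registration expects.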
| .. | .. |
|---|
| 1498 | 1739 | fc_set_wwnn(lport, qedf->wwnn); |
|---|
| 1499 | 1740 | fc_set_wwpn(lport, qedf->wwpn); |
|---|
| 1500 | 1741 | |
|---|
| 1501 | | - fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0); |
|---|
| 1742 | + if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) { |
|---|
| 1743 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 1744 | + "fcoe_libfc_config failed.\n"); |
|---|
| 1745 | + return -ENOMEM; |
|---|
| 1746 | + } |
|---|
| 1502 | 1747 | |
|---|
| 1503 | 1748 | /* Allocate the exchange manager */ |
|---|
| 1504 | | - fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1, |
|---|
| 1505 | | - qedf->max_els_xid, NULL); |
|---|
| 1749 | + fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS, |
|---|
| 1750 | + 0xfffe, NULL); |
|---|
| 1506 | 1751 | |
|---|
| 1507 | 1752 | if (fc_lport_init_stats(lport)) |
|---|
| 1508 | 1753 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 1518 | 1763 | fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo; |
|---|
| 1519 | 1764 | |
|---|
| 1520 | 1765 | /* Set symbolic node name */ |
|---|
| 1521 | | - snprintf(fc_host_symbolic_name(lport->host), 256, |
|---|
| 1522 | | - "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION); |
|---|
| 1766 | + if (qedf->pdev->device == QL45xxx) |
|---|
| 1767 | + snprintf(fc_host_symbolic_name(lport->host), 256, |
|---|
| 1768 | + "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION); |
|---|
| 1769 | + |
|---|
| 1770 | + if (qedf->pdev->device == QL41xxx) |
|---|
| 1771 | + snprintf(fc_host_symbolic_name(lport->host), 256, |
|---|
| 1772 | + "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION); |
|---|
| 1523 | 1773 | |
|---|
| 1524 | 1774 | qedf_setup_fdmi(qedf); |
|---|
| 1525 | 1775 | |
|---|
| .. | .. |
|---|
| 1577 | 1827 | fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); |
|---|
| 1578 | 1828 | QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, " |
|---|
| 1579 | 1829 | "WWPN (0x%s) already exists.\n", buf); |
|---|
| 1580 | | - goto err1; |
|---|
| 1830 | + return rc; |
|---|
| 1581 | 1831 | } |
|---|
| 1582 | 1832 | |
|---|
| 1583 | 1833 | if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) { |
|---|
| 1584 | 1834 | QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport " |
|---|
| 1585 | 1835 | "because link is not up.\n"); |
|---|
| 1586 | | - rc = -EIO; |
|---|
| 1587 | | - goto err1; |
|---|
| 1836 | + return -EIO; |
|---|
| 1588 | 1837 | } |
|---|
| 1589 | 1838 | |
|---|
| 1590 | 1839 | vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx)); |
|---|
| 1591 | 1840 | if (!vn_port) { |
|---|
| 1592 | 1841 | QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport " |
|---|
| 1593 | 1842 | "for vport.\n"); |
|---|
| 1594 | | - rc = -ENOMEM; |
|---|
| 1595 | | - goto err1; |
|---|
| 1843 | + return -ENOMEM; |
|---|
| 1596 | 1844 | } |
|---|
| 1597 | 1845 | |
|---|
| 1598 | 1846 | fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); |
|---|
| .. | .. |
|---|
| 1611 | 1859 | vport_qedf->cmd_mgr = base_qedf->cmd_mgr; |
|---|
| 1612 | 1860 | init_completion(&vport_qedf->flogi_compl); |
|---|
| 1613 | 1861 | INIT_LIST_HEAD(&vport_qedf->fcports); |
|---|
| 1862 | + INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work); |
|---|
| 1614 | 1863 | |
|---|
| 1615 | 1864 | rc = qedf_vport_libfc_config(vport, vn_port); |
|---|
| 1616 | 1865 | if (rc) { |
|---|
| 1617 | 1866 | QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory " |
|---|
| 1618 | 1867 | "for lport stats.\n"); |
|---|
| 1619 | | - goto err2; |
|---|
| 1868 | + goto err; |
|---|
| 1620 | 1869 | } |
|---|
| 1621 | 1870 | |
|---|
| 1622 | 1871 | fc_set_wwnn(vn_port, vport->node_name); |
|---|
| .. | .. |
|---|
| 1625 | 1874 | vport_qedf->wwpn = vn_port->wwpn; |
|---|
| 1626 | 1875 | |
|---|
| 1627 | 1876 | vn_port->host->transportt = qedf_fc_vport_transport_template; |
|---|
| 1628 | | - vn_port->host->can_queue = QEDF_MAX_ELS_XID; |
|---|
| 1877 | + vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS; |
|---|
| 1629 | 1878 | vn_port->host->max_lun = qedf_max_lun; |
|---|
| 1630 | 1879 | vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD; |
|---|
| 1631 | 1880 | vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN; |
|---|
| 1632 | 1881 | |
|---|
| 1633 | 1882 | rc = scsi_add_host(vn_port->host, &vport->dev); |
|---|
| 1634 | 1883 | if (rc) { |
|---|
| 1635 | | - QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n"); |
|---|
| 1636 | | - goto err2; |
|---|
| 1884 | + QEDF_WARN(&base_qedf->dbg_ctx, |
|---|
| 1885 | + "Error adding Scsi_Host rc=0x%x.\n", rc); |
|---|
| 1886 | + goto err; |
|---|
| 1637 | 1887 | } |
|---|
| 1638 | 1888 | |
|---|
| 1639 | 1889 | /* Set default dev_loss_tmo based on module parameter */ |
|---|
| .. | .. |
|---|
| 1667 | 1917 | fc_vport_setlink(vn_port); |
|---|
| 1668 | 1918 | } |
|---|
| 1669 | 1919 | |
|---|
| 1920 | + /* Set symbolic node name */ |
|---|
| 1921 | + if (base_qedf->pdev->device == QL45xxx) |
|---|
| 1922 | + snprintf(fc_host_symbolic_name(vn_port->host), 256, |
|---|
| 1923 | + "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION); |
|---|
| 1924 | + |
|---|
| 1925 | + if (base_qedf->pdev->device == QL41xxx) |
|---|
| 1926 | + snprintf(fc_host_symbolic_name(vn_port->host), 256, |
|---|
| 1927 | + "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION); |
|---|
| 1928 | + |
|---|
| 1929 | + /* Set supported speed */ |
|---|
| 1930 | + fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds; |
|---|
| 1931 | + |
|---|
| 1932 | + /* Set speed */ |
|---|
| 1933 | + vn_port->link_speed = n_port->link_speed; |
|---|
| 1934 | + |
|---|
| 1935 | + /* Set port type */ |
|---|
| 1936 | + fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV; |
|---|
| 1937 | + |
|---|
| 1938 | + /* Set maxframe size */ |
|---|
| 1939 | + fc_host_maxframe_size(vn_port->host) = n_port->mfs; |
|---|
| 1940 | + |
|---|
| 1670 | 1941 | QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n", |
|---|
| 1671 | 1942 | vn_port); |
|---|
| 1672 | 1943 | |
|---|
| .. | .. |
|---|
| 1674 | 1945 | vport_qedf->dbg_ctx.host_no = vn_port->host->host_no; |
|---|
| 1675 | 1946 | vport_qedf->dbg_ctx.pdev = base_qedf->pdev; |
|---|
| 1676 | 1947 | |
|---|
| 1677 | | -err2: |
|---|
| 1948 | + return 0; |
|---|
| 1949 | + |
|---|
| 1950 | +err: |
|---|
| 1678 | 1951 | scsi_host_put(vn_port->host); |
|---|
| 1679 | | -err1: |
|---|
| 1680 | 1952 | return rc; |
|---|
| 1681 | 1953 | } |
|---|
| 1682 | 1954 | |
|---|
| .. | .. |
|---|
| 1717 | 1989 | fc_lport_free_stats(vn_port); |
|---|
| 1718 | 1990 | |
|---|
| 1719 | 1991 | /* Release Scsi_Host */ |
|---|
| 1720 | | - if (vn_port->host) |
|---|
| 1721 | | - scsi_host_put(vn_port->host); |
|---|
| 1992 | + scsi_host_put(vn_port->host); |
|---|
| 1722 | 1993 | |
|---|
| 1723 | 1994 | out: |
|---|
| 1724 | 1995 | return 0; |
|---|
| .. | .. |
|---|
| 1771 | 2042 | |
|---|
| 1772 | 2043 | qedf_ctx_soft_reset(lport); |
|---|
| 1773 | 2044 | return 0; |
|---|
| 2045 | +} |
|---|
| 2046 | + |
|---|
| 2047 | +static void qedf_get_host_port_id(struct Scsi_Host *shost) |
|---|
| 2048 | +{ |
|---|
| 2049 | + struct fc_lport *lport = shost_priv(shost); |
|---|
| 2050 | + |
|---|
| 2051 | + fc_host_port_id(shost) = lport->port_id; |
|---|
| 1774 | 2052 | } |
|---|
| 1775 | 2053 | |
|---|
| 1776 | 2054 | static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host |
|---|
| .. | .. |
|---|
| 1843 | 2121 | .show_host_active_fc4s = 1, |
|---|
| 1844 | 2122 | .show_host_maxframe_size = 1, |
|---|
| 1845 | 2123 | |
|---|
| 2124 | + .get_host_port_id = qedf_get_host_port_id, |
|---|
| 1846 | 2125 | .show_host_port_id = 1, |
|---|
| 1847 | 2126 | .show_host_supported_speeds = 1, |
|---|
| 1848 | 2127 | .get_host_speed = fc_get_host_speed, |
|---|
| .. | .. |
|---|
| 2086 | 2365 | static void qedf_sync_free_irqs(struct qedf_ctx *qedf) |
|---|
| 2087 | 2366 | { |
|---|
| 2088 | 2367 | int i; |
|---|
| 2368 | + u16 vector_idx = 0; |
|---|
| 2369 | + u32 vector; |
|---|
| 2089 | 2370 | |
|---|
| 2090 | 2371 | if (qedf->int_info.msix_cnt) { |
|---|
| 2091 | 2372 | for (i = 0; i < qedf->int_info.used_cnt; i++) { |
|---|
| 2092 | | - synchronize_irq(qedf->int_info.msix[i].vector); |
|---|
| 2093 | | - irq_set_affinity_hint(qedf->int_info.msix[i].vector, |
|---|
| 2094 | | - NULL); |
|---|
| 2095 | | - irq_set_affinity_notifier(qedf->int_info.msix[i].vector, |
|---|
| 2096 | | - NULL); |
|---|
| 2097 | | - free_irq(qedf->int_info.msix[i].vector, |
|---|
| 2098 | | - &qedf->fp_array[i]); |
|---|
| 2373 | + vector_idx = i * qedf->dev_info.common.num_hwfns + |
|---|
| 2374 | + qed_ops->common->get_affin_hwfn_idx(qedf->cdev); |
|---|
| 2375 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
|---|
| 2376 | + "Freeing IRQ #%d vector_idx=%d.\n", |
|---|
| 2377 | + i, vector_idx); |
|---|
| 2378 | + vector = qedf->int_info.msix[vector_idx].vector; |
|---|
| 2379 | + synchronize_irq(vector); |
|---|
| 2380 | + irq_set_affinity_hint(vector, NULL); |
|---|
| 2381 | + irq_set_affinity_notifier(vector, NULL); |
|---|
| 2382 | + free_irq(vector, &qedf->fp_array[i]); |
|---|
| 2099 | 2383 | } |
|---|
| 2100 | 2384 | } else |
|---|
| 2101 | 2385 | qed_ops->common->simd_handler_clean(qedf->cdev, |
|---|
| .. | .. |
|---|
| 2108 | 2392 | static int qedf_request_msix_irq(struct qedf_ctx *qedf) |
|---|
| 2109 | 2393 | { |
|---|
| 2110 | 2394 | int i, rc, cpu; |
|---|
| 2395 | + u16 vector_idx = 0; |
|---|
| 2396 | + u32 vector; |
|---|
| 2111 | 2397 | |
|---|
| 2112 | 2398 | cpu = cpumask_first(cpu_online_mask); |
|---|
| 2113 | 2399 | for (i = 0; i < qedf->num_queues; i++) { |
|---|
| 2114 | | - rc = request_irq(qedf->int_info.msix[i].vector, |
|---|
| 2115 | | - qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]); |
|---|
| 2400 | + vector_idx = i * qedf->dev_info.common.num_hwfns + |
|---|
| 2401 | + qed_ops->common->get_affin_hwfn_idx(qedf->cdev); |
|---|
| 2402 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
|---|
| 2403 | + "Requesting IRQ #%d vector_idx=%d.\n", |
|---|
| 2404 | + i, vector_idx); |
|---|
| 2405 | + vector = qedf->int_info.msix[vector_idx].vector; |
|---|
| 2406 | + rc = request_irq(vector, qedf_msix_handler, 0, "qedf", |
|---|
| 2407 | + &qedf->fp_array[i]); |
|---|
| 2116 | 2408 | |
|---|
| 2117 | 2409 | if (rc) { |
|---|
| 2118 | 2410 | QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n"); |
|---|
| .. | .. |
|---|
| 2121 | 2413 | } |
|---|
| 2122 | 2414 | |
|---|
| 2123 | 2415 | qedf->int_info.used_cnt++; |
|---|
| 2124 | | - rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector, |
|---|
| 2125 | | - get_cpu_mask(cpu)); |
|---|
| 2416 | + rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu)); |
|---|
| 2126 | 2417 | cpu = cpumask_next(cpu, cpu_online_mask); |
|---|
| 2127 | 2418 | } |
|---|
| 2128 | 2419 | |
|---|
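The interrupt hunks above change the vector lookup to `i * num_hwfns + affin_hwfn_idx`, so on adapters that expose more than one hardware function each queue picks the MSI-X vector owned by the hw function this PF is affinitized to, and the vectors are then spread across online CPUs with an affinity hint. A generic sketch of that request-and-pin loop (the vector layout and the `example_*` names are assumptions, not the qed API):

```c
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>

static int example_request_vectors(struct msix_entry *msix, int num_queues,
				   int num_hwfns, int affin_idx,
				   irq_handler_t handler, void **cookies)
{
	int i, rc;
	int cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < num_queues; i++) {
		/* Pick the vector belonging to the affinitized hw function. */
		u32 vector = msix[i * num_hwfns + affin_idx].vector;

		rc = request_irq(vector, handler, 0, "example", cookies[i]);
		if (rc)
			return rc;

		/* Suggest a home CPU for the vector, round-robin. */
		irq_set_affinity_hint(vector, get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}
	return 0;
}
```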
| .. | .. |
|---|
| 2155 | 2446 | QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler); |
|---|
| 2156 | 2447 | qedf->int_info.used_cnt = 1; |
|---|
| 2157 | 2448 | |
|---|
| 2158 | | - QEDF_ERR(&qedf->dbg_ctx, "Only MSI-X supported. Failing probe.\n"); |
|---|
| 2449 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 2450 | + "Cannot load driver due to a lack of MSI-X vectors.\n"); |
|---|
| 2159 | 2451 | return -EINVAL; |
|---|
| 2160 | 2452 | } |
|---|
| 2161 | 2453 | |
|---|
| .. | .. |
|---|
| 2198 | 2490 | fr_dev(fp) = lport; |
|---|
| 2199 | 2491 | fr_sof(fp) = hp->fcoe_sof; |
|---|
| 2200 | 2492 | if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { |
|---|
| 2493 | + QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n"); |
|---|
| 2201 | 2494 | kfree_skb(skb); |
|---|
| 2202 | 2495 | return; |
|---|
| 2203 | 2496 | } |
|---|
| 2204 | 2497 | fr_eof(fp) = crc_eof.fcoe_eof; |
|---|
| 2205 | 2498 | fr_crc(fp) = crc_eof.fcoe_crc32; |
|---|
| 2206 | 2499 | if (pskb_trim(skb, fr_len)) { |
|---|
| 2500 | + QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n"); |
|---|
| 2207 | 2501 | kfree_skb(skb); |
|---|
| 2208 | 2502 | return; |
|---|
| 2209 | 2503 | } |
|---|
| .. | .. |
|---|
| 2264 | 2558 | * empty then this is not addressed to our port so simply drop it. |
|---|
| 2265 | 2559 | */ |
|---|
| 2266 | 2560 | if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { |
|---|
| 2267 | | - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, |
|---|
| 2268 | | - "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n", |
|---|
| 2269 | | - lport->port_id, ntoh24(fh->fh_d_id)); |
|---|
| 2561 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, |
|---|
| 2562 | + "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n", |
|---|
| 2563 | + lport->port_id, ntoh24(fh->fh_d_id)); |
|---|
| 2270 | 2564 | kfree_skb(skb); |
|---|
| 2271 | 2565 | return; |
|---|
| 2272 | 2566 | } |
|---|
| .. | .. |
|---|
| 2275 | 2569 | if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) && |
|---|
| 2276 | 2570 | (f_ctl & FC_FC_EX_CTX)) { |
|---|
| 2277 | 2571 | /* Drop incoming ABTS response that has both SEQ/EX CTX set */ |
|---|
| 2572 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, |
|---|
| 2573 | + "Dropping ABTS response as both SEQ/EX CTX set.\n"); |
|---|
| 2278 | 2574 | kfree_skb(skb); |
|---|
| 2279 | 2575 | return; |
|---|
| 2280 | 2576 | } |
|---|
| .. | .. |
|---|
| 2356 | 2652 | struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; |
|---|
| 2357 | 2653 | struct qedf_skb_work *skb_work; |
|---|
| 2358 | 2654 | |
|---|
| 2655 | + if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { |
|---|
| 2656 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, |
|---|
| 2657 | + "Dropping frame as link state is down.\n"); |
|---|
| 2658 | + kfree_skb(skb); |
|---|
| 2659 | + return 0; |
|---|
| 2660 | + } |
|---|
| 2661 | + |
|---|
| 2359 | 2662 | skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC); |
|---|
| 2360 | 2663 | if (!skb_work) { |
|---|
| 2361 | 2664 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so " |
|---|
| .. | .. |
|---|
| 2411 | 2714 | sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL); |
|---|
| 2412 | 2715 | |
|---|
| 2413 | 2716 | if (!sb_virt) { |
|---|
| 2414 | | - QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed " |
|---|
| 2415 | | - "for id = %d.\n", sb_id); |
|---|
| 2717 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 2718 | + "Status block allocation failed for id = %d.\n", |
|---|
| 2719 | + sb_id); |
|---|
| 2416 | 2720 | return -ENOMEM; |
|---|
| 2417 | 2721 | } |
|---|
| 2418 | 2722 | |
|---|
| .. | .. |
|---|
| 2420 | 2724 | sb_id, QED_SB_TYPE_STORAGE); |
|---|
| 2421 | 2725 | |
|---|
| 2422 | 2726 | if (ret) { |
|---|
| 2423 | | - QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization " |
|---|
| 2424 | | - "failed for id = %d.\n", sb_id); |
|---|
| 2727 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 2728 | + "Status block initialization failed (0x%x) for id = %d.\n", |
|---|
| 2729 | + ret, sb_id); |
|---|
| 2425 | 2730 | return ret; |
|---|
| 2426 | 2731 | } |
|---|
| 2427 | 2732 | |
|---|
| .. | .. |
|---|
| 2505 | 2810 | io_req = &qedf->cmd_mgr->cmds[xid]; |
|---|
| 2506 | 2811 | |
|---|
| 2507 | 2812 | /* Completion not for a valid I/O anymore so just return */ |
|---|
| 2508 | | - if (!io_req) |
|---|
| 2813 | + if (!io_req) { |
|---|
| 2814 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 2815 | + "io_req is NULL for xid=0x%x.\n", xid); |
|---|
| 2509 | 2816 | return; |
|---|
| 2817 | + } |
|---|
| 2510 | 2818 | |
|---|
| 2511 | 2819 | fcport = io_req->fcport; |
|---|
| 2512 | 2820 | |
|---|
| 2513 | 2821 | if (fcport == NULL) { |
|---|
| 2514 | | - QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n"); |
|---|
| 2822 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 2823 | + "fcport is NULL for xid=0x%x io_req=%p.\n", |
|---|
| 2824 | + xid, io_req); |
|---|
| 2515 | 2825 | return; |
|---|
| 2516 | 2826 | } |
|---|
| 2517 | 2827 | |
|---|
| .. | .. |
|---|
| 2520 | 2830 | * isn't valid and shouldn't be taken. We should just return. |
|---|
| 2521 | 2831 | */ |
|---|
| 2522 | 2832 | if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { |
|---|
| 2523 | | - QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); |
|---|
| 2833 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 2834 | + "Session not offloaded yet, fcport = %p.\n", fcport); |
|---|
| 2524 | 2835 | return; |
|---|
| 2525 | 2836 | } |
|---|
| 2526 | 2837 | |
|---|
| .. | .. |
|---|
| 2681 | 2992 | } |
|---|
| 2682 | 2993 | |
|---|
| 2683 | 2994 | /* Allocate list of PBL pages */ |
|---|
| 2684 | | - qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev, |
|---|
| 2685 | | - QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); |
|---|
| 2995 | + qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, |
|---|
| 2996 | + QEDF_PAGE_SIZE, |
|---|
| 2997 | + &qedf->bdq_pbl_list_dma, |
|---|
| 2998 | + GFP_KERNEL); |
|---|
| 2686 | 2999 | if (!qedf->bdq_pbl_list) { |
|---|
| 2687 | 3000 | QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); |
|---|
| 2688 | 3001 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 2709 | 3022 | { |
|---|
| 2710 | 3023 | u32 *list; |
|---|
| 2711 | 3024 | int i; |
|---|
| 2712 | | - int status = 0, rc; |
|---|
| 3025 | + int status; |
|---|
| 2713 | 3026 | u32 *pbl; |
|---|
| 2714 | 3027 | dma_addr_t page; |
|---|
| 2715 | 3028 | int num_pages; |
|---|
| .. | .. |
|---|
| 2721 | 3034 | */ |
|---|
| 2722 | 3035 | if (!qedf->num_queues) { |
|---|
| 2723 | 3036 | QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n"); |
|---|
| 2724 | | - return 1; |
|---|
| 3037 | + return -ENOMEM; |
|---|
| 2725 | 3038 | } |
|---|
| 2726 | 3039 | |
|---|
| 2727 | 3040 | /* |
|---|
| .. | .. |
|---|
| 2729 | 3042 | * addresses of our queues |
|---|
| 2730 | 3043 | */ |
|---|
| 2731 | 3044 | if (!qedf->p_cpuq) { |
|---|
| 2732 | | - status = 1; |
|---|
| 3045 | + status = -EINVAL; |
|---|
| 3046 | + QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n"); |
|---|
| 2733 | 3047 | goto mem_alloc_failure; |
|---|
| 2734 | 3048 | } |
|---|
| 2735 | 3049 | |
|---|
| .. | .. |
|---|
| 2744 | 3058 | "qedf->global_queues=%p.\n", qedf->global_queues); |
|---|
| 2745 | 3059 | |
|---|
| 2746 | 3060 | /* Allocate DMA coherent buffers for BDQ */ |
|---|
| 2747 | | - rc = qedf_alloc_bdq(qedf); |
|---|
| 2748 | | - if (rc) |
|---|
| 3061 | + status = qedf_alloc_bdq(qedf); |
|---|
| 3062 | + if (status) { |
|---|
| 3063 | + QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n"); |
|---|
| 2749 | 3064 | goto mem_alloc_failure; |
|---|
| 3065 | + } |
|---|
| 2750 | 3066 | |
|---|
| 2751 | 3067 | /* Allocate a CQ and an associated PBL for each MSI-X vector */ |
|---|
| 2752 | 3068 | for (i = 0; i < qedf->num_queues; i++) { |
|---|
| .. | .. |
|---|
| 2771 | 3087 | ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); |
|---|
| 2772 | 3088 | |
|---|
| 2773 | 3089 | qedf->global_queues[i]->cq = |
|---|
| 2774 | | - dma_zalloc_coherent(&qedf->pdev->dev, |
|---|
| 2775 | | - qedf->global_queues[i]->cq_mem_size, |
|---|
| 2776 | | - &qedf->global_queues[i]->cq_dma, GFP_KERNEL); |
|---|
| 3090 | + dma_alloc_coherent(&qedf->pdev->dev, |
|---|
| 3091 | + qedf->global_queues[i]->cq_mem_size, |
|---|
| 3092 | + &qedf->global_queues[i]->cq_dma, |
|---|
| 3093 | + GFP_KERNEL); |
|---|
| 2777 | 3094 | |
|---|
| 2778 | 3095 | if (!qedf->global_queues[i]->cq) { |
|---|
| 2779 | 3096 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); |
|---|
| .. | .. |
|---|
| 2782 | 3099 | } |
|---|
| 2783 | 3100 | |
|---|
| 2784 | 3101 | qedf->global_queues[i]->cq_pbl = |
|---|
| 2785 | | - dma_zalloc_coherent(&qedf->pdev->dev, |
|---|
| 2786 | | - qedf->global_queues[i]->cq_pbl_size, |
|---|
| 2787 | | - &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); |
|---|
| 3102 | + dma_alloc_coherent(&qedf->pdev->dev, |
|---|
| 3103 | + qedf->global_queues[i]->cq_pbl_size, |
|---|
| 3104 | + &qedf->global_queues[i]->cq_pbl_dma, |
|---|
| 3105 | + GFP_KERNEL); |
|---|
| 2788 | 3106 | |
|---|
| 2789 | 3107 | if (!qedf->global_queues[i]->cq_pbl) { |
|---|
| 2790 | 3108 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); |
|---|
| .. | .. |
|---|
| 2855 | 3173 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", |
|---|
| 2856 | 3174 | qedf->num_queues); |
|---|
| 2857 | 3175 | |
|---|
| 2858 | | - qedf->p_cpuq = pci_alloc_consistent(qedf->pdev, |
|---|
| 3176 | + qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev, |
|---|
| 2859 | 3177 | qedf->num_queues * sizeof(struct qedf_glbl_q_params), |
|---|
| 2860 | | - &qedf->hw_p_cpuq); |
|---|
| 3178 | + &qedf->hw_p_cpuq, GFP_KERNEL); |
|---|
| 2861 | 3179 | |
|---|
| 2862 | 3180 | if (!qedf->p_cpuq) { |
|---|
| 2863 | | - QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n"); |
|---|
| 3181 | + QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n"); |
|---|
| 2864 | 3182 | return 1; |
|---|
| 2865 | 3183 | } |
|---|
| 2866 | 3184 | |
|---|
| .. | .. |
|---|
| 2929 | 3247 | |
|---|
| 2930 | 3248 | if (qedf->p_cpuq) { |
|---|
| 2931 | 3249 | size = qedf->num_queues * sizeof(struct qedf_glbl_q_params); |
|---|
| 2932 | | - pci_free_consistent(qedf->pdev, size, qedf->p_cpuq, |
|---|
| 3250 | + dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq, |
|---|
| 2933 | 3251 | qedf->hw_p_cpuq); |
|---|
| 2934 | 3252 | } |
|---|
| 2935 | 3253 | |
|---|
| 2936 | 3254 | qedf_free_global_queues(qedf); |
|---|
| 2937 | 3255 | |
|---|
| 2938 | | - if (qedf->global_queues) |
|---|
| 2939 | | - kfree(qedf->global_queues); |
|---|
| 3256 | + kfree(qedf->global_queues); |
|---|
| 2940 | 3257 | } |
|---|
| 2941 | 3258 | |
|---|
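The allocation and free changes in the hunks above are a mechanical DMA-API cleanup: dma_zalloc_coherent() went away upstream once dma_alloc_coherent() started returning zeroed memory, and the legacy pci_alloc_consistent()/pci_free_consistent() wrappers map onto dma_alloc_coherent()/dma_free_coherent() with an explicit GFP flag and a struct device instead of a struct pci_dev. Dropping the `if (qedf->global_queues)` check is likewise safe because kfree(NULL) is a no-op. A minimal sketch of the conversion pattern, using a hypothetical ring structure rather than qedf's queues:

```c
#include <linux/dma-mapping.h>

/* Illustrative only; "example_ring" is not a qedf structure. */
struct example_ring {
	void		*virt;
	dma_addr_t	dma;
	size_t		size;
};

static int example_alloc_ring(struct device *dev, struct example_ring *ring)
{
	/* dma_alloc_coherent() already returns zeroed memory, so no memset. */
	ring->virt = dma_alloc_coherent(dev, ring->size, &ring->dma, GFP_KERNEL);
	if (!ring->virt)
		return -ENOMEM;
	return 0;
}

static void example_free_ring(struct device *dev, struct example_ring *ring)
{
	/* Generic replacement for a legacy pci_free_consistent() call. */
	dma_free_coherent(dev, ring->size, ring->virt, ring->dma);
}
```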
| 2942 | 3259 | /* |
|---|
| .. | .. |
|---|
| 2955 | 3272 | .id_table = qedf_pci_tbl, |
|---|
| 2956 | 3273 | .probe = qedf_probe, |
|---|
| 2957 | 3274 | .remove = qedf_remove, |
|---|
| 3275 | + .shutdown = qedf_shutdown, |
|---|
| 2958 | 3276 | }; |
|---|
| 2959 | 3277 | |
|---|
| 2960 | 3278 | static int __qedf_probe(struct pci_dev *pdev, int mode) |
|---|
| .. | .. |
|---|
| 2971 | 3289 | void *task_start, *task_end; |
|---|
| 2972 | 3290 | struct qed_slowpath_params slowpath_params; |
|---|
| 2973 | 3291 | struct qed_probe_params qed_params; |
|---|
| 2974 | | - u16 tmp; |
|---|
| 3292 | + u16 retry_cnt = 10; |
|---|
| 2975 | 3293 | |
|---|
| 2976 | 3294 | /* |
|---|
| 2977 | 3295 | * When doing error recovery we didn't reap the lport so don't try |
|---|
| 2978 | 3296 | * to reallocate it. |
|---|
| 2979 | 3297 | */ |
|---|
| 3298 | +retry_probe: |
|---|
| 3299 | + if (mode == QEDF_MODE_RECOVERY) |
|---|
| 3300 | + msleep(2000); |
|---|
| 3301 | + |
|---|
| 2980 | 3302 | if (mode != QEDF_MODE_RECOVERY) { |
|---|
| 2981 | 3303 | lport = libfc_host_alloc(&qedf_host_template, |
|---|
| 2982 | 3304 | sizeof(struct qedf_ctx)); |
|---|
| .. | .. |
|---|
| 2986 | 3308 | rc = -ENOMEM; |
|---|
| 2987 | 3309 | goto err0; |
|---|
| 2988 | 3310 | } |
|---|
| 3311 | + |
|---|
| 3312 | + fc_disc_init(lport); |
|---|
| 2989 | 3313 | |
|---|
| 2990 | 3314 | /* Initialize qedf_ctx */ |
|---|
| 2991 | 3315 | qedf = lport_priv(lport); |
|---|
| .. | .. |
|---|
| 3003 | 3327 | pci_set_drvdata(pdev, qedf); |
|---|
| 3004 | 3328 | init_completion(&qedf->fipvlan_compl); |
|---|
| 3005 | 3329 | mutex_init(&qedf->stats_mutex); |
|---|
| 3330 | + mutex_init(&qedf->flush_mutex); |
|---|
| 3331 | + qedf->flogi_pending = 0; |
|---|
| 3006 | 3332 | |
|---|
| 3007 | 3333 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, |
|---|
| 3008 | 3334 | "QLogic FastLinQ FCoE Module qedf %s, " |
|---|
| .. | .. |
|---|
| 3036 | 3362 | INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); |
|---|
| 3037 | 3363 | INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); |
|---|
| 3038 | 3364 | INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump); |
|---|
| 3365 | + INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work); |
|---|
| 3039 | 3366 | qedf->fipvlan_retries = qedf_fipvlan_retries; |
|---|
| 3040 | 3367 | /* Set a default prio in case DCBX doesn't converge */ |
|---|
| 3041 | 3368 | if (qedf_default_prio > -1) { |
|---|
| .. | .. |
|---|
| 3058 | 3385 | qed_params.is_vf = is_vf; |
|---|
| 3059 | 3386 | qedf->cdev = qed_ops->common->probe(pdev, &qed_params); |
|---|
| 3060 | 3387 | if (!qedf->cdev) { |
|---|
| 3388 | + if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) { |
|---|
| 3389 | + QEDF_ERR(&qedf->dbg_ctx, |
|---|
| 3390 | + "Retry %d initialize hardware\n", retry_cnt); |
|---|
| 3391 | + retry_cnt--; |
|---|
| 3392 | + goto retry_probe; |
|---|
| 3393 | + } |
|---|
| 3394 | + QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n"); |
|---|
| 3061 | 3395 | rc = -ENODEV; |
|---|
| 3062 | 3396 | goto err1; |
|---|
| 3063 | 3397 | } |
|---|
| .. | .. |
|---|
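The recovery-only retry added above jumps back to the retry_probe label, sleeping two seconds before each attempt and giving up after ten retries of the qed common probe. The same bounded-retry control flow, distilled into a standalone sketch (hw_init is a hypothetical stand-in, not a qed call):

```c
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative control flow only; not the driver's actual probe path. */
static int example_probe_with_retry(int (*hw_init)(void), bool recovery)
{
	int retries = 10;

	do {
		if (recovery)
			msleep(2000);	/* give the firmware time to settle */
		if (!hw_init())
			return 0;	/* success */
	} while (recovery && retries--);

	return -ENODEV;	/* in normal mode, a single failure is fatal */
}
```

In the patch itself the retry is limited to QEDF_MODE_RECOVERY, so a first-time probe failure still fails fast.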
| 3068 | 3402 | QEDF_ERR(&(qedf->dbg_ctx), "Failed to get dev info.\n"); |
|---|
| 3069 | 3403 | goto err1; |
|---|
| 3070 | 3404 | } |
|---|
| 3405 | + |
|---|
| 3406 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
|---|
| 3407 | + "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n", |
|---|
| 3408 | + qedf->dev_info.common.num_hwfns, |
|---|
| 3409 | + qed_ops->common->get_affin_hwfn_idx(qedf->cdev)); |
|---|
| 3071 | 3410 | |
|---|
| 3072 | 3411 | /* queue allocation code should come here |
|---|
| 3073 | 3412 | * order should be |
|---|
| .. | .. |
|---|
| 3083 | 3422 | goto err2; |
|---|
| 3084 | 3423 | } |
|---|
| 3085 | 3424 | qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); |
|---|
| 3425 | + |
|---|
| 3426 | + /* Learn information crucial for qedf to progress */ |
|---|
| 3427 | + rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); |
|---|
| 3428 | + if (rc) { |
|---|
| 3429 | + QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n"); |
|---|
| 3430 | + goto err2; |
|---|
| 3431 | + } |
|---|
| 3086 | 3432 | |
|---|
| 3087 | 3433 | /* Record BDQ producer doorbell addresses */ |
|---|
| 3088 | 3434 | qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; |
|---|
| .. | .. |
|---|
| 3121 | 3467 | |
|---|
| 3122 | 3468 | /* Setup interrupts */ |
|---|
| 3123 | 3469 | rc = qedf_setup_int(qedf); |
|---|
| 3124 | | - if (rc) |
|---|
| 3470 | + if (rc) { |
|---|
| 3471 | + QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n"); |
|---|
| 3125 | 3472 | goto err3; |
|---|
| 3473 | + } |
|---|
| 3126 | 3474 | |
|---|
| 3127 | 3475 | rc = qed_ops->start(qedf->cdev, &qedf->tasks); |
|---|
| 3128 | 3476 | if (rc) { |
|---|
| .. | .. |
|---|
| 3145 | 3493 | "Writing %d to primary and secondary BDQ doorbell registers.\n", |
|---|
| 3146 | 3494 | qedf->bdq_prod_idx); |
|---|
| 3147 | 3495 | writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); |
|---|
| 3148 | | - tmp = readw(qedf->bdq_primary_prod); |
|---|
| 3496 | + readw(qedf->bdq_primary_prod); |
|---|
| 3149 | 3497 | writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); |
|---|
| 3150 | | - tmp = readw(qedf->bdq_secondary_prod); |
|---|
| 3498 | + readw(qedf->bdq_secondary_prod); |
|---|
| 3151 | 3499 | |
|---|
| 3152 | 3500 | qed_ops->common->set_power_state(qedf->cdev, PCI_D0); |
|---|
| 3153 | 3501 | |
|---|
| .. | .. |
|---|
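The readw() changes just above drop the unused tmp variable: the read-back after each BDQ doorbell write exists only to flush the posted MMIO write toward the adapter, and its return value is intentionally discarded. The idiom in isolation (hypothetical doorbell register, not qedf's):

```c
#include <linux/io.h>
#include <linux/types.h>

/* Illustrative MMIO doorbell update; db points at a 16-bit device register. */
static void example_ring_doorbell(void __iomem *db, u16 prod_idx)
{
	writew(prod_idx, db);	/* posted write; may linger in a write buffer */
	readw(db);		/* read-back forces the write out to the device */
}
```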
| 3182 | 3530 | sprintf(host_buf, "host_%d", host->host_no); |
|---|
| 3183 | 3531 | qed_ops->common->set_name(qedf->cdev, host_buf); |
|---|
| 3184 | 3532 | |
|---|
| 3185 | | - |
|---|
| 3186 | | - /* Set xid max values */ |
|---|
| 3187 | | - qedf->max_scsi_xid = QEDF_MAX_SCSI_XID; |
|---|
| 3188 | | - qedf->max_els_xid = QEDF_MAX_ELS_XID; |
|---|
| 3189 | | - |
|---|
| 3190 | 3533 | /* Allocate cmd mgr */ |
|---|
| 3191 | 3534 | qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf); |
|---|
| 3192 | 3535 | if (!qedf->cmd_mgr) { |
|---|
| .. | .. |
|---|
| 3197 | 3540 | |
|---|
| 3198 | 3541 | if (mode != QEDF_MODE_RECOVERY) { |
|---|
| 3199 | 3542 | host->transportt = qedf_fc_transport_template; |
|---|
| 3200 | | - host->can_queue = QEDF_MAX_ELS_XID; |
|---|
| 3201 | 3543 | host->max_lun = qedf_max_lun; |
|---|
| 3202 | 3544 | host->max_cmd_len = QEDF_MAX_CDB_LEN; |
|---|
| 3545 | + host->can_queue = FCOE_PARAMS_NUM_TASKS; |
|---|
| 3203 | 3546 | rc = scsi_add_host(host, &pdev->dev); |
|---|
| 3204 | | - if (rc) |
|---|
| 3547 | + if (rc) { |
|---|
| 3548 | + QEDF_WARN(&qedf->dbg_ctx, |
|---|
| 3549 | + "Error adding Scsi_Host rc=0x%x.\n", rc); |
|---|
| 3205 | 3550 | goto err6; |
|---|
| 3551 | + } |
|---|
| 3206 | 3552 | } |
|---|
| 3207 | 3553 | |
|---|
| 3208 | 3554 | memset(¶ms, 0, sizeof(params)); |
|---|
| 3209 | | - params.mtu = 9000; |
|---|
| 3555 | + params.mtu = QEDF_LL2_BUF_SIZE; |
|---|
| 3210 | 3556 | ether_addr_copy(params.ll2_mac_address, qedf->mac); |
|---|
| 3211 | 3557 | |
|---|
| 3212 | 3558 | /* Start LL2 processing thread */ |
|---|
| .. | .. |
|---|
| 3269 | 3615 | qedf->lport->host->host_no); |
|---|
| 3270 | 3616 | qedf->dpc_wq = create_workqueue(host_buf); |
|---|
| 3271 | 3617 | } |
|---|
| 3618 | + INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler); |
|---|
| 3272 | 3619 | |
|---|
| 3273 | 3620 | /* |
|---|
| 3274 | 3621 | * GRC dump and sysfs parameters are not reaped during the recovery |
|---|
| .. | .. |
|---|
| 3345 | 3692 | err1: |
|---|
| 3346 | 3693 | scsi_host_put(lport->host); |
|---|
| 3347 | 3694 | err0: |
|---|
| 3348 | | - if (qedf) { |
|---|
| 3349 | | - QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); |
|---|
| 3350 | | - |
|---|
| 3351 | | - clear_bit(QEDF_PROBING, &qedf->flags); |
|---|
| 3352 | | - } |
|---|
| 3353 | 3695 | return rc; |
|---|
| 3354 | 3696 | } |
|---|
| 3355 | 3697 | |
|---|
| .. | .. |
|---|
| 3387 | 3729 | fcoe_ctlr_link_down(&qedf->ctlr); |
|---|
| 3388 | 3730 | else |
|---|
| 3389 | 3731 | fc_fabric_logoff(qedf->lport); |
|---|
| 3390 | | - qedf_wait_for_upload(qedf); |
|---|
| 3732 | + |
|---|
| 3733 | + if (qedf_wait_for_upload(qedf) == false) |
|---|
| 3734 | + QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); |
|---|
| 3391 | 3735 | |
|---|
| 3392 | 3736 | #ifdef CONFIG_DEBUG_FS |
|---|
| 3393 | 3737 | qedf_dbg_host_exit(&(qedf->dbg_ctx)); |
|---|
| .. | .. |
|---|
| 3490 | 3834 | qedf_capture_grc_dump(qedf); |
|---|
| 3491 | 3835 | } |
|---|
| 3492 | 3836 | |
|---|
| 3837 | +void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type) |
|---|
| 3838 | +{ |
|---|
| 3839 | + struct qedf_ctx *qedf = dev; |
|---|
| 3840 | + |
|---|
| 3841 | + QEDF_ERR(&(qedf->dbg_ctx), |
|---|
| 3842 | + "Hardware error handler scheduled, event=%d.\n", |
|---|
| 3843 | + err_type); |
|---|
| 3844 | + |
|---|
| 3845 | + if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { |
|---|
| 3846 | + QEDF_ERR(&(qedf->dbg_ctx), |
|---|
| 3847 | + "Already in recovery, not scheduling board disable work.\n"); |
|---|
| 3848 | + return; |
|---|
| 3849 | + } |
|---|
| 3850 | + |
|---|
| 3851 | + switch (err_type) { |
|---|
| 3852 | + case QED_HW_ERR_FAN_FAIL: |
|---|
| 3853 | + schedule_delayed_work(&qedf->board_disable_work, 0); |
|---|
| 3854 | + break; |
|---|
| 3855 | + case QED_HW_ERR_MFW_RESP_FAIL: |
|---|
| 3856 | + case QED_HW_ERR_HW_ATTN: |
|---|
| 3857 | + case QED_HW_ERR_DMAE_FAIL: |
|---|
| 3858 | + case QED_HW_ERR_FW_ASSERT: |
|---|
| 3859 | + /* Prevent HW attentions from being reasserted */ |
|---|
| 3860 | + qed_ops->common->attn_clr_enable(qedf->cdev, true); |
|---|
| 3861 | + break; |
|---|
| 3862 | + case QED_HW_ERR_RAMROD_FAIL: |
|---|
| 3863 | + /* Prevent HW attentions from being reasserted */ |
|---|
| 3864 | + qed_ops->common->attn_clr_enable(qedf->cdev, true); |
|---|
| 3865 | + |
|---|
| 3866 | + if (qedf_enable_recovery) |
|---|
| 3867 | + qed_ops->common->recovery_process(qedf->cdev); |
|---|
| 3868 | + |
|---|
| 3869 | + break; |
|---|
| 3870 | + default: |
|---|
| 3871 | + break; |
|---|
| 3872 | + } |
|---|
| 3873 | +} |
|---|
| 3874 | + |
|---|
| 3493 | 3875 | /* |
|---|
| 3494 | 3876 | * Protocol TLV handler |
|---|
| 3495 | 3877 | */ |
|---|
| .. | .. |
|---|
| 3584 | 3966 | |
|---|
| 3585 | 3967 | fcoe->scsi_tsk_full_set = true; |
|---|
| 3586 | 3968 | fcoe->scsi_tsk_full = qedf->task_set_fulls; |
|---|
| 3969 | +} |
|---|
| 3970 | + |
|---|
| 3971 | +/* Deferred work function to perform soft context reset on STAG change */ |
|---|
| 3972 | +void qedf_stag_change_work(struct work_struct *work) |
|---|
| 3973 | +{ |
|---|
| 3974 | + struct qedf_ctx *qedf = |
|---|
| 3975 | + container_of(work, struct qedf_ctx, stag_work.work); |
|---|
| 3976 | + |
|---|
| 3977 | + if (!qedf) { |
|---|
| 3978 | + QEDF_ERR(NULL, "qedf is NULL"); |
|---|
| 3979 | + return; |
|---|
| 3980 | + } |
|---|
| 3981 | + QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n"); |
|---|
| 3982 | + qedf_ctx_soft_reset(qedf->lport); |
|---|
| 3983 | +} |
|---|
| 3984 | + |
|---|
| 3985 | +static void qedf_shutdown(struct pci_dev *pdev) |
|---|
| 3986 | +{ |
|---|
| 3987 | + __qedf_remove(pdev, QEDF_MODE_NORMAL); |
|---|
| 3988 | +} |
|---|
| 3989 | + |
|---|
| 3990 | +/* |
|---|
| 3991 | + * Recovery handler code |
|---|
| 3992 | + */ |
|---|
| 3993 | +static void qedf_schedule_recovery_handler(void *dev) |
|---|
| 3994 | +{ |
|---|
| 3995 | + struct qedf_ctx *qedf = dev; |
|---|
| 3996 | + |
|---|
| 3997 | + QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n"); |
|---|
| 3998 | + schedule_delayed_work(&qedf->recovery_work, 0); |
|---|
| 3999 | +} |
|---|
| 4000 | + |
|---|
| 4001 | +static void qedf_recovery_handler(struct work_struct *work) |
|---|
| 4002 | +{ |
|---|
| 4003 | + struct qedf_ctx *qedf = |
|---|
| 4004 | + container_of(work, struct qedf_ctx, recovery_work.work); |
|---|
| 4005 | + |
|---|
| 4006 | + if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags)) |
|---|
| 4007 | + return; |
|---|
| 4008 | + |
|---|
| 4009 | + /* |
|---|
| 4010 | + * Call common_ops->recovery_prolog to allow the MFW to quiesce |
|---|
| 4011 | + * any PCI transactions. |
|---|
| 4012 | + */ |
|---|
| 4013 | + qed_ops->common->recovery_prolog(qedf->cdev); |
|---|
| 4014 | + |
|---|
| 4015 | + QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n"); |
|---|
| 4016 | + __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY); |
|---|
| 4017 | + /* |
|---|
| 4018 | + * Reset link and dcbx to down state since we will not get a link down |
|---|
| 4019 | + * event from the MFW but calling __qedf_remove will essentially be a |
|---|
| 4020 | + * link down event. |
|---|
| 4021 | + */ |
|---|
| 4022 | + atomic_set(&qedf->link_state, QEDF_LINK_DOWN); |
|---|
| 4023 | + atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); |
|---|
| 4024 | + __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY); |
|---|
| 4025 | + clear_bit(QEDF_IN_RECOVERY, &qedf->flags); |
|---|
| 4026 | + QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n"); |
|---|
| 3587 | 4027 | } |
|---|
| 3588 | 4028 | |
|---|
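The recovery plumbing added by this patch is the standard deferred-work idiom: the qed core invokes qedf_schedule_recovery_handler() from its own context, the driver queues a delayed work item, and the heavyweight __qedf_remove()/__qedf_probe() sequence runs later from the worker, serialized by the QEDF_IN_RECOVERY flag bit. A compact sketch of that idiom with hypothetical names (this is not the driver's actual context structure):

```c
#include <linux/workqueue.h>
#include <linux/bitops.h>

#define EX_IN_RECOVERY	0	/* bit index in ex_ctx.flags; illustrative */

struct ex_ctx {
	unsigned long		flags;
	struct delayed_work	recovery_work;
};

/* Lower-layer callback: just queue the work and return quickly. */
static void ex_schedule_recovery(void *dev)
{
	struct ex_ctx *ctx = dev;

	schedule_delayed_work(&ctx->recovery_work, 0);
}

/* Worker: process context, safe for the slow teardown/re-probe sequence. */
static void ex_recovery_handler(struct work_struct *work)
{
	struct ex_ctx *ctx =
		container_of(work, struct ex_ctx, recovery_work.work);

	if (test_and_set_bit(EX_IN_RECOVERY, &ctx->flags))
		return;		/* a recovery is already in flight */

	/* ... tear down and re-initialize the device here ... */

	clear_bit(EX_IN_RECOVERY, &ctx->flags);
}

/* Called once during probe to wire the worker up. */
static void ex_init_recovery(struct ex_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->recovery_work, ex_recovery_handler);
}
```

Using test_and_set_bit() for the guard keeps the check-and-claim atomic, so a second error arriving while recovery is already running is simply ignored.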
| 3589 | 4029 | /* Generic TLV data callback */ |
|---|
| .. | .. |
|---|
| 3712 | 4152 | } |
|---|
| 3713 | 4153 | |
|---|
| 3714 | 4154 | MODULE_LICENSE("GPL"); |
|---|
| 3715 | | -MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver"); |
|---|
| 4155 | +MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module"); |
|---|
| 3716 | 4156 | MODULE_AUTHOR("QLogic Corporation"); |
|---|
| 3717 | 4157 | MODULE_VERSION(QEDF_VERSION); |
|---|
| 3718 | 4158 | module_init(qedf_init); |
|---|