.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * QLogic FCoE Offload Driver |
---|
3 | 4 | * Copyright (c) 2016-2018 Cavium Inc. |
---|
4 | | - * |
---|
5 | | - * This software is available under the terms of the GNU General Public License |
---|
6 | | - * (GPL) Version 2, available from the file COPYING in the main directory of |
---|
7 | | - * this source tree. |
---|
8 | 5 | */ |
---|
9 | 6 | #include <linux/init.h> |
---|
10 | 7 | #include <linux/kernel.h> |
---|
.. | .. |
---|
16 | 13 | #include <linux/interrupt.h> |
---|
17 | 14 | #include <linux/list.h> |
---|
18 | 15 | #include <linux/kthread.h> |
---|
| 16 | +#include <linux/phylink.h> |
---|
19 | 17 | #include <scsi/libfc.h> |
---|
20 | 18 | #include <scsi/scsi_host.h> |
---|
21 | 19 | #include <scsi/fc_frame.h> |
---|
.. | .. |
---|
30 | 28 | |
---|
31 | 29 | static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id); |
---|
32 | 30 | static void qedf_remove(struct pci_dev *pdev); |
---|
| 31 | +static void qedf_shutdown(struct pci_dev *pdev); |
---|
| 32 | +static void qedf_schedule_recovery_handler(void *dev); |
---|
| 33 | +static void qedf_recovery_handler(struct work_struct *work); |
---|
| 34 | +static int qedf_suspend(struct pci_dev *pdev, pm_message_t state); |
---|
33 | 35 | |
---|
34 | 36 | /* |
---|
35 | 37 | * Driver module parameters. |
---|
.. | .. |
---|
40 | 42 | "remote ports (default 60)"); |
---|
41 | 43 | |
---|
42 | 44 | uint qedf_debug = QEDF_LOG_INFO; |
---|
43 | | -module_param_named(debug, qedf_debug, uint, S_IRUGO); |
---|
| 45 | +module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR); |
---|
44 | 46 | MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging" |
---|
45 | 47 | " mask"); |
---|
46 | 48 | |
---|
.. | .. |
---|
104 | 106 | MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module " |
---|
105 | 107 | "during probe (0-3: 0 more verbose)."); |
---|
106 | 108 | |
---|
| 109 | +static bool qedf_enable_recovery = true; |
---|
| 110 | +module_param_named(enable_recovery, qedf_enable_recovery, |
---|
| 111 | + bool, S_IRUGO | S_IWUSR); |
---|
| 112 | +MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware " |
---|
| 113 | + "interface level errors 0 = Disabled, 1 = Enabled (Default: 1)."); |
---|
| 114 | + |
---|
107 | 115 | struct workqueue_struct *qedf_io_wq; |
---|
108 | 116 | |
---|
109 | 117 | static struct fcoe_percpu_s qedf_global; |
---|
.. | .. |
---|
113 | 121 | |
---|
114 | 122 | void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id) |
---|
115 | 123 | { |
---|
116 | | - qedf->vlan_id = vlan_id; |
---|
117 | | - qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT; |
---|
118 | | - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x " |
---|
119 | | - "prio=%d.\n", vlan_id, qedf->prio); |
---|
| 124 | + int vlan_id_tmp = 0; |
---|
| 125 | + |
---|
| 126 | + vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT); |
---|
| 127 | + qedf->vlan_id = vlan_id_tmp; |
---|
| 128 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
---|
| 129 | + "Setting vlan_id=0x%04x prio=%d.\n", |
---|
| 130 | + vlan_id_tmp, qedf->prio); |
---|
120 | 131 | } |
---|
121 | 132 | |
---|
122 | 133 | /* Returns true if we have a valid vlan, false otherwise */ |
---|
123 | 134 | static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf) |
---|
124 | 135 | { |
---|
125 | | - int rc; |
---|
126 | | - |
---|
127 | | - if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { |
---|
128 | | - QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n"); |
---|
129 | | - return false; |
---|
130 | | - } |
---|
131 | 136 | |
---|
132 | 137 | while (qedf->fipvlan_retries--) { |
---|
133 | | - if (qedf->vlan_id > 0) |
---|
| 138 | + /* This is to catch if link goes down during fipvlan retries */ |
---|
| 139 | + if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { |
---|
| 140 | + QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n"); |
---|
| 141 | + return false; |
---|
| 142 | + } |
---|
| 143 | + |
---|
| 144 | + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { |
---|
| 145 | + QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n"); |
---|
| 146 | + return false; |
---|
| 147 | + } |
---|
| 148 | + |
---|
| 149 | + if (qedf->vlan_id > 0) { |
---|
| 150 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
---|
| 151 | + "vlan = 0x%x already set, calling ctlr_link_up.\n", |
---|
| 152 | + qedf->vlan_id); |
---|
| 153 | + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) |
---|
| 154 | + fcoe_ctlr_link_up(&qedf->ctlr); |
---|
134 | 155 | return true; |
---|
| 156 | + } |
---|
| 157 | + |
---|
135 | 158 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
---|
136 | 159 | "Retry %d.\n", qedf->fipvlan_retries); |
---|
137 | 160 | init_completion(&qedf->fipvlan_compl); |
---|
138 | 161 | qedf_fcoe_send_vlan_req(qedf); |
---|
139 | | - rc = wait_for_completion_timeout(&qedf->fipvlan_compl, |
---|
140 | | - 1 * HZ); |
---|
141 | | - if (rc > 0) { |
---|
142 | | - fcoe_ctlr_link_up(&qedf->ctlr); |
---|
143 | | - return true; |
---|
144 | | - } |
---|
| 162 | + wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ); |
---|
145 | 163 | } |
---|
146 | 164 | |
---|
147 | 165 | return false; |
---|
.. | .. |
---|
153 | 171 | container_of(work, struct qedf_ctx, link_update.work); |
---|
154 | 172 | int rc; |
---|
155 | 173 | |
---|
156 | | - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n"); |
---|
| 174 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n", |
---|
| 175 | + atomic_read(&qedf->link_state)); |
---|
157 | 176 | |
---|
158 | 177 | if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { |
---|
159 | 178 | rc = qedf_initiate_fipvlan_req(qedf); |
---|
160 | 179 | if (rc) |
---|
161 | 180 | return; |
---|
| 181 | + |
---|
| 182 | + if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { |
---|
| 183 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
---|
| 184 | + "Link is down, resetting vlan_id.\n"); |
---|
| 185 | + qedf->vlan_id = 0; |
---|
| 186 | + return; |
---|
| 187 | + } |
---|
| 188 | + |
---|
162 | 189 | /* |
---|
163 | 190 | * If we get here then we never received a repsonse to our |
---|
164 | 191 | * fip vlan request so set the vlan_id to the default and |
---|
.. | .. |
---|
185 | 212 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
---|
186 | 213 | "Calling fcoe_ctlr_link_down().\n"); |
---|
187 | 214 | fcoe_ctlr_link_down(&qedf->ctlr); |
---|
188 | | - qedf_wait_for_upload(qedf); |
---|
| 215 | + if (qedf_wait_for_upload(qedf) == false) |
---|
| 216 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 217 | + "Could not upload all sessions.\n"); |
---|
189 | 218 | /* Reset the number of FIP VLAN retries */ |
---|
190 | 219 | qedf->fipvlan_retries = qedf_fipvlan_retries; |
---|
191 | 220 | } |
---|
.. | .. |
---|
263 | 292 | else if (fc_frame_payload_op(fp) == ELS_LS_ACC) { |
---|
264 | 293 | /* Set the source MAC we will use for FCoE traffic */ |
---|
265 | 294 | qedf_set_data_src_addr(qedf, fp); |
---|
| 295 | + qedf->flogi_pending = 0; |
---|
266 | 296 | } |
---|
267 | 297 | |
---|
268 | 298 | /* Complete flogi_compl so we can proceed to sending ADISCs */ |
---|
.. | .. |
---|
288 | 318 | */ |
---|
289 | 319 | if (resp == fc_lport_flogi_resp) { |
---|
290 | 320 | qedf->flogi_cnt++; |
---|
| 321 | + if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) { |
---|
| 322 | + schedule_delayed_work(&qedf->stag_work, 2); |
---|
| 323 | + return NULL; |
---|
| 324 | + } |
---|
| 325 | + qedf->flogi_pending++; |
---|
291 | 326 | return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp, |
---|
292 | 327 | arg, timeout); |
---|
293 | 328 | } |
---|
.. | .. |
---|
302 | 337 | |
---|
303 | 338 | lport = qedf->lport; |
---|
304 | 339 | |
---|
305 | | - if (!lport->tt.elsct_send) |
---|
| 340 | + if (!lport->tt.elsct_send) { |
---|
| 341 | + QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n"); |
---|
306 | 342 | return -EINVAL; |
---|
| 343 | + } |
---|
307 | 344 | |
---|
308 | 345 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); |
---|
309 | 346 | if (!fp) { |
---|
.. | .. |
---|
321 | 358 | return 0; |
---|
322 | 359 | } |
---|
323 | 360 | |
---|
324 | | -struct qedf_tmp_rdata_item { |
---|
325 | | - struct fc_rport_priv *rdata; |
---|
326 | | - struct list_head list; |
---|
327 | | -}; |
---|
328 | | - |
---|
329 | 361 | /* |
---|
330 | 362 | * This function is called if link_down_tmo is in use. If we get a link up and |
---|
331 | 363 | * link_down_tmo has not expired then use just FLOGI/ADISC to recover our |
---|
.. | .. |
---|
335 | 367 | { |
---|
336 | 368 | struct qedf_ctx *qedf = |
---|
337 | 369 | container_of(work, struct qedf_ctx, link_recovery.work); |
---|
338 | | - struct qedf_rport *fcport; |
---|
| 370 | + struct fc_lport *lport = qedf->lport; |
---|
339 | 371 | struct fc_rport_priv *rdata; |
---|
340 | | - struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item; |
---|
341 | 372 | bool rc; |
---|
342 | 373 | int retries = 30; |
---|
343 | 374 | int rval, i; |
---|
.. | .. |
---|
404 | 435 | * Call lport->tt.rport_login which will cause libfc to send an |
---|
405 | 436 | * ADISC since the rport is in state ready. |
---|
406 | 437 | */ |
---|
407 | | - rcu_read_lock(); |
---|
408 | | - list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { |
---|
409 | | - rdata = fcport->rdata; |
---|
410 | | - if (rdata == NULL) |
---|
411 | | - continue; |
---|
412 | | - rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item), |
---|
413 | | - GFP_ATOMIC); |
---|
414 | | - if (!rdata_item) |
---|
415 | | - continue; |
---|
| 438 | + mutex_lock(&lport->disc.disc_mutex); |
---|
| 439 | + list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { |
---|
416 | 440 | if (kref_get_unless_zero(&rdata->kref)) { |
---|
417 | | - rdata_item->rdata = rdata; |
---|
418 | | - list_add(&rdata_item->list, &rdata_login_list); |
---|
419 | | - } else |
---|
420 | | - kfree(rdata_item); |
---|
| 441 | + fc_rport_login(rdata); |
---|
| 442 | + kref_put(&rdata->kref, fc_rport_destroy); |
---|
| 443 | + } |
---|
421 | 444 | } |
---|
422 | | - rcu_read_unlock(); |
---|
423 | | - /* |
---|
424 | | - * Do the fc_rport_login outside of the rcu lock so we don't take a |
---|
425 | | - * mutex in an atomic context. |
---|
426 | | - */ |
---|
427 | | - list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list, |
---|
428 | | - list) { |
---|
429 | | - list_del(&rdata_item->list); |
---|
430 | | - fc_rport_login(rdata_item->rdata); |
---|
431 | | - kref_put(&rdata_item->rdata->kref, fc_rport_destroy); |
---|
432 | | - kfree(rdata_item); |
---|
433 | | - } |
---|
| 445 | + mutex_unlock(&lport->disc.disc_mutex); |
---|
434 | 446 | } |
---|
435 | 447 | |
---|
436 | 448 | static void qedf_update_link_speed(struct qedf_ctx *qedf, |
---|
437 | 449 | struct qed_link_output *link) |
---|
438 | 450 | { |
---|
| 451 | + __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); |
---|
439 | 452 | struct fc_lport *lport = qedf->lport; |
---|
440 | 453 | |
---|
441 | 454 | lport->link_speed = FC_PORTSPEED_UNKNOWN; |
---|
.. | .. |
---|
458 | 471 | case 100000: |
---|
459 | 472 | lport->link_speed = FC_PORTSPEED_100GBIT; |
---|
460 | 473 | break; |
---|
| 474 | + case 20000: |
---|
| 475 | + lport->link_speed = FC_PORTSPEED_20GBIT; |
---|
| 476 | + break; |
---|
461 | 477 | default: |
---|
462 | 478 | lport->link_speed = FC_PORTSPEED_UNKNOWN; |
---|
463 | 479 | break; |
---|
.. | .. |
---|
467 | 483 | * Set supported link speed by querying the supported |
---|
468 | 484 | * capabilities of the link. |
---|
469 | 485 | */ |
---|
470 | | - if (link->supported_caps & SUPPORTED_10000baseKR_Full) |
---|
| 486 | + |
---|
| 487 | + phylink_zero(sup_caps); |
---|
| 488 | + phylink_set(sup_caps, 10000baseT_Full); |
---|
| 489 | + phylink_set(sup_caps, 10000baseKX4_Full); |
---|
| 490 | + phylink_set(sup_caps, 10000baseR_FEC); |
---|
| 491 | + phylink_set(sup_caps, 10000baseCR_Full); |
---|
| 492 | + phylink_set(sup_caps, 10000baseSR_Full); |
---|
| 493 | + phylink_set(sup_caps, 10000baseLR_Full); |
---|
| 494 | + phylink_set(sup_caps, 10000baseLRM_Full); |
---|
| 495 | + phylink_set(sup_caps, 10000baseKR_Full); |
---|
| 496 | + |
---|
| 497 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
---|
471 | 498 | lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; |
---|
472 | | - if (link->supported_caps & SUPPORTED_25000baseKR_Full) |
---|
| 499 | + |
---|
| 500 | + phylink_zero(sup_caps); |
---|
| 501 | + phylink_set(sup_caps, 25000baseKR_Full); |
---|
| 502 | + phylink_set(sup_caps, 25000baseCR_Full); |
---|
| 503 | + phylink_set(sup_caps, 25000baseSR_Full); |
---|
| 504 | + |
---|
| 505 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
---|
473 | 506 | lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; |
---|
474 | | - if (link->supported_caps & SUPPORTED_40000baseLR4_Full) |
---|
| 507 | + |
---|
| 508 | + phylink_zero(sup_caps); |
---|
| 509 | + phylink_set(sup_caps, 40000baseLR4_Full); |
---|
| 510 | + phylink_set(sup_caps, 40000baseKR4_Full); |
---|
| 511 | + phylink_set(sup_caps, 40000baseCR4_Full); |
---|
| 512 | + phylink_set(sup_caps, 40000baseSR4_Full); |
---|
| 513 | + |
---|
| 514 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
---|
475 | 515 | lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; |
---|
476 | | - if (link->supported_caps & SUPPORTED_50000baseKR2_Full) |
---|
| 516 | + |
---|
| 517 | + phylink_zero(sup_caps); |
---|
| 518 | + phylink_set(sup_caps, 50000baseKR2_Full); |
---|
| 519 | + phylink_set(sup_caps, 50000baseCR2_Full); |
---|
| 520 | + phylink_set(sup_caps, 50000baseSR2_Full); |
---|
| 521 | + |
---|
| 522 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
---|
477 | 523 | lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; |
---|
478 | | - if (link->supported_caps & SUPPORTED_100000baseKR4_Full) |
---|
| 524 | + |
---|
| 525 | + phylink_zero(sup_caps); |
---|
| 526 | + phylink_set(sup_caps, 100000baseKR4_Full); |
---|
| 527 | + phylink_set(sup_caps, 100000baseSR4_Full); |
---|
| 528 | + phylink_set(sup_caps, 100000baseCR4_Full); |
---|
| 529 | + phylink_set(sup_caps, 100000baseLR4_ER4_Full); |
---|
| 530 | + |
---|
| 531 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
---|
479 | 532 | lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; |
---|
480 | | - fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; |
---|
| 533 | + |
---|
| 534 | + phylink_zero(sup_caps); |
---|
| 535 | + phylink_set(sup_caps, 20000baseKR2_Full); |
---|
| 536 | + |
---|
| 537 | + if (linkmode_intersects(link->supported_caps, sup_caps)) |
---|
| 538 | + lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; |
---|
| 539 | + |
---|
| 540 | + if (lport->host && lport->host->shost_data) |
---|
| 541 | + fc_host_supported_speeds(lport->host) = |
---|
| 542 | + lport->link_supported_speeds; |
---|
| 543 | +} |
---|
| 544 | + |
---|
| 545 | +static void qedf_bw_update(void *dev) |
---|
| 546 | +{ |
---|
| 547 | + struct qedf_ctx *qedf = (struct qedf_ctx *)dev; |
---|
| 548 | + struct qed_link_output link; |
---|
| 549 | + |
---|
| 550 | + /* Get the latest status of the link */ |
---|
| 551 | + qed_ops->common->get_link(qedf->cdev, &link); |
---|
| 552 | + |
---|
| 553 | + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { |
---|
| 554 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 555 | + "Ignore link update, driver getting unload.\n"); |
---|
| 556 | + return; |
---|
| 557 | + } |
---|
| 558 | + |
---|
| 559 | + if (link.link_up) { |
---|
| 560 | + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) |
---|
| 561 | + qedf_update_link_speed(qedf, &link); |
---|
| 562 | + else |
---|
| 563 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 564 | + "Ignore bw update, link is down.\n"); |
---|
| 565 | + |
---|
| 566 | + } else { |
---|
| 567 | + QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n"); |
---|
| 568 | + } |
---|
481 | 569 | } |
---|
482 | 570 | |
---|
483 | 571 | static void qedf_link_update(void *dev, struct qed_link_output *link) |
---|
484 | 572 | { |
---|
485 | 573 | struct qedf_ctx *qedf = (struct qedf_ctx *)dev; |
---|
| 574 | + |
---|
| 575 | + /* |
---|
| 576 | + * Prevent race where we're removing the module and we get link update |
---|
| 577 | + * for qed. |
---|
| 578 | + */ |
---|
| 579 | + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { |
---|
| 580 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 581 | + "Ignore link update, driver getting unload.\n"); |
---|
| 582 | + return; |
---|
| 583 | + } |
---|
486 | 584 | |
---|
487 | 585 | if (link->link_up) { |
---|
488 | 586 | if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { |
---|
.. | .. |
---|
563 | 661 | tmp_prio = get->operational.app_prio.fcoe; |
---|
564 | 662 | if (qedf_default_prio > -1) |
---|
565 | 663 | qedf->prio = qedf_default_prio; |
---|
566 | | - else if (tmp_prio < 0 || tmp_prio > 7) { |
---|
| 664 | + else if (tmp_prio > 7) { |
---|
567 | 665 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
---|
568 | 666 | "FIP/FCoE prio %d out of range, setting to %d.\n", |
---|
569 | 667 | tmp_prio, QEDF_DEFAULT_PRIO); |
---|
.. | .. |
---|
596 | 694 | static struct qed_fcoe_cb_ops qedf_cb_ops = { |
---|
597 | 695 | { |
---|
598 | 696 | .link_update = qedf_link_update, |
---|
| 697 | + .bw_update = qedf_bw_update, |
---|
| 698 | + .schedule_recovery_handler = qedf_schedule_recovery_handler, |
---|
599 | 699 | .dcbx_aen = qedf_dcbx_handler, |
---|
600 | 700 | .get_generic_tlv_data = qedf_get_generic_tlv_data, |
---|
601 | 701 | .get_protocol_tlv_data = qedf_get_protocol_tlv_data, |
---|
| 702 | + .schedule_hw_err_handler = qedf_schedule_hw_err_handler, |
---|
602 | 703 | } |
---|
603 | 704 | }; |
---|
604 | 705 | |
---|
.. | .. |
---|
615 | 716 | static int qedf_eh_abort(struct scsi_cmnd *sc_cmd) |
---|
616 | 717 | { |
---|
617 | 718 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); |
---|
618 | | - struct fc_rport_libfc_priv *rp = rport->dd_data; |
---|
619 | | - struct qedf_rport *fcport; |
---|
620 | 719 | struct fc_lport *lport; |
---|
621 | 720 | struct qedf_ctx *qedf; |
---|
622 | 721 | struct qedf_ioreq *io_req; |
---|
| 722 | + struct fc_rport_libfc_priv *rp = rport->dd_data; |
---|
| 723 | + struct fc_rport_priv *rdata; |
---|
| 724 | + struct qedf_rport *fcport = NULL; |
---|
623 | 725 | int rc = FAILED; |
---|
| 726 | + int wait_count = 100; |
---|
| 727 | + int refcount = 0; |
---|
624 | 728 | int rval; |
---|
625 | | - |
---|
626 | | - if (fc_remote_port_chkready(rport)) { |
---|
627 | | - QEDF_ERR(NULL, "rport not ready\n"); |
---|
628 | | - goto out; |
---|
629 | | - } |
---|
| 729 | + int got_ref = 0; |
---|
630 | 730 | |
---|
631 | 731 | lport = shost_priv(sc_cmd->device->host); |
---|
632 | 732 | qedf = (struct qedf_ctx *)lport_priv(lport); |
---|
633 | 733 | |
---|
634 | | - if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { |
---|
635 | | - QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n"); |
---|
636 | | - goto out; |
---|
637 | | - } |
---|
638 | | - |
---|
| 734 | + /* rport and tgt are allocated together, so tgt should be non-NULL */ |
---|
639 | 735 | fcport = (struct qedf_rport *)&rp[1]; |
---|
640 | | - |
---|
641 | | - io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr; |
---|
642 | | - if (!io_req) { |
---|
643 | | - QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n"); |
---|
| 736 | + rdata = fcport->rdata; |
---|
| 737 | + if (!rdata || !kref_get_unless_zero(&rdata->kref)) { |
---|
| 738 | + QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd); |
---|
644 | 739 | rc = SUCCESS; |
---|
645 | 740 | goto out; |
---|
646 | 741 | } |
---|
647 | 742 | |
---|
648 | | - QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x " |
---|
649 | | - "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx); |
---|
| 743 | + |
---|
| 744 | + io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr; |
---|
| 745 | + if (!io_req) { |
---|
| 746 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 747 | + "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n", |
---|
| 748 | + sc_cmd, sc_cmd->cmnd[0], |
---|
| 749 | + rdata->ids.port_id); |
---|
| 750 | + rc = SUCCESS; |
---|
| 751 | + goto drop_rdata_kref; |
---|
| 752 | + } |
---|
| 753 | + |
---|
| 754 | + rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */ |
---|
| 755 | + if (rval) |
---|
| 756 | + got_ref = 1; |
---|
| 757 | + |
---|
| 758 | + /* If we got a valid io_req, confirm it belongs to this sc_cmd. */ |
---|
| 759 | + if (!rval || io_req->sc_cmd != sc_cmd) { |
---|
| 760 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 761 | + "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n", |
---|
| 762 | + io_req->sc_cmd, sc_cmd, rdata->ids.port_id); |
---|
| 763 | + |
---|
| 764 | + goto drop_rdata_kref; |
---|
| 765 | + } |
---|
| 766 | + |
---|
| 767 | + if (fc_remote_port_chkready(rport)) { |
---|
| 768 | + refcount = kref_read(&io_req->refcount); |
---|
| 769 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 770 | + "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n", |
---|
| 771 | + io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0], |
---|
| 772 | + refcount, rdata->ids.port_id); |
---|
| 773 | + |
---|
| 774 | + goto drop_rdata_kref; |
---|
| 775 | + } |
---|
| 776 | + |
---|
| 777 | + rc = fc_block_scsi_eh(sc_cmd); |
---|
| 778 | + if (rc) |
---|
| 779 | + goto drop_rdata_kref; |
---|
| 780 | + |
---|
| 781 | + if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { |
---|
| 782 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 783 | + "Connection uploading, xid=0x%x., port_id=%06x\n", |
---|
| 784 | + io_req->xid, rdata->ids.port_id); |
---|
| 785 | + while (io_req->sc_cmd && (wait_count != 0)) { |
---|
| 786 | + msleep(100); |
---|
| 787 | + wait_count--; |
---|
| 788 | + } |
---|
| 789 | + if (wait_count) { |
---|
| 790 | + QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n"); |
---|
| 791 | + rc = SUCCESS; |
---|
| 792 | + } else { |
---|
| 793 | + QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n"); |
---|
| 794 | + rc = FAILED; |
---|
| 795 | + } |
---|
| 796 | + goto drop_rdata_kref; |
---|
| 797 | + } |
---|
| 798 | + |
---|
| 799 | + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { |
---|
| 800 | + QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n"); |
---|
| 801 | + goto drop_rdata_kref; |
---|
| 802 | + } |
---|
| 803 | + |
---|
| 804 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 805 | + "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n", |
---|
| 806 | + io_req, sc_cmd, io_req->xid, io_req->fp_idx, |
---|
| 807 | + rdata->ids.port_id); |
---|
650 | 808 | |
---|
651 | 809 | if (qedf->stop_io_on_error) { |
---|
652 | 810 | qedf_stop_all_io(qedf); |
---|
653 | 811 | rc = SUCCESS; |
---|
654 | | - goto out; |
---|
| 812 | + goto drop_rdata_kref; |
---|
655 | 813 | } |
---|
656 | 814 | |
---|
657 | 815 | init_completion(&io_req->abts_done); |
---|
658 | 816 | rval = qedf_initiate_abts(io_req, true); |
---|
659 | 817 | if (rval) { |
---|
660 | 818 | QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); |
---|
661 | | - goto out; |
---|
| 819 | + /* |
---|
| 820 | + * If we fail to queue the ABTS then return this command to |
---|
| 821 | + * the SCSI layer as it will own and free the xid |
---|
| 822 | + */ |
---|
| 823 | + rc = SUCCESS; |
---|
| 824 | + qedf_scsi_done(qedf, io_req, DID_ERROR); |
---|
| 825 | + goto drop_rdata_kref; |
---|
662 | 826 | } |
---|
663 | 827 | |
---|
664 | 828 | wait_for_completion(&io_req->abts_done); |
---|
.. | .. |
---|
684 | 848 | QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n", |
---|
685 | 849 | io_req->xid); |
---|
686 | 850 | |
---|
| 851 | +drop_rdata_kref: |
---|
| 852 | + kref_put(&rdata->kref, fc_rport_destroy); |
---|
687 | 853 | out: |
---|
| 854 | + if (got_ref) |
---|
| 855 | + kref_put(&io_req->refcount, qedf_release_cmd); |
---|
688 | 856 | return rc; |
---|
689 | 857 | } |
---|
690 | 858 | |
---|
691 | 859 | static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd) |
---|
692 | 860 | { |
---|
693 | | - QEDF_ERR(NULL, "TARGET RESET Issued..."); |
---|
| 861 | + QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...", |
---|
| 862 | + sc_cmd->device->host->host_no, sc_cmd->device->id, |
---|
| 863 | + sc_cmd->device->lun); |
---|
694 | 864 | return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); |
---|
695 | 865 | } |
---|
696 | 866 | |
---|
697 | 867 | static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd) |
---|
698 | 868 | { |
---|
699 | | - QEDF_ERR(NULL, "LUN RESET Issued...\n"); |
---|
| 869 | + QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ", |
---|
| 870 | + sc_cmd->device->host->host_no, sc_cmd->device->id, |
---|
| 871 | + sc_cmd->device->lun); |
---|
700 | 872 | return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); |
---|
701 | 873 | } |
---|
702 | 874 | |
---|
703 | | -void qedf_wait_for_upload(struct qedf_ctx *qedf) |
---|
| 875 | +bool qedf_wait_for_upload(struct qedf_ctx *qedf) |
---|
704 | 876 | { |
---|
705 | | - while (1) { |
---|
| 877 | + struct qedf_rport *fcport = NULL; |
---|
| 878 | + int wait_cnt = 120; |
---|
| 879 | + |
---|
| 880 | + while (wait_cnt--) { |
---|
706 | 881 | if (atomic_read(&qedf->num_offloads)) |
---|
707 | | - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
---|
708 | | - "Waiting for all uploads to complete.\n"); |
---|
| 882 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
---|
| 883 | + "Waiting for all uploads to complete num_offloads = 0x%x.\n", |
---|
| 884 | + atomic_read(&qedf->num_offloads)); |
---|
709 | 885 | else |
---|
710 | | - break; |
---|
| 886 | + return true; |
---|
711 | 887 | msleep(500); |
---|
712 | 888 | } |
---|
| 889 | + |
---|
| 890 | + rcu_read_lock(); |
---|
| 891 | + list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { |
---|
| 892 | + if (fcport && test_bit(QEDF_RPORT_SESSION_READY, |
---|
| 893 | + &fcport->flags)) { |
---|
| 894 | + if (fcport->rdata) |
---|
| 895 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 896 | + "Waiting for fcport %p portid=%06x.\n", |
---|
| 897 | + fcport, fcport->rdata->ids.port_id); |
---|
| 898 | + } else { |
---|
| 899 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 900 | + "Waiting for fcport %p.\n", fcport); |
---|
| 901 | + } |
---|
| 902 | + } |
---|
| 903 | + rcu_read_unlock(); |
---|
| 904 | + return false; |
---|
| 905 | + |
---|
713 | 906 | } |
---|
714 | 907 | |
---|
715 | 908 | /* Performs soft reset of qedf_ctx by simulating a link down/up */ |
---|
716 | | -static void qedf_ctx_soft_reset(struct fc_lport *lport) |
---|
| 909 | +void qedf_ctx_soft_reset(struct fc_lport *lport) |
---|
717 | 910 | { |
---|
718 | 911 | struct qedf_ctx *qedf; |
---|
| 912 | + struct qed_link_output if_link; |
---|
719 | 913 | |
---|
720 | 914 | if (lport->vport) { |
---|
721 | 915 | QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n"); |
---|
.. | .. |
---|
724 | 918 | |
---|
725 | 919 | qedf = lport_priv(lport); |
---|
726 | 920 | |
---|
| 921 | + qedf->flogi_pending = 0; |
---|
727 | 922 | /* For host reset, essentially do a soft link up/down */ |
---|
728 | 923 | atomic_set(&qedf->link_state, QEDF_LINK_DOWN); |
---|
| 924 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
---|
| 925 | + "Queuing link down work.\n"); |
---|
729 | 926 | queue_delayed_work(qedf->link_update_wq, &qedf->link_update, |
---|
730 | 927 | 0); |
---|
731 | | - qedf_wait_for_upload(qedf); |
---|
| 928 | + |
---|
| 929 | + if (qedf_wait_for_upload(qedf) == false) { |
---|
| 930 | + QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); |
---|
| 931 | + WARN_ON(atomic_read(&qedf->num_offloads)); |
---|
| 932 | + } |
---|
| 933 | + |
---|
| 934 | + /* Before setting link up query physical link state */ |
---|
| 935 | + qed_ops->common->get_link(qedf->cdev, &if_link); |
---|
| 936 | + /* Bail if the physical link is not up */ |
---|
| 937 | + if (!if_link.link_up) { |
---|
| 938 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
---|
| 939 | + "Physical link is not up.\n"); |
---|
| 940 | + return; |
---|
| 941 | + } |
---|
| 942 | + /* Flush and wait to make sure link down is processed */ |
---|
| 943 | + flush_delayed_work(&qedf->link_update); |
---|
| 944 | + msleep(500); |
---|
| 945 | + |
---|
732 | 946 | atomic_set(&qedf->link_state, QEDF_LINK_UP); |
---|
733 | 947 | qedf->vlan_id = 0; |
---|
| 948 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
---|
| 949 | + "Queue link up work.\n"); |
---|
734 | 950 | queue_delayed_work(qedf->link_update_wq, &qedf->link_update, |
---|
735 | 951 | 0); |
---|
736 | 952 | } |
---|
.. | .. |
---|
740 | 956 | { |
---|
741 | 957 | struct fc_lport *lport; |
---|
742 | 958 | struct qedf_ctx *qedf; |
---|
743 | | - struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); |
---|
744 | | - struct fc_rport_libfc_priv *rp = rport->dd_data; |
---|
745 | | - struct qedf_rport *fcport = (struct qedf_rport *)&rp[1]; |
---|
746 | | - int rval; |
---|
747 | | - |
---|
748 | | - rval = fc_remote_port_chkready(rport); |
---|
749 | | - |
---|
750 | | - if (rval) { |
---|
751 | | - QEDF_ERR(NULL, "device_reset rport not ready\n"); |
---|
752 | | - return FAILED; |
---|
753 | | - } |
---|
754 | | - |
---|
755 | | - if (fcport == NULL) { |
---|
756 | | - QEDF_ERR(NULL, "device_reset: rport is NULL\n"); |
---|
757 | | - return FAILED; |
---|
758 | | - } |
---|
759 | 959 | |
---|
760 | 960 | lport = shost_priv(sc_cmd->device->host); |
---|
761 | 961 | qedf = lport_priv(lport); |
---|
.. | .. |
---|
785 | 985 | .name = QEDF_MODULE_NAME, |
---|
786 | 986 | .this_id = -1, |
---|
787 | 987 | .cmd_per_lun = 32, |
---|
788 | | - .use_clustering = ENABLE_CLUSTERING, |
---|
789 | 988 | .max_sectors = 0xffff, |
---|
790 | 989 | .queuecommand = qedf_queuecommand, |
---|
791 | 990 | .shost_attrs = qedf_host_attrs, |
---|
.. | .. |
---|
852 | 1051 | return rc; |
---|
853 | 1052 | } |
---|
854 | 1053 | |
---|
855 | | -/** |
---|
| 1054 | +/* |
---|
856 | 1055 | * qedf_xmit - qedf FCoE frame transmit function |
---|
857 | | - * |
---|
858 | 1056 | */ |
---|
859 | 1057 | static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) |
---|
860 | 1058 | { |
---|
.. | .. |
---|
908 | 1106 | "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id)); |
---|
909 | 1107 | kfree_skb(skb); |
---|
910 | 1108 | rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id)); |
---|
911 | | - if (rdata) |
---|
| 1109 | + if (rdata) { |
---|
912 | 1110 | rdata->retries = lport->max_rport_retry_count; |
---|
| 1111 | + kref_put(&rdata->kref, fc_rport_destroy); |
---|
| 1112 | + } |
---|
913 | 1113 | return -EINVAL; |
---|
914 | 1114 | } |
---|
915 | 1115 | /* End NPIV filtering */ |
---|
.. | .. |
---|
969 | 1169 | return -ENOMEM; |
---|
970 | 1170 | } |
---|
971 | 1171 | frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; |
---|
972 | | - cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; |
---|
| 1172 | + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); |
---|
973 | 1173 | } else { |
---|
974 | 1174 | cp = skb_put(skb, tlen); |
---|
975 | 1175 | } |
---|
.. | .. |
---|
1032 | 1232 | if (qedf_dump_frames) |
---|
1033 | 1233 | print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, |
---|
1034 | 1234 | 1, skb->data, skb->len, false); |
---|
1035 | | - qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); |
---|
| 1235 | + rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); |
---|
| 1236 | + if (rc) { |
---|
| 1237 | + QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc); |
---|
| 1238 | + kfree_skb(skb); |
---|
| 1239 | + return rc; |
---|
| 1240 | + } |
---|
1036 | 1241 | |
---|
1037 | 1242 | return 0; |
---|
1038 | 1243 | } |
---|
.. | .. |
---|
1051 | 1256 | sizeof(void *); |
---|
1052 | 1257 | fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; |
---|
1053 | 1258 | |
---|
1054 | | - fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev, |
---|
1055 | | - fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL); |
---|
| 1259 | + fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, |
---|
| 1260 | + &fcport->sq_dma, GFP_KERNEL); |
---|
1056 | 1261 | if (!fcport->sq) { |
---|
1057 | 1262 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); |
---|
1058 | 1263 | rval = 1; |
---|
1059 | 1264 | goto out; |
---|
1060 | 1265 | } |
---|
1061 | 1266 | |
---|
1062 | | - fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev, |
---|
1063 | | - fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); |
---|
| 1267 | + fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, |
---|
| 1268 | + fcport->sq_pbl_size, |
---|
| 1269 | + &fcport->sq_pbl_dma, GFP_KERNEL); |
---|
1064 | 1270 | if (!fcport->sq_pbl) { |
---|
1065 | 1271 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); |
---|
1066 | 1272 | rval = 1; |
---|
.. | .. |
---|
1137 | 1343 | ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr); |
---|
1138 | 1344 | |
---|
1139 | 1345 | conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size; |
---|
1140 | | - conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20; |
---|
| 1346 | + conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov; |
---|
1141 | 1347 | conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */ |
---|
1142 | 1348 | conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size; |
---|
1143 | 1349 | |
---|
.. | .. |
---|
1224 | 1430 | static void qedf_cleanup_fcport(struct qedf_ctx *qedf, |
---|
1225 | 1431 | struct qedf_rport *fcport) |
---|
1226 | 1432 | { |
---|
| 1433 | + struct fc_rport_priv *rdata = fcport->rdata; |
---|
| 1434 | + |
---|
1227 | 1435 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n", |
---|
1228 | 1436 | fcport->rdata->ids.port_id); |
---|
1229 | 1437 | |
---|
.. | .. |
---|
1235 | 1443 | qedf_free_sq(qedf, fcport); |
---|
1236 | 1444 | fcport->rdata = NULL; |
---|
1237 | 1445 | fcport->qedf = NULL; |
---|
| 1446 | + kref_put(&rdata->kref, fc_rport_destroy); |
---|
1238 | 1447 | } |
---|
1239 | 1448 | |
---|
1240 | | -/** |
---|
| 1449 | +/* |
---|
1241 | 1450 | * This event_callback is called after successful completion of libfc |
---|
1242 | 1451 | * initiated target login. qedf can proceed with initiating the session |
---|
1243 | 1452 | * establishment. |
---|
.. | .. |
---|
1310 | 1519 | break; |
---|
1311 | 1520 | } |
---|
1312 | 1521 | |
---|
| 1522 | + /* Initial reference held on entry, so this can't fail */ |
---|
| 1523 | + kref_get(&rdata->kref); |
---|
1313 | 1524 | fcport->rdata = rdata; |
---|
1314 | 1525 | fcport->rport = rport; |
---|
1315 | 1526 | |
---|
.. | .. |
---|
1357 | 1568 | if (port_id == FC_FID_DIR_SERV) |
---|
1358 | 1569 | break; |
---|
1359 | 1570 | |
---|
| 1571 | + if (rdata->spp_type != FC_TYPE_FCP) { |
---|
| 1572 | + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
---|
| 1573 | + "No action since spp type isn't FCP\n"); |
---|
| 1574 | + break; |
---|
| 1575 | + } |
---|
| 1576 | + if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { |
---|
| 1577 | + QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
---|
| 1578 | + "Not FCP target so no action\n"); |
---|
| 1579 | + break; |
---|
| 1580 | + } |
---|
| 1581 | + |
---|
1360 | 1582 | if (!rport) { |
---|
1361 | 1583 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, |
---|
1362 | 1584 | "port_id=%x - rport notcreated Yet!!\n", port_id); |
---|
.. | .. |
---|
1369 | 1591 | */ |
---|
1370 | 1592 | fcport = (struct qedf_rport *)&rp[1]; |
---|
1371 | 1593 | |
---|
| 1594 | + spin_lock_irqsave(&fcport->rport_lock, flags); |
---|
1372 | 1595 | /* Only free this fcport if it is offloaded already */ |
---|
1373 | | - if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { |
---|
1374 | | - set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags); |
---|
| 1596 | + if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) && |
---|
| 1597 | + !test_bit(QEDF_RPORT_UPLOADING_CONNECTION, |
---|
| 1598 | + &fcport->flags)) { |
---|
| 1599 | + set_bit(QEDF_RPORT_UPLOADING_CONNECTION, |
---|
| 1600 | + &fcport->flags); |
---|
| 1601 | + spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
1375 | 1602 | qedf_cleanup_fcport(qedf, fcport); |
---|
1376 | | - |
---|
1377 | 1603 | /* |
---|
1378 | 1604 | * Remove fcport to list of qedf_ctx list of offloaded |
---|
1379 | 1605 | * ports |
---|
.. | .. |
---|
1385 | 1611 | clear_bit(QEDF_RPORT_UPLOADING_CONNECTION, |
---|
1386 | 1612 | &fcport->flags); |
---|
1387 | 1613 | atomic_dec(&qedf->num_offloads); |
---|
| 1614 | + } else { |
---|
| 1615 | + spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
1388 | 1616 | } |
---|
1389 | | - |
---|
1390 | 1617 | break; |
---|
1391 | 1618 | |
---|
1392 | 1619 | case RPORT_EV_NONE: |
---|
.. | .. |
---|
1428 | 1655 | static void qedf_setup_fdmi(struct qedf_ctx *qedf) |
---|
1429 | 1656 | { |
---|
1430 | 1657 | struct fc_lport *lport = qedf->lport; |
---|
1431 | | - struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host); |
---|
1432 | 1658 | u8 buf[8]; |
---|
1433 | | - int i, pos; |
---|
| 1659 | + int pos; |
---|
| 1660 | + uint32_t i; |
---|
1434 | 1661 | |
---|
1435 | 1662 | /* |
---|
1436 | | - * fdmi_enabled needs to be set for libfc to execute FDMI registration. |
---|
| 1663 | + * fdmi_enabled needs to be set for libfc |
---|
| 1664 | + * to execute FDMI registration |
---|
1437 | 1665 | */ |
---|
1438 | 1666 | lport->fdmi_enabled = 1; |
---|
1439 | 1667 | |
---|
.. | .. |
---|
1449 | 1677 | for (i = 0; i < 8; i++) |
---|
1450 | 1678 | pci_read_config_byte(qedf->pdev, pos + i, &buf[i]); |
---|
1451 | 1679 | |
---|
1452 | | - snprintf(fc_host->serial_number, |
---|
1453 | | - sizeof(fc_host->serial_number), |
---|
| 1680 | + snprintf(fc_host_serial_number(lport->host), |
---|
| 1681 | + FC_SERIAL_NUMBER_SIZE, |
---|
1454 | 1682 | "%02X%02X%02X%02X%02X%02X%02X%02X", |
---|
1455 | 1683 | buf[7], buf[6], buf[5], buf[4], |
---|
1456 | 1684 | buf[3], buf[2], buf[1], buf[0]); |
---|
1457 | 1685 | } else |
---|
1458 | | - snprintf(fc_host->serial_number, |
---|
1459 | | - sizeof(fc_host->serial_number), "Unknown"); |
---|
| 1686 | + snprintf(fc_host_serial_number(lport->host), |
---|
| 1687 | + FC_SERIAL_NUMBER_SIZE, "Unknown"); |
---|
1460 | 1688 | |
---|
1461 | | - snprintf(fc_host->manufacturer, |
---|
1462 | | - sizeof(fc_host->manufacturer), "%s", "Cavium Inc."); |
---|
| 1689 | + snprintf(fc_host_manufacturer(lport->host), |
---|
| 1690 | + FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc."); |
---|
1463 | 1691 | |
---|
1464 | | - snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000"); |
---|
| 1692 | + if (qedf->pdev->device == QL45xxx) { |
---|
| 1693 | + snprintf(fc_host_model(lport->host), |
---|
| 1694 | + FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx"); |
---|
1465 | 1695 | |
---|
1466 | | - snprintf(fc_host->model_description, sizeof(fc_host->model_description), |
---|
1467 | | - "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller" |
---|
1468 | | - "(FCoE)"); |
---|
| 1696 | + snprintf(fc_host_model_description(lport->host), |
---|
| 1697 | + FC_SYMBOLIC_NAME_SIZE, "%s", |
---|
| 1698 | + "Marvell FastLinQ QL45xxx FCoE Adapter"); |
---|
| 1699 | + } |
---|
1469 | 1700 | |
---|
1470 | | - snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version), |
---|
1471 | | - "Rev %d", qedf->pdev->revision); |
---|
| 1701 | + if (qedf->pdev->device == QL41xxx) { |
---|
| 1702 | + snprintf(fc_host_model(lport->host), |
---|
| 1703 | + FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx"); |
---|
1472 | 1704 | |
---|
1473 | | - snprintf(fc_host->driver_version, sizeof(fc_host->driver_version), |
---|
1474 | | - "%s", QEDF_VERSION); |
---|
| 1705 | + snprintf(fc_host_model_description(lport->host), |
---|
| 1706 | + FC_SYMBOLIC_NAME_SIZE, "%s", |
---|
| 1707 | + "Marvell FastLinQ QL41xxx FCoE Adapter"); |
---|
| 1708 | + } |
---|
1475 | 1709 | |
---|
1476 | | - snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version), |
---|
1477 | | - "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION, |
---|
1478 | | - FW_REVISION_VERSION, FW_ENGINEERING_VERSION); |
---|
| 1710 | + snprintf(fc_host_hardware_version(lport->host), |
---|
| 1711 | + FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision); |
---|
| 1712 | + |
---|
| 1713 | + snprintf(fc_host_driver_version(lport->host), |
---|
| 1714 | + FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION); |
---|
| 1715 | + |
---|
| 1716 | + snprintf(fc_host_firmware_version(lport->host), |
---|
| 1717 | + FC_VERSION_STRING_SIZE, "%d.%d.%d.%d", |
---|
| 1718 | + FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, |
---|
| 1719 | + FW_ENGINEERING_VERSION); |
---|
| 1720 | + |
---|
1479 | 1721 | } |
---|
1480 | 1722 | |
---|
1481 | 1723 | static int qedf_lport_setup(struct qedf_ctx *qedf) |
---|
.. | .. |
---|
1498 | 1740 | fc_set_wwnn(lport, qedf->wwnn); |
---|
1499 | 1741 | fc_set_wwpn(lport, qedf->wwpn); |
---|
1500 | 1742 | |
---|
1501 | | - fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0); |
---|
| 1743 | + if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) { |
---|
| 1744 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 1745 | + "fcoe_libfc_config failed.\n"); |
---|
| 1746 | + return -ENOMEM; |
---|
| 1747 | + } |
---|
1502 | 1748 | |
---|
1503 | 1749 | /* Allocate the exchange manager */ |
---|
1504 | | - fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1, |
---|
1505 | | - qedf->max_els_xid, NULL); |
---|
| 1750 | + fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS, |
---|
| 1751 | + 0xfffe, NULL); |
---|
1506 | 1752 | |
---|
1507 | 1753 | if (fc_lport_init_stats(lport)) |
---|
1508 | 1754 | return -ENOMEM; |
---|
.. | .. |
---|
1518 | 1764 | fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo; |
---|
1519 | 1765 | |
---|
1520 | 1766 | /* Set symbolic node name */ |
---|
1521 | | - snprintf(fc_host_symbolic_name(lport->host), 256, |
---|
1522 | | - "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION); |
---|
| 1767 | + if (qedf->pdev->device == QL45xxx) |
---|
| 1768 | + snprintf(fc_host_symbolic_name(lport->host), 256, |
---|
| 1769 | + "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION); |
---|
| 1770 | + |
---|
| 1771 | + if (qedf->pdev->device == QL41xxx) |
---|
| 1772 | + snprintf(fc_host_symbolic_name(lport->host), 256, |
---|
| 1773 | + "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION); |
---|
1523 | 1774 | |
---|
1524 | 1775 | qedf_setup_fdmi(qedf); |
---|
1525 | 1776 | |
---|
.. | .. |
---|
1577 | 1828 | fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); |
---|
1578 | 1829 | QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, " |
---|
1579 | 1830 | "WWPN (0x%s) already exists.\n", buf); |
---|
1580 | | - goto err1; |
---|
| 1831 | + return rc; |
---|
1581 | 1832 | } |
---|
1582 | 1833 | |
---|
1583 | 1834 | if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) { |
---|
1584 | 1835 | QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport " |
---|
1585 | 1836 | "because link is not up.\n"); |
---|
1586 | | - rc = -EIO; |
---|
1587 | | - goto err1; |
---|
| 1837 | + return -EIO; |
---|
1588 | 1838 | } |
---|
1589 | 1839 | |
---|
1590 | 1840 | vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx)); |
---|
1591 | 1841 | if (!vn_port) { |
---|
1592 | 1842 | QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport " |
---|
1593 | 1843 | "for vport.\n"); |
---|
1594 | | - rc = -ENOMEM; |
---|
1595 | | - goto err1; |
---|
| 1844 | + return -ENOMEM; |
---|
1596 | 1845 | } |
---|
1597 | 1846 | |
---|
1598 | 1847 | fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); |
---|
.. | .. |
---|
1611 | 1860 | vport_qedf->cmd_mgr = base_qedf->cmd_mgr; |
---|
1612 | 1861 | init_completion(&vport_qedf->flogi_compl); |
---|
1613 | 1862 | INIT_LIST_HEAD(&vport_qedf->fcports); |
---|
| 1863 | + INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work); |
---|
1614 | 1864 | |
---|
1615 | 1865 | rc = qedf_vport_libfc_config(vport, vn_port); |
---|
1616 | 1866 | if (rc) { |
---|
1617 | 1867 | QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory " |
---|
1618 | 1868 | "for lport stats.\n"); |
---|
1619 | | - goto err2; |
---|
| 1869 | + goto err; |
---|
1620 | 1870 | } |
---|
1621 | 1871 | |
---|
1622 | 1872 | fc_set_wwnn(vn_port, vport->node_name); |
---|
.. | .. |
---|
1625 | 1875 | vport_qedf->wwpn = vn_port->wwpn; |
---|
1626 | 1876 | |
---|
1627 | 1877 | vn_port->host->transportt = qedf_fc_vport_transport_template; |
---|
1628 | | - vn_port->host->can_queue = QEDF_MAX_ELS_XID; |
---|
| 1878 | + vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS; |
---|
1629 | 1879 | vn_port->host->max_lun = qedf_max_lun; |
---|
1630 | 1880 | vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD; |
---|
1631 | 1881 | vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN; |
---|
1632 | 1882 | |
---|
1633 | 1883 | rc = scsi_add_host(vn_port->host, &vport->dev); |
---|
1634 | 1884 | if (rc) { |
---|
1635 | | - QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n"); |
---|
1636 | | - goto err2; |
---|
| 1885 | + QEDF_WARN(&base_qedf->dbg_ctx, |
---|
| 1886 | + "Error adding Scsi_Host rc=0x%x.\n", rc); |
---|
| 1887 | + goto err; |
---|
1637 | 1888 | } |
---|
1638 | 1889 | |
---|
1639 | 1890 | /* Set default dev_loss_tmo based on module parameter */ |
---|
.. | .. |
---|
1667 | 1918 | fc_vport_setlink(vn_port); |
---|
1668 | 1919 | } |
---|
1669 | 1920 | |
---|
| 1921 | + /* Set symbolic node name */ |
---|
| 1922 | + if (base_qedf->pdev->device == QL45xxx) |
---|
| 1923 | + snprintf(fc_host_symbolic_name(vn_port->host), 256, |
---|
| 1924 | + "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION); |
---|
| 1925 | + |
---|
| 1926 | + if (base_qedf->pdev->device == QL41xxx) |
---|
| 1927 | + snprintf(fc_host_symbolic_name(vn_port->host), 256, |
---|
| 1928 | + "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION); |
---|
| 1929 | + |
---|
| 1930 | + /* Set supported speed */ |
---|
| 1931 | + fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds; |
---|
| 1932 | + |
---|
| 1933 | + /* Set speed */ |
---|
| 1934 | + vn_port->link_speed = n_port->link_speed; |
---|
| 1935 | + |
---|
| 1936 | + /* Set port type */ |
---|
| 1937 | + fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV; |
---|
| 1938 | + |
---|
| 1939 | + /* Set maxframe size */ |
---|
| 1940 | + fc_host_maxframe_size(vn_port->host) = n_port->mfs; |
---|
| 1941 | + |
---|
1670 | 1942 | QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n", |
---|
1671 | 1943 | vn_port); |
---|
1672 | 1944 | |
---|
.. | .. |
---|
1674 | 1946 | vport_qedf->dbg_ctx.host_no = vn_port->host->host_no; |
---|
1675 | 1947 | vport_qedf->dbg_ctx.pdev = base_qedf->pdev; |
---|
1676 | 1948 | |
---|
1677 | | -err2: |
---|
| 1949 | + return 0; |
---|
| 1950 | + |
---|
| 1951 | +err: |
---|
1678 | 1952 | scsi_host_put(vn_port->host); |
---|
1679 | | -err1: |
---|
1680 | 1953 | return rc; |
---|
1681 | 1954 | } |
---|
1682 | 1955 | |
---|
.. | .. |
---|
1717 | 1990 | fc_lport_free_stats(vn_port); |
---|
1718 | 1991 | |
---|
1719 | 1992 | /* Release Scsi_Host */ |
---|
1720 | | - if (vn_port->host) |
---|
1721 | | - scsi_host_put(vn_port->host); |
---|
| 1993 | + scsi_host_put(vn_port->host); |
---|
1722 | 1994 | |
---|
1723 | 1995 | out: |
---|
1724 | 1996 | return 0; |
---|
.. | .. |
---|
1771 | 2043 | |
---|
1772 | 2044 | qedf_ctx_soft_reset(lport); |
---|
1773 | 2045 | return 0; |
---|
| 2046 | +} |
---|
| 2047 | + |
---|
| 2048 | +static void qedf_get_host_port_id(struct Scsi_Host *shost) |
---|
| 2049 | +{ |
---|
| 2050 | + struct fc_lport *lport = shost_priv(shost); |
---|
| 2051 | + |
---|
| 2052 | + fc_host_port_id(shost) = lport->port_id; |
---|
1774 | 2053 | } |
---|
1775 | 2054 | |
---|
1776 | 2055 | static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host |
---|
.. | .. |
---|
1843 | 2122 | .show_host_active_fc4s = 1, |
---|
1844 | 2123 | .show_host_maxframe_size = 1, |
---|
1845 | 2124 | |
---|
| 2125 | + .get_host_port_id = qedf_get_host_port_id, |
---|
1846 | 2126 | .show_host_port_id = 1, |
---|
1847 | 2127 | .show_host_supported_speeds = 1, |
---|
1848 | 2128 | .get_host_speed = fc_get_host_speed, |
---|
.. | .. |
---|
2086 | 2366 | static void qedf_sync_free_irqs(struct qedf_ctx *qedf) |
---|
2087 | 2367 | { |
---|
2088 | 2368 | int i; |
---|
| 2369 | + u16 vector_idx = 0; |
---|
| 2370 | + u32 vector; |
---|
2089 | 2371 | |
---|
2090 | 2372 | if (qedf->int_info.msix_cnt) { |
---|
2091 | 2373 | for (i = 0; i < qedf->int_info.used_cnt; i++) { |
---|
2092 | | - synchronize_irq(qedf->int_info.msix[i].vector); |
---|
2093 | | - irq_set_affinity_hint(qedf->int_info.msix[i].vector, |
---|
2094 | | - NULL); |
---|
2095 | | - irq_set_affinity_notifier(qedf->int_info.msix[i].vector, |
---|
2096 | | - NULL); |
---|
2097 | | - free_irq(qedf->int_info.msix[i].vector, |
---|
2098 | | - &qedf->fp_array[i]); |
---|
| 2374 | + vector_idx = i * qedf->dev_info.common.num_hwfns + |
---|
| 2375 | + qed_ops->common->get_affin_hwfn_idx(qedf->cdev); |
---|
| 2376 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
---|
| 2377 | + "Freeing IRQ #%d vector_idx=%d.\n", |
---|
| 2378 | + i, vector_idx); |
---|
| 2379 | + vector = qedf->int_info.msix[vector_idx].vector; |
---|
| 2380 | + synchronize_irq(vector); |
---|
| 2381 | + irq_set_affinity_hint(vector, NULL); |
---|
| 2382 | + irq_set_affinity_notifier(vector, NULL); |
---|
| 2383 | + free_irq(vector, &qedf->fp_array[i]); |
---|
2099 | 2384 | } |
---|
2100 | 2385 | } else |
---|
2101 | 2386 | qed_ops->common->simd_handler_clean(qedf->cdev, |
---|
.. | .. |
---|
2108 | 2393 | static int qedf_request_msix_irq(struct qedf_ctx *qedf) |
---|
2109 | 2394 | { |
---|
2110 | 2395 | int i, rc, cpu; |
---|
| 2396 | + u16 vector_idx = 0; |
---|
| 2397 | + u32 vector; |
---|
2111 | 2398 | |
---|
2112 | 2399 | cpu = cpumask_first(cpu_online_mask); |
---|
2113 | 2400 | for (i = 0; i < qedf->num_queues; i++) { |
---|
2114 | | - rc = request_irq(qedf->int_info.msix[i].vector, |
---|
2115 | | - qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]); |
---|
| 2401 | + vector_idx = i * qedf->dev_info.common.num_hwfns + |
---|
| 2402 | + qed_ops->common->get_affin_hwfn_idx(qedf->cdev); |
---|
| 2403 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
---|
| 2404 | + "Requesting IRQ #%d vector_idx=%d.\n", |
---|
| 2405 | + i, vector_idx); |
---|
| 2406 | + vector = qedf->int_info.msix[vector_idx].vector; |
---|
| 2407 | + rc = request_irq(vector, qedf_msix_handler, 0, "qedf", |
---|
| 2408 | + &qedf->fp_array[i]); |
---|
2116 | 2409 | |
---|
2117 | 2410 | if (rc) { |
---|
2118 | 2411 | QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n"); |
---|
.. | .. |
---|
2121 | 2414 | } |
---|
2122 | 2415 | |
---|
2123 | 2416 | qedf->int_info.used_cnt++; |
---|
2124 | | - rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector, |
---|
2125 | | - get_cpu_mask(cpu)); |
---|
| 2417 | + rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu)); |
---|
2126 | 2418 | cpu = cpumask_next(cpu, cpu_online_mask); |
---|
2127 | 2419 | } |
---|
2128 | 2420 | |
---|
.. | .. |
---|
2155 | 2447 | QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler); |
---|
2156 | 2448 | qedf->int_info.used_cnt = 1; |
---|
2157 | 2449 | |
---|
2158 | | - QEDF_ERR(&qedf->dbg_ctx, "Only MSI-X supported. Failing probe.\n"); |
---|
| 2450 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 2451 | + "Cannot load driver due to a lack of MSI-X vectors.\n"); |
---|
2159 | 2452 | return -EINVAL; |
---|
2160 | 2453 | } |
---|
2161 | 2454 | |
---|
.. | .. |
---|
2198 | 2491 | fr_dev(fp) = lport; |
---|
2199 | 2492 | fr_sof(fp) = hp->fcoe_sof; |
---|
2200 | 2493 | if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { |
---|
| 2494 | + QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n"); |
---|
2201 | 2495 | kfree_skb(skb); |
---|
2202 | 2496 | return; |
---|
2203 | 2497 | } |
---|
2204 | 2498 | fr_eof(fp) = crc_eof.fcoe_eof; |
---|
2205 | 2499 | fr_crc(fp) = crc_eof.fcoe_crc32; |
---|
2206 | 2500 | if (pskb_trim(skb, fr_len)) { |
---|
| 2501 | + QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n"); |
---|
2207 | 2502 | kfree_skb(skb); |
---|
2208 | 2503 | return; |
---|
2209 | 2504 | } |
---|
.. | .. |
---|
2264 | 2559 | * empty then this is not addressed to our port so simply drop it. |
---|
2265 | 2560 | */ |
---|
2266 | 2561 | if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { |
---|
2267 | | - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, |
---|
2268 | | - "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n", |
---|
2269 | | - lport->port_id, ntoh24(fh->fh_d_id)); |
---|
| 2562 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, |
---|
| 2563 | + "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n", |
---|
| 2564 | + lport->port_id, ntoh24(fh->fh_d_id)); |
---|
2270 | 2565 | kfree_skb(skb); |
---|
2271 | 2566 | return; |
---|
2272 | 2567 | } |
---|
.. | .. |
---|
2275 | 2570 | if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) && |
---|
2276 | 2571 | (f_ctl & FC_FC_EX_CTX)) { |
---|
2277 | 2572 | /* Drop incoming ABTS response that has both SEQ/EX CTX set */ |
---|
| 2573 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, |
---|
| 2574 | + "Dropping ABTS response as both SEQ/EX CTX set.\n"); |
---|
2278 | 2575 | kfree_skb(skb); |
---|
2279 | 2576 | return; |
---|
2280 | 2577 | } |
---|
.. | .. |
---|
2356 | 2653 | struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; |
---|
2357 | 2654 | struct qedf_skb_work *skb_work; |
---|
2358 | 2655 | |
---|
| 2656 | + if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { |
---|
| 2657 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, |
---|
| 2658 | + "Dropping frame as link state is down.\n"); |
---|
| 2659 | + kfree_skb(skb); |
---|
| 2660 | + return 0; |
---|
| 2661 | + } |
---|
| 2662 | + |
---|
2359 | 2663 | skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC); |
---|
2360 | 2664 | if (!skb_work) { |
---|
2361 | 2665 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so " |
---|
.. | .. |
---|
2411 | 2715 | sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL); |
---|
2412 | 2716 | |
---|
2413 | 2717 | if (!sb_virt) { |
---|
2414 | | - QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed " |
---|
2415 | | - "for id = %d.\n", sb_id); |
---|
| 2718 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 2719 | + "Status block allocation failed for id = %d.\n", |
---|
| 2720 | + sb_id); |
---|
2416 | 2721 | return -ENOMEM; |
---|
2417 | 2722 | } |
---|
2418 | 2723 | |
---|
.. | .. |
---|
2420 | 2725 | sb_id, QED_SB_TYPE_STORAGE); |
---|
2421 | 2726 | |
---|
2422 | 2727 | if (ret) { |
---|
2423 | | - QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization " |
---|
2424 | | - "failed for id = %d.\n", sb_id); |
---|
| 2728 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 2729 | + "Status block initialization failed (0x%x) for id = %d.\n", |
---|
| 2730 | + ret, sb_id); |
---|
2425 | 2731 | return ret; |
---|
2426 | 2732 | } |
---|
2427 | 2733 | |
---|
.. | .. |
---|
2497 | 2803 | struct qedf_ioreq *io_req; |
---|
2498 | 2804 | struct qedf_rport *fcport; |
---|
2499 | 2805 | u32 comp_type; |
---|
| 2806 | + u8 io_comp_type; |
---|
| 2807 | + unsigned long flags; |
---|
2500 | 2808 | |
---|
2501 | 2809 | comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & |
---|
2502 | 2810 | FCOE_CQE_CQE_TYPE_MASK; |
---|
.. | .. |
---|
2505 | 2813 | io_req = &qedf->cmd_mgr->cmds[xid]; |
---|
2506 | 2814 | |
---|
2507 | 2815 | /* Completion not for a valid I/O anymore so just return */ |
---|
2508 | | - if (!io_req) |
---|
| 2816 | + if (!io_req) { |
---|
| 2817 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 2818 | + "io_req is NULL for xid=0x%x.\n", xid); |
---|
2509 | 2819 | return; |
---|
| 2820 | + } |
---|
2510 | 2821 | |
---|
2511 | 2822 | fcport = io_req->fcport; |
---|
2512 | 2823 | |
---|
2513 | 2824 | if (fcport == NULL) { |
---|
2514 | | - QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n"); |
---|
| 2825 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 2826 | + "fcport is NULL for xid=0x%x io_req=%p.\n", |
---|
| 2827 | + xid, io_req); |
---|
2515 | 2828 | return; |
---|
2516 | 2829 | } |
---|
2517 | 2830 | |
---|
.. | .. |
---|
2520 | 2833 | * isn't valid and shouldn't be taken. We should just return. |
---|
2521 | 2834 | */ |
---|
2522 | 2835 | if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { |
---|
2523 | | - QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); |
---|
| 2836 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 2837 | + "Session not offloaded yet, fcport = %p.\n", fcport); |
---|
2524 | 2838 | return; |
---|
2525 | 2839 | } |
---|
2526 | 2840 | |
---|
| 2841 | + spin_lock_irqsave(&fcport->rport_lock, flags); |
---|
| 2842 | + io_comp_type = io_req->cmd_type; |
---|
| 2843 | + spin_unlock_irqrestore(&fcport->rport_lock, flags); |
---|
2527 | 2844 | |
---|
2528 | 2845 | switch (comp_type) { |
---|
2529 | 2846 | case FCOE_GOOD_COMPLETION_CQE_TYPE: |
---|
2530 | 2847 | atomic_inc(&fcport->free_sqes); |
---|
2531 | | - switch (io_req->cmd_type) { |
---|
| 2848 | + switch (io_comp_type) { |
---|
2532 | 2849 | case QEDF_SCSI_CMD: |
---|
2533 | 2850 | qedf_scsi_completion(qedf, cqe, io_req); |
---|
2534 | 2851 | break; |
---|
.. | .. |
---|
2681 | 2998 | } |
---|
2682 | 2999 | |
---|
2683 | 3000 | /* Allocate list of PBL pages */ |
---|
2684 | | - qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev, |
---|
2685 | | - QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); |
---|
| 3001 | + qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, |
---|
| 3002 | + QEDF_PAGE_SIZE, |
---|
| 3003 | + &qedf->bdq_pbl_list_dma, |
---|
| 3004 | + GFP_KERNEL); |
---|
2686 | 3005 | if (!qedf->bdq_pbl_list) { |
---|
2687 | 3006 | QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); |
---|
2688 | 3007 | return -ENOMEM; |
---|
.. | .. |
---|
2709 | 3028 | { |
---|
2710 | 3029 | u32 *list; |
---|
2711 | 3030 | int i; |
---|
2712 | | - int status = 0, rc; |
---|
| 3031 | + int status; |
---|
2713 | 3032 | u32 *pbl; |
---|
2714 | 3033 | dma_addr_t page; |
---|
2715 | 3034 | int num_pages; |
---|
.. | .. |
---|
2721 | 3040 | */ |
---|
2722 | 3041 | if (!qedf->num_queues) { |
---|
2723 | 3042 | QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n"); |
---|
2724 | | - return 1; |
---|
| 3043 | + return -ENOMEM; |
---|
2725 | 3044 | } |
---|
2726 | 3045 | |
---|
2727 | 3046 | /* |
---|
.. | .. |
---|
2729 | 3048 | * addresses of our queues |
---|
2730 | 3049 | */ |
---|
2731 | 3050 | if (!qedf->p_cpuq) { |
---|
2732 | | - status = 1; |
---|
2733 | | - goto mem_alloc_failure; |
---|
| 3051 | + QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n"); |
---|
| 3052 | + return -EINVAL; |
---|
2734 | 3053 | } |
---|
2735 | 3054 | |
---|
2736 | 3055 | qedf->global_queues = kzalloc((sizeof(struct global_queue *) |
---|
.. | .. |
---|
2744 | 3063 | "qedf->global_queues=%p.\n", qedf->global_queues); |
---|
2745 | 3064 | |
---|
2746 | 3065 | /* Allocate DMA coherent buffers for BDQ */ |
---|
2747 | | - rc = qedf_alloc_bdq(qedf); |
---|
2748 | | - if (rc) |
---|
| 3066 | + status = qedf_alloc_bdq(qedf); |
---|
| 3067 | + if (status) { |
---|
| 3068 | + QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n"); |
---|
2749 | 3069 | goto mem_alloc_failure; |
---|
| 3070 | + } |
---|
2750 | 3071 | |
---|
2751 | 3072 | /* Allocate a CQ and an associated PBL for each MSI-X vector */ |
---|
2752 | 3073 | for (i = 0; i < qedf->num_queues; i++) { |
---|
.. | .. |
---|
2771 | 3092 | ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); |
---|
2772 | 3093 | |
---|
2773 | 3094 | qedf->global_queues[i]->cq = |
---|
2774 | | - dma_zalloc_coherent(&qedf->pdev->dev, |
---|
2775 | | - qedf->global_queues[i]->cq_mem_size, |
---|
2776 | | - &qedf->global_queues[i]->cq_dma, GFP_KERNEL); |
---|
| 3095 | + dma_alloc_coherent(&qedf->pdev->dev, |
---|
| 3096 | + qedf->global_queues[i]->cq_mem_size, |
---|
| 3097 | + &qedf->global_queues[i]->cq_dma, |
---|
| 3098 | + GFP_KERNEL); |
---|
2777 | 3099 | |
---|
2778 | 3100 | if (!qedf->global_queues[i]->cq) { |
---|
2779 | 3101 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); |
---|
.. | .. |
---|
2782 | 3104 | } |
---|
2783 | 3105 | |
---|
2784 | 3106 | qedf->global_queues[i]->cq_pbl = |
---|
2785 | | - dma_zalloc_coherent(&qedf->pdev->dev, |
---|
2786 | | - qedf->global_queues[i]->cq_pbl_size, |
---|
2787 | | - &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); |
---|
| 3107 | + dma_alloc_coherent(&qedf->pdev->dev, |
---|
| 3108 | + qedf->global_queues[i]->cq_pbl_size, |
---|
| 3109 | + &qedf->global_queues[i]->cq_pbl_dma, |
---|
| 3110 | + GFP_KERNEL); |
---|
2788 | 3111 | |
---|
2789 | 3112 | if (!qedf->global_queues[i]->cq_pbl) { |
---|
2790 | 3113 | QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); |
---|
.. | .. |
---|
2855 | 3178 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", |
---|
2856 | 3179 | qedf->num_queues); |
---|
2857 | 3180 | |
---|
2858 | | - qedf->p_cpuq = pci_alloc_consistent(qedf->pdev, |
---|
| 3181 | + qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev, |
---|
2859 | 3182 | qedf->num_queues * sizeof(struct qedf_glbl_q_params), |
---|
2860 | | - &qedf->hw_p_cpuq); |
---|
| 3183 | + &qedf->hw_p_cpuq, GFP_KERNEL); |
---|
2861 | 3184 | |
---|
2862 | 3185 | if (!qedf->p_cpuq) { |
---|
2863 | | - QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n"); |
---|
| 3186 | + QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n"); |
---|
2864 | 3187 | return 1; |
---|
2865 | 3188 | } |
---|
2866 | 3189 | |
---|
.. | .. |
---|
2929 | 3252 | |
---|
2930 | 3253 | if (qedf->p_cpuq) { |
---|
2931 | 3254 | size = qedf->num_queues * sizeof(struct qedf_glbl_q_params); |
---|
2932 | | - pci_free_consistent(qedf->pdev, size, qedf->p_cpuq, |
---|
| 3255 | + dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq, |
---|
2933 | 3256 | qedf->hw_p_cpuq); |
---|
2934 | 3257 | } |
---|
2935 | 3258 | |
---|
2936 | 3259 | qedf_free_global_queues(qedf); |
---|
2937 | 3260 | |
---|
2938 | | - if (qedf->global_queues) |
---|
2939 | | - kfree(qedf->global_queues); |
---|
| 3261 | + kfree(qedf->global_queues); |
---|
2940 | 3262 | } |
---|
2941 | 3263 | |
---|
2942 | 3264 | /* |
---|
.. | .. |
---|
2955 | 3277 | .id_table = qedf_pci_tbl, |
---|
2956 | 3278 | .probe = qedf_probe, |
---|
2957 | 3279 | .remove = qedf_remove, |
---|
| 3280 | + .shutdown = qedf_shutdown, |
---|
| 3281 | + .suspend = qedf_suspend, |
---|
2958 | 3282 | }; |
---|
2959 | 3283 | |
---|
2960 | 3284 | static int __qedf_probe(struct pci_dev *pdev, int mode) |
---|
.. | .. |
---|
2971 | 3295 | void *task_start, *task_end; |
---|
2972 | 3296 | struct qed_slowpath_params slowpath_params; |
---|
2973 | 3297 | struct qed_probe_params qed_params; |
---|
2974 | | - u16 tmp; |
---|
| 3298 | + u16 retry_cnt = 10; |
---|
2975 | 3299 | |
---|
2976 | 3300 | /* |
---|
2977 | 3301 | * When doing error recovery we didn't reap the lport so don't try |
---|
2978 | 3302 | * to reallocate it. |
---|
2979 | 3303 | */ |
---|
| 3304 | +retry_probe: |
---|
| 3305 | + if (mode == QEDF_MODE_RECOVERY) |
---|
| 3306 | + msleep(2000); |
---|
| 3307 | + |
---|
2980 | 3308 | if (mode != QEDF_MODE_RECOVERY) { |
---|
2981 | 3309 | lport = libfc_host_alloc(&qedf_host_template, |
---|
2982 | 3310 | sizeof(struct qedf_ctx)); |
---|
.. | .. |
---|
2986 | 3314 | rc = -ENOMEM; |
---|
2987 | 3315 | goto err0; |
---|
2988 | 3316 | } |
---|
| 3317 | + |
---|
| 3318 | + fc_disc_init(lport); |
---|
2989 | 3319 | |
---|
2990 | 3320 | /* Initialize qedf_ctx */ |
---|
2991 | 3321 | qedf = lport_priv(lport); |
---|
.. | .. |
---|
3003 | 3333 | pci_set_drvdata(pdev, qedf); |
---|
3004 | 3334 | init_completion(&qedf->fipvlan_compl); |
---|
3005 | 3335 | mutex_init(&qedf->stats_mutex); |
---|
| 3336 | + mutex_init(&qedf->flush_mutex); |
---|
| 3337 | + qedf->flogi_pending = 0; |
---|
3006 | 3338 | |
---|
3007 | 3339 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, |
---|
3008 | 3340 | "QLogic FastLinQ FCoE Module qedf %s, " |
---|
.. | .. |
---|
3036 | 3368 | INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); |
---|
3037 | 3369 | INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); |
---|
3038 | 3370 | INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump); |
---|
| 3371 | + INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work); |
---|
3039 | 3372 | qedf->fipvlan_retries = qedf_fipvlan_retries; |
---|
3040 | 3373 | /* Set a default prio in case DCBX doesn't converge */ |
---|
3041 | 3374 | if (qedf_default_prio > -1) { |
---|
.. | .. |
---|
3058 | 3391 | qed_params.is_vf = is_vf; |
---|
3059 | 3392 | qedf->cdev = qed_ops->common->probe(pdev, &qed_params); |
---|
3060 | 3393 | if (!qedf->cdev) { |
---|
| 3394 | + if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) { |
---|
| 3395 | + QEDF_ERR(&qedf->dbg_ctx, |
---|
| 3396 | + "Retry %d initialize hardware\n", retry_cnt); |
---|
| 3397 | + retry_cnt--; |
---|
| 3398 | + goto retry_probe; |
---|
| 3399 | + } |
---|
| 3400 | + QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n"); |
---|
3061 | 3401 | rc = -ENODEV; |
---|
3062 | 3402 | goto err1; |
---|
3063 | 3403 | } |
---|
.. | .. |
---|
3068 | 3408 | QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); |
---|
3069 | 3409 | goto err1; |
---|
3070 | 3410 | } |
---|
| 3411 | + |
---|
| 3412 | + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, |
---|
| 3413 | + "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n", |
---|
| 3414 | + qedf->dev_info.common.num_hwfns, |
---|
| 3415 | + qed_ops->common->get_affin_hwfn_idx(qedf->cdev)); |
---|
3071 | 3416 | |
---|
3072 | 3417 | /* queue allocation code should come here |
---|
3073 | 3418 | * order should be |
---|
.. | .. |
---|
3083 | 3428 | goto err2; |
---|
3084 | 3429 | } |
---|
3085 | 3430 | qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); |
---|
| 3431 | + |
---|
| 3432 | + /* Learn information crucial for qedf to progress */ |
---|
| 3433 | + rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); |
---|
| 3434 | + if (rc) { |
---|
| 3435 | + QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n"); |
---|
| 3436 | + goto err2; |
---|
| 3437 | + } |
---|
3086 | 3438 | |
---|
3087 | 3439 | /* Record BDQ producer doorbell addresses */ |
---|
3088 | 3440 | qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; |
---|
.. | .. |
---|
3121 | 3473 | |
---|
3122 | 3474 | /* Setup interrupts */ |
---|
3123 | 3475 | rc = qedf_setup_int(qedf); |
---|
3124 | | - if (rc) |
---|
| 3476 | + if (rc) { |
---|
| 3477 | + QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n"); |
---|
3125 | 3478 | goto err3; |
---|
| 3479 | + } |
---|
3126 | 3480 | |
---|
3127 | 3481 | rc = qed_ops->start(qedf->cdev, &qedf->tasks); |
---|
3128 | 3482 | if (rc) { |
---|
.. | .. |
---|
3145 | 3499 | "Writing %d to primary and secondary BDQ doorbell registers.\n", |
---|
3146 | 3500 | qedf->bdq_prod_idx); |
---|
3147 | 3501 | writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); |
---|
3148 | | - tmp = readw(qedf->bdq_primary_prod); |
---|
| 3502 | + readw(qedf->bdq_primary_prod); |
---|
3149 | 3503 | writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); |
---|
3150 | | - tmp = readw(qedf->bdq_secondary_prod); |
---|
| 3504 | + readw(qedf->bdq_secondary_prod); |
---|
3151 | 3505 | |
---|
3152 | 3506 | qed_ops->common->set_power_state(qedf->cdev, PCI_D0); |
---|
3153 | 3507 | |
---|
.. | .. |
---|
3182 | 3536 | sprintf(host_buf, "host_%d", host->host_no); |
---|
3183 | 3537 | qed_ops->common->set_name(qedf->cdev, host_buf); |
---|
3184 | 3538 | |
---|
3185 | | - |
---|
3186 | | - /* Set xid max values */ |
---|
3187 | | - qedf->max_scsi_xid = QEDF_MAX_SCSI_XID; |
---|
3188 | | - qedf->max_els_xid = QEDF_MAX_ELS_XID; |
---|
3189 | | - |
---|
3190 | 3539 | /* Allocate cmd mgr */ |
---|
3191 | 3540 | qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf); |
---|
3192 | 3541 | if (!qedf->cmd_mgr) { |
---|
.. | .. |
---|
3197 | 3546 | |
---|
3198 | 3547 | if (mode != QEDF_MODE_RECOVERY) { |
---|
3199 | 3548 | host->transportt = qedf_fc_transport_template; |
---|
3200 | | - host->can_queue = QEDF_MAX_ELS_XID; |
---|
3201 | 3549 | host->max_lun = qedf_max_lun; |
---|
3202 | 3550 | host->max_cmd_len = QEDF_MAX_CDB_LEN; |
---|
| 3551 | + host->can_queue = FCOE_PARAMS_NUM_TASKS; |
---|
3203 | 3552 | rc = scsi_add_host(host, &pdev->dev); |
---|
3204 | | - if (rc) |
---|
| 3553 | + if (rc) { |
---|
| 3554 | + QEDF_WARN(&qedf->dbg_ctx, |
---|
| 3555 | + "Error adding Scsi_Host rc=0x%x.\n", rc); |
---|
3205 | 3556 | goto err6; |
---|
| 3557 | + } |
---|
3206 | 3558 | } |
---|
3207 | 3559 | |
---|
3208 | 3560 | memset(¶ms, 0, sizeof(params)); |
---|
3209 | | - params.mtu = 9000; |
---|
| 3561 | + params.mtu = QEDF_LL2_BUF_SIZE; |
---|
3210 | 3562 | ether_addr_copy(params.ll2_mac_address, qedf->mac); |
---|
3211 | 3563 | |
---|
3212 | 3564 | /* Start LL2 processing thread */ |
---|
.. | .. |
---|
3269 | 3621 | qedf->lport->host->host_no); |
---|
3270 | 3622 | qedf->dpc_wq = create_workqueue(host_buf); |
---|
3271 | 3623 | } |
---|
| 3624 | + INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler); |
---|
3272 | 3625 | |
---|
3273 | 3626 | /* |
---|
3274 | 3627 | * GRC dump and sysfs parameters are not reaped during the recovery |
---|
.. | .. |
---|
3345 | 3698 | err1: |
---|
3346 | 3699 | scsi_host_put(lport->host); |
---|
3347 | 3700 | err0: |
---|
3348 | | - if (qedf) { |
---|
3349 | | - QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); |
---|
3350 | | - |
---|
3351 | | - clear_bit(QEDF_PROBING, &qedf->flags); |
---|
3352 | | - } |
---|
3353 | 3701 | return rc; |
---|
3354 | 3702 | } |
---|
3355 | 3703 | |
---|
.. | .. |
---|
3387 | 3735 | fcoe_ctlr_link_down(&qedf->ctlr); |
---|
3388 | 3736 | else |
---|
3389 | 3737 | fc_fabric_logoff(qedf->lport); |
---|
3390 | | - qedf_wait_for_upload(qedf); |
---|
| 3738 | + |
---|
| 3739 | + if (qedf_wait_for_upload(qedf) == false) |
---|
| 3740 | + QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); |
---|
3391 | 3741 | |
---|
3392 | 3742 | #ifdef CONFIG_DEBUG_FS |
---|
3393 | 3743 | qedf_dbg_host_exit(&(qedf->dbg_ctx)); |
---|
.. | .. |
---|
3490 | 3840 | qedf_capture_grc_dump(qedf); |
---|
3491 | 3841 | } |
---|
3492 | 3842 | |
---|
/*
 * Callback invoked by the qed core when a hardware error is detected.
 * Decides, per error type, whether to schedule a board disable, mask
 * further HW attentions, or kick off the full recovery flow.
 *
 * @dev:      opaque driver context registered with qed (struct qedf_ctx *)
 * @err_type: qed-defined hardware error classification
 */
void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
{
	struct qedf_ctx *qedf = dev;

	QEDF_ERR(&(qedf->dbg_ctx),
		 "Hardware error handler scheduled, event=%d.\n",
		 err_type);

	/* Recovery already tears the adapter down; don't stack actions. */
	if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "Already in recovery, not scheduling board disable work.\n");
		return;
	}

	switch (err_type) {
	case QED_HW_ERR_FAN_FAIL:
		/* Fan failure: take the board offline via deferred work. */
		schedule_delayed_work(&qedf->board_disable_work, 0);
		break;
	case QED_HW_ERR_MFW_RESP_FAIL:
	case QED_HW_ERR_HW_ATTN:
	case QED_HW_ERR_DMAE_FAIL:
	case QED_HW_ERR_FW_ASSERT:
		/* Prevent HW attentions from being reasserted */
		qed_ops->common->attn_clr_enable(qedf->cdev, true);
		break;
	case QED_HW_ERR_RAMROD_FAIL:
		/* Prevent HW attentions from being reasserted */
		qed_ops->common->attn_clr_enable(qedf->cdev, true);

		/* Ramrod failures are recoverable; run recovery if enabled. */
		if (qedf_enable_recovery)
			qed_ops->common->recovery_process(qedf->cdev);

		break;
	default:
		break;
	}
}
---|
| 3880 | + |
---|
3493 | 3881 | /* |
---|
3494 | 3882 | * Protocol TLV handler |
---|
3495 | 3883 | */ |
---|
.. | .. |
---|
3584 | 3972 | |
---|
3585 | 3973 | fcoe->scsi_tsk_full_set = true; |
---|
3586 | 3974 | fcoe->scsi_tsk_full = qedf->task_set_fulls; |
---|
| 3975 | +} |
---|
| 3976 | + |
---|
| 3977 | +/* Deferred work function to perform soft context reset on STAG change */ |
---|
| 3978 | +void qedf_stag_change_work(struct work_struct *work) |
---|
| 3979 | +{ |
---|
| 3980 | + struct qedf_ctx *qedf = |
---|
| 3981 | + container_of(work, struct qedf_ctx, stag_work.work); |
---|
| 3982 | + |
---|
| 3983 | + if (!qedf) { |
---|
| 3984 | + QEDF_ERR(NULL, "qedf is NULL"); |
---|
| 3985 | + return; |
---|
| 3986 | + } |
---|
| 3987 | + QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n"); |
---|
| 3988 | + qedf_ctx_soft_reset(qedf->lport); |
---|
| 3989 | +} |
---|
| 3990 | + |
---|
/* PCI shutdown callback: perform a full (non-recovery) teardown. */
static void qedf_shutdown(struct pci_dev *pdev)
{
	__qedf_remove(pdev, QEDF_MODE_NORMAL);
}
---|
| 3995 | + |
---|
/*
 * PCI suspend callback.  This adapter does not support power-managed
 * suspend, so log the request and reject it with -EPERM.
 *
 * Returns -ENODEV when no device is supplied, -EPERM otherwise.
 */
static int qedf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct qedf_ctx *qedf;

	/* Defensive: the PCI core should never pass a NULL pdev. */
	if (!pdev) {
		QEDF_ERR(NULL, "pdev is NULL.\n");
		return -ENODEV;
	}

	qedf = pci_get_drvdata(pdev);

	QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);

	return -EPERM;
}
---|
| 4011 | + |
---|
| 4012 | +/* |
---|
| 4013 | + * Recovery handler code |
---|
| 4014 | + */ |
---|
/*
 * Callback from the qed core requesting driver-level recovery; defers
 * the heavy teardown/re-probe to process context via recovery_work.
 *
 * @dev: opaque driver context registered with qed (struct qedf_ctx *)
 */
static void qedf_schedule_recovery_handler(void *dev)
{
	struct qedf_ctx *qedf = dev;

	QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
	schedule_delayed_work(&qedf->recovery_work, 0);
}
---|
| 4022 | + |
---|
/*
 * Process-context recovery worker: tears the adapter down and probes it
 * again in QEDF_MODE_RECOVERY.  Statement order matters — the MFW
 * prolog must precede the remove, and link/dcbx state must be reset
 * before the re-probe.
 */
static void qedf_recovery_handler(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, recovery_work.work);

	/* Only one recovery may run at a time; bail if already underway. */
	if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
		return;

	/*
	 * Call common_ops->recovery_prolog to allow the MFW to quiesce
	 * any PCI transactions.
	 */
	qed_ops->common->recovery_prolog(qedf->cdev);

	QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
	__qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
	/*
	 * Reset link and dcbx to down state since we will not get a link down
	 * event from the MFW but calling __qedf_remove will essentially be a
	 * link down event.
	 */
	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
	atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
	__qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
	clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
	QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
}
---|
3588 | 4050 | |
---|
3589 | 4051 | /* Generic TLV data callback */ |
---|
.. | .. |
---|
3712 | 4174 | } |
---|
3713 | 4175 | |
---|
3714 | 4176 | MODULE_LICENSE("GPL"); |
---|
3715 | | -MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver"); |
---|
| 4177 | +MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module"); |
---|
3716 | 4178 | MODULE_AUTHOR("QLogic Corporation"); |
---|
3717 | 4179 | MODULE_VERSION(QEDF_VERSION); |
---|
3718 | 4180 | module_init(qedf_init); |
---|