forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/drivers/nvme/target/fcloop.c
....@@ -1,17 +1,6 @@
1
+// SPDX-License-Identifier: GPL-2.0
12 /*
23 * Copyright (c) 2016 Avago Technologies. All rights reserved.
3
- *
4
- * This program is free software; you can redistribute it and/or modify
5
- * it under the terms of version 2 of the GNU General Public License as
6
- * published by the Free Software Foundation.
7
- *
8
- * This program is distributed in the hope that it will be useful.
9
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10
- * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11
- * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12
- * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13
- * See the GNU General Public License for more details, a copy of which
14
- * can be found in the file COPYING included with this package
154 */
165 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
176 #include <linux/module.h>
....@@ -54,6 +43,17 @@
5443 { NVMF_OPT_ERR, NULL }
5544 };
5645
46
+static int fcloop_verify_addr(substring_t *s)
47
+{
48
+ size_t blen = s->to - s->from + 1;
49
+
50
+ if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
51
+ strncmp(s->from, "0x", 2))
52
+ return -EINVAL;
53
+
54
+ return 0;
55
+}
56
+
5757 static int
5858 fcloop_parse_options(struct fcloop_ctrl_options *opts,
5959 const char *buf)
....@@ -75,14 +75,16 @@
7575 opts->mask |= token;
7676 switch (token) {
7777 case NVMF_OPT_WWNN:
78
- if (match_u64(args, &token64)) {
78
+ if (fcloop_verify_addr(args) ||
79
+ match_u64(args, &token64)) {
7980 ret = -EINVAL;
8081 goto out_free_options;
8182 }
8283 opts->wwnn = token64;
8384 break;
8485 case NVMF_OPT_WWPN:
85
- if (match_u64(args, &token64)) {
86
+ if (fcloop_verify_addr(args) ||
87
+ match_u64(args, &token64)) {
8688 ret = -EINVAL;
8789 goto out_free_options;
8890 }
....@@ -103,14 +105,16 @@
103105 opts->fcaddr = token;
104106 break;
105107 case NVMF_OPT_LPWWNN:
106
- if (match_u64(args, &token64)) {
108
+ if (fcloop_verify_addr(args) ||
109
+ match_u64(args, &token64)) {
107110 ret = -EINVAL;
108111 goto out_free_options;
109112 }
110113 opts->lpwwnn = token64;
111114 break;
112115 case NVMF_OPT_LPWWPN:
113
- if (match_u64(args, &token64)) {
116
+ if (fcloop_verify_addr(args) ||
117
+ match_u64(args, &token64)) {
114118 ret = -EINVAL;
115119 goto out_free_options;
116120 }
....@@ -152,14 +156,16 @@
152156 token = match_token(p, opt_tokens, args);
153157 switch (token) {
154158 case NVMF_OPT_WWNN:
155
- if (match_u64(args, &token64)) {
159
+ if (fcloop_verify_addr(args) ||
160
+ match_u64(args, &token64)) {
156161 ret = -EINVAL;
157162 goto out_free_options;
158163 }
159164 *nname = token64;
160165 break;
161166 case NVMF_OPT_WWPN:
162
- if (match_u64(args, &token64)) {
167
+ if (fcloop_verify_addr(args) ||
168
+ match_u64(args, &token64)) {
163169 ret = -EINVAL;
164170 goto out_free_options;
165171 }
....@@ -209,17 +215,23 @@
209215 };
210216
211217 struct fcloop_rport {
212
- struct nvme_fc_remote_port *remoteport;
213
- struct nvmet_fc_target_port *targetport;
214
- struct fcloop_nport *nport;
215
- struct fcloop_lport *lport;
218
+ struct nvme_fc_remote_port *remoteport;
219
+ struct nvmet_fc_target_port *targetport;
220
+ struct fcloop_nport *nport;
221
+ struct fcloop_lport *lport;
222
+ spinlock_t lock;
223
+ struct list_head ls_list;
224
+ struct work_struct ls_work;
216225 };
217226
218227 struct fcloop_tport {
219
- struct nvmet_fc_target_port *targetport;
220
- struct nvme_fc_remote_port *remoteport;
221
- struct fcloop_nport *nport;
222
- struct fcloop_lport *lport;
228
+ struct nvmet_fc_target_port *targetport;
229
+ struct nvme_fc_remote_port *remoteport;
230
+ struct fcloop_nport *nport;
231
+ struct fcloop_lport *lport;
232
+ spinlock_t lock;
233
+ struct list_head ls_list;
234
+ struct work_struct ls_work;
223235 };
224236
225237 struct fcloop_nport {
....@@ -235,11 +247,16 @@
235247 };
236248
237249 struct fcloop_lsreq {
238
- struct fcloop_tport *tport;
239250 struct nvmefc_ls_req *lsreq;
240
- struct work_struct work;
241
- struct nvmefc_tgt_ls_req tgt_ls_req;
251
+ struct nvmefc_ls_rsp ls_rsp;
252
+ int lsdir; /* H2T or T2H */
242253 int status;
254
+ struct list_head ls_list; /* fcloop_rport->ls_list */
255
+};
256
+
257
+struct fcloop_rscn {
258
+ struct fcloop_tport *tport;
259
+ struct work_struct work;
243260 };
244261
245262 enum {
....@@ -271,9 +288,9 @@
271288 };
272289
273290 static inline struct fcloop_lsreq *
274
-tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
291
+ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
275292 {
276
- return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
293
+ return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
277294 }
278295
279296 static inline struct fcloop_fcpreq *
....@@ -298,25 +315,36 @@
298315 {
299316 }
300317
301
-
302
-/*
303
- * Transmit of LS RSP done (e.g. buffers all set). call back up
304
- * initiator "done" flows.
305
- */
306318 static void
307
-fcloop_tgt_lsrqst_done_work(struct work_struct *work)
319
+fcloop_rport_lsrqst_work(struct work_struct *work)
308320 {
309
- struct fcloop_lsreq *tls_req =
310
- container_of(work, struct fcloop_lsreq, work);
311
- struct fcloop_tport *tport = tls_req->tport;
312
- struct nvmefc_ls_req *lsreq = tls_req->lsreq;
321
+ struct fcloop_rport *rport =
322
+ container_of(work, struct fcloop_rport, ls_work);
323
+ struct fcloop_lsreq *tls_req;
313324
314
- if (!tport || tport->remoteport)
315
- lsreq->done(lsreq, tls_req->status);
325
+ spin_lock(&rport->lock);
326
+ for (;;) {
327
+ tls_req = list_first_entry_or_null(&rport->ls_list,
328
+ struct fcloop_lsreq, ls_list);
329
+ if (!tls_req)
330
+ break;
331
+
332
+ list_del(&tls_req->ls_list);
333
+ spin_unlock(&rport->lock);
334
+
335
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
336
+ /*
337
+ * callee may free memory containing tls_req.
338
+ * do not reference lsreq after this.
339
+ */
340
+
341
+ spin_lock(&rport->lock);
342
+ }
343
+ spin_unlock(&rport->lock);
316344 }
317345
318346 static int
319
-fcloop_ls_req(struct nvme_fc_local_port *localport,
347
+fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
320348 struct nvme_fc_remote_port *remoteport,
321349 struct nvmefc_ls_req *lsreq)
322350 {
....@@ -325,38 +353,174 @@
325353 int ret = 0;
326354
327355 tls_req->lsreq = lsreq;
328
- INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
356
+ INIT_LIST_HEAD(&tls_req->ls_list);
329357
330358 if (!rport->targetport) {
331359 tls_req->status = -ECONNREFUSED;
332
- tls_req->tport = NULL;
333
- schedule_work(&tls_req->work);
360
+ spin_lock(&rport->lock);
361
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
362
+ spin_unlock(&rport->lock);
363
+ schedule_work(&rport->ls_work);
334364 return ret;
335365 }
336366
337367 tls_req->status = 0;
338
- tls_req->tport = rport->targetport->private;
339
- ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
368
+ ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
369
+ &tls_req->ls_rsp,
370
+ lsreq->rqstaddr, lsreq->rqstlen);
371
+
372
+ return ret;
373
+}
374
+
375
+static int
376
+fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
377
+ struct nvmefc_ls_rsp *lsrsp)
378
+{
379
+ struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
380
+ struct nvmefc_ls_req *lsreq = tls_req->lsreq;
381
+ struct fcloop_tport *tport = targetport->private;
382
+ struct nvme_fc_remote_port *remoteport = tport->remoteport;
383
+ struct fcloop_rport *rport;
384
+
385
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
386
+ ((lsreq->rsplen < lsrsp->rsplen) ?
387
+ lsreq->rsplen : lsrsp->rsplen));
388
+
389
+ lsrsp->done(lsrsp);
390
+
391
+ if (remoteport) {
392
+ rport = remoteport->private;
393
+ spin_lock(&rport->lock);
394
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
395
+ spin_unlock(&rport->lock);
396
+ schedule_work(&rport->ls_work);
397
+ }
398
+
399
+ return 0;
400
+}
401
+
402
+static void
403
+fcloop_tport_lsrqst_work(struct work_struct *work)
404
+{
405
+ struct fcloop_tport *tport =
406
+ container_of(work, struct fcloop_tport, ls_work);
407
+ struct fcloop_lsreq *tls_req;
408
+
409
+ spin_lock(&tport->lock);
410
+ for (;;) {
411
+ tls_req = list_first_entry_or_null(&tport->ls_list,
412
+ struct fcloop_lsreq, ls_list);
413
+ if (!tls_req)
414
+ break;
415
+
416
+ list_del(&tls_req->ls_list);
417
+ spin_unlock(&tport->lock);
418
+
419
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
420
+ /*
421
+ * callee may free memory containing tls_req.
422
+ * do not reference lsreq after this.
423
+ */
424
+
425
+ spin_lock(&tport->lock);
426
+ }
427
+ spin_unlock(&tport->lock);
428
+}
429
+
430
+static int
431
+fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
432
+ struct nvmefc_ls_req *lsreq)
433
+{
434
+ struct fcloop_lsreq *tls_req = lsreq->private;
435
+ struct fcloop_tport *tport = targetport->private;
436
+ int ret = 0;
437
+
438
+ /*
439
+ * hosthandle should be the dst.rport value.
440
+ * hosthandle ignored as fcloop currently is
441
+ * 1:1 tgtport vs remoteport
442
+ */
443
+ tls_req->lsreq = lsreq;
444
+ INIT_LIST_HEAD(&tls_req->ls_list);
445
+
446
+ if (!tport->remoteport) {
447
+ tls_req->status = -ECONNREFUSED;
448
+ spin_lock(&tport->lock);
449
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
450
+ spin_unlock(&tport->lock);
451
+ schedule_work(&tport->ls_work);
452
+ return ret;
453
+ }
454
+
455
+ tls_req->status = 0;
456
+ ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
340457 lsreq->rqstaddr, lsreq->rqstlen);
341458
342459 return ret;
343460 }
344461
345462 static int
346
-fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
347
- struct nvmefc_tgt_ls_req *tgt_lsreq)
463
+fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
464
+ struct nvme_fc_remote_port *remoteport,
465
+ struct nvmefc_ls_rsp *lsrsp)
348466 {
349
- struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
467
+ struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
350468 struct nvmefc_ls_req *lsreq = tls_req->lsreq;
469
+ struct fcloop_rport *rport = remoteport->private;
470
+ struct nvmet_fc_target_port *targetport = rport->targetport;
471
+ struct fcloop_tport *tport;
351472
352
- memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
353
- ((lsreq->rsplen < tgt_lsreq->rsplen) ?
354
- lsreq->rsplen : tgt_lsreq->rsplen));
355
- tgt_lsreq->done(tgt_lsreq);
473
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
474
+ ((lsreq->rsplen < lsrsp->rsplen) ?
475
+ lsreq->rsplen : lsrsp->rsplen));
476
+ lsrsp->done(lsrsp);
356477
357
- schedule_work(&tls_req->work);
478
+ if (targetport) {
479
+ tport = targetport->private;
480
+ spin_lock(&tport->lock);
481
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
482
+ spin_unlock(&tport->lock);
483
+ schedule_work(&tport->ls_work);
484
+ }
358485
359486 return 0;
487
+}
488
+
489
+static void
490
+fcloop_t2h_host_release(void *hosthandle)
491
+{
492
+ /* host handle ignored for now */
493
+}
494
+
495
+/*
496
+ * Simulate reception of RSCN and converting it to an initiator transport
497
+ * call to rescan a remote port.
498
+ */
499
+static void
500
+fcloop_tgt_rscn_work(struct work_struct *work)
501
+{
502
+ struct fcloop_rscn *tgt_rscn =
503
+ container_of(work, struct fcloop_rscn, work);
504
+ struct fcloop_tport *tport = tgt_rscn->tport;
505
+
506
+ if (tport->remoteport)
507
+ nvme_fc_rescan_remoteport(tport->remoteport);
508
+ kfree(tgt_rscn);
509
+}
510
+
511
+static void
512
+fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
513
+{
514
+ struct fcloop_rscn *tgt_rscn;
515
+
516
+ tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
517
+ if (!tgt_rscn)
518
+ return;
519
+
520
+ tgt_rscn->tport = tgtport->private;
521
+ INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
522
+
523
+ schedule_work(&tgt_rscn->work);
360524 }
361525
362526 static void
....@@ -406,10 +570,11 @@
406570 struct fcloop_fcpreq *tfcp_req =
407571 container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
408572 struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
573
+ unsigned long flags;
409574 int ret = 0;
410575 bool aborted = false;
411576
412
- spin_lock(&tfcp_req->reqlock);
577
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
413578 switch (tfcp_req->inistate) {
414579 case INI_IO_START:
415580 tfcp_req->inistate = INI_IO_ACTIVE;
....@@ -418,11 +583,11 @@
418583 aborted = true;
419584 break;
420585 default:
421
- spin_unlock(&tfcp_req->reqlock);
586
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
422587 WARN_ON(1);
423588 return;
424589 }
425
- spin_unlock(&tfcp_req->reqlock);
590
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
426591
427592 if (unlikely(aborted))
428593 ret = -ECANCELED;
....@@ -443,8 +608,9 @@
443608 container_of(work, struct fcloop_fcpreq, abort_rcv_work);
444609 struct nvmefc_fcp_req *fcpreq;
445610 bool completed = false;
611
+ unsigned long flags;
446612
447
- spin_lock(&tfcp_req->reqlock);
613
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
448614 fcpreq = tfcp_req->fcpreq;
449615 switch (tfcp_req->inistate) {
450616 case INI_IO_ABORTED:
....@@ -453,11 +619,11 @@
453619 completed = true;
454620 break;
455621 default:
456
- spin_unlock(&tfcp_req->reqlock);
622
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
457623 WARN_ON(1);
458624 return;
459625 }
460
- spin_unlock(&tfcp_req->reqlock);
626
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
461627
462628 if (unlikely(completed)) {
463629 /* remove reference taken in original abort downcall */
....@@ -469,9 +635,9 @@
469635 nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
470636 &tfcp_req->tgt_fcp_req);
471637
472
- spin_lock(&tfcp_req->reqlock);
638
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
473639 tfcp_req->fcpreq = NULL;
474
- spin_unlock(&tfcp_req->reqlock);
640
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
475641
476642 fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
477643 /* call_host_done releases reference for abort downcall */
....@@ -487,11 +653,12 @@
487653 struct fcloop_fcpreq *tfcp_req =
488654 container_of(work, struct fcloop_fcpreq, tio_done_work);
489655 struct nvmefc_fcp_req *fcpreq;
656
+ unsigned long flags;
490657
491
- spin_lock(&tfcp_req->reqlock);
658
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
492659 fcpreq = tfcp_req->fcpreq;
493660 tfcp_req->inistate = INI_IO_COMPLETED;
494
- spin_unlock(&tfcp_req->reqlock);
661
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
495662
496663 fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
497664 }
....@@ -510,7 +677,7 @@
510677 if (!rport->targetport)
511678 return -ECONNREFUSED;
512679
513
- tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
680
+ tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
514681 if (!tfcp_req)
515682 return -ENOMEM;
516683
....@@ -595,13 +762,14 @@
595762 u32 rsplen = 0, xfrlen = 0;
596763 int fcp_err = 0, active, aborted;
597764 u8 op = tgt_fcpreq->op;
765
+ unsigned long flags;
598766
599
- spin_lock(&tfcp_req->reqlock);
767
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
600768 fcpreq = tfcp_req->fcpreq;
601769 active = tfcp_req->active;
602770 aborted = tfcp_req->aborted;
603771 tfcp_req->active = true;
604
- spin_unlock(&tfcp_req->reqlock);
772
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
605773
606774 if (unlikely(active))
607775 /* illegal - call while i/o active */
....@@ -609,9 +777,9 @@
609777
610778 if (unlikely(aborted)) {
611779 /* target transport has aborted i/o prior */
612
- spin_lock(&tfcp_req->reqlock);
780
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
613781 tfcp_req->active = false;
614
- spin_unlock(&tfcp_req->reqlock);
782
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
615783 tgt_fcpreq->transferred_length = 0;
616784 tgt_fcpreq->fcp_error = -ECANCELED;
617785 tgt_fcpreq->done(tgt_fcpreq);
....@@ -648,7 +816,7 @@
648816 break;
649817
650818 /* Fall-Thru to RSP handling */
651
- /* FALLTHRU */
819
+ fallthrough;
652820
653821 case NVMET_FCOP_RSP:
654822 if (fcpreq) {
....@@ -668,9 +836,9 @@
668836 break;
669837 }
670838
671
- spin_lock(&tfcp_req->reqlock);
839
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
672840 tfcp_req->active = false;
673
- spin_unlock(&tfcp_req->reqlock);
841
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
674842
675843 tgt_fcpreq->transferred_length = xfrlen;
676844 tgt_fcpreq->fcp_error = fcp_err;
....@@ -684,15 +852,16 @@
684852 struct nvmefc_tgt_fcp_req *tgt_fcpreq)
685853 {
686854 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
855
+ unsigned long flags;
687856
688857 /*
689858 * mark aborted only in case there were 2 threads in transport
690859 * (one doing io, other doing abort) and only kills ops posted
691860 * after the abort request
692861 */
693
- spin_lock(&tfcp_req->reqlock);
862
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
694863 tfcp_req->aborted = true;
695
- spin_unlock(&tfcp_req->reqlock);
864
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
696865
697866 tfcp_req->status = NVME_SC_INTERNAL;
698867
....@@ -713,9 +882,15 @@
713882 }
714883
715884 static void
716
-fcloop_ls_abort(struct nvme_fc_local_port *localport,
885
+fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
717886 struct nvme_fc_remote_port *remoteport,
718887 struct nvmefc_ls_req *lsreq)
888
+{
889
+}
890
+
891
+static void
892
+fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
893
+ void *hosthandle, struct nvmefc_ls_req *lsreq)
719894 {
720895 }
721896
....@@ -728,6 +903,7 @@
728903 struct fcloop_ini_fcpreq *inireq = fcpreq->private;
729904 struct fcloop_fcpreq *tfcp_req;
730905 bool abortio = true;
906
+ unsigned long flags;
731907
732908 spin_lock(&inireq->inilock);
733909 tfcp_req = inireq->tfcp_req;
....@@ -740,7 +916,7 @@
740916 return;
741917
742918 /* break initiator/target relationship for io */
743
- spin_lock(&tfcp_req->reqlock);
919
+ spin_lock_irqsave(&tfcp_req->reqlock, flags);
744920 switch (tfcp_req->inistate) {
745921 case INI_IO_START:
746922 case INI_IO_ACTIVE:
....@@ -750,11 +926,11 @@
750926 abortio = false;
751927 break;
752928 default:
753
- spin_unlock(&tfcp_req->reqlock);
929
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
754930 WARN_ON(1);
755931 return;
756932 }
757
- spin_unlock(&tfcp_req->reqlock);
933
+ spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
758934
759935 if (abortio)
760936 /* leave the reference while the work item is scheduled */
....@@ -809,6 +985,7 @@
809985 {
810986 struct fcloop_rport *rport = remoteport->private;
811987
988
+ flush_work(&rport->ls_work);
812989 fcloop_nport_put(rport->nport);
813990 }
814991
....@@ -817,6 +994,7 @@
817994 {
818995 struct fcloop_tport *tport = targetport->private;
819996
997
+ flush_work(&tport->ls_work);
820998 fcloop_nport_put(tport->nport);
821999 }
8221000
....@@ -829,10 +1007,11 @@
8291007 .remoteport_delete = fcloop_remoteport_delete,
8301008 .create_queue = fcloop_create_queue,
8311009 .delete_queue = fcloop_delete_queue,
832
- .ls_req = fcloop_ls_req,
1010
+ .ls_req = fcloop_h2t_ls_req,
8331011 .fcp_io = fcloop_fcp_req,
834
- .ls_abort = fcloop_ls_abort,
1012
+ .ls_abort = fcloop_h2t_ls_abort,
8351013 .fcp_abort = fcloop_fcp_abort,
1014
+ .xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp,
8361015 .max_hw_queues = FCLOOP_HW_QUEUES,
8371016 .max_sgl_segments = FCLOOP_SGL_SEGS,
8381017 .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
....@@ -846,10 +1025,14 @@
8461025
8471026 static struct nvmet_fc_target_template tgttemplate = {
8481027 .targetport_delete = fcloop_targetport_delete,
849
- .xmt_ls_rsp = fcloop_xmt_ls_rsp,
1028
+ .xmt_ls_rsp = fcloop_h2t_xmt_ls_rsp,
8501029 .fcp_op = fcloop_fcp_op,
8511030 .fcp_abort = fcloop_tgt_fcp_abort,
8521031 .fcp_req_release = fcloop_fcp_req_release,
1032
+ .discovery_event = fcloop_tgt_discovery_evt,
1033
+ .ls_req = fcloop_t2h_ls_req,
1034
+ .ls_abort = fcloop_t2h_ls_abort,
1035
+ .host_release = fcloop_t2h_host_release,
8531036 .max_hw_queues = FCLOOP_HW_QUEUES,
8541037 .max_sgl_segments = FCLOOP_SGL_SEGS,
8551038 .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
....@@ -858,6 +1041,7 @@
8581041 .target_features = 0,
8591042 /* sizes of additional private data for data structures */
8601043 .target_priv_sz = sizeof(struct fcloop_tport),
1044
+ .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
8611045 };
8621046
8631047 static ssize_t
....@@ -1109,6 +1293,9 @@
11091293 rport->nport = nport;
11101294 rport->lport = nport->lport;
11111295 nport->rport = rport;
1296
+ spin_lock_init(&rport->lock);
1297
+ INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
1298
+ INIT_LIST_HEAD(&rport->ls_list);
11121299
11131300 return count;
11141301 }
....@@ -1204,6 +1391,9 @@
12041391 tport->nport = nport;
12051392 tport->lport = nport->lport;
12061393 nport->tport = tport;
1394
+ spin_lock_init(&tport->lock);
1395
+ INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
1396
+ INIT_LIST_HEAD(&tport->ls_list);
12071397
12081398 return count;
12091399 }