forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
 
@@ -11,6 +10,140 @@
 
 static struct dentry *qla2x00_dfs_root;
 static atomic_t qla2x00_dfs_root_count;
+
+#define QLA_DFS_RPORT_DEVLOSS_TMO	1
+
+static int
+qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
+{
+	switch (attr_id) {
+	case QLA_DFS_RPORT_DEVLOSS_TMO:
+		/* Only supported for FC-NVMe devices that are registered. */
+		if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
+			return -EIO;
+		*val = fp->nvme_remote_port->dev_loss_tmo;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
+{
+	switch (attr_id) {
+	case QLA_DFS_RPORT_DEVLOSS_TMO:
+		/* Only supported for FC-NVMe devices that are registered. */
+		if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
+			return -EIO;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+		return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
+						      val);
+#else /* CONFIG_NVME_FC */
+		return -EINVAL;
+#endif /* CONFIG_NVME_FC */
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr)		\
+static int qla_dfs_rport_##_attr##_get(void *data, u64 *val)	\
+{								\
+	struct fc_port *fp = data;				\
+	return qla_dfs_rport_get(fp, _attr_id, val);		\
+}								\
+static int qla_dfs_rport_##_attr##_set(void *data, u64 val)	\
+{								\
+	struct fc_port *fp = data;				\
+	return qla_dfs_rport_set(fp, _attr_id, val);		\
+}								\
+DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops,		\
+		qla_dfs_rport_##_attr##_get,			\
+		qla_dfs_rport_##_attr##_set, "%llu\n")
+
+/*
+ * Wrapper for getting fc_port fields.
+ *
+ * _attr    : Attribute name.
+ * _get_val : Accessor macro to retrieve the value.
+ */
+#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)			\
+static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val)	\
+{									\
+	struct fc_port *fp = data;					\
+	*val = _get_val;						\
+	return 0;							\
+}									\
+DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops,		\
+		qla_dfs_rport_field_##_attr##_get,			\
+		NULL, "%llu\n")
+
+#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
+	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)
+
+#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
+	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)
+
+DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);
+
+DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
+DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
+DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
+DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
+DEFINE_QLA_DFS_RPORT_FIELD(flags);
+DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
+DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
+DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
+DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));
+
+void
+qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
+{
+	char wwn[32];
+
+#define QLA_CREATE_RPORT_FIELD_ATTR(_attr)			\
+	debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir,	\
+		fp, &qla_dfs_rport_field_##_attr##_fops)
+
+	if (!vha->dfs_rport_root || fp->dfs_rport_dir)
+		return;
+
+	sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
+	fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
+	if (IS_ERR(fp->dfs_rport_dir))
+		return;
+	if (NVME_TARGET(vha->hw, fp))
+		debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
+			fp, &qla_dfs_rport_dev_loss_tmo_fops);
+
+	QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
+	QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
+	QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
+	QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
+	QLA_CREATE_RPORT_FIELD_ATTR(flags);
+	QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
+	QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
+	QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
+	QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
+	QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
+	QLA_CREATE_RPORT_FIELD_ATTR(port_id);
+	QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
+}
+
+void
+qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
+{
+	if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
+		return;
+	debugfs_remove_recursive(fp->dfs_rport_dir);
+	fp->dfs_rport_dir = NULL;
+}
 
 static int
 qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
@@ -37,19 +170,7 @@
 	return 0;
 }
 
-static int
-qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
-{
-	scsi_qla_host_t *vha = inode->i_private;
-	return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
-}
-
-static const struct file_operations dfs_tgt_sess_ops = {
-	.open = qla2x00_dfs_tgt_sess_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess);
 
 static int
 qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
@@ -62,66 +183,50 @@
 	char *id_iter;
 	int rc, i;
 	uint16_t entries, loop_id;
-	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 
 	seq_printf(s, "%s\n", vha->host_str);
-	if (tgt) {
-		gid_list = dma_alloc_coherent(&ha->pdev->dev,
-		    qla2x00_gid_list_size(ha),
-		    &gid_list_dma, GFP_KERNEL);
-		if (!gid_list) {
-			ql_dbg(ql_dbg_user, vha, 0x7018,
-			    "DMA allocation failed for %u\n",
-			    qla2x00_gid_list_size(ha));
-			return 0;
-		}
-
-		rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
-		    &entries);
-		if (rc != QLA_SUCCESS)
-			goto out_free_id_list;
-
-		id_iter = (char *)gid_list;
-
-		seq_puts(s, "Port Name Port ID Loop ID\n");
-
-		for (i = 0; i < entries; i++) {
-			struct gid_list_info *gid =
-			    (struct gid_list_info *)id_iter;
-			loop_id = le16_to_cpu(gid->loop_id);
-			memset(&fc_port, 0, sizeof(fc_port_t));
-
-			fc_port.loop_id = loop_id;
-
-			rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
-			seq_printf(s, "%8phC %02x%02x%02x %d\n",
-			    fc_port.port_name, fc_port.d_id.b.domain,
-			    fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
-			    fc_port.loop_id);
-			id_iter += ha->gid_list_info_size;
-		}
-out_free_id_list:
-		dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
-		    gid_list, gid_list_dma);
+	gid_list = dma_alloc_coherent(&ha->pdev->dev,
+	    qla2x00_gid_list_size(ha),
+	    &gid_list_dma, GFP_KERNEL);
+	if (!gid_list) {
+		ql_dbg(ql_dbg_user, vha, 0x7018,
+		    "DMA allocation failed for %u\n",
+		    qla2x00_gid_list_size(ha));
+		return 0;
 	}
+
+	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+	    &entries);
+	if (rc != QLA_SUCCESS)
+		goto out_free_id_list;
+
+	id_iter = (char *)gid_list;
+
+	seq_puts(s, "Port Name Port ID Loop ID\n");
+
+	for (i = 0; i < entries; i++) {
+		struct gid_list_info *gid =
+		    (struct gid_list_info *)id_iter;
+		loop_id = le16_to_cpu(gid->loop_id);
+		memset(&fc_port, 0, sizeof(fc_port_t));
+
+		fc_port.loop_id = loop_id;
+
+		rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+		seq_printf(s, "%8phC %02x%02x%02x %d\n",
+		    fc_port.port_name, fc_port.d_id.b.domain,
+		    fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+		    fc_port.loop_id);
+		id_iter += ha->gid_list_info_size;
+	}
+out_free_id_list:
+	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+	    gid_list, gid_list_dma);
 
 	return 0;
 }
 
-static int
-qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
-{
-	scsi_qla_host_t *vha = inode->i_private;
-
-	return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
-}
-
-static const struct file_operations dfs_tgt_port_database_ops = {
-	.open = qla2x00_dfs_tgt_port_database_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database);
 
 static int
 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
@@ -129,6 +234,8 @@
 	struct scsi_qla_host *vha = s->private;
 	uint16_t mb[MAX_IOCB_MB_REG];
 	int rc;
+	struct qla_hw_data *ha = vha->hw;
+	u16 iocbs_used, i;
 
 	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
 	if (rc != QLA_SUCCESS) {
@@ -136,11 +243,11 @@
 	} else {
 		seq_puts(s, "FW Resource count\n\n");
 		seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
-		seq_printf(s, "current TGT exchg count[%d]\n", mb[2]);
-		seq_printf(s, "original Initiator Exchange count[%d]\n", mb[3]);
-		seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[6]);
-		seq_printf(s, "Original IOCB count[%d]\n", mb[7]);
-		seq_printf(s, "Current IOCB count[%d]\n", mb[10]);
+		seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
+		seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
+		seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
+		seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
+		seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
 		seq_printf(s, "MAX VP count[%d]\n", mb[11]);
 		seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
 		seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
@@ -151,25 +258,24 @@
 		    mb[22]);
 		seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
 		    mb[23]);
+	}
 
+	if (ql2xenforce_iocb_limit) {
+		/* lock is not require. It's an estimate. */
+		iocbs_used = ha->base_qpair->fwres.iocbs_used;
+		for (i = 0; i < ha->max_qpairs; i++) {
+			if (ha->queue_pair_map[i])
+				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+		}
+
+		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
+			   iocbs_used, ha->base_qpair->fwres.iocbs_limit);
 	}
 
 	return 0;
 }
 
-static int
-qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
-{
-	struct scsi_qla_host *vha = inode->i_private;
-	return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
-}
-
-static const struct file_operations dfs_fw_resource_cnt_ops = {
-	.open = qla_dfs_fw_resource_cnt_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt);
 
 static int
 qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
@@ -193,6 +299,8 @@
 
 	for (i = 0; i < vha->hw->max_qpairs; i++) {
 		qpair = vha->hw->queue_pair_map[i];
+		if (!qpair)
+			continue;
 		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
 		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
 		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
@@ -244,19 +352,7 @@
 	return 0;
 }
 
-static int
-qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
-{
-	struct scsi_qla_host *vha = inode->i_private;
-	return single_open(file, qla_dfs_tgt_counters_show, vha);
-}
-
-static const struct file_operations dfs_tgt_counters_ops = {
-	.open = qla_dfs_tgt_counters_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters);
 
 static int
 qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
@@ -384,7 +480,7 @@
 	int rc = 0;
 	unsigned long num_act_qp;
 
-	if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha))) {
+	if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
 		pr_err("host%ld: this adapter does not support Multi Q.",
 		    vha->host_no);
 		return -EINVAL;
@@ -436,7 +532,7 @@
 	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
-	    !IS_QLA27XX(ha))
+	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
 		goto out;
 	if (!ha->fce)
 		goto out;
@@ -446,11 +542,6 @@
 
 	atomic_set(&qla2x00_dfs_root_count, 0);
 	qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
-	if (!qla2x00_dfs_root) {
-		ql_log(ql_log_warn, vha, 0x00f7,
-		    "Unable to create debugfs root directory.\n");
-		goto out;
-	}
 
 create_dir:
 	if (ha->dfs_dir)
@@ -458,63 +549,39 @@
 
 	mutex_init(&ha->fce_mutex);
 	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
-	if (!ha->dfs_dir) {
-		ql_log(ql_log_warn, vha, 0x00f8,
-		    "Unable to create debugfs ha directory.\n");
-		goto out;
-	}
 
 	atomic_inc(&qla2x00_dfs_root_count);
 
 create_nodes:
 	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
-	    S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);
-	if (!ha->dfs_fw_resource_cnt) {
-		ql_log(ql_log_warn, vha, 0x00fd,
-		    "Unable to create debugFS fw_resource_count node.\n");
-		goto out;
-	}
+	    S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops);
 
 	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
-	    ha->dfs_dir, vha, &dfs_tgt_counters_ops);
-	if (!ha->dfs_tgt_counters) {
-		ql_log(ql_log_warn, vha, 0xd301,
-		    "Unable to create debugFS tgt_counters node.\n");
-		goto out;
-	}
+	    ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops);
 
 	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
-	    S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
-	if (!ha->tgt.dfs_tgt_port_database) {
-		ql_log(ql_log_warn, vha, 0xd03f,
-		    "Unable to create debugFS tgt_port_database node.\n");
-		goto out;
-	}
+	    S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops);
 
 	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
 	    &dfs_fce_ops);
-	if (!ha->dfs_fce) {
-		ql_log(ql_log_warn, vha, 0x00f9,
-		    "Unable to create debugfs fce node.\n");
-		goto out;
-	}
 
 	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
-	    S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
-	if (!ha->tgt.dfs_tgt_sess) {
-		ql_log(ql_log_warn, vha, 0xd040,
-		    "Unable to create debugFS tgt_sess node.\n");
-		goto out;
-	}
+	    S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops);
 
-	if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
+	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
 		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
 		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
-		if (!ha->tgt.dfs_naqp) {
+		if (IS_ERR(ha->tgt.dfs_naqp)) {
 			ql_log(ql_log_warn, vha, 0xd011,
-			"Unable to create debugFS naqp node.\n");
+			    "Unable to create debugFS naqp node.\n");
 			goto out;
 		}
+	}
+	vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
+	if (IS_ERR(vha->dfs_rport_root)) {
+		ql_log(ql_log_warn, vha, 0xd012,
+		    "Unable to create debugFS rports node.\n");
+		goto out;
 	}
 out:
 	return 0;
@@ -555,6 +622,11 @@
 		ha->dfs_fce = NULL;
 	}
 
+	if (vha->dfs_rport_root) {
+		debugfs_remove_recursive(vha->dfs_rport_root);
+		vha->dfs_rport_root = NULL;
+	}
+
 	if (ha->dfs_dir) {
 		debugfs_remove(ha->dfs_dir);
 		ha->dfs_dir = NULL;
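For reference, the per-rport files introduced above all rely on the stock DEFINE_DEBUGFS_ATTRIBUTE get/set pattern from <linux/debugfs.h>: a u64 get/set callback pair bound to a file created with debugfs_create_file() and torn down with debugfs_remove_recursive(). A minimal sketch of that mechanism in isolation is below; the module, the "demo" directory name, and the demo_* identifiers are hypothetical and only illustrate the pattern, they are not part of the qla2xxx driver or of this commit.

// Minimal illustration of the DEFINE_DEBUGFS_ATTRIBUTE pattern used above
// (hypothetical demo module, not part of qla2xxx).
#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *demo_dir;
static u64 demo_value;

/* Read callback: report the current value to userspace as %llu. */
static int demo_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

/* Write callback: store the value written by userspace. */
static int demo_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");

static int __init demo_init(void)
{
	/* Creates /sys/kernel/debug/demo/value, readable and writable. */
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("value", 0600, demo_dir, &demo_value, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");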