forked from ~ljy/RK356X_SDK_RELEASE

hc  2024-01-31  f70575805708cabdedea7498aaa3f710fde4d920

--- a/kernel/drivers/nvme/target/nvmet.h
+++ b/kernel/drivers/nvme/target/nvmet.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
  */
 
 #ifndef _NVMET_H
@@ -26,15 +18,23 @@
 #include <linux/configfs.h>
 #include <linux/rcupdate.h>
 #include <linux/blkdev.h>
+#include <linux/radix-tree.h>
+#include <linux/t10-pi.h>
+
+#define NVMET_DEFAULT_VS NVME_VS(1, 3, 0)
 
 #define NVMET_ASYNC_EVENTS 4
 #define NVMET_ERROR_LOG_SLOTS 128
+#define NVMET_NO_ERROR_LOC ((u16)-1)
+#define NVMET_DEFAULT_CTRL_MODEL "Linux"
 
 /*
  * Supported optional AENs:
  */
 #define NVMET_AEN_CFG_OPTIONAL \
         (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
+#define NVMET_DISC_AEN_CFG_OPTIONAL \
+        (NVME_AEN_CFG_DISC_CHANGE)
 
 /*
  * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
@@ -54,7 +54,6 @@
         (cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
 
 struct nvmet_ns {
-        struct list_head dev_link;
         struct percpu_ref ref;
         struct block_device *bdev;
         struct file *file;
@@ -77,11 +76,21 @@
         struct completion disable_done;
         mempool_t *bvec_pool;
         struct kmem_cache *bvec_cache;
+
+        int use_p2pmem;
+        struct pci_dev *p2p_dev;
+        int pi_type;
+        int metadata_size;
 };
 
 static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
 {
         return container_of(to_config_group(item), struct nvmet_ns, group);
+}
+
+static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
+{
+        return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
 }
 
 struct nvmet_cq {
@@ -95,6 +104,7 @@
         u16 qid;
         u16 size;
         u32 sqhd;
+        bool sqhd_disabled;
         struct completion free_done;
         struct completion confirm_done;
 };
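
Note on the hunk above: `sqhd` is the submission-queue head that gets echoed back to the host in each completion, and the new `sqhd_disabled` flag lets a transport opt out of that accounting when it signals queue depth another way. A hedged sketch of the update a completion path might perform, loosely modeled on the core's behavior; the `nvmet_example_` name is hypothetical and this is not code from this diff:

/* Sketch: advance sqhd on completion, skipped when sqhd_disabled is set. */
static void nvmet_example_update_sq_head(struct nvmet_req *req)
{
        struct nvmet_sq *sq = req->sq;

        if (sq->size && !sq->sqhd_disabled) {
                u32 old_sqhd, new_sqhd;

                do {    /* lock-free increment, modulo the queue size */
                        old_sqhd = sq->sqhd;
                        new_sqhd = (old_sqhd + 1) % sq->size;
                } while (cmpxchg(&sq->sqhd, old_sqhd, new_sqhd) != old_sqhd);
        }
        req->cqe->sq_head = cpu_to_le16(sq->sqhd & 0x0000FFFF);
}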
@@ -128,12 +138,15 @@
         struct list_head subsystems;
         struct config_group referrals_group;
         struct list_head referrals;
+        struct list_head global_entry;
         struct config_group ana_groups_group;
         struct nvmet_ana_group ana_default_group;
         enum nvme_ana_state *ana_state;
         void *priv;
         bool enabled;
         int inline_data_size;
+        const struct nvmet_fabrics_ops *tr_ops;
+        bool pi_enable;
 };
 
 static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
@@ -151,8 +164,9 @@
 
 struct nvmet_ctrl {
         struct nvmet_subsys *subsys;
-        struct nvmet_cq **cqs;
         struct nvmet_sq **sqs;
+
+        bool reset_tbkas;
 
         struct mutex lock;
         u64 cap;
@@ -184,6 +198,19 @@
 
         char subsysnqn[NVMF_NQN_FIELD_LEN];
         char hostnqn[NVMF_NQN_FIELD_LEN];
+
+        struct device *p2p_client;
+        struct radix_tree_root p2p_ns_map;
+
+        spinlock_t error_lock;
+        u64 err_counter;
+        struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
+        bool pi_support;
+};
+
+struct nvmet_subsys_model {
+        struct rcu_head rcuhead;
+        char number[];
 };
 
 struct nvmet_subsys {
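
The error-log fields added in the hunk above form a fixed ring: `err_counter` increases monotonically under `error_lock`, and `err_counter % NVMET_ERROR_LOG_SLOTS` selects the slot to recycle, so the log always holds the 128 most recent entries. A minimal sketch of a writer, assuming the mainline-style `struct nvme_error_slot` layout; the `nvmet_example_set_error` name is hypothetical:

/* Sketch only: how the slots[] ring added above could be filled. */
static void nvmet_example_set_error(struct nvmet_req *req, u16 status)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_error_slot *slot;
        unsigned long flags;

        spin_lock_irqsave(&ctrl->error_lock, flags);
        ctrl->err_counter++;
        /* err_counter only grows; modulo picks the slot to overwrite */
        slot = &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

        slot->error_count = cpu_to_le64(ctrl->err_counter);
        slot->sqid = cpu_to_le16(req->sq->qid);
        slot->cmdid = req->cmd->common.command_id;
        slot->status_field = cpu_to_le16(status << 1);
        slot->param_error_location = cpu_to_le16(req->error_loc);
        slot->lba = cpu_to_le64(req->error_slba);
        slot->nsid = req->cmd->common.nsid;
        spin_unlock_irqrestore(&ctrl->error_lock, flags);
}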
@@ -192,9 +219,11 @@
         struct mutex lock;
         struct kref ref;
 
-        struct list_head namespaces;
+        struct xarray namespaces;
         unsigned int nr_namespaces;
         unsigned int max_nsid;
+        u16 cntlid_min;
+        u16 cntlid_max;
 
         struct list_head ctrls;
 
@@ -206,11 +235,20 @@
         u64 ver;
         u64 serial;
         char *subsysnqn;
+        bool pi_support;
 
         struct config_group group;
 
         struct config_group namespaces_group;
         struct config_group allowed_hosts_group;
+
+        struct nvmet_subsys_model __rcu *model;
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+        struct nvme_ctrl *passthru_ctrl;
+        char *passthru_ctrl_path;
+        struct config_group passthru_group;
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
 };
 
 static inline struct nvmet_subsys *to_subsys(struct config_item *item)
@@ -254,24 +292,31 @@
         struct module *owner;
         unsigned int type;
         unsigned int msdbd;
-        bool has_keyed_sgls : 1;
+        unsigned int flags;
+#define NVMF_KEYED_SGLS (1 << 0)
+#define NVMF_METADATA_SUPPORTED (1 << 1)
         void (*queue_response)(struct nvmet_req *req);
         int (*add_port)(struct nvmet_port *port);
         void (*remove_port)(struct nvmet_port *port);
         void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
         void (*disc_traddr)(struct nvmet_req *req,
                         struct nvmet_port *port, char *traddr);
+        u16 (*install_queue)(struct nvmet_sq *nvme_sq);
+        void (*discovery_chg)(struct nvmet_port *port);
+        u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
 };
 
 #define NVMET_MAX_INLINE_BIOVEC 8
+#define NVMET_MAX_INLINE_DATA_LEN NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE
 
 struct nvmet_req {
         struct nvme_command *cmd;
-        struct nvme_completion *rsp;
+        struct nvme_completion *cqe;
         struct nvmet_sq *sq;
         struct nvmet_cq *cq;
         struct nvmet_ns *ns;
         struct scatterlist *sg;
+        struct scatterlist *metadata_sg;
         struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
         union {
                 struct {
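
The hunk above replaces the single `has_keyed_sgls` bitfield with a `flags` word so a transport can advertise several capabilities at once. A hedged sketch of how a transport's ops table might set them; all `nvmet_example_*` callbacks are placeholders, not part of this diff:

/* Sketch: a transport advertising keyed SGLs and metadata support
 * through the new flags word. Callback bodies are hypothetical. */
static const struct nvmet_fabrics_ops nvmet_example_rdma_ops = {
        .owner          = THIS_MODULE,
        .type           = NVMF_TRTYPE_RDMA,
        .msdbd          = 1,
        .flags          = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
        .queue_response = nvmet_example_queue_response,
        .add_port       = nvmet_example_add_port,
        .remove_port    = nvmet_example_remove_port,
        .delete_ctrl    = nvmet_example_delete_ctrl,
        .get_mdts       = nvmet_example_get_mdts,
};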
@@ -283,29 +328,34 @@
                         struct bio_vec *bvec;
                         struct work_struct work;
                 } f;
+                struct {
+                        struct request *rq;
+                        struct work_struct work;
+                        bool use_workqueue;
+                } p;
         };
         int sg_cnt;
-        /* data length as parsed from the command: */
-        size_t data_len;
+        int metadata_sg_cnt;
         /* data length as parsed from the SGL descriptor: */
         size_t transfer_len;
+        size_t metadata_len;
 
         struct nvmet_port *port;
 
         void (*execute)(struct nvmet_req *req);
         const struct nvmet_fabrics_ops *ops;
+
+        struct pci_dev *p2p_dev;
+        struct device *p2p_client;
+        u16 error_loc;
+        u64 error_slba;
 };
 
 extern struct workqueue_struct *buffered_io_wq;
 
-static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
-{
-        req->rsp->status = cpu_to_le16(status << 1);
-}
-
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
-        req->rsp->result.u32 = cpu_to_le32(result);
+        req->cqe->result.u32 = cpu_to_le32(result);
 }
 
 /*
@@ -324,7 +374,31 @@
         u8 log_page;
 };
 
+static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
+{
+        int rae = le32_to_cpu(req->cmd->common.cdw10) & 1 << 15;
+
+        if (!rae)
+                clear_bit(bn, &req->sq->ctrl->aen_masked);
+}
+
+static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
+{
+        if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
+                return true;
+        return test_and_set_bit(bn, &ctrl->aen_masked);
+}
+
+void nvmet_get_feat_kato(struct nvmet_req *req);
+void nvmet_get_feat_async_event(struct nvmet_req *req);
+u16 nvmet_set_feat_kato(struct nvmet_req *req);
+u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
+void nvmet_execute_async_event(struct nvmet_req *req);
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
+
 u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
+void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
 u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
 u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
 u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
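
The two AEN helpers added above work as a pair: `nvmet_aen_bit_disabled()` returns true if the event class is disabled or an AEN is already outstanding, atomically masking the bit otherwise, while `nvmet_clear_aen_bit()` re-arms the bit when the host reads the matching log page without the Retain Asynchronous Event (RAE) flag (cdw10 bit 15). A sketch of the usual pattern, using mainline-style AEN constants; the `nvmet_example_*` wrappers are hypothetical:

/* Event side: generate at most one AEN per log-page read. */
void nvmet_example_ns_changed(struct nvmet_ctrl *ctrl)
{
        if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
                return;         /* class disabled, or an AEN already queued */
        nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
                        NVME_AER_NOTICE_NS_CHANGED, NVME_LOG_CHANGED_NS);
}

/* Log-page side: re-arm the bit unless the host set RAE in cdw10. */
static void nvmet_example_get_log_changed_ns(struct nvmet_req *req)
{
        /* ... copy the changed-namespace list to the host ... */
        nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
        nvmet_req_complete(req, 0);
}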
@@ -334,8 +408,15 @@
 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                 struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
-void nvmet_req_execute(struct nvmet_req *req);
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
+int nvmet_req_alloc_sgls(struct nvmet_req *req);
+void nvmet_req_free_sgls(struct nvmet_req *req);
+
+void nvmet_execute_set_features(struct nvmet_req *req);
+void nvmet_execute_get_features(struct nvmet_req *req);
+void nvmet_execute_keep_alive(struct nvmet_req *req);
 
 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
                 u16 size);
@@ -373,11 +454,14 @@
 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
 
+void nvmet_port_del_ctrls(struct nvmet_port *port,
+                struct nvmet_subsys *subsys);
+
 int nvmet_enable_port(struct nvmet_port *port);
 void nvmet_disable_port(struct nvmet_port *port);
 
 void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
-void nvmet_referral_disable(struct nvmet_port *port);
+void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);
 
 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
                 size_t len);
@@ -386,6 +470,15 @@
 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
 
 u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
+
+extern struct list_head *nvmet_ports;
+void nvmet_port_disc_changed(struct nvmet_port *port,
+                struct nvmet_subsys *subsys);
+void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
+                struct nvmet_host *host);
+void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+                u8 event_info, u8 log_page);
 
 #define NVMET_QUEUE_SIZE 1024
 #define NVMET_NR_QUEUES 128
@@ -407,7 +500,7 @@
 #define NVMET_DEFAULT_ANA_GRPID 1
 
 #define NVMET_KAS 10
-#define NVMET_DISC_KATO 120
+#define NVMET_DISC_KATO_MS 120000
 
 int __init nvmet_init_configfs(void);
 void __exit nvmet_exit_configfs(void);
@@ -416,15 +509,13 @@
 void nvmet_exit_discovery(void);
 
 extern struct nvmet_subsys *nvmet_disc_subsys;
-extern u64 nvmet_genctr;
 extern struct rw_semaphore nvmet_config_sem;
 
 extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
 extern u64 nvmet_ana_chgcnt;
 extern struct rw_semaphore nvmet_ana_sem;
 
-bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
-                const char *hostnqn);
+bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);
 
 int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
 int nvmet_file_ns_enable(struct nvmet_ns *ns);
@@ -433,10 +524,96 @@
 u16 nvmet_bdev_flush(struct nvmet_req *req);
 u16 nvmet_file_flush(struct nvmet_req *req);
 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
+int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
+void nvmet_ns_revalidate(struct nvmet_ns *ns);
 
-static inline u32 nvmet_rw_len(struct nvmet_req *req)
+static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
 {
         return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
                         req->ns->blksize_shift;
 }
+
+static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
+{
+        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+                return 0;
+        return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
+                        req->ns->metadata_size;
+}
+
+static inline u32 nvmet_dsm_len(struct nvmet_req *req)
+{
+        return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
+                sizeof(struct nvme_dsm_range);
+}
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
+int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
+void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
+u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
+static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+{
+        return subsys->passthru_ctrl;
+}
+#else /* CONFIG_NVME_TARGET_PASSTHRU */
+static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
+{
+}
+static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
+{
+}
+static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
+{
+        return 0;
+}
+static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
+{
+        return 0;
+}
+static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+{
+        return NULL;
+}
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
+static inline struct nvme_ctrl *
+nvmet_req_passthru_ctrl(struct nvmet_req *req)
+{
+        return nvmet_passthru_ctrl(req->sq->ctrl->subsys);
+}
+
+u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
+
+/* Convert a 32-bit number to a 16-bit 0's based number */
+static inline __le16 to0based(u32 a)
+{
+        return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
+}
+
+static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
+{
+        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
+                return false;
+        return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
+}
+
+static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
+{
+        return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
+}
+
+static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
+{
+        return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
+}
+
+static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
+{
+        return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
+                req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
+}
+
 #endif /* _NVMET_H */
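
The LBA helpers near the end assume the namespace block size is at least one 512-byte sector, i.e. `blksize_shift >= SECTOR_SHIFT` (9). Worked example: with 4096-byte blocks (`blksize_shift` = 12), sector 16 is LBA 16 >> 3 = 2, and LBA 2 maps back to sector 2 << 3 = 16. A standalone restatement of that math outside the kernel, with simplified types:

/* Standalone sketch of the sector/LBA math above; plain C, no kernel types. */
#include <assert.h>
#include <stdint.h>

#define SECTOR_SHIFT 9  /* Linux block-layer sectors are 512 bytes */

static uint64_t sect_to_lba(unsigned int blksize_shift, uint64_t sect)
{
        return sect >> (blksize_shift - SECTOR_SHIFT);
}

static uint64_t lba_to_sect(unsigned int blksize_shift, uint64_t lba)
{
        return lba << (blksize_shift - SECTOR_SHIFT);
}

int main(void)
{
        /* 4 KiB logical blocks: blksize_shift == 12 */
        assert(sect_to_lba(12, 16) == 2);
        assert(lba_to_sect(12, 2) == 16);
        /* 512-byte blocks degenerate to the identity mapping */
        assert(sect_to_lba(9, 7) == 7);
        return 0;
}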