.. | .. |
---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0 */ |
---|
1 | 2 | /* |
---|
2 | 3 | * Copyright (c) 2015-2016 HGST, a Western Digital Company. |
---|
3 | | - * |
---|
4 | | - * This program is free software; you can redistribute it and/or modify it |
---|
5 | | - * under the terms and conditions of the GNU General Public License, |
---|
6 | | - * version 2, as published by the Free Software Foundation. |
---|
7 | | - * |
---|
8 | | - * This program is distributed in the hope it will be useful, but WITHOUT |
---|
9 | | - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
---|
10 | | - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
---|
11 | | - * more details. |
---|
12 | 4 | */ |
---|
13 | 5 | |
---|
14 | 6 | #ifndef _NVMET_H |
---|
.. | .. |
---|
26 | 18 | #include <linux/configfs.h> |
---|
27 | 19 | #include <linux/rcupdate.h> |
---|
28 | 20 | #include <linux/blkdev.h> |
---|
| 21 | +#include <linux/radix-tree.h> |
---|
| 22 | +#include <linux/t10-pi.h> |
---|
| 23 | + |
---|
| 24 | +#define NVMET_DEFAULT_VS NVME_VS(1, 3, 0) |
---|
29 | 25 | |
---|
30 | 26 | #define NVMET_ASYNC_EVENTS 4 |
---|
31 | 27 | #define NVMET_ERROR_LOG_SLOTS 128 |
---|
| 28 | +#define NVMET_NO_ERROR_LOC ((u16)-1) |
---|
| 29 | +#define NVMET_DEFAULT_CTRL_MODEL "Linux" |
---|
32 | 30 | |
---|
33 | 31 | /* |
---|
34 | 32 | * Supported optional AENs: |
---|
35 | 33 | */ |
---|
36 | 34 | #define NVMET_AEN_CFG_OPTIONAL \ |
---|
37 | 35 | (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE) |
---|
| 36 | +#define NVMET_DISC_AEN_CFG_OPTIONAL \ |
---|
| 37 | + (NVME_AEN_CFG_DISC_CHANGE) |
---|
38 | 38 | |
---|
39 | 39 | /* |
---|
40 | 40 | * Plus mandatory SMART AENs (we'll never send them, but allow enabling them): |
---|
.. | .. |
---|
54 | 54 | (cpu_to_le32(offsetof(struct nvmf_connect_command, x))) |
---|
55 | 55 | |
---|
56 | 56 | struct nvmet_ns { |
---|
57 | | - struct list_head dev_link; |
---|
58 | 57 | struct percpu_ref ref; |
---|
59 | 58 | struct block_device *bdev; |
---|
60 | 59 | struct file *file; |
---|
.. | .. |
---|
77 | 76 | struct completion disable_done; |
---|
78 | 77 | mempool_t *bvec_pool; |
---|
79 | 78 | struct kmem_cache *bvec_cache; |
---|
| 79 | + |
---|
| 80 | + int use_p2pmem; |
---|
| 81 | + struct pci_dev *p2p_dev; |
---|
| 82 | + int pi_type; |
---|
| 83 | + int metadata_size; |
---|
80 | 84 | }; |
---|
81 | 85 | |
---|
82 | 86 | static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item) |
---|
83 | 87 | { |
---|
84 | 88 | return container_of(to_config_group(item), struct nvmet_ns, group); |
---|
| 89 | +} |
---|
| 90 | + |
---|
| 91 | +static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns) |
---|
| 92 | +{ |
---|
| 93 | + return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL; |
---|
85 | 94 | } |
---|
86 | 95 | |
---|
87 | 96 | struct nvmet_cq { |
---|
.. | .. |
---|
95 | 104 | u16 qid; |
---|
96 | 105 | u16 size; |
---|
97 | 106 | u32 sqhd; |
---|
| 107 | + bool sqhd_disabled; |
---|
98 | 108 | struct completion free_done; |
---|
99 | 109 | struct completion confirm_done; |
---|
100 | 110 | }; |
---|
.. | .. |
---|
128 | 138 | struct list_head subsystems; |
---|
129 | 139 | struct config_group referrals_group; |
---|
130 | 140 | struct list_head referrals; |
---|
| 141 | + struct list_head global_entry; |
---|
131 | 142 | struct config_group ana_groups_group; |
---|
132 | 143 | struct nvmet_ana_group ana_default_group; |
---|
133 | 144 | enum nvme_ana_state *ana_state; |
---|
134 | 145 | void *priv; |
---|
135 | 146 | bool enabled; |
---|
136 | 147 | int inline_data_size; |
---|
| 148 | + const struct nvmet_fabrics_ops *tr_ops; |
---|
| 149 | + bool pi_enable; |
---|
137 | 150 | }; |
---|
138 | 151 | |
---|
139 | 152 | static inline struct nvmet_port *to_nvmet_port(struct config_item *item) |
---|
.. | .. |
---|
151 | 164 | |
---|
152 | 165 | struct nvmet_ctrl { |
---|
153 | 166 | struct nvmet_subsys *subsys; |
---|
154 | | - struct nvmet_cq **cqs; |
---|
155 | 167 | struct nvmet_sq **sqs; |
---|
| 168 | + |
---|
| 169 | + bool reset_tbkas; |
---|
156 | 170 | |
---|
157 | 171 | struct mutex lock; |
---|
158 | 172 | u64 cap; |
---|
.. | .. |
---|
184 | 198 | |
---|
185 | 199 | char subsysnqn[NVMF_NQN_FIELD_LEN]; |
---|
186 | 200 | char hostnqn[NVMF_NQN_FIELD_LEN]; |
---|
| 201 | + |
---|
| 202 | + struct device *p2p_client; |
---|
| 203 | + struct radix_tree_root p2p_ns_map; |
---|
| 204 | + |
---|
| 205 | + spinlock_t error_lock; |
---|
| 206 | + u64 err_counter; |
---|
| 207 | + struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS]; |
---|
| 208 | + bool pi_support; |
---|
| 209 | +}; |
---|
| 210 | + |
---|
/*
 * Container for the subsystem model number string.  It is published through
 * an __rcu pointer in struct nvmet_subsys, so replacements are freed via the
 * embedded rcu_head once readers are done.
 */
struct nvmet_subsys_model {
	struct rcu_head		rcuhead;
	char			number[];	/* flexible array holding the string */
};
---|
188 | 215 | |
---|
189 | 216 | struct nvmet_subsys { |
---|
.. | .. |
---|
192 | 219 | struct mutex lock; |
---|
193 | 220 | struct kref ref; |
---|
194 | 221 | |
---|
195 | | - struct list_head namespaces; |
---|
| 222 | + struct xarray namespaces; |
---|
196 | 223 | unsigned int nr_namespaces; |
---|
197 | 224 | unsigned int max_nsid; |
---|
| 225 | + u16 cntlid_min; |
---|
| 226 | + u16 cntlid_max; |
---|
198 | 227 | |
---|
199 | 228 | struct list_head ctrls; |
---|
200 | 229 | |
---|
.. | .. |
---|
206 | 235 | u64 ver; |
---|
207 | 236 | u64 serial; |
---|
208 | 237 | char *subsysnqn; |
---|
| 238 | + bool pi_support; |
---|
209 | 239 | |
---|
210 | 240 | struct config_group group; |
---|
211 | 241 | |
---|
212 | 242 | struct config_group namespaces_group; |
---|
213 | 243 | struct config_group allowed_hosts_group; |
---|
| 244 | + |
---|
| 245 | + struct nvmet_subsys_model __rcu *model; |
---|
| 246 | + |
---|
| 247 | +#ifdef CONFIG_NVME_TARGET_PASSTHRU |
---|
| 248 | + struct nvme_ctrl *passthru_ctrl; |
---|
| 249 | + char *passthru_ctrl_path; |
---|
| 250 | + struct config_group passthru_group; |
---|
| 251 | +#endif /* CONFIG_NVME_TARGET_PASSTHRU */ |
---|
214 | 252 | }; |
---|
215 | 253 | |
---|
216 | 254 | static inline struct nvmet_subsys *to_subsys(struct config_item *item) |
---|
.. | .. |
---|
254 | 292 | struct module *owner; |
---|
255 | 293 | unsigned int type; |
---|
256 | 294 | unsigned int msdbd; |
---|
257 | | - bool has_keyed_sgls : 1; |
---|
| 295 | + unsigned int flags; |
---|
| 296 | +#define NVMF_KEYED_SGLS (1 << 0) |
---|
| 297 | +#define NVMF_METADATA_SUPPORTED (1 << 1) |
---|
258 | 298 | void (*queue_response)(struct nvmet_req *req); |
---|
259 | 299 | int (*add_port)(struct nvmet_port *port); |
---|
260 | 300 | void (*remove_port)(struct nvmet_port *port); |
---|
261 | 301 | void (*delete_ctrl)(struct nvmet_ctrl *ctrl); |
---|
262 | 302 | void (*disc_traddr)(struct nvmet_req *req, |
---|
263 | 303 | struct nvmet_port *port, char *traddr); |
---|
| 304 | + u16 (*install_queue)(struct nvmet_sq *nvme_sq); |
---|
| 305 | + void (*discovery_chg)(struct nvmet_port *port); |
---|
| 306 | + u8 (*get_mdts)(const struct nvmet_ctrl *ctrl); |
---|
264 | 307 | }; |
---|
265 | 308 | |
---|
#define NVMET_MAX_INLINE_BIOVEC	8
/*
 * Parenthesize the expansion so the macro behaves as a single value in any
 * expression context (e.g. division or modulo by NVMET_MAX_INLINE_DATA_LEN
 * would otherwise bind to PAGE_SIZE alone).
 */
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
---|
267 | 311 | |
---|
268 | 312 | struct nvmet_req { |
---|
269 | 313 | struct nvme_command *cmd; |
---|
270 | | - struct nvme_completion *rsp; |
---|
| 314 | + struct nvme_completion *cqe; |
---|
271 | 315 | struct nvmet_sq *sq; |
---|
272 | 316 | struct nvmet_cq *cq; |
---|
273 | 317 | struct nvmet_ns *ns; |
---|
274 | 318 | struct scatterlist *sg; |
---|
| 319 | + struct scatterlist *metadata_sg; |
---|
275 | 320 | struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC]; |
---|
276 | 321 | union { |
---|
277 | 322 | struct { |
---|
.. | .. |
---|
283 | 328 | struct bio_vec *bvec; |
---|
284 | 329 | struct work_struct work; |
---|
285 | 330 | } f; |
---|
| 331 | + struct { |
---|
| 332 | + struct request *rq; |
---|
| 333 | + struct work_struct work; |
---|
| 334 | + bool use_workqueue; |
---|
| 335 | + } p; |
---|
286 | 336 | }; |
---|
287 | 337 | int sg_cnt; |
---|
288 | | - /* data length as parsed from the command: */ |
---|
289 | | - size_t data_len; |
---|
| 338 | + int metadata_sg_cnt; |
---|
290 | 339 | /* data length as parsed from the SGL descriptor: */ |
---|
291 | 340 | size_t transfer_len; |
---|
| 341 | + size_t metadata_len; |
---|
292 | 342 | |
---|
293 | 343 | struct nvmet_port *port; |
---|
294 | 344 | |
---|
295 | 345 | void (*execute)(struct nvmet_req *req); |
---|
296 | 346 | const struct nvmet_fabrics_ops *ops; |
---|
| 347 | + |
---|
| 348 | + struct pci_dev *p2p_dev; |
---|
| 349 | + struct device *p2p_client; |
---|
| 350 | + u16 error_loc; |
---|
| 351 | + u64 error_slba; |
---|
297 | 352 | }; |
---|
298 | 353 | |
---|
299 | 354 | extern struct workqueue_struct *buffered_io_wq; |
---|
300 | 355 | |
---|
301 | | -static inline void nvmet_set_status(struct nvmet_req *req, u16 status) |
---|
302 | | -{ |
---|
303 | | - req->rsp->status = cpu_to_le16(status << 1); |
---|
304 | | -} |
---|
305 | | - |
---|
306 | 356 | static inline void nvmet_set_result(struct nvmet_req *req, u32 result) |
---|
307 | 357 | { |
---|
308 | | - req->rsp->result.u32 = cpu_to_le32(result); |
---|
| 358 | + req->cqe->result.u32 = cpu_to_le32(result); |
---|
309 | 359 | } |
---|
310 | 360 | |
---|
311 | 361 | /* |
---|
.. | .. |
---|
324 | 374 | u8 log_page; |
---|
325 | 375 | }; |
---|
326 | 376 | |
---|
| 377 | +static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn) |
---|
| 378 | +{ |
---|
| 379 | + int rae = le32_to_cpu(req->cmd->common.cdw10) & 1 << 15; |
---|
| 380 | + |
---|
| 381 | + if (!rae) |
---|
| 382 | + clear_bit(bn, &req->sq->ctrl->aen_masked); |
---|
| 383 | +} |
---|
| 384 | + |
---|
| 385 | +static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn) |
---|
| 386 | +{ |
---|
| 387 | + if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn))) |
---|
| 388 | + return true; |
---|
| 389 | + return test_and_set_bit(bn, &ctrl->aen_masked); |
---|
| 390 | +} |
---|
| 391 | + |
---|
| 392 | +void nvmet_get_feat_kato(struct nvmet_req *req); |
---|
| 393 | +void nvmet_get_feat_async_event(struct nvmet_req *req); |
---|
| 394 | +u16 nvmet_set_feat_kato(struct nvmet_req *req); |
---|
| 395 | +u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask); |
---|
| 396 | +void nvmet_execute_async_event(struct nvmet_req *req); |
---|
| 397 | +void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl); |
---|
| 398 | +void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl); |
---|
| 399 | + |
---|
327 | 400 | u16 nvmet_parse_connect_cmd(struct nvmet_req *req); |
---|
| 401 | +void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id); |
---|
328 | 402 | u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req); |
---|
329 | 403 | u16 nvmet_file_parse_io_cmd(struct nvmet_req *req); |
---|
330 | 404 | u16 nvmet_parse_admin_cmd(struct nvmet_req *req); |
---|
.. | .. |
---|
334 | 408 | bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, |
---|
335 | 409 | struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops); |
---|
336 | 410 | void nvmet_req_uninit(struct nvmet_req *req); |
---|
337 | | -void nvmet_req_execute(struct nvmet_req *req); |
---|
| 411 | +bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len); |
---|
| 412 | +bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len); |
---|
338 | 413 | void nvmet_req_complete(struct nvmet_req *req, u16 status); |
---|
| 414 | +int nvmet_req_alloc_sgls(struct nvmet_req *req); |
---|
| 415 | +void nvmet_req_free_sgls(struct nvmet_req *req); |
---|
| 416 | + |
---|
| 417 | +void nvmet_execute_set_features(struct nvmet_req *req); |
---|
| 418 | +void nvmet_execute_get_features(struct nvmet_req *req); |
---|
| 419 | +void nvmet_execute_keep_alive(struct nvmet_req *req); |
---|
339 | 420 | |
---|
340 | 421 | void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, |
---|
341 | 422 | u16 size); |
---|
.. | .. |
---|
373 | 454 | int nvmet_register_transport(const struct nvmet_fabrics_ops *ops); |
---|
374 | 455 | void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops); |
---|
375 | 456 | |
---|
| 457 | +void nvmet_port_del_ctrls(struct nvmet_port *port, |
---|
| 458 | + struct nvmet_subsys *subsys); |
---|
| 459 | + |
---|
376 | 460 | int nvmet_enable_port(struct nvmet_port *port); |
---|
377 | 461 | void nvmet_disable_port(struct nvmet_port *port); |
---|
378 | 462 | |
---|
379 | 463 | void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port); |
---|
380 | | -void nvmet_referral_disable(struct nvmet_port *port); |
---|
| 464 | +void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port); |
---|
381 | 465 | |
---|
382 | 466 | u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf, |
---|
383 | 467 | size_t len); |
---|
.. | .. |
---|
386 | 470 | u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len); |
---|
387 | 471 | |
---|
388 | 472 | u32 nvmet_get_log_page_len(struct nvme_command *cmd); |
---|
| 473 | +u64 nvmet_get_log_page_offset(struct nvme_command *cmd); |
---|
| 474 | + |
---|
| 475 | +extern struct list_head *nvmet_ports; |
---|
| 476 | +void nvmet_port_disc_changed(struct nvmet_port *port, |
---|
| 477 | + struct nvmet_subsys *subsys); |
---|
| 478 | +void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys, |
---|
| 479 | + struct nvmet_host *host); |
---|
| 480 | +void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, |
---|
| 481 | + u8 event_info, u8 log_page); |
---|
389 | 482 | |
---|
390 | 483 | #define NVMET_QUEUE_SIZE 1024 |
---|
391 | 484 | #define NVMET_NR_QUEUES 128 |
---|
.. | .. |
---|
407 | 500 | #define NVMET_DEFAULT_ANA_GRPID 1 |
---|
408 | 501 | |
---|
409 | 502 | #define NVMET_KAS 10 |
---|
410 | | -#define NVMET_DISC_KATO 120 |
---|
| 503 | +#define NVMET_DISC_KATO_MS 120000 |
---|
411 | 504 | |
---|
412 | 505 | int __init nvmet_init_configfs(void); |
---|
413 | 506 | void __exit nvmet_exit_configfs(void); |
---|
.. | .. |
---|
416 | 509 | void nvmet_exit_discovery(void); |
---|
417 | 510 | |
---|
418 | 511 | extern struct nvmet_subsys *nvmet_disc_subsys; |
---|
419 | | -extern u64 nvmet_genctr; |
---|
420 | 512 | extern struct rw_semaphore nvmet_config_sem; |
---|
421 | 513 | |
---|
422 | 514 | extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1]; |
---|
423 | 515 | extern u64 nvmet_ana_chgcnt; |
---|
424 | 516 | extern struct rw_semaphore nvmet_ana_sem; |
---|
425 | 517 | |
---|
426 | | -bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys, |
---|
427 | | - const char *hostnqn); |
---|
| 518 | +bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn); |
---|
428 | 519 | |
---|
429 | 520 | int nvmet_bdev_ns_enable(struct nvmet_ns *ns); |
---|
430 | 521 | int nvmet_file_ns_enable(struct nvmet_ns *ns); |
---|
.. | .. |
---|
433 | 524 | u16 nvmet_bdev_flush(struct nvmet_req *req); |
---|
434 | 525 | u16 nvmet_file_flush(struct nvmet_req *req); |
---|
435 | 526 | void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid); |
---|
| 527 | +void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns); |
---|
| 528 | +int nvmet_file_ns_revalidate(struct nvmet_ns *ns); |
---|
| 529 | +void nvmet_ns_revalidate(struct nvmet_ns *ns); |
---|
436 | 530 | |
---|
437 | | -static inline u32 nvmet_rw_len(struct nvmet_req *req) |
---|
| 531 | +static inline u32 nvmet_rw_data_len(struct nvmet_req *req) |
---|
438 | 532 | { |
---|
439 | 533 | return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) << |
---|
440 | 534 | req->ns->blksize_shift; |
---|
441 | 535 | } |
---|
| 536 | + |
---|
| 537 | +static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req) |
---|
| 538 | +{ |
---|
| 539 | + if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) |
---|
| 540 | + return 0; |
---|
| 541 | + return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) * |
---|
| 542 | + req->ns->metadata_size; |
---|
| 543 | +} |
---|
| 544 | + |
---|
| 545 | +static inline u32 nvmet_dsm_len(struct nvmet_req *req) |
---|
| 546 | +{ |
---|
| 547 | + return (le32_to_cpu(req->cmd->dsm.nr) + 1) * |
---|
| 548 | + sizeof(struct nvme_dsm_range); |
---|
| 549 | +} |
---|
| 550 | + |
---|
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
/* NVMe host controller backing a passthru subsystem, or NULL if none set. */
static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
/*
 * Stubs so callers do not need their own #ifdefs when passthru support is
 * compiled out: frees/disables are no-ops, command parsers report success
 * without claiming the command, and no backing controller ever exists.
 */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
{
	return NULL;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
---|
| 581 | + |
---|
| 582 | +static inline struct nvme_ctrl * |
---|
| 583 | +nvmet_req_passthru_ctrl(struct nvmet_req *req) |
---|
| 584 | +{ |
---|
| 585 | + return nvmet_passthru_ctrl(req->sq->ctrl->subsys); |
---|
| 586 | +} |
---|
| 587 | + |
---|
| 588 | +u16 errno_to_nvme_status(struct nvmet_req *req, int errno); |
---|
| 589 | + |
---|
| 590 | +/* Convert a 32-bit number to a 16-bit 0's based number */ |
---|
| 591 | +static inline __le16 to0based(u32 a) |
---|
| 592 | +{ |
---|
| 593 | + return cpu_to_le16(max(1U, min(1U << 16, a)) - 1); |
---|
| 594 | +} |
---|
| 595 | + |
---|
| 596 | +static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns) |
---|
| 597 | +{ |
---|
| 598 | + if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) |
---|
| 599 | + return false; |
---|
| 600 | + return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple); |
---|
| 601 | +} |
---|
| 602 | + |
---|
| 603 | +static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect) |
---|
| 604 | +{ |
---|
| 605 | + return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT)); |
---|
| 606 | +} |
---|
| 607 | + |
---|
| 608 | +static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba) |
---|
| 609 | +{ |
---|
| 610 | + return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT); |
---|
| 611 | +} |
---|
| 612 | + |
---|
| 613 | +static inline bool nvmet_use_inline_bvec(struct nvmet_req *req) |
---|
| 614 | +{ |
---|
| 615 | + return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN && |
---|
| 616 | + req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC; |
---|
| 617 | +} |
---|
| 618 | + |
---|
442 | 619 | #endif /* _NVMET_H */ |
---|