.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /* |
---|
2 | 3 | * Network block device - make block devices work over TCP |
---|
3 | 4 | * |
---|
.. | .. |
---|
6 | 7 | * |
---|
7 | 8 | * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz> |
---|
8 | 9 | * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> |
---|
9 | | - * |
---|
10 | | - * This file is released under GPLv2 or later. |
---|
11 | 10 | * |
---|
12 | 11 | * (part of code stolen from loop.c) |
---|
13 | 12 | */ |
---|
.. | .. |
---|
27 | 26 | #include <linux/ioctl.h> |
---|
28 | 27 | #include <linux/mutex.h> |
---|
29 | 28 | #include <linux/compiler.h> |
---|
| 29 | +#include <linux/completion.h> |
---|
30 | 30 | #include <linux/err.h> |
---|
31 | 31 | #include <linux/kernel.h> |
---|
32 | 32 | #include <linux/slab.h> |
---|
.. | .. |
---|
43 | 43 | #include <linux/nbd.h> |
---|
44 | 44 | #include <linux/nbd-netlink.h> |
---|
45 | 45 | #include <net/genetlink.h> |
---|
| 46 | + |
---|
| 47 | +#define CREATE_TRACE_POINTS |
---|
| 48 | +#include <trace/events/nbd.h> |
---|
46 | 49 | |
---|
47 | 50 | static DEFINE_IDR(nbd_index_idr); |
---|
48 | 51 | static DEFINE_MUTEX(nbd_index_mutex); |
---|
.. | .. |
---|
69 | 72 | int index; |
---|
70 | 73 | }; |
---|
71 | 74 | |
---|
72 | | -#define NBD_TIMEDOUT 0 |
---|
| 75 | +#define NBD_RT_TIMEDOUT 0 |
---|
| 76 | +#define NBD_RT_DISCONNECT_REQUESTED 1 |
---|
| 77 | +#define NBD_RT_DISCONNECTED 2 |
---|
| 78 | +#define NBD_RT_HAS_PID_FILE 3 |
---|
| 79 | +#define NBD_RT_HAS_CONFIG_REF 4 |
---|
| 80 | +#define NBD_RT_BOUND 5 |
---|
| 81 | +#define NBD_RT_DISCONNECT_ON_CLOSE 6 |
---|
| 82 | + |
---|
| 83 | +#define NBD_DESTROY_ON_DISCONNECT 0 |
---|
73 | 84 | #define NBD_DISCONNECT_REQUESTED 1 |
---|
74 | | -#define NBD_DISCONNECTED 2 |
---|
75 | | -#define NBD_HAS_PID_FILE 3 |
---|
76 | | -#define NBD_HAS_CONFIG_REF 4 |
---|
77 | | -#define NBD_BOUND 5 |
---|
78 | | -#define NBD_DESTROY_ON_DISCONNECT 6 |
---|
79 | | -#define NBD_DISCONNECT_ON_CLOSE 7 |
---|
80 | 85 | |
---|
81 | 86 | struct nbd_config { |
---|
82 | 87 | u32 flags; |
---|
.. | .. |
---|
111 | 116 | struct list_head list; |
---|
112 | 117 | struct task_struct *task_recv; |
---|
113 | 118 | struct task_struct *task_setup; |
---|
| 119 | + |
---|
| 120 | + struct completion *destroy_complete; |
---|
| 121 | + unsigned long flags; |
---|
114 | 122 | }; |
---|
115 | 123 | |
---|
116 | 124 | #define NBD_CMD_REQUEUED 1 |
---|
.. | .. |
---|
120 | 128 | struct mutex lock; |
---|
121 | 129 | int index; |
---|
122 | 130 | int cookie; |
---|
| 131 | + int retries; |
---|
123 | 132 | blk_status_t status; |
---|
124 | 133 | unsigned long flags; |
---|
125 | 134 | u32 cmd_cookie; |
---|
.. | .. |
---|
220 | 229 | disk->private_data = NULL; |
---|
221 | 230 | put_disk(disk); |
---|
222 | 231 | } |
---|
| 232 | + |
---|
| 233 | + /* |
---|
| 234 | + * Place this in the last just before the nbd is freed to |
---|
| 235 | + * make sure that the disk and the related kobject are also |
---|
| 236 | + * totally removed to avoid duplicate creation of the same |
---|
| 237 | + * one. |
---|
| 238 | + */ |
---|
| 239 | + if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete) |
---|
| 240 | + complete(nbd->destroy_complete); |
---|
| 241 | + |
---|
223 | 242 | kfree(nbd); |
---|
224 | 243 | } |
---|
225 | 244 | |
---|
.. | .. |
---|
235 | 254 | |
---|
236 | 255 | static int nbd_disconnected(struct nbd_config *config) |
---|
237 | 256 | { |
---|
238 | | - return test_bit(NBD_DISCONNECTED, &config->runtime_flags) || |
---|
239 | | - test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); |
---|
| 257 | + return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) || |
---|
| 258 | + test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags); |
---|
240 | 259 | } |
---|
241 | 260 | |
---|
242 | 261 | static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, |
---|
.. | .. |
---|
254 | 273 | if (!nsock->dead) { |
---|
255 | 274 | kernel_sock_shutdown(nsock->sock, SHUT_RDWR); |
---|
256 | 275 | if (atomic_dec_return(&nbd->config->live_connections) == 0) { |
---|
257 | | - if (test_and_clear_bit(NBD_DISCONNECT_REQUESTED, |
---|
| 276 | + if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED, |
---|
258 | 277 | &nbd->config->runtime_flags)) { |
---|
259 | | - set_bit(NBD_DISCONNECTED, |
---|
| 278 | + set_bit(NBD_RT_DISCONNECTED, |
---|
260 | 279 | &nbd->config->runtime_flags); |
---|
261 | 280 | dev_info(nbd_to_dev(nbd), |
---|
262 | 281 | "Disconnected due to user request.\n"); |
---|
.. | .. |
---|
280 | 299 | { |
---|
281 | 300 | struct nbd_config *config = nbd->config; |
---|
282 | 301 | struct block_device *bdev = bdget_disk(nbd->disk, 0); |
---|
| 302 | + sector_t nr_sectors = config->bytesize >> 9; |
---|
283 | 303 | |
---|
284 | 304 | if (config->flags & NBD_FLAG_SEND_TRIM) { |
---|
285 | 305 | nbd->disk->queue->limits.discard_granularity = config->blksize; |
---|
.. | .. |
---|
288 | 308 | } |
---|
289 | 309 | blk_queue_logical_block_size(nbd->disk->queue, config->blksize); |
---|
290 | 310 | blk_queue_physical_block_size(nbd->disk->queue, config->blksize); |
---|
291 | | - set_capacity(nbd->disk, config->bytesize >> 9); |
---|
| 311 | + set_capacity(nbd->disk, nr_sectors); |
---|
292 | 312 | if (bdev) { |
---|
293 | 313 | if (bdev->bd_disk) { |
---|
294 | | - bd_set_size(bdev, config->bytesize); |
---|
| 314 | + bd_set_nr_sectors(bdev, nr_sectors); |
---|
295 | 315 | if (start) |
---|
296 | 316 | set_blocksize(bdev, config->blksize); |
---|
297 | 317 | } else |
---|
298 | | - bdev->bd_invalidated = 1; |
---|
| 318 | + set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); |
---|
299 | 319 | bdput(bdev); |
---|
300 | 320 | } |
---|
301 | 321 | kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); |
---|
.. | .. |
---|
331 | 351 | |
---|
332 | 352 | if (config->num_connections == 0) |
---|
333 | 353 | return; |
---|
334 | | - if (test_and_set_bit(NBD_DISCONNECTED, &config->runtime_flags)) |
---|
| 354 | + if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) |
---|
335 | 355 | return; |
---|
336 | 356 | |
---|
337 | 357 | for (i = 0; i < config->num_connections; i++) { |
---|
.. | .. |
---|
341 | 361 | mutex_unlock(&nsock->tx_lock); |
---|
342 | 362 | } |
---|
343 | 363 | dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); |
---|
| 364 | +} |
---|
| 365 | + |
---|
| 366 | +static u32 req_to_nbd_cmd_type(struct request *req) |
---|
| 367 | +{ |
---|
| 368 | + switch (req_op(req)) { |
---|
| 369 | + case REQ_OP_DISCARD: |
---|
| 370 | + return NBD_CMD_TRIM; |
---|
| 371 | + case REQ_OP_FLUSH: |
---|
| 372 | + return NBD_CMD_FLUSH; |
---|
| 373 | + case REQ_OP_WRITE: |
---|
| 374 | + return NBD_CMD_WRITE; |
---|
| 375 | + case REQ_OP_READ: |
---|
| 376 | + return NBD_CMD_READ; |
---|
| 377 | + default: |
---|
| 378 | + return U32_MAX; |
---|
| 379 | + } |
---|
344 | 380 | } |
---|
345 | 381 | |
---|
346 | 382 | static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, |
---|
.. | .. |
---|
360 | 396 | } |
---|
361 | 397 | config = nbd->config; |
---|
362 | 398 | |
---|
363 | | - if (config->num_connections > 1) { |
---|
| 399 | + if (config->num_connections > 1 || |
---|
| 400 | + (config->num_connections == 1 && nbd->tag_set.timeout)) { |
---|
364 | 401 | dev_err_ratelimited(nbd_to_dev(nbd), |
---|
365 | 402 | "Connection timed out, retrying (%d/%d alive)\n", |
---|
366 | 403 | atomic_read(&config->live_connections), |
---|
367 | 404 | config->num_connections); |
---|
368 | 405 | /* |
---|
369 | 406 | * Hooray we have more connections, requeue this IO, the submit |
---|
370 | | - * path will put it on a real connection. |
---|
| 407 | + * path will put it on a real connection. Or if only one |
---|
| 408 | + * connection is configured, the submit path will wait util |
---|
| 409 | + * a new connection is reconfigured or util dead timeout. |
---|
371 | 410 | */ |
---|
372 | | - if (config->socks && config->num_connections > 1) { |
---|
| 411 | + if (config->socks) { |
---|
373 | 412 | if (cmd->index < config->num_connections) { |
---|
374 | 413 | struct nbd_sock *nsock = |
---|
375 | 414 | config->socks[cmd->index]; |
---|
.. | .. |
---|
389 | 428 | nbd_config_put(nbd); |
---|
390 | 429 | return BLK_EH_DONE; |
---|
391 | 430 | } |
---|
392 | | - } else { |
---|
393 | | - dev_err_ratelimited(nbd_to_dev(nbd), |
---|
394 | | - "Connection timed out\n"); |
---|
395 | 431 | } |
---|
396 | | - set_bit(NBD_TIMEDOUT, &config->runtime_flags); |
---|
| 432 | + |
---|
| 433 | + if (!nbd->tag_set.timeout) { |
---|
| 434 | + /* |
---|
| 435 | + * Userspace sets timeout=0 to disable socket disconnection, |
---|
| 436 | + * so just warn and reset the timer. |
---|
| 437 | + */ |
---|
| 438 | + struct nbd_sock *nsock = config->socks[cmd->index]; |
---|
| 439 | + cmd->retries++; |
---|
| 440 | + dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n", |
---|
| 441 | + req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)), |
---|
| 442 | + (unsigned long long)blk_rq_pos(req) << 9, |
---|
| 443 | + blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries); |
---|
| 444 | + |
---|
| 445 | + mutex_lock(&nsock->tx_lock); |
---|
| 446 | + if (cmd->cookie != nsock->cookie) { |
---|
| 447 | + nbd_requeue_cmd(cmd); |
---|
| 448 | + mutex_unlock(&nsock->tx_lock); |
---|
| 449 | + mutex_unlock(&cmd->lock); |
---|
| 450 | + nbd_config_put(nbd); |
---|
| 451 | + return BLK_EH_DONE; |
---|
| 452 | + } |
---|
| 453 | + mutex_unlock(&nsock->tx_lock); |
---|
| 454 | + mutex_unlock(&cmd->lock); |
---|
| 455 | + nbd_config_put(nbd); |
---|
| 456 | + return BLK_EH_RESET_TIMER; |
---|
| 457 | + } |
---|
| 458 | + |
---|
| 459 | + dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n"); |
---|
| 460 | + set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags); |
---|
397 | 461 | cmd->status = BLK_STS_IOERR; |
---|
398 | 462 | mutex_unlock(&cmd->lock); |
---|
399 | 463 | sock_shutdown(nbd); |
---|
.. | .. |
---|
478 | 542 | u32 nbd_cmd_flags = 0; |
---|
479 | 543 | int sent = nsock->sent, skip = 0; |
---|
480 | 544 | |
---|
481 | | - iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); |
---|
| 545 | + iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request)); |
---|
482 | 546 | |
---|
483 | | - switch (req_op(req)) { |
---|
484 | | - case REQ_OP_DISCARD: |
---|
485 | | - type = NBD_CMD_TRIM; |
---|
486 | | - break; |
---|
487 | | - case REQ_OP_FLUSH: |
---|
488 | | - type = NBD_CMD_FLUSH; |
---|
489 | | - break; |
---|
490 | | - case REQ_OP_WRITE: |
---|
491 | | - type = NBD_CMD_WRITE; |
---|
492 | | - break; |
---|
493 | | - case REQ_OP_READ: |
---|
494 | | - type = NBD_CMD_READ; |
---|
495 | | - break; |
---|
496 | | - default: |
---|
| 547 | + type = req_to_nbd_cmd_type(req); |
---|
| 548 | + if (type == U32_MAX) |
---|
497 | 549 | return -EIO; |
---|
498 | | - } |
---|
499 | 550 | |
---|
500 | 551 | if (rq_data_dir(req) == WRITE && |
---|
501 | 552 | (config->flags & NBD_FLAG_READ_ONLY)) { |
---|
.. | .. |
---|
514 | 565 | if (sent) { |
---|
515 | 566 | if (sent >= sizeof(request)) { |
---|
516 | 567 | skip = sent - sizeof(request); |
---|
| 568 | + |
---|
| 569 | + /* initialize handle for tracing purposes */ |
---|
| 570 | + handle = nbd_cmd_handle(cmd); |
---|
| 571 | + |
---|
517 | 572 | goto send_pages; |
---|
518 | 573 | } |
---|
519 | 574 | iov_iter_advance(&from, sent); |
---|
.. | .. |
---|
522 | 577 | } |
---|
523 | 578 | cmd->index = index; |
---|
524 | 579 | cmd->cookie = nsock->cookie; |
---|
| 580 | + cmd->retries = 0; |
---|
525 | 581 | request.type = htonl(type | nbd_cmd_flags); |
---|
526 | 582 | if (type != NBD_CMD_FLUSH) { |
---|
527 | 583 | request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); |
---|
.. | .. |
---|
530 | 586 | handle = nbd_cmd_handle(cmd); |
---|
531 | 587 | memcpy(request.handle, &handle, sizeof(handle)); |
---|
532 | 588 | |
---|
| 589 | + trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd)); |
---|
| 590 | + |
---|
533 | 591 | dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", |
---|
534 | 592 | req, nbdcmd_to_ascii(type), |
---|
535 | 593 | (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); |
---|
536 | 594 | result = sock_xmit(nbd, index, 1, &from, |
---|
537 | 595 | (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); |
---|
| 596 | + trace_nbd_header_sent(req, handle); |
---|
538 | 597 | if (result <= 0) { |
---|
539 | 598 | if (was_interrupted(result)) { |
---|
540 | 599 | /* If we havne't sent anything we can just return BUSY, |
---|
.. | .. |
---|
569 | 628 | |
---|
570 | 629 | dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", |
---|
571 | 630 | req, bvec.bv_len); |
---|
572 | | - iov_iter_bvec(&from, ITER_BVEC | WRITE, |
---|
573 | | - &bvec, 1, bvec.bv_len); |
---|
| 631 | + iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len); |
---|
574 | 632 | if (skip) { |
---|
575 | 633 | if (skip >= iov_iter_count(&from)) { |
---|
576 | 634 | skip -= iov_iter_count(&from); |
---|
.. | .. |
---|
608 | 666 | bio = next; |
---|
609 | 667 | } |
---|
610 | 668 | out: |
---|
| 669 | + trace_nbd_payload_sent(req, handle); |
---|
611 | 670 | nsock->pending = NULL; |
---|
612 | 671 | nsock->sent = 0; |
---|
613 | 672 | return 0; |
---|
.. | .. |
---|
629 | 688 | int ret = 0; |
---|
630 | 689 | |
---|
631 | 690 | reply.magic = 0; |
---|
632 | | - iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); |
---|
| 691 | + iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply)); |
---|
633 | 692 | result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); |
---|
634 | 693 | if (result <= 0) { |
---|
635 | 694 | if (!nbd_disconnected(config)) |
---|
.. | .. |
---|
655 | 714 | tag, req); |
---|
656 | 715 | return ERR_PTR(-ENOENT); |
---|
657 | 716 | } |
---|
| 717 | + trace_nbd_header_received(req, handle); |
---|
658 | 718 | cmd = blk_mq_rq_to_pdu(req); |
---|
659 | 719 | |
---|
660 | 720 | mutex_lock(&cmd->lock); |
---|
.. | .. |
---|
689 | 749 | struct bio_vec bvec; |
---|
690 | 750 | |
---|
691 | 751 | rq_for_each_segment(bvec, req, iter) { |
---|
692 | | - iov_iter_bvec(&to, ITER_BVEC | READ, |
---|
693 | | - &bvec, 1, bvec.bv_len); |
---|
| 752 | + iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len); |
---|
694 | 753 | result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); |
---|
695 | 754 | if (result <= 0) { |
---|
696 | 755 | dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", |
---|
697 | 756 | result); |
---|
698 | 757 | /* |
---|
699 | | - * If we've disconnected or we only have 1 |
---|
700 | | - * connection then we need to make sure we |
---|
| 758 | + * If we've disconnected, we need to make sure we |
---|
701 | 759 | * complete this request, otherwise error out |
---|
702 | 760 | * and let the timeout stuff handle resubmitting |
---|
703 | 761 | * this request onto another connection. |
---|
704 | 762 | */ |
---|
705 | | - if (nbd_disconnected(config) || |
---|
706 | | - config->num_connections <= 1) { |
---|
| 763 | + if (nbd_disconnected(config)) { |
---|
707 | 764 | cmd->status = BLK_STS_IOERR; |
---|
708 | 765 | goto out; |
---|
709 | 766 | } |
---|
.. | .. |
---|
715 | 772 | } |
---|
716 | 773 | } |
---|
717 | 774 | out: |
---|
| 775 | + trace_nbd_payload_received(req, handle); |
---|
718 | 776 | mutex_unlock(&cmd->lock); |
---|
719 | 777 | return ret ? ERR_PTR(ret) : cmd; |
---|
720 | 778 | } |
---|
.. | .. |
---|
727 | 785 | struct nbd_device *nbd = args->nbd; |
---|
728 | 786 | struct nbd_config *config = nbd->config; |
---|
729 | 787 | struct nbd_cmd *cmd; |
---|
| 788 | + struct request *rq; |
---|
730 | 789 | |
---|
731 | 790 | while (1) { |
---|
732 | 791 | cmd = nbd_read_stat(nbd, args->index); |
---|
.. | .. |
---|
739 | 798 | break; |
---|
740 | 799 | } |
---|
741 | 800 | |
---|
742 | | - blk_mq_complete_request(blk_mq_rq_from_pdu(cmd)); |
---|
| 801 | + rq = blk_mq_rq_from_pdu(cmd); |
---|
| 802 | + if (likely(!blk_should_fake_timeout(rq->q))) |
---|
| 803 | + blk_mq_complete_request(rq); |
---|
743 | 804 | } |
---|
744 | 805 | nbd_config_put(nbd); |
---|
745 | 806 | atomic_dec(&config->recv_threads); |
---|
.. | .. |
---|
747 | 808 | kfree(args); |
---|
748 | 809 | } |
---|
749 | 810 | |
---|
750 | | -static void nbd_clear_req(struct request *req, void *data, bool reserved) |
---|
| 811 | +static bool nbd_clear_req(struct request *req, void *data, bool reserved) |
---|
751 | 812 | { |
---|
752 | 813 | struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); |
---|
| 814 | + |
---|
| 815 | + /* don't abort one completed request */ |
---|
| 816 | + if (blk_mq_request_completed(req)) |
---|
| 817 | + return true; |
---|
753 | 818 | |
---|
754 | 819 | mutex_lock(&cmd->lock); |
---|
755 | 820 | cmd->status = BLK_STS_IOERR; |
---|
756 | 821 | mutex_unlock(&cmd->lock); |
---|
757 | 822 | |
---|
758 | 823 | blk_mq_complete_request(req); |
---|
| 824 | + return true; |
---|
759 | 825 | } |
---|
760 | 826 | |
---|
761 | 827 | static void nbd_clear_que(struct nbd_device *nbd) |
---|
.. | .. |
---|
773 | 839 | struct nbd_sock *nsock = config->socks[index]; |
---|
774 | 840 | int fallback = nsock->fallback_index; |
---|
775 | 841 | |
---|
776 | | - if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) |
---|
| 842 | + if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) |
---|
777 | 843 | return new_index; |
---|
778 | 844 | |
---|
779 | 845 | if (config->num_connections <= 1) { |
---|
780 | 846 | dev_err_ratelimited(disk_to_dev(nbd->disk), |
---|
781 | | - "Attempted send on invalid socket\n"); |
---|
| 847 | + "Dead connection, failed to find a fallback\n"); |
---|
782 | 848 | return new_index; |
---|
783 | 849 | } |
---|
784 | 850 | |
---|
.. | .. |
---|
814 | 880 | struct nbd_config *config = nbd->config; |
---|
815 | 881 | if (!config->dead_conn_timeout) |
---|
816 | 882 | return 0; |
---|
817 | | - if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) |
---|
| 883 | + |
---|
| 884 | + if (!wait_event_timeout(config->conn_wait, |
---|
| 885 | + test_bit(NBD_RT_DISCONNECTED, |
---|
| 886 | + &config->runtime_flags) || |
---|
| 887 | + atomic_read(&config->live_connections) > 0, |
---|
| 888 | + config->dead_conn_timeout)) |
---|
818 | 889 | return 0; |
---|
819 | | - return wait_event_timeout(config->conn_wait, |
---|
820 | | - atomic_read(&config->live_connections) > 0, |
---|
821 | | - config->dead_conn_timeout) > 0; |
---|
| 890 | + |
---|
| 891 | + return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags); |
---|
822 | 892 | } |
---|
823 | 893 | |
---|
824 | 894 | static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) |
---|
.. | .. |
---|
973 | 1043 | blk_mq_freeze_queue(nbd->disk->queue); |
---|
974 | 1044 | |
---|
975 | 1045 | if (!netlink && !nbd->task_setup && |
---|
976 | | - !test_bit(NBD_BOUND, &config->runtime_flags)) |
---|
| 1046 | + !test_bit(NBD_RT_BOUND, &config->runtime_flags)) |
---|
977 | 1047 | nbd->task_setup = current; |
---|
978 | 1048 | |
---|
979 | 1049 | if (!netlink && |
---|
980 | 1050 | (nbd->task_setup != current || |
---|
981 | | - test_bit(NBD_BOUND, &config->runtime_flags))) { |
---|
| 1051 | + test_bit(NBD_RT_BOUND, &config->runtime_flags))) { |
---|
982 | 1052 | dev_err(disk_to_dev(nbd->disk), |
---|
983 | 1053 | "Device being setup by another task"); |
---|
984 | 1054 | err = -EBUSY; |
---|
.. | .. |
---|
1065 | 1135 | mutex_unlock(&nsock->tx_lock); |
---|
1066 | 1136 | sockfd_put(old); |
---|
1067 | 1137 | |
---|
1068 | | - clear_bit(NBD_DISCONNECTED, &config->runtime_flags); |
---|
| 1138 | + clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags); |
---|
1069 | 1139 | |
---|
1070 | 1140 | /* We take the tx_mutex in an error path in the recv_work, so we |
---|
1071 | 1141 | * need to queue_work outside of the tx_mutex. |
---|
.. | .. |
---|
1085 | 1155 | { |
---|
1086 | 1156 | if (bdev->bd_openers > 1) |
---|
1087 | 1157 | return; |
---|
1088 | | - bd_set_size(bdev, 0); |
---|
| 1158 | + bd_set_nr_sectors(bdev, 0); |
---|
1089 | 1159 | } |
---|
1090 | 1160 | |
---|
1091 | 1161 | static void nbd_parse_flags(struct nbd_device *nbd) |
---|
.. | .. |
---|
1121 | 1191 | for (i = 0; i < config->num_connections; i++) { |
---|
1122 | 1192 | struct nbd_sock *nsock = config->socks[i]; |
---|
1123 | 1193 | |
---|
1124 | | - iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); |
---|
| 1194 | + iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request)); |
---|
1125 | 1195 | mutex_lock(&nsock->tx_lock); |
---|
1126 | 1196 | ret = sock_xmit(nbd, i, 1, &from, 0, NULL); |
---|
1127 | 1197 | if (ret <= 0) |
---|
.. | .. |
---|
1136 | 1206 | struct nbd_config *config = nbd->config; |
---|
1137 | 1207 | |
---|
1138 | 1208 | dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); |
---|
1139 | | - set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); |
---|
| 1209 | + set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags); |
---|
| 1210 | + set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags); |
---|
1140 | 1211 | send_disconnects(nbd); |
---|
1141 | 1212 | return 0; |
---|
1142 | 1213 | } |
---|
.. | .. |
---|
1155 | 1226 | struct nbd_config *config = nbd->config; |
---|
1156 | 1227 | nbd_dev_dbg_close(nbd); |
---|
1157 | 1228 | nbd_size_clear(nbd); |
---|
1158 | | - if (test_and_clear_bit(NBD_HAS_PID_FILE, |
---|
| 1229 | + if (test_and_clear_bit(NBD_RT_HAS_PID_FILE, |
---|
1159 | 1230 | &config->runtime_flags)) |
---|
1160 | 1231 | device_remove_file(disk_to_dev(nbd->disk), &pid_attr); |
---|
1161 | 1232 | nbd->task_recv = NULL; |
---|
.. | .. |
---|
1221 | 1292 | dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); |
---|
1222 | 1293 | return error; |
---|
1223 | 1294 | } |
---|
1224 | | - set_bit(NBD_HAS_PID_FILE, &config->runtime_flags); |
---|
| 1295 | + set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags); |
---|
1225 | 1296 | |
---|
1226 | 1297 | nbd_dev_dbg_init(nbd); |
---|
1227 | 1298 | for (i = 0; i < num_connections; i++) { |
---|
.. | .. |
---|
1267 | 1338 | return ret; |
---|
1268 | 1339 | |
---|
1269 | 1340 | if (max_part) |
---|
1270 | | - bdev->bd_invalidated = 1; |
---|
| 1341 | + set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); |
---|
1271 | 1342 | mutex_unlock(&nbd->config_lock); |
---|
1272 | 1343 | ret = wait_event_interruptible(config->recv_wq, |
---|
1273 | 1344 | atomic_read(&config->recv_threads) == 0); |
---|
1274 | | - if (ret) |
---|
| 1345 | + if (ret) { |
---|
1275 | 1346 | sock_shutdown(nbd); |
---|
1276 | | - flush_workqueue(nbd->recv_workq); |
---|
| 1347 | + nbd_clear_que(nbd); |
---|
| 1348 | + } |
---|
1277 | 1349 | |
---|
| 1350 | + flush_workqueue(nbd->recv_workq); |
---|
1278 | 1351 | mutex_lock(&nbd->config_lock); |
---|
1279 | 1352 | nbd_bdev_reset(bdev); |
---|
1280 | 1353 | /* user requested, ignore socket errors */ |
---|
1281 | | - if (test_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags)) |
---|
| 1354 | + if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags)) |
---|
1282 | 1355 | ret = 0; |
---|
1283 | | - if (test_bit(NBD_TIMEDOUT, &config->runtime_flags)) |
---|
| 1356 | + if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags)) |
---|
1284 | 1357 | ret = -ETIMEDOUT; |
---|
1285 | 1358 | return ret; |
---|
1286 | 1359 | } |
---|
.. | .. |
---|
1288 | 1361 | static void nbd_clear_sock_ioctl(struct nbd_device *nbd, |
---|
1289 | 1362 | struct block_device *bdev) |
---|
1290 | 1363 | { |
---|
1291 | | - sock_shutdown(nbd); |
---|
| 1364 | + nbd_clear_sock(nbd); |
---|
1292 | 1365 | __invalidate_device(bdev, true); |
---|
1293 | 1366 | nbd_bdev_reset(bdev); |
---|
1294 | | - if (test_and_clear_bit(NBD_HAS_CONFIG_REF, |
---|
| 1367 | + if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, |
---|
1295 | 1368 | &nbd->config->runtime_flags)) |
---|
1296 | 1369 | nbd_config_put(nbd); |
---|
1297 | 1370 | } |
---|
.. | .. |
---|
1302 | 1375 | blksize > PAGE_SIZE) |
---|
1303 | 1376 | return false; |
---|
1304 | 1377 | return true; |
---|
| 1378 | +} |
---|
| 1379 | + |
---|
| 1380 | +static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout) |
---|
| 1381 | +{ |
---|
| 1382 | + nbd->tag_set.timeout = timeout * HZ; |
---|
| 1383 | + if (timeout) |
---|
| 1384 | + blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); |
---|
| 1385 | + else |
---|
| 1386 | + blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ); |
---|
1305 | 1387 | } |
---|
1306 | 1388 | |
---|
1307 | 1389 | /* Must be called with config_lock held */ |
---|
.. | .. |
---|
1334 | 1416 | nbd_size_set(nbd, config->blksize, arg); |
---|
1335 | 1417 | return 0; |
---|
1336 | 1418 | case NBD_SET_TIMEOUT: |
---|
1337 | | - if (arg) { |
---|
1338 | | - nbd->tag_set.timeout = arg * HZ; |
---|
1339 | | - blk_queue_rq_timeout(nbd->disk->queue, arg * HZ); |
---|
1340 | | - } |
---|
| 1419 | + nbd_set_cmd_timeout(nbd, arg); |
---|
1341 | 1420 | return 0; |
---|
1342 | 1421 | |
---|
1343 | 1422 | case NBD_SET_FLAGS: |
---|
.. | .. |
---|
1382 | 1461 | /* Don't allow ioctl operations on a nbd device that was created with |
---|
1383 | 1462 | * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine. |
---|
1384 | 1463 | */ |
---|
1385 | | - if (!test_bit(NBD_BOUND, &config->runtime_flags) || |
---|
| 1464 | + if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) || |
---|
1386 | 1465 | (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK)) |
---|
1387 | 1466 | error = __nbd_ioctl(bdev, nbd, cmd, arg); |
---|
1388 | 1467 | else |
---|
.. | .. |
---|
1395 | 1474 | { |
---|
1396 | 1475 | struct nbd_config *config; |
---|
1397 | 1476 | |
---|
| 1477 | + if (!try_module_get(THIS_MODULE)) |
---|
| 1478 | + return ERR_PTR(-ENODEV); |
---|
| 1479 | + |
---|
1398 | 1480 | config = kzalloc(sizeof(struct nbd_config), GFP_NOFS); |
---|
1399 | | - if (!config) |
---|
1400 | | - return NULL; |
---|
| 1481 | + if (!config) { |
---|
| 1482 | + module_put(THIS_MODULE); |
---|
| 1483 | + return ERR_PTR(-ENOMEM); |
---|
| 1484 | + } |
---|
| 1485 | + |
---|
1401 | 1486 | atomic_set(&config->recv_threads, 0); |
---|
1402 | 1487 | init_waitqueue_head(&config->recv_wq); |
---|
1403 | 1488 | init_waitqueue_head(&config->conn_wait); |
---|
1404 | 1489 | config->blksize = NBD_DEF_BLKSIZE; |
---|
1405 | 1490 | atomic_set(&config->live_connections, 0); |
---|
1406 | | - try_module_get(THIS_MODULE); |
---|
1407 | 1491 | return config; |
---|
1408 | 1492 | } |
---|
1409 | 1493 | |
---|
.. | .. |
---|
1430 | 1514 | mutex_unlock(&nbd->config_lock); |
---|
1431 | 1515 | goto out; |
---|
1432 | 1516 | } |
---|
1433 | | - config = nbd->config = nbd_alloc_config(); |
---|
1434 | | - if (!config) { |
---|
1435 | | - ret = -ENOMEM; |
---|
| 1517 | + config = nbd_alloc_config(); |
---|
| 1518 | + if (IS_ERR(config)) { |
---|
| 1519 | + ret = PTR_ERR(config); |
---|
1436 | 1520 | mutex_unlock(&nbd->config_lock); |
---|
1437 | 1521 | goto out; |
---|
1438 | 1522 | } |
---|
| 1523 | + nbd->config = config; |
---|
1439 | 1524 | refcount_set(&nbd->config_refs, 1); |
---|
1440 | 1525 | refcount_inc(&nbd->refs); |
---|
1441 | 1526 | mutex_unlock(&nbd->config_lock); |
---|
1442 | | - bdev->bd_invalidated = 1; |
---|
| 1527 | + set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); |
---|
1443 | 1528 | } else if (nbd_disconnected(nbd->config)) { |
---|
1444 | | - bdev->bd_invalidated = 1; |
---|
| 1529 | + set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); |
---|
1445 | 1530 | } |
---|
1446 | 1531 | out: |
---|
1447 | 1532 | mutex_unlock(&nbd_index_mutex); |
---|
.. | .. |
---|
1453 | 1538 | struct nbd_device *nbd = disk->private_data; |
---|
1454 | 1539 | struct block_device *bdev = bdget_disk(disk, 0); |
---|
1455 | 1540 | |
---|
1456 | | - if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && |
---|
| 1541 | + if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && |
---|
1457 | 1542 | bdev->bd_openers == 0) |
---|
1458 | 1543 | nbd_disconnect_and_put(nbd); |
---|
1459 | 1544 | bdput(bdev); |
---|
.. | .. |
---|
1539 | 1624 | return -EIO; |
---|
1540 | 1625 | |
---|
1541 | 1626 | dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); |
---|
1542 | | - if (!dir) { |
---|
| 1627 | + if (IS_ERR(dir)) { |
---|
1543 | 1628 | dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", |
---|
1544 | 1629 | nbd_name(nbd)); |
---|
1545 | 1630 | return -EIO; |
---|
.. | .. |
---|
1565 | 1650 | struct dentry *dbg_dir; |
---|
1566 | 1651 | |
---|
1567 | 1652 | dbg_dir = debugfs_create_dir("nbd", NULL); |
---|
1568 | | - if (!dbg_dir) |
---|
| 1653 | + if (IS_ERR(dbg_dir)) |
---|
1569 | 1654 | return -EIO; |
---|
1570 | 1655 | |
---|
1571 | 1656 | nbd_dbg_dir = dbg_dir; |
---|
.. | .. |
---|
1638 | 1723 | if (err == -ENOSPC) |
---|
1639 | 1724 | err = -EEXIST; |
---|
1640 | 1725 | } else { |
---|
1641 | | - err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); |
---|
| 1726 | + err = idr_alloc(&nbd_index_idr, nbd, 0, |
---|
| 1727 | + (MINORMASK >> part_shift) + 1, GFP_KERNEL); |
---|
1642 | 1728 | if (err >= 0) |
---|
1643 | 1729 | index = err; |
---|
1644 | 1730 | } |
---|
.. | .. |
---|
1653 | 1739 | nbd->tag_set.numa_node = NUMA_NO_NODE; |
---|
1654 | 1740 | nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); |
---|
1655 | 1741 | nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | |
---|
1656 | | - BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING; |
---|
| 1742 | + BLK_MQ_F_BLOCKING; |
---|
1657 | 1743 | nbd->tag_set.driver_data = nbd; |
---|
| 1744 | + nbd->destroy_complete = NULL; |
---|
1658 | 1745 | |
---|
1659 | 1746 | err = blk_mq_alloc_tag_set(&nbd->tag_set); |
---|
1660 | 1747 | if (err) |
---|
.. | .. |
---|
1743 | 1830 | [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 }, |
---|
1744 | 1831 | }; |
---|
1745 | 1832 | |
---|
| 1833 | +static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd) |
---|
| 1834 | +{ |
---|
| 1835 | + struct nbd_config *config = nbd->config; |
---|
| 1836 | + u64 bsize = config->blksize; |
---|
| 1837 | + u64 bytes = config->bytesize; |
---|
| 1838 | + |
---|
| 1839 | + if (info->attrs[NBD_ATTR_SIZE_BYTES]) |
---|
| 1840 | + bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]); |
---|
| 1841 | + |
---|
| 1842 | + if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) { |
---|
| 1843 | + bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]); |
---|
| 1844 | + if (!bsize) |
---|
| 1845 | + bsize = NBD_DEF_BLKSIZE; |
---|
| 1846 | + if (!nbd_is_valid_blksize(bsize)) { |
---|
| 1847 | + printk(KERN_ERR "Invalid block size %llu\n", bsize); |
---|
| 1848 | + return -EINVAL; |
---|
| 1849 | + } |
---|
| 1850 | + } |
---|
| 1851 | + |
---|
| 1852 | + if (bytes != config->bytesize || bsize != config->blksize) |
---|
| 1853 | + nbd_size_set(nbd, bsize, div64_u64(bytes, bsize)); |
---|
| 1854 | + return 0; |
---|
| 1855 | +} |
---|
| 1856 | + |
---|
1746 | 1857 | static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) |
---|
1747 | 1858 | { |
---|
| 1859 | + DECLARE_COMPLETION_ONSTACK(destroy_complete); |
---|
1748 | 1860 | struct nbd_device *nbd = NULL; |
---|
1749 | 1861 | struct nbd_config *config; |
---|
1750 | 1862 | int index = -1; |
---|
.. | .. |
---|
1754 | 1866 | if (!netlink_capable(skb, CAP_SYS_ADMIN)) |
---|
1755 | 1867 | return -EPERM; |
---|
1756 | 1868 | |
---|
1757 | | - if (info->attrs[NBD_ATTR_INDEX]) |
---|
| 1869 | + if (info->attrs[NBD_ATTR_INDEX]) { |
---|
1758 | 1870 | index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); |
---|
| 1871 | + |
---|
| 1872 | + /* |
---|
| 1873 | + * Too big first_minor can cause duplicate creation of |
---|
| 1874 | + * sysfs files/links, since index << part_shift might overflow, or |
---|
| 1875 | + * MKDEV() expect that the max bits of first_minor is 20. |
---|
| 1876 | + */ |
---|
| 1877 | + if (index < 0 || index > MINORMASK >> part_shift) { |
---|
| 1878 | + printk(KERN_ERR "nbd: illegal input index %d\n", index); |
---|
| 1879 | + return -EINVAL; |
---|
| 1880 | + } |
---|
| 1881 | + } |
---|
1759 | 1882 | if (!info->attrs[NBD_ATTR_SOCKETS]) { |
---|
1760 | 1883 | printk(KERN_ERR "nbd: must specify at least one socket\n"); |
---|
1761 | 1884 | return -EINVAL; |
---|
.. | .. |
---|
1796 | 1919 | mutex_unlock(&nbd_index_mutex); |
---|
1797 | 1920 | return -EINVAL; |
---|
1798 | 1921 | } |
---|
| 1922 | + |
---|
| 1923 | + if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && |
---|
| 1924 | + test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) { |
---|
| 1925 | + nbd->destroy_complete = &destroy_complete; |
---|
| 1926 | + mutex_unlock(&nbd_index_mutex); |
---|
| 1927 | + |
---|
| 1928 | + /* Wait until the nbd stuff is totally destroyed */ |
---|
| 1929 | + wait_for_completion(&destroy_complete); |
---|
| 1930 | + goto again; |
---|
| 1931 | + } |
---|
| 1932 | + |
---|
1799 | 1933 | if (!refcount_inc_not_zero(&nbd->refs)) { |
---|
1800 | 1934 | mutex_unlock(&nbd_index_mutex); |
---|
1801 | 1935 | if (index == -1) |
---|
.. | .. |
---|
1820 | 1954 | nbd_put(nbd); |
---|
1821 | 1955 | return -EINVAL; |
---|
1822 | 1956 | } |
---|
1823 | | - config = nbd->config = nbd_alloc_config(); |
---|
1824 | | - if (!nbd->config) { |
---|
| 1957 | + config = nbd_alloc_config(); |
---|
| 1958 | + if (IS_ERR(config)) { |
---|
1825 | 1959 | mutex_unlock(&nbd->config_lock); |
---|
1826 | 1960 | nbd_put(nbd); |
---|
1827 | 1961 | printk(KERN_ERR "nbd: couldn't allocate config\n"); |
---|
1828 | | - return -ENOMEM; |
---|
| 1962 | + return PTR_ERR(config); |
---|
1829 | 1963 | } |
---|
| 1964 | + nbd->config = config; |
---|
1830 | 1965 | refcount_set(&nbd->config_refs, 1); |
---|
1831 | | - set_bit(NBD_BOUND, &config->runtime_flags); |
---|
| 1966 | + set_bit(NBD_RT_BOUND, &config->runtime_flags); |
---|
1832 | 1967 | |
---|
1833 | | - if (info->attrs[NBD_ATTR_SIZE_BYTES]) { |
---|
1834 | | - u64 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]); |
---|
1835 | | - nbd_size_set(nbd, config->blksize, |
---|
1836 | | - div64_u64(bytes, config->blksize)); |
---|
1837 | | - } |
---|
1838 | | - if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) { |
---|
1839 | | - u64 bsize = |
---|
1840 | | - nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]); |
---|
1841 | | - if (!bsize) |
---|
1842 | | - bsize = NBD_DEF_BLKSIZE; |
---|
1843 | | - if (!nbd_is_valid_blksize(bsize)) { |
---|
1844 | | - ret = -EINVAL; |
---|
1845 | | - goto out; |
---|
1846 | | - } |
---|
1847 | | - nbd_size_set(nbd, bsize, div64_u64(config->bytesize, bsize)); |
---|
1848 | | - } |
---|
1849 | | - if (info->attrs[NBD_ATTR_TIMEOUT]) { |
---|
1850 | | - u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]); |
---|
1851 | | - nbd->tag_set.timeout = timeout * HZ; |
---|
1852 | | - blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); |
---|
1853 | | - } |
---|
| 1968 | + ret = nbd_genl_size_set(info, nbd); |
---|
| 1969 | + if (ret) |
---|
| 1970 | + goto out; |
---|
| 1971 | + |
---|
| 1972 | + if (info->attrs[NBD_ATTR_TIMEOUT]) |
---|
| 1973 | + nbd_set_cmd_timeout(nbd, |
---|
| 1974 | + nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT])); |
---|
1854 | 1975 | if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) { |
---|
1855 | 1976 | config->dead_conn_timeout = |
---|
1856 | 1977 | nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]); |
---|
.. | .. |
---|
1862 | 1983 | if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { |
---|
1863 | 1984 | u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); |
---|
1864 | 1985 | if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { |
---|
1865 | | - set_bit(NBD_DESTROY_ON_DISCONNECT, |
---|
1866 | | - &config->runtime_flags); |
---|
1867 | | - put_dev = true; |
---|
| 1986 | + /* |
---|
| 1987 | + * We have 1 ref to keep the device around, and then 1 |
---|
| 1988 | + * ref for our current operation here, which will be |
---|
| 1989 | + * inherited by the config. If we already have |
---|
| 1990 | + * DESTROY_ON_DISCONNECT set then we know we don't have |
---|
| 1991 | + * that extra ref already held so we don't need the |
---|
| 1992 | + * put_dev. |
---|
| 1993 | + */ |
---|
| 1994 | + if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT, |
---|
| 1995 | + &nbd->flags)) |
---|
| 1996 | + put_dev = true; |
---|
| 1997 | + } else { |
---|
| 1998 | + if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT, |
---|
| 1999 | + &nbd->flags)) |
---|
| 2000 | + refcount_inc(&nbd->refs); |
---|
1868 | 2001 | } |
---|
1869 | 2002 | if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { |
---|
1870 | | - set_bit(NBD_DISCONNECT_ON_CLOSE, |
---|
| 2003 | + set_bit(NBD_RT_DISCONNECT_ON_CLOSE, |
---|
1871 | 2004 | &config->runtime_flags); |
---|
1872 | 2005 | } |
---|
1873 | 2006 | } |
---|
.. | .. |
---|
1885 | 2018 | ret = -EINVAL; |
---|
1886 | 2019 | goto out; |
---|
1887 | 2020 | } |
---|
1888 | | - ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr, |
---|
1889 | | - nbd_sock_policy, info->extack); |
---|
| 2021 | + ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX, |
---|
| 2022 | + attr, |
---|
| 2023 | + nbd_sock_policy, |
---|
| 2024 | + info->extack); |
---|
1890 | 2025 | if (ret != 0) { |
---|
1891 | 2026 | printk(KERN_ERR "nbd: error processing sock list\n"); |
---|
1892 | 2027 | ret = -EINVAL; |
---|
.. | .. |
---|
1904 | 2039 | out: |
---|
1905 | 2040 | mutex_unlock(&nbd->config_lock); |
---|
1906 | 2041 | if (!ret) { |
---|
1907 | | - set_bit(NBD_HAS_CONFIG_REF, &config->runtime_flags); |
---|
| 2042 | + set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags); |
---|
1908 | 2043 | refcount_inc(&nbd->config_refs); |
---|
1909 | 2044 | nbd_connect_reply(info, nbd->index); |
---|
1910 | 2045 | } |
---|
.. | .. |
---|
1918 | 2053 | { |
---|
1919 | 2054 | mutex_lock(&nbd->config_lock); |
---|
1920 | 2055 | nbd_disconnect(nbd); |
---|
1921 | | - nbd_clear_sock(nbd); |
---|
1922 | | - mutex_unlock(&nbd->config_lock); |
---|
| 2056 | + sock_shutdown(nbd); |
---|
| 2057 | + wake_up(&nbd->config->conn_wait); |
---|
1923 | 2058 | /* |
---|
1924 | 2059 | * Make sure recv thread has finished, so it does not drop the last |
---|
1925 | 2060 | * config ref and try to destroy the workqueue from inside the work |
---|
1926 | | - * queue. |
---|
| 2061 | + * queue. And this also ensures that we can safely call nbd_clear_que() |
---|
| 2062 | + * to cancel the inflight I/Os. |
---|
1927 | 2063 | */ |
---|
1928 | | - flush_workqueue(nbd->recv_workq); |
---|
1929 | | - if (test_and_clear_bit(NBD_HAS_CONFIG_REF, |
---|
| 2064 | + if (nbd->recv_workq) |
---|
| 2065 | + flush_workqueue(nbd->recv_workq); |
---|
| 2066 | + nbd_clear_que(nbd); |
---|
| 2067 | + nbd->task_setup = NULL; |
---|
| 2068 | + mutex_unlock(&nbd->config_lock); |
---|
| 2069 | + |
---|
| 2070 | + if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, |
---|
1930 | 2071 | &nbd->config->runtime_flags)) |
---|
1931 | 2072 | nbd_config_put(nbd); |
---|
1932 | 2073 | } |
---|
.. | .. |
---|
2010 | 2151 | |
---|
2011 | 2152 | mutex_lock(&nbd->config_lock); |
---|
2012 | 2153 | config = nbd->config; |
---|
2013 | | - if (!test_bit(NBD_BOUND, &config->runtime_flags) || |
---|
| 2154 | + if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) || |
---|
2014 | 2155 | !nbd->task_recv) { |
---|
2015 | 2156 | dev_err(nbd_to_dev(nbd), |
---|
2016 | 2157 | "not configured, cannot reconfigure\n"); |
---|
.. | .. |
---|
2018 | 2159 | goto out; |
---|
2019 | 2160 | } |
---|
2020 | 2161 | |
---|
2021 | | - if (info->attrs[NBD_ATTR_TIMEOUT]) { |
---|
2022 | | - u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]); |
---|
2023 | | - nbd->tag_set.timeout = timeout * HZ; |
---|
2024 | | - blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); |
---|
2025 | | - } |
---|
| 2162 | + ret = nbd_genl_size_set(info, nbd); |
---|
| 2163 | + if (ret) |
---|
| 2164 | + goto out; |
---|
| 2165 | + |
---|
| 2166 | + if (info->attrs[NBD_ATTR_TIMEOUT]) |
---|
| 2167 | + nbd_set_cmd_timeout(nbd, |
---|
| 2168 | + nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT])); |
---|
2026 | 2169 | if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) { |
---|
2027 | 2170 | config->dead_conn_timeout = |
---|
2028 | 2171 | nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]); |
---|
.. | .. |
---|
2032 | 2175 | u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); |
---|
2033 | 2176 | if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { |
---|
2034 | 2177 | if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT, |
---|
2035 | | - &config->runtime_flags)) |
---|
| 2178 | + &nbd->flags)) |
---|
2036 | 2179 | put_dev = true; |
---|
2037 | 2180 | } else { |
---|
2038 | 2181 | if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT, |
---|
2039 | | - &config->runtime_flags)) |
---|
| 2182 | + &nbd->flags)) |
---|
2040 | 2183 | refcount_inc(&nbd->refs); |
---|
2041 | 2184 | } |
---|
2042 | 2185 | |
---|
2043 | 2186 | if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { |
---|
2044 | | - set_bit(NBD_DISCONNECT_ON_CLOSE, |
---|
| 2187 | + set_bit(NBD_RT_DISCONNECT_ON_CLOSE, |
---|
2045 | 2188 | &config->runtime_flags); |
---|
2046 | 2189 | } else { |
---|
2047 | | - clear_bit(NBD_DISCONNECT_ON_CLOSE, |
---|
| 2190 | + clear_bit(NBD_RT_DISCONNECT_ON_CLOSE, |
---|
2048 | 2191 | &config->runtime_flags); |
---|
2049 | 2192 | } |
---|
2050 | 2193 | } |
---|
.. | .. |
---|
2062 | 2205 | ret = -EINVAL; |
---|
2063 | 2206 | goto out; |
---|
2064 | 2207 | } |
---|
2065 | | - ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr, |
---|
2066 | | - nbd_sock_policy, info->extack); |
---|
| 2208 | + ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX, |
---|
| 2209 | + attr, |
---|
| 2210 | + nbd_sock_policy, |
---|
| 2211 | + info->extack); |
---|
2067 | 2212 | if (ret != 0) { |
---|
2068 | 2213 | printk(KERN_ERR "nbd: error processing sock list\n"); |
---|
2069 | 2214 | ret = -EINVAL; |
---|
.. | .. |
---|
2090 | 2235 | return ret; |
---|
2091 | 2236 | } |
---|
2092 | 2237 | |
---|
2093 | | -static const struct genl_ops nbd_connect_genl_ops[] = { |
---|
| 2238 | +static const struct genl_small_ops nbd_connect_genl_ops[] = { |
---|
2094 | 2239 | { |
---|
2095 | 2240 | .cmd = NBD_CMD_CONNECT, |
---|
2096 | | - .policy = nbd_attr_policy, |
---|
| 2241 | + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, |
---|
2097 | 2242 | .doit = nbd_genl_connect, |
---|
2098 | 2243 | }, |
---|
2099 | 2244 | { |
---|
2100 | 2245 | .cmd = NBD_CMD_DISCONNECT, |
---|
2101 | | - .policy = nbd_attr_policy, |
---|
| 2246 | + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, |
---|
2102 | 2247 | .doit = nbd_genl_disconnect, |
---|
2103 | 2248 | }, |
---|
2104 | 2249 | { |
---|
2105 | 2250 | .cmd = NBD_CMD_RECONFIGURE, |
---|
2106 | | - .policy = nbd_attr_policy, |
---|
| 2251 | + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, |
---|
2107 | 2252 | .doit = nbd_genl_reconfigure, |
---|
2108 | 2253 | }, |
---|
2109 | 2254 | { |
---|
2110 | 2255 | .cmd = NBD_CMD_STATUS, |
---|
2111 | | - .policy = nbd_attr_policy, |
---|
| 2256 | + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, |
---|
2112 | 2257 | .doit = nbd_genl_status, |
---|
2113 | 2258 | }, |
---|
2114 | 2259 | }; |
---|
.. | .. |
---|
2122 | 2267 | .name = NBD_GENL_FAMILY_NAME, |
---|
2123 | 2268 | .version = NBD_GENL_VERSION, |
---|
2124 | 2269 | .module = THIS_MODULE, |
---|
2125 | | - .ops = nbd_connect_genl_ops, |
---|
2126 | | - .n_ops = ARRAY_SIZE(nbd_connect_genl_ops), |
---|
| 2270 | + .small_ops = nbd_connect_genl_ops, |
---|
| 2271 | + .n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops), |
---|
2127 | 2272 | .maxattr = NBD_ATTR_MAX, |
---|
| 2273 | + .policy = nbd_attr_policy, |
---|
2128 | 2274 | .mcgrps = nbd_mcast_grps, |
---|
2129 | 2275 | .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps), |
---|
2130 | 2276 | }; |
---|
.. | .. |
---|
2144 | 2290 | */ |
---|
2145 | 2291 | if (refcount_read(&nbd->config_refs)) |
---|
2146 | 2292 | connected = 1; |
---|
2147 | | - dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM); |
---|
| 2293 | + dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM); |
---|
2148 | 2294 | if (!dev_opt) |
---|
2149 | 2295 | return -EMSGSIZE; |
---|
2150 | 2296 | ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index); |
---|
.. | .. |
---|
2192 | 2338 | goto out; |
---|
2193 | 2339 | } |
---|
2194 | 2340 | |
---|
2195 | | - dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST); |
---|
| 2341 | + dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST); |
---|
2196 | 2342 | if (index == -1) { |
---|
2197 | 2343 | ret = idr_for_each(&nbd_index_idr, &status_cb, reply); |
---|
2198 | 2344 | if (ret) { |
---|
.. | .. |
---|
2212 | 2358 | } |
---|
2213 | 2359 | nla_nest_end(reply, dev_list); |
---|
2214 | 2360 | genlmsg_end(reply, reply_head); |
---|
2215 | | - genlmsg_reply(reply, info); |
---|
2216 | | - ret = 0; |
---|
| 2361 | + ret = genlmsg_reply(reply, info); |
---|
2217 | 2362 | out: |
---|
2218 | 2363 | mutex_unlock(&nbd_index_mutex); |
---|
2219 | 2364 | return ret; |
---|
.. | .. |
---|
2337 | 2482 | struct nbd_device *nbd; |
---|
2338 | 2483 | LIST_HEAD(del_list); |
---|
2339 | 2484 | |
---|
| 2485 | + /* |
---|
| 2486 | + * Unregister netlink interface prior to waiting |
---|
| 2487 | + * for the completion of netlink commands. |
---|
| 2488 | + */ |
---|
| 2489 | + genl_unregister_family(&nbd_genl_family); |
---|
| 2490 | + |
---|
2340 | 2491 | nbd_dbg_close(); |
---|
2341 | 2492 | |
---|
2342 | 2493 | mutex_lock(&nbd_index_mutex); |
---|
.. | .. |
---|
2346 | 2497 | while (!list_empty(&del_list)) { |
---|
2347 | 2498 | nbd = list_first_entry(&del_list, struct nbd_device, list); |
---|
2348 | 2499 | list_del_init(&nbd->list); |
---|
| 2500 | + if (refcount_read(&nbd->config_refs)) |
---|
| 2501 | + printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n", |
---|
| 2502 | + refcount_read(&nbd->config_refs)); |
---|
2349 | 2503 | if (refcount_read(&nbd->refs) != 1) |
---|
2350 | 2504 | printk(KERN_ERR "nbd: possibly leaking a device\n"); |
---|
2351 | 2505 | nbd_put(nbd); |
---|
2352 | 2506 | } |
---|
2353 | 2507 | |
---|
2354 | 2508 | idr_destroy(&nbd_index_idr); |
---|
2355 | | - genl_unregister_family(&nbd_genl_family); |
---|
2356 | 2509 | unregister_blkdev(NBD_MAJOR, "nbd"); |
---|
2357 | 2510 | } |
---|
2358 | 2511 | |
---|