2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
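Patch summary (as read from the hunks below): two fixes to the NVMe/TCP host driver.

1. Socket callback setup moves out of queue allocation and into
   nvme_tcp_start_queue(). A new helper, nvme_tcp_setup_sock_ops(),
   installs the sk_data_ready/sk_state_change/sk_write_space hooks
   (together with rd_enabled and nvme_tcp_init_recv_ctx()) right before
   the fabrics connect, and the connect-failure path restores them via
   __nvme_tcp_stop_queue(). The restore helper is renamed from
   nvme_tcp_restore_sock_calls() to nvme_tcp_restore_sock_ops() to match.
   A queue that was allocated but never started therefore never has
   socket callbacks pointing at it.

2. nvme_start_freeze() moves from the io-queue teardown path into
   nvme_tcp_configure_io_queues(), paired with an nvme_unfreeze() on the
   freeze-timeout error path, so that a teardown which is never followed
   by a successful reconnect cannot leave the queues frozen.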
diff --git a/kernel/drivers/nvme/host/tcp.c b/kernel/drivers/nvme/host/tcp.c
--- a/kernel/drivers/nvme/host/tcp.c
+++ b/kernel/drivers/nvme/host/tcp.c
@@ -1535,22 +1535,7 @@
 	if (ret)
 		goto err_init_connect;
 
-	queue->rd_enabled = true;
 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
-	nvme_tcp_init_recv_ctx(queue);
-
-	write_lock_bh(&queue->sock->sk->sk_callback_lock);
-	queue->sock->sk->sk_user_data = queue;
-	queue->state_change = queue->sock->sk->sk_state_change;
-	queue->data_ready = queue->sock->sk->sk_data_ready;
-	queue->write_space = queue->sock->sk->sk_write_space;
-	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
-	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
-	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	queue->sock->sk->sk_ll_usec = 1;
-#endif
-	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 
 	return 0;
 
@@ -1569,7 +1554,7 @@
 	return ret;
 }
 
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
 {
 	struct socket *sock = queue->sock;
 
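The hunk context cuts off before the body of the renamed helper. For
reference, the unchanged body is roughly the following sketch; it undoes
exactly what nvme_tcp_setup_sock_ops(), added further down, installs:

static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	/* detach the queue and put back the callbacks saved at setup time */
	sock->sk->sk_user_data = NULL;
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}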
@@ -1584,7 +1569,7 @@
 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 {
 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
-	nvme_tcp_restore_sock_calls(queue);
+	nvme_tcp_restore_sock_ops(queue);
 	cancel_work_sync(&queue->io_work);
 }
 
@@ -1599,10 +1584,31 @@
 	mutex_unlock(&queue->queue_lock);
 }
 
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+	queue->sock->sk->sk_user_data = queue;
+	queue->state_change = queue->sock->sk->sk_state_change;
+	queue->data_ready = queue->sock->sk->sk_data_ready;
+	queue->write_space = queue->sock->sk->sk_write_space;
+	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	queue->sock->sk->sk_ll_usec = 1;
+#endif
+	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
 	int ret;
+
+	queue->rd_enabled = true;
+	nvme_tcp_init_recv_ctx(queue);
+	nvme_tcp_setup_sock_ops(queue);
 
 	if (idx)
 		ret = nvmf_connect_io_queue(nctrl, idx, false);
@@ -1610,10 +1616,10 @@
 		ret = nvmf_connect_admin_queue(nctrl);
 
 	if (!ret) {
-		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
 	} else {
-		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
-			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
+		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+			__nvme_tcp_stop_queue(queue);
 		dev_err(nctrl->device,
 			"failed to connect queue: %d ret=%d\n", idx, ret);
 	}
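Taken together, the hunks above leave the start path looking roughly like
this (a sketch reconstructed from the diff, not a verbatim copy of the
resulting file):

static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
	int ret;

	/* wire the queue up to socket events only now, at start time */
	queue->rd_enabled = true;
	nvme_tcp_init_recv_ctx(queue);
	nvme_tcp_setup_sock_ops(queue);

	if (idx)
		ret = nvmf_connect_io_queue(nctrl, idx, false);
	else
		ret = nvmf_connect_admin_queue(nctrl);

	if (!ret) {
		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
	} else {
		/* connect failed: restore the original sk_* callbacks so
		 * no socket event can reach this queue afterwards */
		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
			__nvme_tcp_stop_queue(queue);
		dev_err(nctrl->device,
			"failed to connect queue: %d ret=%d\n", idx, ret);
	}
	return ret;
}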
@@ -1853,6 +1859,7 @@
 		goto out_cleanup_connect_q;
 
 	if (!new) {
+		nvme_start_freeze(ctrl);
 		nvme_start_queues(ctrl);
 		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
 			/*
@@ -1861,6 +1868,7 @@
 			 * to be safe.
 			 */
 			ret = -ENODEV;
+			nvme_unfreeze(ctrl);
 			goto out_wait_freeze_timed_out;
 		}
 		blk_mq_update_nr_hw_queues(ctrl->tagset,
@@ -1983,7 +1991,6 @@
 	if (ctrl->queue_count <= 1)
 		return;
 	blk_mq_quiesce_queue(ctrl->admin_q);
-	nvme_start_freeze(ctrl);
 	nvme_stop_queues(ctrl);
 	nvme_sync_io_queues(ctrl);
 	nvme_tcp_stop_io_queues(ctrl);
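The last three hunks rebalance queue freezing: the teardown path no longer
starts a freeze it may never get to undo; instead the reconnect path
freezes immediately before restarting the queues and explicitly unfreezes
if the freeze wait times out. Sketched below with the bodies abbreviated;
the trailing nvme_unfreeze() on the success path sits outside the hunk
context:

static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
	/* ... allocate and start the io queues ... */
	if (!new) {
		nvme_start_freeze(ctrl);	/* moved here from teardown */
		nvme_start_queues(ctrl);
		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
			/* freeze wait timed out: fail initialization, but
			 * balance the freeze before bailing out */
			ret = -ENODEV;
			nvme_unfreeze(ctrl);
			goto out_wait_freeze_timed_out;
		}
		blk_mq_update_nr_hw_queues(ctrl->tagset,
			ctrl->queue_count - 1);
		nvme_unfreeze(ctrl);	/* success path (outside the hunks) */
	}
	/* ... */
}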