@@ -1535,22 +1535,7 @@
 	if (ret)
 		goto err_init_connect;
 
-	queue->rd_enabled = true;
 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
-	nvme_tcp_init_recv_ctx(queue);
-
-	write_lock_bh(&queue->sock->sk->sk_callback_lock);
-	queue->sock->sk->sk_user_data = queue;
-	queue->state_change = queue->sock->sk->sk_state_change;
-	queue->data_ready = queue->sock->sk->sk_data_ready;
-	queue->write_space = queue->sock->sk->sk_write_space;
-	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
-	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
-	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	queue->sock->sk->sk_ll_usec = 1;
-#endif
-	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 
 	return 0;
 
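
Note: the socket-callback block removed above is not dropped. It reappears later in this diff as the new helper nvme_tcp_setup_sock_ops(), while queue->rd_enabled and nvme_tcp_init_recv_ctx() move into nvme_tcp_start_queue(); queue allocation no longer touches the sk callbacks at all.
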
@@ -1569,7 +1554,7 @@
 	return ret;
 }
 
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
 {
 	struct socket *sock = queue->sock;
 
@@ -1584,7 +1569,7 @@
 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 {
 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
-	nvme_tcp_restore_sock_calls(queue);
+	nvme_tcp_restore_sock_ops(queue);
 	cancel_work_sync(&queue->io_work);
 }
 
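
The body of nvme_tcp_restore_sock_ops() is hidden by the elided context above. For orientation, a sketch of what the (renamed, otherwise unchanged) helper looks like upstream, assuming the conventional save/restore pairing with the setup helper added further down:

	static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
	{
		struct socket *sock = queue->sock;

		write_lock_bh(&sock->sk->sk_callback_lock);
		sock->sk->sk_user_data = NULL;
		/* put back the callbacks saved by nvme_tcp_setup_sock_ops() */
		sock->sk->sk_data_ready = queue->data_ready;
		sock->sk->sk_state_change = queue->state_change;
		sock->sk->sk_write_space = queue->write_space;
		write_unlock_bh(&sock->sk->sk_callback_lock);
	}
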
@@ -1599,10 +1584,31 @@
 	mutex_unlock(&queue->queue_lock);
 }
 
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+	queue->sock->sk->sk_user_data = queue;
+	queue->state_change = queue->sock->sk->sk_state_change;
+	queue->data_ready = queue->sock->sk->sk_data_ready;
+	queue->write_space = queue->sock->sk->sk_write_space;
+	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	queue->sock->sk->sk_ll_usec = 1;
+#endif
+	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
 	int ret;
+
+	queue->rd_enabled = true;
+	nvme_tcp_init_recv_ctx(queue);
+	nvme_tcp_setup_sock_ops(queue);
 
 	if (idx)
 		ret = nvmf_connect_io_queue(nctrl, idx, false);
@@ -1610,10 +1616,10 @@
 		ret = nvmf_connect_admin_queue(nctrl);
 
 	if (!ret) {
-		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
 	} else {
-		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
-			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
+		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+			__nvme_tcp_stop_queue(queue);
 		dev_err(nctrl->device,
 			"failed to connect queue: %d ret=%d\n", idx, ret);
 	}
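
Taken together, the hunks above defer queue->rd_enabled, nvme_tcp_init_recv_ctx() and the sk-callback wiring from allocation time to nvme_tcp_start_queue(), and make the connect error path restore the callbacks via __nvme_tcp_stop_queue(). The apparent effect is that nvme_tcp_data_ready()/nvme_tcp_state_change() can no longer fire against a queue whose allocation failed and is being torn down. An abridged sketch of how nvme_tcp_start_queue() reads after the patch (unrelated details omitted):

	static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
	{
		struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
		struct nvme_tcp_queue *queue = &ctrl->queues[idx];
		int ret;

		/* arm the receive path and install sk callbacks only now,
		 * immediately before the fabrics connect */
		queue->rd_enabled = true;
		nvme_tcp_init_recv_ctx(queue);
		nvme_tcp_setup_sock_ops(queue);

		if (idx)
			ret = nvmf_connect_io_queue(nctrl, idx, false);
		else
			ret = nvmf_connect_admin_queue(nctrl);

		if (!ret) {
			set_bit(NVME_TCP_Q_LIVE, &queue->flags);
		} else {
			/* connect failed: unwind, including the sk callbacks,
			 * via nvme_tcp_restore_sock_ops() */
			if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
				__nvme_tcp_stop_queue(queue);
			dev_err(nctrl->device,
				"failed to connect queue: %d ret=%d\n", idx, ret);
		}
		return ret;
	}
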
@@ -1853,6 +1859,7 @@
 		goto out_cleanup_connect_q;
 
 	if (!new) {
+		nvme_start_freeze(ctrl);
 		nvme_start_queues(ctrl);
 		if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
 			/*
@@ -1861,6 +1868,7 @@
 			 * to be safe.
 			 */
 			ret = -ENODEV;
+			nvme_unfreeze(ctrl);
 			goto out_wait_freeze_timed_out;
 		}
 		blk_mq_update_nr_hw_queues(ctrl->tagset,
@@ -1983,7 +1991,6 @@
 	if (ctrl->queue_count <= 1)
 		return;
 	blk_mq_quiesce_queue(ctrl->admin_q);
-	nvme_start_freeze(ctrl);
 	nvme_stop_queues(ctrl);
 	nvme_sync_io_queues(ctrl);
 	nvme_tcp_stop_io_queues(ctrl);
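
The last three hunks rebalance the queue freeze: nvme_start_freeze() moves out of the io-queue teardown path (which can run repeatedly, e.g. across reconnect attempts) into nvme_tcp_configure_io_queues(), and a matching nvme_unfreeze() is added on the wait-freeze-timeout error path. A minimal sketch of the resulting pairing, assuming the pre-existing nvme_unfreeze() on the success path just past the context shown:

	/* nvme_tcp_teardown_io_queues(): no freeze here any more, so
	 * repeated teardowns cannot stack up unbalanced freezes */
	nvme_stop_queues(ctrl);
	...

	/* nvme_tcp_configure_io_queues(), !new (reset/reconnect) case: */
	nvme_start_freeze(ctrl);		/* freeze exactly once */
	nvme_start_queues(ctrl);
	if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
		ret = -ENODEV;
		nvme_unfreeze(ctrl);		/* unwind on timeout */
		goto out_wait_freeze_timed_out;
	}
	/* on success, the existing unfreeze later in this function
	 * completes the freeze/unfreeze pair */
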