.. | ..
40 | 40 | #include <linux/random.h>
41 | 41 | #include <linux/io-mapping.h>
42 | 42 | #include <linux/mlx5/driver.h>
| 43 | +#include <linux/mlx5/eq.h>
43 | 44 | #include <linux/debugfs.h>
44 | 45 |
45 | 46 | #include "mlx5_core.h"
| 47 | +#include "lib/eq.h"
46 | 48 |
47 | 49 | enum {
48 | 50 | CMD_IF_REV = 5,
.. | ..
67 | 69 | MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
68 | 70 | };
69 | 71 |
70 | | -static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
71 | | - struct mlx5_cmd_msg *in,
72 | | - struct mlx5_cmd_msg *out,
73 | | - void *uout, int uout_size,
74 | | - mlx5_cmd_cbk_t cbk,
75 | | - void *context, int page_queue)
| 72 | +static struct mlx5_cmd_work_ent *
| 73 | +cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
| 74 | + struct mlx5_cmd_msg *out, void *uout, int uout_size,
| 75 | + mlx5_cmd_cbk_t cbk, void *context, int page_queue)
76 | 76 | {
77 | 77 | gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
78 | 78 | struct mlx5_cmd_work_ent *ent;
.. | ..
81 | 81 | if (!ent)
82 | 82 | return ERR_PTR(-ENOMEM);
83 | 83 |
| 84 | + ent->idx = -EINVAL;
84 | 85 | ent->in = in;
85 | 86 | ent->out = out;
86 | 87 | ent->uout = uout;
.. | ..
89 | 90 | ent->context = context;
90 | 91 | ent->cmd = cmd;
91 | 92 | ent->page_queue = page_queue;
| 93 | + refcount_set(&ent->refcnt, 1);
92 | 94 |
93 | 95 | return ent;
| 96 | +}
| 97 | +
| 98 | +static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
| 99 | +{
| 100 | + kfree(ent);
94 | 101 | }
95 | 102 |
96 | 103 | static u8 alloc_token(struct mlx5_cmd *cmd)
.. | ..
107 | 114 | return token;
108 | 115 | }
109 | 116 |
110 | | -static int alloc_ent(struct mlx5_cmd *cmd)
| 117 | +static int cmd_alloc_index(struct mlx5_cmd *cmd)
111 | 118 | {
112 | 119 | unsigned long flags;
113 | 120 | int ret;
.. | ..
121 | 128 | return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
122 | 129 | }
123 | 130 |
124 | | -static void free_ent(struct mlx5_cmd *cmd, int idx)
| 131 | +static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
125 | 132 | {
| 133 | + lockdep_assert_held(&cmd->alloc_lock);
| 134 | + set_bit(idx, &cmd->bitmask);
| 135 | +}
| 136 | +
| 137 | +static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
| 138 | +{
| 139 | + refcount_inc(&ent->refcnt);
| 140 | +}
| 141 | +
| 142 | +static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
| 143 | +{
| 144 | + struct mlx5_cmd *cmd = ent->cmd;
126 | 145 | unsigned long flags;
127 | 146 |
128 | 147 | spin_lock_irqsave(&cmd->alloc_lock, flags);
129 | | - set_bit(idx, &cmd->bitmask);
| 148 | + if (!refcount_dec_and_test(&ent->refcnt))
| 149 | + goto out;
| 150 | +
| 151 | + if (ent->idx >= 0) {
| 152 | + cmd_free_index(cmd, ent->idx);
| 153 | + up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
| 154 | + }
| 155 | +
| 156 | + cmd_free_ent(ent);
| 157 | +out:
130 | 158 | spin_unlock_irqrestore(&cmd->alloc_lock, flags);
131 | 159 | }
132 | 160 |
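
Reviewer note: the refcounting introduced above is the backbone of this patch. Every path that can still touch a command entry owns a reference, and only the last cmd_ent_put() frees the index and the entry. A sketch of the blocking-flow lifecycle, reconstructed by the editor from the functions in this hunk (not code that appears in the tree):

        /* blocking (non-callback) command, happy path */
        ent = cmd_alloc_ent(cmd, in, out, uout, uout_size, NULL, ctx, 0);
                                        /* refcnt = 1, ent->idx = -EINVAL */
        cmd_ent_get(ent);               /* +1: the expected FW completion event */
        /* ... index allocated, doorbell rung, FW completes ... */
        cmd_ent_put(ent);               /* -1: FW completion consumed */
        cmd_ent_put(ent);               /* -1: caller's ref; frees index + entry */

Initializing ent->idx to -EINVAL matters here: cmd_ent_put() only returns a command slot (cmd_free_index() plus the semaphore up()) when an index was actually allocated, so an entry torn down before submission cannot release a slot it never held.
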
.. | ..
217 | 245 | ent->ret = -ETIMEDOUT;
218 | 246 | }
219 | 247 |
220 | | -static void free_cmd(struct mlx5_cmd_work_ent *ent)
221 | | -{
222 | | - kfree(ent);
223 | | -}
224 | | -
225 | 248 | static int verify_signature(struct mlx5_cmd_work_ent *ent)
226 | 249 | {
227 | 250 | struct mlx5_cmd_mailbox *next = ent->out->next;
.. | ..
308 | 331 | case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
309 | 332 | case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
310 | 333 | case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
311 | | - case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
| 334 | + case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
312 | 335 | case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
313 | 336 | case MLX5_CMD_OP_FPGA_DESTROY_QP:
314 | 337 | case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
| 338 | + case MLX5_CMD_OP_DEALLOC_MEMIC:
| 339 | + case MLX5_CMD_OP_PAGE_FAULT_RESUME:
| 340 | + case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
315 | 341 | return MLX5_CMD_STAT_OK;
316 | 342 |
317 | 343 | case MLX5_CMD_OP_QUERY_HCA_CAP:
.. | ..
325 | 351 | case MLX5_CMD_OP_CREATE_MKEY:
326 | 352 | case MLX5_CMD_OP_QUERY_MKEY:
327 | 353 | case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
328 | | - case MLX5_CMD_OP_PAGE_FAULT_RESUME:
329 | 354 | case MLX5_CMD_OP_CREATE_EQ:
330 | 355 | case MLX5_CMD_OP_QUERY_EQ:
331 | 356 | case MLX5_CMD_OP_GEN_EQE:
.. | ..
370 | 395 | case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
371 | 396 | case MLX5_CMD_OP_ALLOC_Q_COUNTER:
372 | 397 | case MLX5_CMD_OP_QUERY_Q_COUNTER:
| 398 | + case MLX5_CMD_OP_SET_MONITOR_COUNTER:
| 399 | + case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
373 | 400 | case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
374 | 401 | case MLX5_CMD_OP_QUERY_RATE_LIMIT:
375 | 402 | case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
.. | ..
426 | 453 | case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
427 | 454 | case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
428 | 455 | case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
429 | | - case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
| 456 | + case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
430 | 457 | case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
431 | 458 | case MLX5_CMD_OP_FPGA_CREATE_QP:
432 | 459 | case MLX5_CMD_OP_FPGA_MODIFY_QP:
.. | ..
435 | 462 | case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
436 | 463 | case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
437 | 464 | case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
| 465 | + case MLX5_CMD_OP_CREATE_UCTX:
| 466 | + case MLX5_CMD_OP_DESTROY_UCTX:
| 467 | + case MLX5_CMD_OP_CREATE_UMEM:
| 468 | + case MLX5_CMD_OP_DESTROY_UMEM:
| 469 | + case MLX5_CMD_OP_ALLOC_MEMIC:
| 470 | + case MLX5_CMD_OP_MODIFY_XRQ:
| 471 | + case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
438 | 472 | *status = MLX5_DRIVER_STATUS_ABORTED;
439 | 473 | *synd = MLX5_DRIVER_SYND;
440 | 474 | return -EIO;
.. | ..
518 | 552 | MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
519 | 553 | MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
520 | 554 | MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
| 555 | + MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
| 556 | + MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
521 | 557 | MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
522 | 558 | MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
523 | 559 | MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
.. | ..
599 | 635 | MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
600 | 636 | MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
601 | 637 | MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
602 | | - MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
603 | | - MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
| 638 | + MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
| 639 | + MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
604 | 640 | MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
605 | 641 | MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
606 | 642 | MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
.. | ..
617 | 653 | MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
618 | 654 | MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
619 | 655 | MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
| 656 | + MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
| 657 | + MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
| 658 | + MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
| 659 | + MLX5_COMMAND_STR_CASE(CREATE_UCTX);
| 660 | + MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
| 661 | + MLX5_COMMAND_STR_CASE(CREATE_UMEM);
| 662 | + MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
| 663 | + MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
| 664 | + MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
620 | 665 | default: return "unknown command opcode";
621 | 666 | }
622 | 667 | }
.. | ..
801 | 846 | return MLX5_GET(mbox_in, in->first.data, opcode);
802 | 847 | }
803 | 848 |
| 849 | +static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
| 850 | +
804 | 851 | static void cb_timeout_handler(struct work_struct *work)
805 | 852 | {
806 | 853 | struct delayed_work *dwork = container_of(work, struct delayed_work,
.. | ..
811 | 858 | struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
812 | 859 | cmd);
813 | 860 |
| 861 | + mlx5_cmd_eq_recover(dev);
| 862 | +
| 863 | + /* Maybe got handled by eq recover ? */
| 864 | + if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
| 865 | + mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
| 866 | + mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
| 867 | + goto out; /* phew, already handled */
| 868 | + }
| 869 | +
814 | 870 | ent->ret = -ETIMEDOUT;
815 | | - mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
816 | | - mlx5_command_str(msg_to_opcode(ent->in)),
817 | | - msg_to_opcode(ent->in));
818 | | - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
| 871 | + mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
| 872 | + ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
| 873 | + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
| 874 | +
| 875 | +out:
| 876 | + cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */
819 | 877 | }
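
Reviewer note on the reference taken for the timeout work: schedule_delayed_work() and cancel_delayed_work() both report whether they changed the pending state of the work, which is what makes the get/put pairing race-free. The pattern, extracted from this patch for clarity:

        if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
                cmd_ent_get(ent);       /* ref is owned by the queued timeout work */
        /* ... later, on completion ... */
        if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
                cmd_ent_put(ent);       /* work will never run; drop its ref */

If the cancel loses the race and the work does run, cb_timeout_handler() drops the reference itself at its out: label, so the reference is released exactly once either way.
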
820 | 878 |
821 | 879 | static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
822 | 880 | static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
823 | 881 | struct mlx5_cmd_msg *msg);
| 882 | +
| 883 | +static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
| 884 | +{
| 885 | + if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
| 886 | + return true;
| 887 | +
| 888 | + return cmd->allowed_opcode == opcode;
| 889 | +}
| 890 | +
| 891 | +bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
| 892 | +{
| 893 | + return pci_channel_offline(dev->pdev) ||
| 894 | + dev->cmd.state != MLX5_CMDIF_STATE_UP ||
| 895 | + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
| 896 | +}
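
Reviewer note: both submission paths are now gated by the same two predicates. The patch open-codes the test in cmd_work_handler() and cmd_exec(); a condensed equivalent, written out here as a hypothetical helper just to make the gate explicit (no such helper exists in the tree):

        /* hypothetical helper -- the patch open-codes this check */
        static bool cmd_may_reach_fw(struct mlx5_core_dev *dev, u16 opcode)
        {
                return !mlx5_cmd_is_down(dev) &&
                       opcode_allowed(&dev->cmd, opcode);
        }

When the gate is closed, the command is completed locally with MLX5_DRIVER_STATUS_ABORTED and MLX5_DRIVER_SYND rather than ever reaching hardware.
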
824 | 897 |
825 | 898 | static void cmd_work_handler(struct work_struct *work)
826 | 899 | {
.. | ..
839 | 912 | sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
840 | 913 | down(sem);
841 | 914 | if (!ent->page_queue) {
842 | | - alloc_ret = alloc_ent(cmd);
| 915 | + alloc_ret = cmd_alloc_index(cmd);
843 | 916 | if (alloc_ret < 0) {
844 | | - mlx5_core_err(dev, "failed to allocate command entry\n");
| 917 | + mlx5_core_err_rl(dev, "failed to allocate command entry\n");
845 | 918 | if (ent->callback) {
846 | 919 | ent->callback(-EAGAIN, ent->context);
847 | 920 | mlx5_free_cmd_msg(dev, ent->out);
848 | 921 | free_msg(dev, ent->in);
849 | | - free_cmd(ent);
| 922 | + cmd_ent_put(ent);
850 | 923 | } else {
851 | 924 | ent->ret = -EAGAIN;
852 | 925 | complete(&ent->done);
.. | ..
882 | 955 | ent->ts1 = ktime_get_ns();
883 | 956 | cmd_mode = cmd->mode;
884 | 957 |
885 | | - if (ent->callback)
886 | | - schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
| 958 | + if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
| 959 | + cmd_ent_get(ent);
887 | 960 | set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
888 | 961 |
| 962 | + cmd_ent_get(ent); /* for the _real_ FW event on completion */
889 | 963 | /* Skip sending command to fw if internal error */
890 | | - if (pci_channel_offline(dev->pdev) ||
891 | | - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
| 964 | + if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
892 | 965 | u8 status = 0;
893 | 966 | u32 drv_synd;
894 | 967 |
.. | ..
896 | 969 | MLX5_SET(mbox_out, ent->out, status, status);
897 | 970 | MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
898 | 971 |
899 | | - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
900 | | - /* no doorbell, no need to keep the entry */
901 | | - free_ent(cmd, ent->idx);
902 | | - if (ent->callback)
903 | | - free_cmd(ent);
| 972 | + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
904 | 973 | return;
905 | 974 | }
906 | 975 |
.. | ..
908 | 977 | mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
909 | 978 | wmb();
910 | 979 | iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
911 | | - mmiowb();
912 | 980 | /* if not in polling don't use ent after this point */
913 | 981 | if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
914 | 982 | poll_timeout(ent);
915 | 983 | /* make sure we read the descriptor after ownership is SW */
916 | 984 | rmb();
917 | | - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
| 985 | + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
918 | 986 | }
919 | 987 | }
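
Reviewer note on the 1UL -> 1ULL changes scattered through this function: mlx5_cmd_comp_handler() takes a u64 vector and masks off the low word (vector = vec & 0xffffffff, visible further down), reserving the bits above 31 for flags such as MLX5_TRIGGERED_CMD_COMP. Building the vector with 1ULL keeps the expression 64-bit even where unsigned long is 32 bits; for example (editor's illustration, not code from the patch):

        u64 vec = 1ULL << ent->idx;     /* 64-bit arithmetic on 32-bit builds too */
        vec |= MLX5_TRIGGERED_CMD_COMP; /* flag bit above the low 32 index bits */
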
920 | 988 |
.. | ..
948 | 1016 | }
949 | 1017 | }
950 | 1018 |
| 1019 | +enum {
| 1020 | + MLX5_CMD_TIMEOUT_RECOVER_MSEC = 5 * 1000,
| 1021 | +};
| 1022 | +
| 1023 | +static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
| 1024 | + struct mlx5_cmd_work_ent *ent)
| 1025 | +{
| 1026 | + unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);
| 1027 | +
| 1028 | + mlx5_cmd_eq_recover(dev);
| 1029 | +
| 1030 | + /* Re-wait on the ent->done after executing the recovery flow. If the
| 1031 | + * recovery flow (or any other recovery flow running simultaneously)
| 1032 | + * has recovered an EQE, it should cause the entry to be completed by
| 1033 | + * the command interface.
| 1034 | + */
| 1035 | + if (wait_for_completion_timeout(&ent->done, timeout)) {
| 1036 | + mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
| 1037 | + mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
| 1038 | + return;
| 1039 | + }
| 1040 | +
| 1041 | + mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
| 1042 | + mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
| 1043 | +
| 1044 | + ent->ret = -ETIMEDOUT;
| 1045 | + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
| 1046 | +}
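
Reviewer note: together with the wait_func() change below, the blocking path now has a two-stage timeout. The resulting timeline, summarized by the editor (MLX5_CMD_TIMEOUT_MSEC is the pre-existing 60-second constant from the driver headers):

        wait_for_completion_timeout(&ent->done, 60s);   /* normal wait in wait_func() */
        /* on timeout: */
        mlx5_cmd_eq_recover(dev);                       /* poll the cmd EQ for a lost EQE */
        wait_for_completion_timeout(&ent->done, 5s);    /* MLX5_CMD_TIMEOUT_RECOVER_MSEC */
        /* still nothing: ent->ret = -ETIMEDOUT, force-complete the entry */

A completion EQE that was merely lost no longer strands the caller; only a genuinely unresponsive firmware reaches the forced -ETIMEDOUT path.
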
| 1047 | +
951 | 1048 | static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
952 | 1049 | {
953 | 1050 | unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
.. | ..
959 | 1056 | ent->ret = -ECANCELED;
960 | 1057 | goto out_err;
961 | 1058 | }
962 | | - if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
| 1059 | + if (cmd->mode == CMD_MODE_POLLING || ent->polling)
963 | 1060 | wait_for_completion(&ent->done);
964 | | - } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
965 | | - ent->ret = -ETIMEDOUT;
966 | | - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
967 | | - }
| 1061 | + else if (!wait_for_completion_timeout(&ent->done, timeout))
| 1062 | + wait_func_handle_exec_timeout(dev, ent);
968 | 1063 |
969 | 1064 | out_err:
970 | 1065 | err = ent->ret;
.. | ..
1004 | 1099 | if (callback && page_queue)
1005 | 1100 | return -EINVAL;
1006 | 1101 |
1007 | | - ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
1008 | | - page_queue);
| 1102 | + ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
| 1103 | + callback, context, page_queue);
1009 | 1104 | if (IS_ERR(ent))
1010 | 1105 | return PTR_ERR(ent);
| 1106 | +
| 1107 | + /* put for this ent is when consumed, depending on the use case
| 1108 | + * 1) (!callback) blocking flow: by caller after wait_func completes
| 1109 | + * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
| 1110 | + */
1011 | 1111 |
1012 | 1112 | ent->token = token;
1013 | 1113 | ent->polling = force_polling;
.. | ..
1027 | 1127 | }
1028 | 1128 |
1029 | 1129 | if (callback)
1030 | | - goto out;
| 1130 | + goto out; /* mlx5_cmd_comp_handler() will put(ent) */
1031 | 1131 |
1032 | 1132 | err = wait_func(dev, ent);
1033 | | - if (err == -ETIMEDOUT)
1034 | | - goto out;
1035 | | - if (err == -ECANCELED)
| 1133 | + if (err == -ETIMEDOUT || err == -ECANCELED)
1036 | 1134 | goto out_free;
1037 | 1135 |
1038 | 1136 | ds = ent->ts2 - ent->ts1;
1039 | 1137 | op = MLX5_GET(mbox_in, in->first.data, opcode);
1040 | | - if (op < ARRAY_SIZE(cmd->stats)) {
| 1138 | + if (op < MLX5_CMD_OP_MAX) {
1041 | 1139 | stats = &cmd->stats[op];
1042 | 1140 | spin_lock_irq(&stats->lock);
1043 | 1141 | stats->sum += ds;
.. | ..
1050 | 1148 | *status = ent->status;
1051 | 1149 |
1052 | 1150 | out_free:
1053 | | - free_cmd(ent);
| 1151 | + cmd_ent_put(ent);
1054 | 1152 | out:
1055 | 1153 | return err;
1056 | 1154 | }
.. | ..
1324 | 1422 | return -EFAULT;
1325 | 1423 |
1326 | 1424 | err = sscanf(outlen_str, "%d", &outlen);
1327 | | - if (err < 0)
1328 | | - return err;
| 1425 | + if (err != 1)
| 1426 | + return -EINVAL;
1329 | 1427 |
1330 | 1428 | ptr = kzalloc(outlen, GFP_KERNEL);
1331 | 1429 | if (!ptr)
.. | ..
1351 | 1449 | struct mlx5_cmd *cmd = &dev->cmd;
1352 | 1450 |
1353 | 1451 | snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
1354 | | - dev_name(&dev->pdev->dev));
| 1452 | + dev_name(dev->device));
1355 | 1453 | }
1356 | 1454 |
1357 | 1455 | static void clean_debug_files(struct mlx5_core_dev *dev)
.. | ..
1365 | 1463 | debugfs_remove_recursive(dbg->dbg_root);
1366 | 1464 | }
1367 | 1465 |
1368 | | -static int create_debugfs_files(struct mlx5_core_dev *dev)
| 1466 | +static void create_debugfs_files(struct mlx5_core_dev *dev)
1369 | 1467 | {
1370 | 1468 | struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1371 | | - int err = -ENOMEM;
1372 | | -
1373 | | - if (!mlx5_debugfs_root)
1374 | | - return 0;
1375 | 1469 |
1376 | 1470 | dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
1377 | | - if (!dbg->dbg_root)
1378 | | - return err;
1379 | 1471 |
1380 | | - dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
1381 | | - dev, &dfops);
1382 | | - if (!dbg->dbg_in)
1383 | | - goto err_dbg;
1384 | | -
1385 | | - dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
1386 | | - dev, &dfops);
1387 | | - if (!dbg->dbg_out)
1388 | | - goto err_dbg;
1389 | | -
1390 | | - dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
1391 | | - dev, &olfops);
1392 | | - if (!dbg->dbg_outlen)
1393 | | - goto err_dbg;
1394 | | -
1395 | | - dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
1396 | | - &dbg->status);
1397 | | - if (!dbg->dbg_status)
1398 | | - goto err_dbg;
1399 | | -
1400 | | - dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
1401 | | - if (!dbg->dbg_run)
1402 | | - goto err_dbg;
| 1472 | + debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
| 1473 | + debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
| 1474 | + debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
| 1475 | + debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
| 1476 | + debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
1403 | 1477 |
1404 | 1478 | mlx5_cmdif_debugfs_init(dev);
| 1479 | +}
1405 | 1480 |
1406 | | - return 0;
| 1481 | +void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
| 1482 | +{
| 1483 | + struct mlx5_cmd *cmd = &dev->cmd;
| 1484 | + int i;
1407 | 1485 |
1408 | | -err_dbg:
1409 | | - clean_debug_files(dev);
1410 | | - return err;
| 1486 | + for (i = 0; i < cmd->max_reg_cmds; i++)
| 1487 | + down(&cmd->sem);
| 1488 | + down(&cmd->pages_sem);
| 1489 | +
| 1490 | + cmd->allowed_opcode = opcode;
| 1491 | +
| 1492 | + up(&cmd->pages_sem);
| 1493 | + for (i = 0; i < cmd->max_reg_cmds; i++)
| 1494 | + up(&cmd->sem);
1411 | 1495 | }
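
Reviewer note: mlx5_cmd_allowed_opcode() quiesces the command interface by taking every regular slot plus the pages slot before flipping the filter, so in-flight commands drain first and no new ones slip through mid-change. The call sites live outside this file; usage is roughly the following, with the MANAGE_PAGES example paraphrased from the editor's memory of the PCI error flow rather than from this hunk:

        /* restrict the cmdif to page-reclaim traffic while recovering */
        mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_MANAGE_PAGES);
        /* ... recovery ... */
        mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
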
1412 | 1496 |
1413 | 1497 | static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
.. | ..
1426 | 1510 | up(&cmd->sem);
1427 | 1511 | }
1428 | 1512 |
| 1513 | +static int cmd_comp_notifier(struct notifier_block *nb,
| 1514 | + unsigned long type, void *data)
| 1515 | +{
| 1516 | + struct mlx5_core_dev *dev;
| 1517 | + struct mlx5_cmd *cmd;
| 1518 | + struct mlx5_eqe *eqe;
| 1519 | +
| 1520 | + cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
| 1521 | + dev = container_of(cmd, struct mlx5_core_dev, cmd);
| 1522 | + eqe = data;
| 1523 | +
| 1524 | + mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
| 1525 | +
| 1526 | + return NOTIFY_OK;
| 1527 | +}
1429 | 1528 | void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
1430 | 1529 | {
| 1530 | + MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
| 1531 | + mlx5_eq_notifier_register(dev, &dev->cmd.nb);
1431 | 1532 | mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
1432 | 1533 | }
1433 | 1534 |
1434 | 1535 | void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
1435 | 1536 | {
1436 | 1537 | mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
| 1538 | + mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
1437 | 1539 | }
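
Reviewer note: the ordering in the two mode switches is deliberate. The completion notifier is registered before the mode becomes CMD_MODE_EVENTS, and unregistered only after the mode is back to CMD_MODE_POLLING (mlx5_cmd_change_mod() drains the semaphores in between), so no event-mode command can ever fire without a registered consumer for its completion EQE. As a pairing rule (editor's paraphrase):

        /* use_events:  register notifier, then mode = CMD_MODE_EVENTS  */
        /* use_polling: mode = CMD_MODE_POLLING, then unregister        */
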
1438 | 1540 |
1439 | 1541 | static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
.. | ..
1449 | 1551 | }
1450 | 1552 | }
1451 | 1553 |
1452 | | -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
| 1554 | +static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1453 | 1555 | {
1454 | 1556 | struct mlx5_cmd *cmd = &dev->cmd;
1455 | 1557 | struct mlx5_cmd_work_ent *ent;
.. | ..
1466 | 1568 | vector = vec & 0xffffffff;
1467 | 1569 | for (i = 0; i < (1 << cmd->log_sz); i++) {
1468 | 1570 | if (test_bit(i, &vector)) {
1469 | | - struct semaphore *sem;
1470 | | -
1471 | 1571 | ent = cmd->ent_arr[i];
1472 | 1572 |
1473 | 1573 | /* if we already completed the command, ignore it */
.. | ..
1477 | 1577 | if (!forced) {
1478 | 1578 | mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
1479 | 1579 | ent->idx);
1480 | | - free_ent(cmd, ent->idx);
1481 | | - free_cmd(ent);
| 1580 | + cmd_ent_put(ent);
1482 | 1581 | }
1483 | 1582 | continue;
1484 | 1583 | }
1485 | 1584 |
1486 | | - if (ent->callback)
1487 | | - cancel_delayed_work(&ent->cb_timeout_work);
1488 | | - if (ent->page_queue)
1489 | | - sem = &cmd->pages_sem;
1490 | | - else
1491 | | - sem = &cmd->sem;
| 1585 | + if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
| 1586 | + cmd_ent_put(ent); /* timeout work was canceled */
| 1587 | +
| 1588 | + if (!forced || /* Real FW completion */
| 1589 | + mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
| 1590 | + !opcode_allowed(cmd, ent->op))
| 1591 | + cmd_ent_put(ent);
| 1592 | +
1492 | 1593 | ent->ts2 = ktime_get_ns();
1493 | 1594 | memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
1494 | 1595 | dump_command(dev, ent, 0);
.. | ..
1506 | 1607 | ent->ret, deliv_status_to_str(ent->status), ent->status);
1507 | 1608 | }
1508 | 1609 |
1509 | | - /* only real completion will free the entry slot */
1510 | | - if (!forced)
1511 | | - free_ent(cmd, ent->idx);
1512 | | -
1513 | 1610 | if (ent->callback) {
1514 | 1611 | ds = ent->ts2 - ent->ts1;
1515 | | - if (ent->op < ARRAY_SIZE(cmd->stats)) {
| 1612 | + if (ent->op < MLX5_CMD_OP_MAX) {
1516 | 1613 | stats = &cmd->stats[ent->op];
1517 | 1614 | spin_lock_irqsave(&stats->lock, flags);
1518 | 1615 | stats->sum += ds;
.. | ..
1537 | 1634 | free_msg(dev, ent->in);
1538 | 1635 |
1539 | 1636 | err = err ? err : ent->status;
1540 | | - if (!forced)
1541 | | - free_cmd(ent);
| 1637 | + /* final consumer is done, release ent */
| 1638 | + cmd_ent_put(ent);
1542 | 1639 | callback(err, context);
1543 | 1640 | } else {
| 1641 | + /* release wait_func() so mlx5_cmd_invoke()
| 1642 | + * can make the final ent_put()
| 1643 | + */
1544 | 1644 | complete(&ent->done);
1545 | 1645 | }
1546 | | - up(sem);
1547 | 1646 | }
1548 | 1647 | }
1549 | 1648 | }
1550 | | -EXPORT_SYMBOL(mlx5_cmd_comp_handler);
| 1649 | +
| 1650 | +void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
| 1651 | +{
| 1652 | + struct mlx5_cmd *cmd = &dev->cmd;
| 1653 | + unsigned long bitmask;
| 1654 | + unsigned long flags;
| 1655 | + u64 vector;
| 1656 | + int i;
| 1657 | +
| 1658 | + /* wait for pending handlers to complete */
| 1659 | + mlx5_eq_synchronize_cmd_irq(dev);
| 1660 | + spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
| 1661 | + vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
| 1662 | + if (!vector)
| 1663 | + goto no_trig;
| 1664 | +
| 1665 | + bitmask = vector;
| 1666 | + /* we must increment the allocated entries refcount before triggering the completions
| 1667 | + * to guarantee pending commands will not get freed in the meanwhile.
| 1668 | + * For that reason, it also has to be done inside the alloc_lock.
| 1669 | + */
| 1670 | + for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
| 1671 | + cmd_ent_get(cmd->ent_arr[i]);
| 1672 | + vector |= MLX5_TRIGGERED_CMD_COMP;
| 1673 | + spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
| 1674 | +
| 1675 | + mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
| 1676 | + mlx5_cmd_comp_handler(dev, vector, true);
| 1677 | + for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
| 1678 | + cmd_ent_put(cmd->ent_arr[i]);
| 1679 | + return;
| 1680 | +
| 1681 | +no_trig:
| 1682 | + spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
| 1683 | +}
| 1684 | +
| 1685 | +void mlx5_cmd_flush(struct mlx5_core_dev *dev)
| 1686 | +{
| 1687 | + struct mlx5_cmd *cmd = &dev->cmd;
| 1688 | + int i;
| 1689 | +
| 1690 | + for (i = 0; i < cmd->max_reg_cmds; i++) {
| 1691 | + while (down_trylock(&cmd->sem)) {
| 1692 | + mlx5_cmd_trigger_completions(dev);
| 1693 | + cond_resched();
| 1694 | + }
| 1695 | + }
| 1696 | +
| 1697 | + while (down_trylock(&cmd->pages_sem)) {
| 1698 | + mlx5_cmd_trigger_completions(dev);
| 1699 | + cond_resched();
| 1700 | + }
| 1701 | +
| 1702 | + /* Unlock cmdif */
| 1703 | + up(&cmd->pages_sem);
| 1704 | + for (i = 0; i < cmd->max_reg_cmds; i++)
| 1705 | + up(&cmd->sem);
| 1706 | +}
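
Reviewer note: mlx5_cmd_flush() is the teardown/error-path consumer of the trigger machinery. Each down_trylock() either claims a free slot or fails because an in-flight command holds it, in which case every holder is force-completed and the loop retries until the semaphore yields; cond_resched() lets the completion work actually run in between. The intended call site, paraphrased from the editor's reading of the driver's error flow (verify against main.c before relying on it):

        /* on entering internal error state */
        mlx5_cmd_flush(dev);    /* every in-flight command completes with ABORTED */
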
1551 | 1707 |
1552 | 1708 | static int status_to_err(u8 status)
1553 | 1709 | {
1554 | | - return status ? -1 : 0; /* TBD more meaningful codes */
| 1710 | + switch (status) {
| 1711 | + case MLX5_CMD_DELIVERY_STAT_OK:
| 1712 | + case MLX5_DRIVER_STATUS_ABORTED:
| 1713 | + return 0;
| 1714 | + case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
| 1715 | + case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
| 1716 | + return -EBADR;
| 1717 | + case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
| 1718 | + case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
| 1719 | + case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
| 1720 | + return -EFAULT; /* Bad address */
| 1721 | + case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
| 1722 | + case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
| 1723 | + case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
| 1724 | + case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
| 1725 | + return -ENOMSG;
| 1726 | + case MLX5_CMD_DELIVERY_STAT_FW_ERR:
| 1727 | + return -EIO;
| 1728 | + default:
| 1729 | + return -EINVAL;
| 1730 | + }
1555 | 1731 | }
1556 | 1732 |
1557 | 1733 | static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
.. | ..
1608 | 1784 | int err;
1609 | 1785 | u8 status = 0;
1610 | 1786 | u32 drv_synd;
| 1787 | + u16 opcode;
1611 | 1788 | u8 token;
1612 | 1789 |
1613 | | - if (pci_channel_offline(dev->pdev) ||
1614 | | - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1615 | | - u16 opcode = MLX5_GET(mbox_in, in, opcode);
1616 | | -
| 1790 | + opcode = MLX5_GET(mbox_in, in, opcode);
| 1791 | + if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
1617 | 1792 | err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
1618 | 1793 | MLX5_SET(mbox_out, out, status, status);
1619 | 1794 | MLX5_SET(mbox_out, out, syndrome, drv_synd);
.. | ..
1677 | 1852 | }
1678 | 1853 | EXPORT_SYMBOL(mlx5_cmd_exec);
1679 | 1854 |
1680 | | -int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
1681 | | - void *out, int out_size, mlx5_cmd_cbk_t callback,
1682 | | - void *context)
| 1855 | +void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
| 1856 | + struct mlx5_async_ctx *ctx)
1683 | 1857 | {
1684 | | - return cmd_exec(dev, in, in_size, out, out_size, callback, context,
1685 | | - false);
| 1858 | + ctx->dev = dev;
| 1859 | + /* Starts at 1 to avoid doing wake_up if we are not cleaning up */
| 1860 | + atomic_set(&ctx->num_inflight, 1);
| 1861 | + init_completion(&ctx->inflight_done);
| 1862 | +}
| 1863 | +EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
| 1864 | +
| 1865 | +/**
| 1866 | + * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
| 1867 | + * @ctx: The ctx to clean
| 1868 | + *
| 1869 | + * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
| 1870 | + * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
| 1871 | + * the call mlx5_cleanup_async_ctx().
| 1872 | + */
| 1873 | +void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
| 1874 | +{
| 1875 | + if (!atomic_dec_and_test(&ctx->num_inflight))
| 1876 | + wait_for_completion(&ctx->inflight_done);
| 1877 | +}
| 1878 | +EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
| 1879 | +
| 1880 | +static void mlx5_cmd_exec_cb_handler(int status, void *_work)
| 1881 | +{
| 1882 | + struct mlx5_async_work *work = _work;
| 1883 | + struct mlx5_async_ctx *ctx = work->ctx;
| 1884 | +
| 1885 | + work->user_callback(status, work);
| 1886 | + if (atomic_dec_and_test(&ctx->num_inflight))
| 1887 | + complete(&ctx->inflight_done);
| 1888 | +}
| 1889 | +
| 1890 | +int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
| 1891 | + void *out, int out_size, mlx5_async_cbk_t callback,
| 1892 | + struct mlx5_async_work *work)
| 1893 | +{
| 1894 | + int ret;
| 1895 | +
| 1896 | + work->ctx = ctx;
| 1897 | + work->user_callback = callback;
| 1898 | + if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
| 1899 | + return -EIO;
| 1900 | + ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
| 1901 | + mlx5_cmd_exec_cb_handler, work, false);
| 1902 | + if (ret && atomic_dec_and_test(&ctx->num_inflight))
| 1903 | + complete(&ctx->inflight_done);
| 1904 | +
| 1905 | + return ret;
1686 | 1906 | }
1687 | 1907 | EXPORT_SYMBOL(mlx5_cmd_exec_cb);
1688 | 1908 |
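
Reviewer note: the async-context API replaces the old fire-and-forget mlx5_cmd_exec_cb(). Each callback is accounted in ctx->num_inflight (biased to 1 so the counter cannot hit zero before cleanup starts), and mlx5_cmd_cleanup_async_ctx() blocks until every callback has run. A minimal caller sketch; the struct, buffer size, and function names below are invented for illustration:

        struct my_async_query {
                struct mlx5_async_work cb_work; /* handed to mlx5_cmd_exec_cb() */
                u32 out[64];                    /* caller-owned output buffer */
        };

        static void my_query_done(int status, struct mlx5_async_work *context)
        {
                struct my_async_query *q =
                        container_of(context, struct my_async_query, cb_work);
                /* check status, consume q->out, then free q */
        }

        mlx5_cmd_init_async_ctx(mdev, &ctx);
        err = mlx5_cmd_exec_cb(&ctx, in, sizeof(in), q->out, sizeof(q->out),
                               my_query_done, &q->cb_work);
        /* ... */
        mlx5_cmd_cleanup_async_ctx(&ctx);       /* returns after all callbacks ran */

The container_of() pattern is the intended way to recover the caller's state, since only the embedded mlx5_async_work pointer is passed back.
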
.. | ..
1753 | 1973 |
1754 | 1974 | static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1755 | 1975 | {
1756 | | - struct device *ddev = &dev->pdev->dev;
1757 | | -
1758 | | - cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
1759 | | - &cmd->alloc_dma, GFP_KERNEL);
| 1976 | + cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
| 1977 | + &cmd->alloc_dma, GFP_KERNEL);
1760 | 1978 | if (!cmd->cmd_alloc_buf)
1761 | 1979 | return -ENOMEM;
1762 | 1980 |
.. | ..
1768 | 1986 | return 0;
1769 | 1987 | }
1770 | 1988 |
1771 | | - dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
| 1989 | + dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
1772 | 1990 | cmd->alloc_dma);
1773 | | - cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
1774 | | - 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
1775 | | - &cmd->alloc_dma, GFP_KERNEL);
| 1991 | + cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
| 1992 | + 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
| 1993 | + &cmd->alloc_dma, GFP_KERNEL);
1776 | 1994 | if (!cmd->cmd_alloc_buf)
1777 | 1995 | return -ENOMEM;
1778 | 1996 |
.. | ..
1784 | 2002 |
1785 | 2003 | static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1786 | 2004 | {
1787 | | - struct device *ddev = &dev->pdev->dev;
1788 | | -
1789 | | - dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
| 2005 | + dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
1790 | 2006 | cmd->alloc_dma);
| 2007 | +}
| 2008 | +
| 2009 | +static u16 cmdif_rev(struct mlx5_core_dev *dev)
| 2010 | +{
| 2011 | + return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
1791 | 2012 | }
1792 | 2013 |
1793 | 2014 | int mlx5_cmd_init(struct mlx5_core_dev *dev)
.. | ..
1803 | 2024 | memset(cmd, 0, sizeof(*cmd));
1804 | 2025 | cmd_if_rev = cmdif_rev(dev);
1805 | 2026 | if (cmd_if_rev != CMD_IF_REV) {
1806 | | - dev_err(&dev->pdev->dev,
1807 | | - "Driver cmdif rev(%d) differs from firmware's(%d)\n",
1808 | | - CMD_IF_REV, cmd_if_rev);
| 2027 | + mlx5_core_err(dev,
| 2028 | + "Driver cmdif rev(%d) differs from firmware's(%d)\n",
| 2029 | + CMD_IF_REV, cmd_if_rev);
1809 | 2030 | return -EINVAL;
1810 | 2031 | }
1811 | 2032 |
1812 | | - cmd->pool = dma_pool_create("mlx5_cmd", &dev->pdev->dev, size, align,
1813 | | - 0);
1814 | | - if (!cmd->pool)
| 2033 | + cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
| 2034 | + if (!cmd->stats)
1815 | 2035 | return -ENOMEM;
| 2036 | +
| 2037 | + cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
| 2038 | + if (!cmd->pool) {
| 2039 | + err = -ENOMEM;
| 2040 | + goto dma_pool_err;
| 2041 | + }
1816 | 2042 |
1817 | 2043 | err = alloc_cmd_page(dev, cmd);
1818 | 2044 | if (err)
.. | ..
1822 | 2048 | cmd->log_sz = cmd_l >> 4 & 0xf;
1823 | 2049 | cmd->log_stride = cmd_l & 0xf;
1824 | 2050 | if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
1825 | | - dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
1826 | | - 1 << cmd->log_sz);
| 2051 | + mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
| 2052 | + 1 << cmd->log_sz);
1827 | 2053 | err = -EINVAL;
1828 | 2054 | goto err_free_page;
1829 | 2055 | }
1830 | 2056 |
1831 | 2057 | if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
1832 | | - dev_err(&dev->pdev->dev, "command queue size overflow\n");
| 2058 | + mlx5_core_err(dev, "command queue size overflow\n");
1833 | 2059 | err = -EINVAL;
1834 | 2060 | goto err_free_page;
1835 | 2061 | }
1836 | 2062 |
| 2063 | + cmd->state = MLX5_CMDIF_STATE_DOWN;
1837 | 2064 | cmd->checksum_disabled = 1;
1838 | 2065 | cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
1839 | 2066 | cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
1840 | 2067 |
1841 | 2068 | cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
1842 | 2069 | if (cmd->cmdif_rev > CMD_IF_REV) {
1843 | | - dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
1844 | | - CMD_IF_REV, cmd->cmdif_rev);
| 2070 | + mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
| 2071 | + CMD_IF_REV, cmd->cmdif_rev);
1845 | 2072 | err = -EOPNOTSUPP;
1846 | 2073 | goto err_free_page;
1847 | 2074 | }
1848 | 2075 |
1849 | 2076 | spin_lock_init(&cmd->alloc_lock);
1850 | 2077 | spin_lock_init(&cmd->token_lock);
1851 | | - for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
| 2078 | + for (i = 0; i < MLX5_CMD_OP_MAX; i++)
1852 | 2079 | spin_lock_init(&cmd->stats[i].lock);
1853 | 2080 |
1854 | 2081 | sema_init(&cmd->sem, cmd->max_reg_cmds);
.. | ..
1857 | 2084 | cmd_h = (u32)((u64)(cmd->dma) >> 32);
1858 | 2085 | cmd_l = (u32)(cmd->dma);
1859 | 2086 | if (cmd_l & 0xfff) {
1860 | | - dev_err(&dev->pdev->dev, "invalid command queue address\n");
| 2087 | + mlx5_core_err(dev, "invalid command queue address\n");
1861 | 2088 | err = -ENOMEM;
1862 | 2089 | goto err_free_page;
1863 | 2090 | }
.. | ..
1871 | 2098 | mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
1872 | 2099 |
1873 | 2100 | cmd->mode = CMD_MODE_POLLING;
| 2101 | + cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
1874 | 2102 |
1875 | 2103 | create_msg_cache(dev);
1876 | 2104 |
1877 | 2105 | set_wqname(dev);
1878 | 2106 | cmd->wq = create_singlethread_workqueue(cmd->wq_name);
1879 | 2107 | if (!cmd->wq) {
1880 | | - dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
| 2108 | + mlx5_core_err(dev, "failed to create command workqueue\n");
1881 | 2109 | err = -ENOMEM;
1882 | 2110 | goto err_cache;
1883 | 2111 | }
1884 | 2112 |
1885 | | - err = create_debugfs_files(dev);
1886 | | - if (err) {
1887 | | - err = -ENOMEM;
1888 | | - goto err_wq;
1889 | | - }
| 2113 | + create_debugfs_files(dev);
1890 | 2114 |
1891 | 2115 | return 0;
1892 | | -
1893 | | -err_wq:
1894 | | - destroy_workqueue(cmd->wq);
1895 | 2116 |
1896 | 2117 | err_cache:
1897 | 2118 | destroy_msg_cache(dev);
.. | ..
1901 | 2122 |
1902 | 2123 | err_free_pool:
1903 | 2124 | dma_pool_destroy(cmd->pool);
1904 | | -
| 2125 | +dma_pool_err:
| 2126 | + kvfree(cmd->stats);
1905 | 2127 | return err;
1906 | 2128 | }
1907 | 2129 | EXPORT_SYMBOL(mlx5_cmd_init);
.. | ..
1915 | 2137 | destroy_msg_cache(dev);
1916 | 2138 | free_cmd_page(dev, cmd);
1917 | 2139 | dma_pool_destroy(cmd->pool);
| 2140 | + kvfree(cmd->stats);
1918 | 2141 | }
1919 | 2142 | EXPORT_SYMBOL(mlx5_cmd_cleanup);
| 2143 | +
| 2144 | +void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
| 2145 | + enum mlx5_cmdif_state cmdif_state)
| 2146 | +{
| 2147 | + dev->cmd.state = cmdif_state;
| 2148 | +}
| 2149 | +EXPORT_SYMBOL(mlx5_cmd_set_state);
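
Reviewer note: mlx5_cmd_set_state() closes the loop opened by mlx5_cmd_is_down(). mlx5_cmd_init() leaves the interface in MLX5_CMDIF_STATE_DOWN, and the core flips it up only once the command queue is actually live. The expected choreography, paraphrased by the editor (the exact call placement lives in main.c, outside this diff):

        mlx5_cmd_init(dev);                             /* cmd->state = MLX5_CMDIF_STATE_DOWN */
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);   /* once FW accepts commands */
        /* ... */
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); /* on teardown or fatal error */
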
---|