.. | .. |
---|
8 | 8 | #include <rdma/uverbs_types.h> |
---|
9 | 9 | #include <rdma/uverbs_ioctl.h> |
---|
10 | 10 | #include <rdma/mlx5_user_ioctl_cmds.h> |
---|
| 11 | +#include <rdma/mlx5_user_ioctl_verbs.h> |
---|
11 | 12 | #include <rdma/ib_umem.h> |
---|
| 13 | +#include <rdma/uverbs_std_types.h> |
---|
12 | 14 | #include <linux/mlx5/driver.h> |
---|
13 | 15 | #include <linux/mlx5/fs.h> |
---|
14 | 16 | #include "mlx5_ib.h" |
---|
| 17 | +#include "devx.h" |
---|
| 18 | +#include "qp.h" |
---|
| 19 | +#include <linux/xarray.h> |
---|
15 | 20 | |
---|
16 | 21 | #define UVERBS_MODULE_NAME mlx5_ib |
---|
17 | 22 | #include <rdma/uverbs_named_ioctl.h> |
---|
18 | 23 | |
---|
19 | | -#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in) |
---|
20 | | -struct devx_obj { |
---|
21 | | - struct mlx5_core_dev *mdev; |
---|
22 | | - u32 obj_id; |
---|
23 | | - u32 dinlen; /* destroy inbox length */ |
---|
24 | | - u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW]; |
---|
| 24 | +static void dispatch_event_fd(struct list_head *fd_list, const void *data); |
---|
| 25 | + |
---|
| 26 | +enum devx_obj_flags { |
---|
| 27 | + DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0, |
---|
| 28 | + DEVX_OBJ_FLAGS_DCT = 1 << 1, |
---|
| 29 | + DEVX_OBJ_FLAGS_CQ = 1 << 2, |
---|
| 30 | +}; |
---|
| 31 | + |
---|
| 32 | +struct devx_async_data { |
---|
| 33 | + struct mlx5_ib_dev *mdev; |
---|
| 34 | + struct list_head list; |
---|
| 35 | + struct devx_async_cmd_event_file *ev_file; |
---|
| 36 | + struct mlx5_async_work cb_work; |
---|
| 37 | + u16 cmd_out_len; |
---|
| 38 | + /* must be last field in this structure */ |
---|
| 39 | + struct mlx5_ib_uapi_devx_async_cmd_hdr hdr; |
---|
| 40 | +}; |
---|
| 41 | + |
---|
| 42 | +struct devx_async_event_data { |
---|
| 43 | + struct list_head list; /* headed in ev_file->event_list */ |
---|
| 44 | + struct mlx5_ib_uapi_devx_async_event_hdr hdr; |
---|
| 45 | +}; |
---|
| 46 | + |
---|
| 47 | +/* first level XA value data structure */ |
---|
| 48 | +struct devx_event { |
---|
| 49 | + struct xarray object_ids; /* second XA level, Key = object id */ |
---|
| 50 | + struct list_head unaffiliated_list; |
---|
| 51 | +}; |
---|
| 52 | + |
---|
| 53 | +/* second level XA value data structure */ |
---|
| 54 | +struct devx_obj_event { |
---|
| 55 | + struct rcu_head rcu; |
---|
| 56 | + struct list_head obj_sub_list; |
---|
| 57 | +}; |
---|
| 58 | + |
---|
| 59 | +struct devx_event_subscription { |
---|
| 60 | + struct list_head file_list; /* headed in ev_file-> |
---|
| 61 | + * subscribed_events_list |
---|
| 62 | + */ |
---|
| 63 | + struct list_head xa_list; /* headed in devx_event->unaffiliated_list or |
---|
| 64 | + * devx_obj_event->obj_sub_list |
---|
| 65 | + */ |
---|
| 66 | + struct list_head obj_list; /* headed in devx_object */ |
---|
| 67 | + struct list_head event_list; /* headed in ev_file->event_list or in |
---|
| 68 | + * temp list via subscription |
---|
| 69 | + */ |
---|
| 70 | + |
---|
| 71 | + u8 is_cleaned:1; |
---|
| 72 | + u32 xa_key_level1; |
---|
| 73 | + u32 xa_key_level2; |
---|
| 74 | + struct rcu_head rcu; |
---|
| 75 | + u64 cookie; |
---|
| 76 | + struct devx_async_event_file *ev_file; |
---|
| 77 | + struct eventfd_ctx *eventfd; |
---|
| 78 | +}; |
---|
| 79 | + |
---|
| 80 | +struct devx_async_event_file { |
---|
| 81 | + struct ib_uobject uobj; |
---|
| 82 | + /* Head of events that are subscribed to this FD */ |
---|
| 83 | + struct list_head subscribed_events_list; |
---|
| 84 | + spinlock_t lock; |
---|
| 85 | + wait_queue_head_t poll_wait; |
---|
| 86 | + struct list_head event_list; |
---|
| 87 | + struct mlx5_ib_dev *dev; |
---|
| 88 | + u8 omit_data:1; |
---|
| 89 | + u8 is_overflow_err:1; |
---|
| 90 | + u8 is_destroyed:1; |
---|
25 | 91 | }; |
---|
26 | 92 | |
---|
27 | 93 | struct devx_umem { |
---|
.. | .. |
---|
40 | 106 | u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; |
---|
41 | 107 | }; |
---|
42 | 108 | |
---|
43 | | -static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file) |
---|
| 109 | +static struct mlx5_ib_ucontext * |
---|
| 110 | +devx_ufile2uctx(const struct uverbs_attr_bundle *attrs) |
---|
44 | 111 | { |
---|
45 | | - return to_mucontext(ib_uverbs_get_ucontext(file)); |
---|
| 112 | + return to_mucontext(ib_uverbs_get_ucontext(attrs)); |
---|
46 | 113 | } |
---|
47 | 114 | |
---|
48 | | -int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context) |
---|
| 115 | +int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user) |
---|
49 | 116 | { |
---|
50 | 117 | u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0}; |
---|
51 | 118 | u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; |
---|
52 | | - u64 general_obj_types; |
---|
53 | | - void *hdr; |
---|
| 119 | + void *uctx; |
---|
54 | 120 | int err; |
---|
| 121 | + u16 uid; |
---|
| 122 | + u32 cap = 0; |
---|
55 | 123 | |
---|
56 | | - hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr); |
---|
57 | | - |
---|
58 | | - general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types); |
---|
59 | | - if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) || |
---|
60 | | - !(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM)) |
---|
| 124 | + /* 0 means not supported */ |
---|
| 125 | + if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx)) |
---|
61 | 126 | return -EINVAL; |
---|
62 | 127 | |
---|
63 | | - if (!capable(CAP_NET_RAW)) |
---|
64 | | - return -EPERM; |
---|
| 128 | + uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx); |
---|
| 129 | + if (is_user && capable(CAP_NET_RAW) && |
---|
| 130 | + (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX)) |
---|
| 131 | + cap |= MLX5_UCTX_CAP_RAW_TX; |
---|
| 132 | + if (is_user && capable(CAP_SYS_RAWIO) && |
---|
| 133 | + (MLX5_CAP_GEN(dev->mdev, uctx_cap) & |
---|
| 134 | + MLX5_UCTX_CAP_INTERNAL_DEV_RES)) |
---|
| 135 | + cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES; |
---|
65 | 136 | |
---|
66 | | - MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); |
---|
67 | | - MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX); |
---|
| 137 | + MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX); |
---|
| 138 | + MLX5_SET(uctx, uctx, cap, cap); |
---|
68 | 139 | |
---|
69 | 140 | err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); |
---|
70 | 141 | if (err) |
---|
71 | 142 | return err; |
---|
72 | 143 | |
---|
73 | | - context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); |
---|
74 | | - return 0; |
---|
| 144 | + uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); |
---|
| 145 | + return uid; |
---|
75 | 146 | } |
---|
76 | 147 | |
---|
77 | | -void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, |
---|
78 | | - struct mlx5_ib_ucontext *context) |
---|
| 148 | +void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) |
---|
79 | 149 | { |
---|
80 | | - u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0}; |
---|
| 150 | + u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0}; |
---|
81 | 151 | u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0}; |
---|
82 | 152 | |
---|
83 | | - MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); |
---|
84 | | - MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX); |
---|
85 | | - MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid); |
---|
| 153 | + MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX); |
---|
| 154 | + MLX5_SET(destroy_uctx_in, in, uid, uid); |
---|
86 | 155 | |
---|
87 | 156 | mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); |
---|
88 | 157 | } |
---|
89 | 158 | |
---|
90 | | -bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type) |
---|
| 159 | +static bool is_legacy_unaffiliated_event_num(u16 event_num) |
---|
91 | 160 | { |
---|
92 | | - struct devx_obj *devx_obj = obj; |
---|
93 | | - u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode); |
---|
94 | | - |
---|
95 | | - switch (opcode) { |
---|
96 | | - case MLX5_CMD_OP_DESTROY_TIR: |
---|
97 | | - *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR; |
---|
98 | | - *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, |
---|
99 | | - obj_id); |
---|
100 | | - return true; |
---|
101 | | - |
---|
102 | | - case MLX5_CMD_OP_DESTROY_FLOW_TABLE: |
---|
103 | | - *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; |
---|
104 | | - *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox, |
---|
105 | | - table_id); |
---|
| 161 | + switch (event_num) { |
---|
| 162 | + case MLX5_EVENT_TYPE_PORT_CHANGE: |
---|
106 | 163 | return true; |
---|
107 | 164 | default: |
---|
108 | 165 | return false; |
---|
109 | 166 | } |
---|
110 | 167 | } |
---|
111 | 168 | |
---|
112 | | -static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in) |
---|
| 169 | +static bool is_legacy_obj_event_num(u16 event_num) |
---|
| 170 | +{ |
---|
| 171 | + switch (event_num) { |
---|
| 172 | + case MLX5_EVENT_TYPE_PATH_MIG: |
---|
| 173 | + case MLX5_EVENT_TYPE_COMM_EST: |
---|
| 174 | + case MLX5_EVENT_TYPE_SQ_DRAINED: |
---|
| 175 | + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: |
---|
| 176 | + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: |
---|
| 177 | + case MLX5_EVENT_TYPE_CQ_ERROR: |
---|
| 178 | + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: |
---|
| 179 | + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: |
---|
| 180 | + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: |
---|
| 181 | + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: |
---|
| 182 | + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: |
---|
| 183 | + case MLX5_EVENT_TYPE_DCT_DRAINED: |
---|
| 184 | + case MLX5_EVENT_TYPE_COMP: |
---|
| 185 | + case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION: |
---|
| 186 | + case MLX5_EVENT_TYPE_XRQ_ERROR: |
---|
| 187 | + return true; |
---|
| 188 | + default: |
---|
| 189 | + return false; |
---|
| 190 | + } |
---|
| 191 | +} |
---|
| 192 | + |
---|
| 193 | +static u16 get_legacy_obj_type(u16 opcode) |
---|
| 194 | +{ |
---|
| 195 | + switch (opcode) { |
---|
| 196 | + case MLX5_CMD_OP_CREATE_RQ: |
---|
| 197 | + return MLX5_EVENT_QUEUE_TYPE_RQ; |
---|
| 198 | + case MLX5_CMD_OP_CREATE_QP: |
---|
| 199 | + return MLX5_EVENT_QUEUE_TYPE_QP; |
---|
| 200 | + case MLX5_CMD_OP_CREATE_SQ: |
---|
| 201 | + return MLX5_EVENT_QUEUE_TYPE_SQ; |
---|
| 202 | + case MLX5_CMD_OP_CREATE_DCT: |
---|
| 203 | + return MLX5_EVENT_QUEUE_TYPE_DCT; |
---|
| 204 | + default: |
---|
| 205 | + return 0; |
---|
| 206 | + } |
---|
| 207 | +} |
---|
| 208 | + |
---|
| 209 | +static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num) |
---|
| 210 | +{ |
---|
| 211 | + u16 opcode; |
---|
| 212 | + |
---|
| 213 | + opcode = (obj->obj_id >> 32) & 0xffff; |
---|
| 214 | + |
---|
| 215 | + if (is_legacy_obj_event_num(event_num)) |
---|
| 216 | + return get_legacy_obj_type(opcode); |
---|
| 217 | + |
---|
| 218 | + switch (opcode) { |
---|
| 219 | + case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: |
---|
| 220 | + return (obj->obj_id >> 48); |
---|
| 221 | + case MLX5_CMD_OP_CREATE_RQ: |
---|
| 222 | + return MLX5_OBJ_TYPE_RQ; |
---|
| 223 | + case MLX5_CMD_OP_CREATE_QP: |
---|
| 224 | + return MLX5_OBJ_TYPE_QP; |
---|
| 225 | + case MLX5_CMD_OP_CREATE_SQ: |
---|
| 226 | + return MLX5_OBJ_TYPE_SQ; |
---|
| 227 | + case MLX5_CMD_OP_CREATE_DCT: |
---|
| 228 | + return MLX5_OBJ_TYPE_DCT; |
---|
| 229 | + case MLX5_CMD_OP_CREATE_TIR: |
---|
| 230 | + return MLX5_OBJ_TYPE_TIR; |
---|
| 231 | + case MLX5_CMD_OP_CREATE_TIS: |
---|
| 232 | + return MLX5_OBJ_TYPE_TIS; |
---|
| 233 | + case MLX5_CMD_OP_CREATE_PSV: |
---|
| 234 | + return MLX5_OBJ_TYPE_PSV; |
---|
| 235 | + case MLX5_OBJ_TYPE_MKEY: |
---|
| 236 | + return MLX5_OBJ_TYPE_MKEY; |
---|
| 237 | + case MLX5_CMD_OP_CREATE_RMP: |
---|
| 238 | + return MLX5_OBJ_TYPE_RMP; |
---|
| 239 | + case MLX5_CMD_OP_CREATE_XRC_SRQ: |
---|
| 240 | + return MLX5_OBJ_TYPE_XRC_SRQ; |
---|
| 241 | + case MLX5_CMD_OP_CREATE_XRQ: |
---|
| 242 | + return MLX5_OBJ_TYPE_XRQ; |
---|
| 243 | + case MLX5_CMD_OP_CREATE_RQT: |
---|
| 244 | + return MLX5_OBJ_TYPE_RQT; |
---|
| 245 | + case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: |
---|
| 246 | + return MLX5_OBJ_TYPE_FLOW_COUNTER; |
---|
| 247 | + case MLX5_CMD_OP_CREATE_CQ: |
---|
| 248 | + return MLX5_OBJ_TYPE_CQ; |
---|
| 249 | + default: |
---|
| 250 | + return 0; |
---|
| 251 | + } |
---|
| 252 | +} |
---|
| 253 | + |
---|
| 254 | +static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe) |
---|
| 255 | +{ |
---|
| 256 | + switch (event_type) { |
---|
| 257 | + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: |
---|
| 258 | + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: |
---|
| 259 | + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: |
---|
| 260 | + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: |
---|
| 261 | + case MLX5_EVENT_TYPE_PATH_MIG: |
---|
| 262 | + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: |
---|
| 263 | + case MLX5_EVENT_TYPE_COMM_EST: |
---|
| 264 | + case MLX5_EVENT_TYPE_SQ_DRAINED: |
---|
| 265 | + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: |
---|
| 266 | + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: |
---|
| 267 | + return eqe->data.qp_srq.type; |
---|
| 268 | + case MLX5_EVENT_TYPE_CQ_ERROR: |
---|
| 269 | + case MLX5_EVENT_TYPE_XRQ_ERROR: |
---|
| 270 | + return 0; |
---|
| 271 | + case MLX5_EVENT_TYPE_DCT_DRAINED: |
---|
| 272 | + case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION: |
---|
| 273 | + return MLX5_EVENT_QUEUE_TYPE_DCT; |
---|
| 274 | + default: |
---|
| 275 | + return MLX5_GET(affiliated_event_header, &eqe->data, obj_type); |
---|
| 276 | + } |
---|
| 277 | +} |
---|
| 278 | + |
---|
| 279 | +static u32 get_dec_obj_id(u64 obj_id) |
---|
| 280 | +{ |
---|
| 281 | + return (obj_id & 0xffffffff); |
---|
| 282 | +} |
---|
| 283 | + |
---|
| 284 | +/* |
---|
| 285 | + * As the obj_id in the firmware is not globally unique the object type |
---|
| 286 | + * must be considered upon checking for a valid object id. |
---|
| 287 | + * For that the opcode of the creator command is encoded as part of the obj_id. |
---|
| 288 | + */ |
---|
| 289 | +static u64 get_enc_obj_id(u32 opcode, u32 obj_id) |
---|
| 290 | +{ |
---|
| 291 | + return ((u64)opcode << 32) | obj_id; |
---|
| 292 | +} |
---|
| 293 | + |
---|
| 294 | +static u64 devx_get_obj_id(const void *in) |
---|
113 | 295 | { |
---|
114 | 296 | u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode); |
---|
115 | | - u32 obj_id; |
---|
| 297 | + u64 obj_id; |
---|
116 | 298 | |
---|
117 | 299 | switch (opcode) { |
---|
118 | 300 | case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: |
---|
119 | 301 | case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: |
---|
120 | | - obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id); |
---|
| 302 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT | |
---|
| 303 | + MLX5_GET(general_obj_in_cmd_hdr, in, |
---|
| 304 | + obj_type) << 16, |
---|
| 305 | + MLX5_GET(general_obj_in_cmd_hdr, in, |
---|
| 306 | + obj_id)); |
---|
121 | 307 | break; |
---|
122 | 308 | case MLX5_CMD_OP_QUERY_MKEY: |
---|
123 | | - obj_id = MLX5_GET(query_mkey_in, in, mkey_index); |
---|
| 309 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY, |
---|
| 310 | + MLX5_GET(query_mkey_in, in, |
---|
| 311 | + mkey_index)); |
---|
124 | 312 | break; |
---|
125 | 313 | case MLX5_CMD_OP_QUERY_CQ: |
---|
126 | | - obj_id = MLX5_GET(query_cq_in, in, cqn); |
---|
| 314 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ, |
---|
| 315 | + MLX5_GET(query_cq_in, in, cqn)); |
---|
127 | 316 | break; |
---|
128 | 317 | case MLX5_CMD_OP_MODIFY_CQ: |
---|
129 | | - obj_id = MLX5_GET(modify_cq_in, in, cqn); |
---|
| 318 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ, |
---|
| 319 | + MLX5_GET(modify_cq_in, in, cqn)); |
---|
130 | 320 | break; |
---|
131 | 321 | case MLX5_CMD_OP_QUERY_SQ: |
---|
132 | | - obj_id = MLX5_GET(query_sq_in, in, sqn); |
---|
| 322 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ, |
---|
| 323 | + MLX5_GET(query_sq_in, in, sqn)); |
---|
133 | 324 | break; |
---|
134 | 325 | case MLX5_CMD_OP_MODIFY_SQ: |
---|
135 | | - obj_id = MLX5_GET(modify_sq_in, in, sqn); |
---|
| 326 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ, |
---|
| 327 | + MLX5_GET(modify_sq_in, in, sqn)); |
---|
136 | 328 | break; |
---|
137 | 329 | case MLX5_CMD_OP_QUERY_RQ: |
---|
138 | | - obj_id = MLX5_GET(query_rq_in, in, rqn); |
---|
| 330 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ, |
---|
| 331 | + MLX5_GET(query_rq_in, in, rqn)); |
---|
139 | 332 | break; |
---|
140 | 333 | case MLX5_CMD_OP_MODIFY_RQ: |
---|
141 | | - obj_id = MLX5_GET(modify_rq_in, in, rqn); |
---|
| 334 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ, |
---|
| 335 | + MLX5_GET(modify_rq_in, in, rqn)); |
---|
142 | 336 | break; |
---|
143 | 337 | case MLX5_CMD_OP_QUERY_RMP: |
---|
144 | | - obj_id = MLX5_GET(query_rmp_in, in, rmpn); |
---|
| 338 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP, |
---|
| 339 | + MLX5_GET(query_rmp_in, in, rmpn)); |
---|
145 | 340 | break; |
---|
146 | 341 | case MLX5_CMD_OP_MODIFY_RMP: |
---|
147 | | - obj_id = MLX5_GET(modify_rmp_in, in, rmpn); |
---|
| 342 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP, |
---|
| 343 | + MLX5_GET(modify_rmp_in, in, rmpn)); |
---|
148 | 344 | break; |
---|
149 | 345 | case MLX5_CMD_OP_QUERY_RQT: |
---|
150 | | - obj_id = MLX5_GET(query_rqt_in, in, rqtn); |
---|
| 346 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT, |
---|
| 347 | + MLX5_GET(query_rqt_in, in, rqtn)); |
---|
151 | 348 | break; |
---|
152 | 349 | case MLX5_CMD_OP_MODIFY_RQT: |
---|
153 | | - obj_id = MLX5_GET(modify_rqt_in, in, rqtn); |
---|
| 350 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT, |
---|
| 351 | + MLX5_GET(modify_rqt_in, in, rqtn)); |
---|
154 | 352 | break; |
---|
155 | 353 | case MLX5_CMD_OP_QUERY_TIR: |
---|
156 | | - obj_id = MLX5_GET(query_tir_in, in, tirn); |
---|
| 354 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR, |
---|
| 355 | + MLX5_GET(query_tir_in, in, tirn)); |
---|
157 | 356 | break; |
---|
158 | 357 | case MLX5_CMD_OP_MODIFY_TIR: |
---|
159 | | - obj_id = MLX5_GET(modify_tir_in, in, tirn); |
---|
| 358 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR, |
---|
| 359 | + MLX5_GET(modify_tir_in, in, tirn)); |
---|
160 | 360 | break; |
---|
161 | 361 | case MLX5_CMD_OP_QUERY_TIS: |
---|
162 | | - obj_id = MLX5_GET(query_tis_in, in, tisn); |
---|
| 362 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS, |
---|
| 363 | + MLX5_GET(query_tis_in, in, tisn)); |
---|
163 | 364 | break; |
---|
164 | 365 | case MLX5_CMD_OP_MODIFY_TIS: |
---|
165 | | - obj_id = MLX5_GET(modify_tis_in, in, tisn); |
---|
| 366 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS, |
---|
| 367 | + MLX5_GET(modify_tis_in, in, tisn)); |
---|
166 | 368 | break; |
---|
167 | 369 | case MLX5_CMD_OP_QUERY_FLOW_TABLE: |
---|
168 | | - obj_id = MLX5_GET(query_flow_table_in, in, table_id); |
---|
| 370 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE, |
---|
| 371 | + MLX5_GET(query_flow_table_in, in, |
---|
| 372 | + table_id)); |
---|
169 | 373 | break; |
---|
170 | 374 | case MLX5_CMD_OP_MODIFY_FLOW_TABLE: |
---|
171 | | - obj_id = MLX5_GET(modify_flow_table_in, in, table_id); |
---|
| 375 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE, |
---|
| 376 | + MLX5_GET(modify_flow_table_in, in, |
---|
| 377 | + table_id)); |
---|
172 | 378 | break; |
---|
173 | 379 | case MLX5_CMD_OP_QUERY_FLOW_GROUP: |
---|
174 | | - obj_id = MLX5_GET(query_flow_group_in, in, group_id); |
---|
| 380 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP, |
---|
| 381 | + MLX5_GET(query_flow_group_in, in, |
---|
| 382 | + group_id)); |
---|
175 | 383 | break; |
---|
176 | 384 | case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: |
---|
177 | | - obj_id = MLX5_GET(query_fte_in, in, flow_index); |
---|
| 385 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY, |
---|
| 386 | + MLX5_GET(query_fte_in, in, |
---|
| 387 | + flow_index)); |
---|
178 | 388 | break; |
---|
179 | 389 | case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: |
---|
180 | | - obj_id = MLX5_GET(set_fte_in, in, flow_index); |
---|
| 390 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY, |
---|
| 391 | + MLX5_GET(set_fte_in, in, flow_index)); |
---|
181 | 392 | break; |
---|
182 | 393 | case MLX5_CMD_OP_QUERY_Q_COUNTER: |
---|
183 | | - obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id); |
---|
| 394 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER, |
---|
| 395 | + MLX5_GET(query_q_counter_in, in, |
---|
| 396 | + counter_set_id)); |
---|
184 | 397 | break; |
---|
185 | 398 | case MLX5_CMD_OP_QUERY_FLOW_COUNTER: |
---|
186 | | - obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id); |
---|
| 399 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER, |
---|
| 400 | + MLX5_GET(query_flow_counter_in, in, |
---|
| 401 | + flow_counter_id)); |
---|
187 | 402 | break; |
---|
188 | 403 | case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT: |
---|
189 | | - obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id); |
---|
| 404 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT, |
---|
| 405 | + MLX5_GET(general_obj_in_cmd_hdr, in, |
---|
| 406 | + obj_id)); |
---|
190 | 407 | break; |
---|
191 | 408 | case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: |
---|
192 | | - obj_id = MLX5_GET(query_scheduling_element_in, in, |
---|
193 | | - scheduling_element_id); |
---|
| 409 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT, |
---|
| 410 | + MLX5_GET(query_scheduling_element_in, |
---|
| 411 | + in, scheduling_element_id)); |
---|
194 | 412 | break; |
---|
195 | 413 | case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: |
---|
196 | | - obj_id = MLX5_GET(modify_scheduling_element_in, in, |
---|
197 | | - scheduling_element_id); |
---|
| 414 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT, |
---|
| 415 | + MLX5_GET(modify_scheduling_element_in, |
---|
| 416 | + in, scheduling_element_id)); |
---|
198 | 417 | break; |
---|
199 | 418 | case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: |
---|
200 | | - obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port); |
---|
| 419 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT, |
---|
| 420 | + MLX5_GET(add_vxlan_udp_dport_in, in, |
---|
| 421 | + vxlan_udp_port)); |
---|
201 | 422 | break; |
---|
202 | 423 | case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: |
---|
203 | | - obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index); |
---|
| 424 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY, |
---|
| 425 | + MLX5_GET(query_l2_table_entry_in, in, |
---|
| 426 | + table_index)); |
---|
204 | 427 | break; |
---|
205 | 428 | case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: |
---|
206 | | - obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index); |
---|
| 429 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY, |
---|
| 430 | + MLX5_GET(set_l2_table_entry_in, in, |
---|
| 431 | + table_index)); |
---|
207 | 432 | break; |
---|
208 | 433 | case MLX5_CMD_OP_QUERY_QP: |
---|
209 | | - obj_id = MLX5_GET(query_qp_in, in, qpn); |
---|
| 434 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, |
---|
| 435 | + MLX5_GET(query_qp_in, in, qpn)); |
---|
210 | 436 | break; |
---|
211 | 437 | case MLX5_CMD_OP_RST2INIT_QP: |
---|
212 | | - obj_id = MLX5_GET(rst2init_qp_in, in, qpn); |
---|
| 438 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, |
---|
| 439 | + MLX5_GET(rst2init_qp_in, in, qpn)); |
---|
| 440 | + break; |
---|
| 441 | + case MLX5_CMD_OP_INIT2INIT_QP: |
---|
| 442 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, |
---|
| 443 | + MLX5_GET(init2init_qp_in, in, qpn)); |
---|
213 | 444 | break; |
---|
214 | 445 | case MLX5_CMD_OP_INIT2RTR_QP: |
---|
215 | | - obj_id = MLX5_GET(init2rtr_qp_in, in, qpn); |
---|
| 446 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, |
---|
| 447 | + MLX5_GET(init2rtr_qp_in, in, qpn)); |
---|
216 | 448 | break; |
---|
217 | 449 | case MLX5_CMD_OP_RTR2RTS_QP: |
---|
218 | | - obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn); |
---|
| 450 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, |
---|
| 451 | + MLX5_GET(rtr2rts_qp_in, in, qpn)); |
---|
219 | 452 | break; |
---|
220 | 453 | case MLX5_CMD_OP_RTS2RTS_QP: |
---|
221 | | - obj_id = MLX5_GET(rts2rts_qp_in, in, qpn); |
---|
| 454 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, |
---|
| 455 | + MLX5_GET(rts2rts_qp_in, in, qpn)); |
---|
222 | 456 | break; |
---|
223 | 457 | case MLX5_CMD_OP_SQERR2RTS_QP: |
---|
224 | | - obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn); |
---|
| 458 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, |
---|
| 459 | + MLX5_GET(sqerr2rts_qp_in, in, qpn)); |
---|
225 | 460 | break; |
---|
226 | 461 | case MLX5_CMD_OP_2ERR_QP: |
---|
227 | | - obj_id = MLX5_GET(qp_2err_in, in, qpn); |
---|
| 462 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, |
---|
| 463 | + MLX5_GET(qp_2err_in, in, qpn)); |
---|
228 | 464 | break; |
---|
229 | 465 | case MLX5_CMD_OP_2RST_QP: |
---|
230 | | - obj_id = MLX5_GET(qp_2rst_in, in, qpn); |
---|
| 466 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, |
---|
| 467 | + MLX5_GET(qp_2rst_in, in, qpn)); |
---|
231 | 468 | break; |
---|
232 | 469 | case MLX5_CMD_OP_QUERY_DCT: |
---|
233 | | - obj_id = MLX5_GET(query_dct_in, in, dctn); |
---|
| 470 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT, |
---|
| 471 | + MLX5_GET(query_dct_in, in, dctn)); |
---|
234 | 472 | break; |
---|
235 | 473 | case MLX5_CMD_OP_QUERY_XRQ: |
---|
236 | | - obj_id = MLX5_GET(query_xrq_in, in, xrqn); |
---|
| 474 | + case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY: |
---|
| 475 | + case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS: |
---|
| 476 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ, |
---|
| 477 | + MLX5_GET(query_xrq_in, in, xrqn)); |
---|
237 | 478 | break; |
---|
238 | 479 | case MLX5_CMD_OP_QUERY_XRC_SRQ: |
---|
239 | | - obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn); |
---|
| 480 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ, |
---|
| 481 | + MLX5_GET(query_xrc_srq_in, in, |
---|
| 482 | + xrc_srqn)); |
---|
240 | 483 | break; |
---|
241 | 484 | case MLX5_CMD_OP_ARM_XRC_SRQ: |
---|
242 | | - obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn); |
---|
| 485 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ, |
---|
| 486 | + MLX5_GET(arm_xrc_srq_in, in, xrc_srqn)); |
---|
243 | 487 | break; |
---|
244 | 488 | case MLX5_CMD_OP_QUERY_SRQ: |
---|
245 | | - obj_id = MLX5_GET(query_srq_in, in, srqn); |
---|
| 489 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ, |
---|
| 490 | + MLX5_GET(query_srq_in, in, srqn)); |
---|
246 | 491 | break; |
---|
247 | 492 | case MLX5_CMD_OP_ARM_RQ: |
---|
248 | | - obj_id = MLX5_GET(arm_rq_in, in, srq_number); |
---|
| 493 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ, |
---|
| 494 | + MLX5_GET(arm_rq_in, in, srq_number)); |
---|
249 | 495 | break; |
---|
250 | | - case MLX5_CMD_OP_DRAIN_DCT: |
---|
251 | 496 | case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: |
---|
252 | | - obj_id = MLX5_GET(drain_dct_in, in, dctn); |
---|
| 497 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT, |
---|
| 498 | + MLX5_GET(drain_dct_in, in, dctn)); |
---|
253 | 499 | break; |
---|
254 | 500 | case MLX5_CMD_OP_ARM_XRQ: |
---|
255 | | - obj_id = MLX5_GET(arm_xrq_in, in, xrqn); |
---|
| 501 | + case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: |
---|
| 502 | + case MLX5_CMD_OP_RELEASE_XRQ_ERROR: |
---|
| 503 | + case MLX5_CMD_OP_MODIFY_XRQ: |
---|
| 504 | + obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ, |
---|
| 505 | + MLX5_GET(arm_xrq_in, in, xrqn)); |
---|
256 | 506 | break; |
---|
| 507 | + case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT: |
---|
| 508 | + obj_id = get_enc_obj_id |
---|
| 509 | + (MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT, |
---|
| 510 | + MLX5_GET(query_packet_reformat_context_in, |
---|
| 511 | + in, packet_reformat_id)); |
---|
| 512 | + break; |
---|
| 513 | + default: |
---|
| 514 | + obj_id = 0; |
---|
| 515 | + } |
---|
| 516 | + |
---|
| 517 | + return obj_id; |
---|
| 518 | +} |
---|
| 519 | + |
---|
| 520 | +static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs, |
---|
| 521 | + struct ib_uobject *uobj, const void *in) |
---|
| 522 | +{ |
---|
| 523 | + struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata); |
---|
| 524 | + u64 obj_id = devx_get_obj_id(in); |
---|
| 525 | + |
---|
| 526 | + if (!obj_id) |
---|
| 527 | + return false; |
---|
| 528 | + |
---|
| 529 | + switch (uobj_get_object_id(uobj)) { |
---|
| 530 | + case UVERBS_OBJECT_CQ: |
---|
| 531 | + return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ, |
---|
| 532 | + to_mcq(uobj->object)->mcq.cqn) == |
---|
| 533 | + obj_id; |
---|
| 534 | + |
---|
| 535 | + case UVERBS_OBJECT_SRQ: |
---|
| 536 | + { |
---|
| 537 | + struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq); |
---|
| 538 | + u16 opcode; |
---|
| 539 | + |
---|
| 540 | + switch (srq->common.res) { |
---|
| 541 | + case MLX5_RES_XSRQ: |
---|
| 542 | + opcode = MLX5_CMD_OP_CREATE_XRC_SRQ; |
---|
| 543 | + break; |
---|
| 544 | + case MLX5_RES_XRQ: |
---|
| 545 | + opcode = MLX5_CMD_OP_CREATE_XRQ; |
---|
| 546 | + break; |
---|
| 547 | + default: |
---|
| 548 | + if (!dev->mdev->issi) |
---|
| 549 | + opcode = MLX5_CMD_OP_CREATE_SRQ; |
---|
| 550 | + else |
---|
| 551 | + opcode = MLX5_CMD_OP_CREATE_RMP; |
---|
| 552 | + } |
---|
| 553 | + |
---|
| 554 | + return get_enc_obj_id(opcode, |
---|
| 555 | + to_msrq(uobj->object)->msrq.srqn) == |
---|
| 556 | + obj_id; |
---|
| 557 | + } |
---|
| 558 | + |
---|
| 559 | + case UVERBS_OBJECT_QP: |
---|
| 560 | + { |
---|
| 561 | + struct mlx5_ib_qp *qp = to_mqp(uobj->object); |
---|
| 562 | + |
---|
| 563 | + if (qp->type == IB_QPT_RAW_PACKET || |
---|
| 564 | + (qp->flags & IB_QP_CREATE_SOURCE_QPN)) { |
---|
| 565 | + struct mlx5_ib_raw_packet_qp *raw_packet_qp = |
---|
| 566 | + &qp->raw_packet_qp; |
---|
| 567 | + struct mlx5_ib_rq *rq = &raw_packet_qp->rq; |
---|
| 568 | + struct mlx5_ib_sq *sq = &raw_packet_qp->sq; |
---|
| 569 | + |
---|
| 570 | + return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ, |
---|
| 571 | + rq->base.mqp.qpn) == obj_id || |
---|
| 572 | + get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ, |
---|
| 573 | + sq->base.mqp.qpn) == obj_id || |
---|
| 574 | + get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR, |
---|
| 575 | + rq->tirn) == obj_id || |
---|
| 576 | + get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS, |
---|
| 577 | + sq->tisn) == obj_id); |
---|
| 578 | + } |
---|
| 579 | + |
---|
| 580 | + if (qp->type == MLX5_IB_QPT_DCT) |
---|
| 581 | + return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT, |
---|
| 582 | + qp->dct.mdct.mqp.qpn) == obj_id; |
---|
| 583 | + return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP, |
---|
| 584 | + qp->ibqp.qp_num) == obj_id; |
---|
| 585 | + } |
---|
| 586 | + |
---|
| 587 | + case UVERBS_OBJECT_WQ: |
---|
| 588 | + return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ, |
---|
| 589 | + to_mrwq(uobj->object)->core_qp.qpn) == |
---|
| 590 | + obj_id; |
---|
| 591 | + |
---|
| 592 | + case UVERBS_OBJECT_RWQ_IND_TBL: |
---|
| 593 | + return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT, |
---|
| 594 | + to_mrwq_ind_table(uobj->object)->rqtn) == |
---|
| 595 | + obj_id; |
---|
| 596 | + |
---|
| 597 | + case MLX5_IB_OBJECT_DEVX_OBJ: |
---|
| 598 | + { |
---|
| 599 | + u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode); |
---|
| 600 | + struct devx_obj *devx_uobj = uobj->object; |
---|
| 601 | + |
---|
| 602 | + if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER && |
---|
| 603 | + devx_uobj->flow_counter_bulk_size) { |
---|
| 604 | + u64 end; |
---|
| 605 | + |
---|
| 606 | + end = devx_uobj->obj_id + |
---|
| 607 | + devx_uobj->flow_counter_bulk_size; |
---|
| 608 | + return devx_uobj->obj_id <= obj_id && end > obj_id; |
---|
| 609 | + } |
---|
| 610 | + |
---|
| 611 | + return devx_uobj->obj_id == obj_id; |
---|
| 612 | + } |
---|
| 613 | + |
---|
257 | 614 | default: |
---|
258 | 615 | return false; |
---|
259 | 616 | } |
---|
260 | | - |
---|
261 | | - if (obj_id == obj->obj_id) |
---|
262 | | - return true; |
---|
263 | | - |
---|
264 | | - return false; |
---|
265 | 617 | } |
---|
266 | 618 | |
---|
267 | | -static bool devx_is_obj_create_cmd(const void *in) |
---|
| 619 | +static void devx_set_umem_valid(const void *in) |
---|
268 | 620 | { |
---|
269 | 621 | u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode); |
---|
270 | 622 | |
---|
271 | 623 | switch (opcode) { |
---|
| 624 | + case MLX5_CMD_OP_CREATE_MKEY: |
---|
| 625 | + MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1); |
---|
| 626 | + break; |
---|
| 627 | + case MLX5_CMD_OP_CREATE_CQ: |
---|
| 628 | + { |
---|
| 629 | + void *cqc; |
---|
| 630 | + |
---|
| 631 | + MLX5_SET(create_cq_in, in, cq_umem_valid, 1); |
---|
| 632 | + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); |
---|
| 633 | + MLX5_SET(cqc, cqc, dbr_umem_valid, 1); |
---|
| 634 | + break; |
---|
| 635 | + } |
---|
| 636 | + case MLX5_CMD_OP_CREATE_QP: |
---|
| 637 | + { |
---|
| 638 | + void *qpc; |
---|
| 639 | + |
---|
| 640 | + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); |
---|
| 641 | + MLX5_SET(qpc, qpc, dbr_umem_valid, 1); |
---|
| 642 | + MLX5_SET(create_qp_in, in, wq_umem_valid, 1); |
---|
| 643 | + break; |
---|
| 644 | + } |
---|
| 645 | + |
---|
| 646 | + case MLX5_CMD_OP_CREATE_RQ: |
---|
| 647 | + { |
---|
| 648 | + void *rqc, *wq; |
---|
| 649 | + |
---|
| 650 | + rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); |
---|
| 651 | + wq = MLX5_ADDR_OF(rqc, rqc, wq); |
---|
| 652 | + MLX5_SET(wq, wq, dbr_umem_valid, 1); |
---|
| 653 | + MLX5_SET(wq, wq, wq_umem_valid, 1); |
---|
| 654 | + break; |
---|
| 655 | + } |
---|
| 656 | + |
---|
| 657 | + case MLX5_CMD_OP_CREATE_SQ: |
---|
| 658 | + { |
---|
| 659 | + void *sqc, *wq; |
---|
| 660 | + |
---|
| 661 | + sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); |
---|
| 662 | + wq = MLX5_ADDR_OF(sqc, sqc, wq); |
---|
| 663 | + MLX5_SET(wq, wq, dbr_umem_valid, 1); |
---|
| 664 | + MLX5_SET(wq, wq, wq_umem_valid, 1); |
---|
| 665 | + break; |
---|
| 666 | + } |
---|
| 667 | + |
---|
| 668 | + case MLX5_CMD_OP_MODIFY_CQ: |
---|
| 669 | + MLX5_SET(modify_cq_in, in, cq_umem_valid, 1); |
---|
| 670 | + break; |
---|
| 671 | + |
---|
| 672 | + case MLX5_CMD_OP_CREATE_RMP: |
---|
| 673 | + { |
---|
| 674 | + void *rmpc, *wq; |
---|
| 675 | + |
---|
| 676 | + rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx); |
---|
| 677 | + wq = MLX5_ADDR_OF(rmpc, rmpc, wq); |
---|
| 678 | + MLX5_SET(wq, wq, dbr_umem_valid, 1); |
---|
| 679 | + MLX5_SET(wq, wq, wq_umem_valid, 1); |
---|
| 680 | + break; |
---|
| 681 | + } |
---|
| 682 | + |
---|
| 683 | + case MLX5_CMD_OP_CREATE_XRQ: |
---|
| 684 | + { |
---|
| 685 | + void *xrqc, *wq; |
---|
| 686 | + |
---|
| 687 | + xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context); |
---|
| 688 | + wq = MLX5_ADDR_OF(xrqc, xrqc, wq); |
---|
| 689 | + MLX5_SET(wq, wq, dbr_umem_valid, 1); |
---|
| 690 | + MLX5_SET(wq, wq, wq_umem_valid, 1); |
---|
| 691 | + break; |
---|
| 692 | + } |
---|
| 693 | + |
---|
| 694 | + case MLX5_CMD_OP_CREATE_XRC_SRQ: |
---|
| 695 | + { |
---|
| 696 | + void *xrc_srqc; |
---|
| 697 | + |
---|
| 698 | + MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1); |
---|
| 699 | + xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in, |
---|
| 700 | + xrc_srq_context_entry); |
---|
| 701 | + MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1); |
---|
| 702 | + break; |
---|
| 703 | + } |
---|
| 704 | + |
---|
| 705 | + default: |
---|
| 706 | + return; |
---|
| 707 | + } |
---|
| 708 | +} |
---|
| 709 | + |
---|
| 710 | +static bool devx_is_obj_create_cmd(const void *in, u16 *opcode) |
---|
| 711 | +{ |
---|
| 712 | + *opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode); |
---|
| 713 | + |
---|
| 714 | + switch (*opcode) { |
---|
272 | 715 | case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: |
---|
273 | 716 | case MLX5_CMD_OP_CREATE_MKEY: |
---|
274 | 717 | case MLX5_CMD_OP_CREATE_CQ: |
---|
.. | .. |
---|
284 | 727 | case MLX5_CMD_OP_CREATE_FLOW_TABLE: |
---|
285 | 728 | case MLX5_CMD_OP_CREATE_FLOW_GROUP: |
---|
286 | 729 | case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: |
---|
287 | | - case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: |
---|
| 730 | + case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT: |
---|
288 | 731 | case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: |
---|
289 | 732 | case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: |
---|
290 | 733 | case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: |
---|
.. | .. |
---|
301 | 744 | { |
---|
302 | 745 | u16 op_mod = MLX5_GET(set_fte_in, in, op_mod); |
---|
303 | 746 | if (op_mod == 0) |
---|
| 747 | + return true; |
---|
| 748 | + return false; |
---|
| 749 | + } |
---|
| 750 | + case MLX5_CMD_OP_CREATE_PSV: |
---|
| 751 | + { |
---|
| 752 | + u8 num_psv = MLX5_GET(create_psv_in, in, num_psv); |
---|
| 753 | + |
---|
| 754 | + if (num_psv == 1) |
---|
304 | 755 | return true; |
---|
305 | 756 | return false; |
---|
306 | 757 | } |
---|
.. | .. |
---|
336 | 787 | case MLX5_CMD_OP_2RST_QP: |
---|
337 | 788 | case MLX5_CMD_OP_ARM_XRC_SRQ: |
---|
338 | 789 | case MLX5_CMD_OP_ARM_RQ: |
---|
339 | | - case MLX5_CMD_OP_DRAIN_DCT: |
---|
340 | 790 | case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: |
---|
341 | 791 | case MLX5_CMD_OP_ARM_XRQ: |
---|
| 792 | + case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: |
---|
| 793 | + case MLX5_CMD_OP_RELEASE_XRQ_ERROR: |
---|
| 794 | + case MLX5_CMD_OP_MODIFY_XRQ: |
---|
342 | 795 | return true; |
---|
343 | 796 | case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: |
---|
344 | 797 | { |
---|
.. | .. |
---|
380 | 833 | case MLX5_CMD_OP_QUERY_XRC_SRQ: |
---|
381 | 834 | case MLX5_CMD_OP_QUERY_DCT: |
---|
382 | 835 | case MLX5_CMD_OP_QUERY_XRQ: |
---|
| 836 | + case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY: |
---|
| 837 | + case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS: |
---|
| 838 | + case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT: |
---|
383 | 839 | return true; |
---|
384 | 840 | default: |
---|
385 | 841 | return false; |
---|
386 | 842 | } |
---|
387 | 843 | } |
---|
388 | 844 | |
---|
389 | | -static bool devx_is_general_cmd(void *in) |
---|
| 845 | +static bool devx_is_whitelist_cmd(void *in) |
---|
390 | 846 | { |
---|
391 | 847 | u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode); |
---|
392 | 848 | |
---|
393 | 849 | switch (opcode) { |
---|
394 | 850 | case MLX5_CMD_OP_QUERY_HCA_CAP: |
---|
| 851 | + case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: |
---|
| 852 | + case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: |
---|
| 853 | + return true; |
---|
| 854 | + default: |
---|
| 855 | + return false; |
---|
| 856 | + } |
---|
| 857 | +} |
---|
| 858 | + |
---|
| 859 | +static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in) |
---|
| 860 | +{ |
---|
| 861 | + if (devx_is_whitelist_cmd(cmd_in)) { |
---|
| 862 | + struct mlx5_ib_dev *dev; |
---|
| 863 | + |
---|
| 864 | + if (c->devx_uid) |
---|
| 865 | + return c->devx_uid; |
---|
| 866 | + |
---|
| 867 | + dev = to_mdev(c->ibucontext.device); |
---|
| 868 | + if (dev->devx_whitelist_uid) |
---|
| 869 | + return dev->devx_whitelist_uid; |
---|
| 870 | + |
---|
| 871 | + return -EOPNOTSUPP; |
---|
| 872 | + } |
---|
| 873 | + |
---|
| 874 | + if (!c->devx_uid) |
---|
| 875 | + return -EINVAL; |
---|
| 876 | + |
---|
| 877 | + return c->devx_uid; |
---|
| 878 | +} |
---|
| 879 | + |
---|
| 880 | +static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev) |
---|
| 881 | +{ |
---|
| 882 | + u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode); |
---|
| 883 | + |
---|
| 884 | + /* Pass all cmds for vhca_tunnel as general, tracking is done in FW */ |
---|
| 885 | + if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) && |
---|
| 886 | + MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) || |
---|
| 887 | + (opcode >= MLX5_CMD_OP_GENERAL_START && |
---|
| 888 | + opcode < MLX5_CMD_OP_GENERAL_END)) |
---|
| 889 | + return true; |
---|
| 890 | + |
---|
| 891 | + switch (opcode) { |
---|
| 892 | + case MLX5_CMD_OP_QUERY_HCA_CAP: |
---|
| 893 | + case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: |
---|
| 894 | + case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: |
---|
395 | 895 | case MLX5_CMD_OP_QUERY_VPORT_STATE: |
---|
396 | 896 | case MLX5_CMD_OP_QUERY_ADAPTER: |
---|
397 | 897 | case MLX5_CMD_OP_QUERY_ISSI: |
---|
.. | .. |
---|
404 | 904 | case MLX5_CMD_OP_QUERY_CONG_STATUS: |
---|
405 | 905 | case MLX5_CMD_OP_QUERY_CONG_PARAMS: |
---|
406 | 906 | case MLX5_CMD_OP_QUERY_CONG_STATISTICS: |
---|
| 907 | + case MLX5_CMD_OP_QUERY_LAG: |
---|
407 | 908 | return true; |
---|
408 | 909 | default: |
---|
409 | 910 | return false; |
---|
.. | .. |
---|
411 | 912 | } |
---|
412 | 913 | |
---|
413 | 914 | static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)( |
---|
414 | | - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) |
---|
| 915 | + struct uverbs_attr_bundle *attrs) |
---|
415 | 916 | { |
---|
416 | 917 | struct mlx5_ib_ucontext *c; |
---|
417 | 918 | struct mlx5_ib_dev *dev; |
---|
418 | 919 | int user_vector; |
---|
419 | 920 | int dev_eqn; |
---|
420 | | - unsigned int irqn; |
---|
421 | 921 | int err; |
---|
422 | 922 | |
---|
423 | 923 | if (uverbs_copy_from(&user_vector, attrs, |
---|
424 | 924 | MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC)) |
---|
425 | 925 | return -EFAULT; |
---|
426 | 926 | |
---|
427 | | - c = devx_ufile2uctx(file); |
---|
| 927 | + c = devx_ufile2uctx(attrs); |
---|
428 | 928 | if (IS_ERR(c)) |
---|
429 | 929 | return PTR_ERR(c); |
---|
430 | 930 | dev = to_mdev(c->ibucontext.device); |
---|
431 | 931 | |
---|
432 | | - err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn); |
---|
| 932 | + err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn); |
---|
433 | 933 | if (err < 0) |
---|
434 | 934 | return err; |
---|
435 | 935 | |
---|
.. | .. |
---|
461 | 961 | * queue or arm its CQ for event generation), no further harm is expected. |
---|
462 | 962 | */ |
---|
463 | 963 | static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)( |
---|
464 | | - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) |
---|
| 964 | + struct uverbs_attr_bundle *attrs) |
---|
465 | 965 | { |
---|
466 | 966 | struct mlx5_ib_ucontext *c; |
---|
467 | 967 | struct mlx5_ib_dev *dev; |
---|
468 | 968 | u32 user_idx; |
---|
469 | 969 | s32 dev_idx; |
---|
470 | 970 | |
---|
471 | | - c = devx_ufile2uctx(file); |
---|
| 971 | + c = devx_ufile2uctx(attrs); |
---|
472 | 972 | if (IS_ERR(c)) |
---|
473 | 973 | return PTR_ERR(c); |
---|
474 | 974 | dev = to_mdev(c->ibucontext.device); |
---|
.. | .. |
---|
489 | 989 | } |
---|
490 | 990 | |
---|
491 | 991 | static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)( |
---|
492 | | - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) |
---|
| 992 | + struct uverbs_attr_bundle *attrs) |
---|
493 | 993 | { |
---|
494 | 994 | struct mlx5_ib_ucontext *c; |
---|
495 | 995 | struct mlx5_ib_dev *dev; |
---|
.. | .. |
---|
499 | 999 | MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT); |
---|
500 | 1000 | void *cmd_out; |
---|
501 | 1001 | int err; |
---|
| 1002 | + int uid; |
---|
502 | 1003 | |
---|
503 | | - c = devx_ufile2uctx(file); |
---|
| 1004 | + c = devx_ufile2uctx(attrs); |
---|
504 | 1005 | if (IS_ERR(c)) |
---|
505 | 1006 | return PTR_ERR(c); |
---|
506 | 1007 | dev = to_mdev(c->ibucontext.device); |
---|
507 | 1008 | |
---|
508 | | - if (!c->devx_uid) |
---|
509 | | - return -EPERM; |
---|
| 1009 | + uid = devx_get_uid(c, cmd_in); |
---|
| 1010 | + if (uid < 0) |
---|
| 1011 | + return uid; |
---|
510 | 1012 | |
---|
511 | 1013 | /* Only white list of some general HCA commands are allowed for this method. */ |
---|
512 | | - if (!devx_is_general_cmd(cmd_in)) |
---|
| 1014 | + if (!devx_is_general_cmd(cmd_in, dev)) |
---|
513 | 1015 | return -EINVAL; |
---|
514 | 1016 | |
---|
515 | 1017 | cmd_out = uverbs_zalloc(attrs, cmd_out_len); |
---|
516 | 1018 | if (IS_ERR(cmd_out)) |
---|
517 | 1019 | return PTR_ERR(cmd_out); |
---|
518 | 1020 | |
---|
519 | | - MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid); |
---|
| 1021 | + MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid); |
---|
520 | 1022 | err = mlx5_cmd_exec(dev->mdev, cmd_in, |
---|
521 | 1023 | uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN), |
---|
522 | 1024 | cmd_out, cmd_out_len); |
---|
.. | .. |
---|
546 | 1048 | MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type); |
---|
547 | 1049 | break; |
---|
548 | 1050 | |
---|
| 1051 | + case MLX5_CMD_OP_CREATE_UMEM: |
---|
| 1052 | + MLX5_SET(general_obj_in_cmd_hdr, din, opcode, |
---|
| 1053 | + MLX5_CMD_OP_DESTROY_UMEM); |
---|
| 1054 | + break; |
---|
549 | 1055 | case MLX5_CMD_OP_CREATE_MKEY: |
---|
550 | 1056 | MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY); |
---|
551 | 1057 | break; |
---|
.. | .. |
---|
630 | 1136 | MLX5_SET(general_obj_in_cmd_hdr, din, opcode, |
---|
631 | 1137 | MLX5_CMD_OP_DEALLOC_FLOW_COUNTER); |
---|
632 | 1138 | break; |
---|
633 | | - case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: |
---|
| 1139 | + case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT: |
---|
634 | 1140 | MLX5_SET(general_obj_in_cmd_hdr, din, opcode, |
---|
635 | | - MLX5_CMD_OP_DEALLOC_ENCAP_HEADER); |
---|
| 1141 | + MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT); |
---|
636 | 1142 | break; |
---|
637 | 1143 | case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: |
---|
638 | 1144 | MLX5_SET(general_obj_in_cmd_hdr, din, opcode, |
---|
.. | .. |
---|
693 | 1199 | case MLX5_CMD_OP_ALLOC_XRCD: |
---|
694 | 1200 | MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD); |
---|
695 | 1201 | break; |
---|
| 1202 | + case MLX5_CMD_OP_CREATE_PSV: |
---|
| 1203 | + MLX5_SET(general_obj_in_cmd_hdr, din, opcode, |
---|
| 1204 | + MLX5_CMD_OP_DESTROY_PSV); |
---|
| 1205 | + MLX5_SET(destroy_psv_in, din, psvn, |
---|
| 1206 | + MLX5_GET(create_psv_out, out, psv0_index)); |
---|
| 1207 | + break; |
---|
696 | 1208 | default: |
---|
697 | 1209 | /* The entry must match to one of the devx_is_obj_create_cmd */ |
---|
698 | 1210 | WARN_ON(true); |
---|
.. | .. |
---|
700 | 1212 | } |
---|
701 | 1213 | } |
---|
702 | 1214 | |
---|
| 1215 | +static int devx_handle_mkey_indirect(struct devx_obj *obj, |
---|
| 1216 | + struct mlx5_ib_dev *dev, |
---|
| 1217 | + void *in, void *out) |
---|
| 1218 | +{ |
---|
| 1219 | + struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr; |
---|
| 1220 | + struct mlx5_core_mkey *mkey; |
---|
| 1221 | + void *mkc; |
---|
| 1222 | + u8 key; |
---|
| 1223 | + |
---|
| 1224 | + mkey = &devx_mr->mmkey; |
---|
| 1225 | + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); |
---|
| 1226 | + key = MLX5_GET(mkc, mkc, mkey_7_0); |
---|
| 1227 | + mkey->key = mlx5_idx_to_mkey( |
---|
| 1228 | + MLX5_GET(create_mkey_out, out, mkey_index)) | key; |
---|
| 1229 | + mkey->type = MLX5_MKEY_INDIRECT_DEVX; |
---|
| 1230 | + mkey->iova = MLX5_GET64(mkc, mkc, start_addr); |
---|
| 1231 | + mkey->size = MLX5_GET64(mkc, mkc, len); |
---|
| 1232 | + mkey->pd = MLX5_GET(mkc, mkc, pd); |
---|
| 1233 | + devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size); |
---|
| 1234 | + |
---|
| 1235 | + return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey, |
---|
| 1236 | + GFP_KERNEL)); |
---|
| 1237 | +} |
---|
| 1238 | + |
---|
| 1239 | +static int devx_handle_mkey_create(struct mlx5_ib_dev *dev, |
---|
| 1240 | + struct devx_obj *obj, |
---|
| 1241 | + void *in, int in_len) |
---|
| 1242 | +{ |
---|
| 1243 | + int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) + |
---|
| 1244 | + MLX5_FLD_SZ_BYTES(create_mkey_in, |
---|
| 1245 | + memory_key_mkey_entry); |
---|
| 1246 | + void *mkc; |
---|
| 1247 | + u8 access_mode; |
---|
| 1248 | + |
---|
| 1249 | + if (in_len < min_len) |
---|
| 1250 | + return -EINVAL; |
---|
| 1251 | + |
---|
| 1252 | + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); |
---|
| 1253 | + |
---|
| 1254 | + access_mode = MLX5_GET(mkc, mkc, access_mode_1_0); |
---|
| 1255 | + access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2; |
---|
| 1256 | + |
---|
| 1257 | + if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS || |
---|
| 1258 | + access_mode == MLX5_MKC_ACCESS_MODE_KSM) { |
---|
| 1259 | + if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) |
---|
| 1260 | + obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY; |
---|
| 1261 | + return 0; |
---|
| 1262 | + } |
---|
| 1263 | + |
---|
| 1264 | + MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1); |
---|
| 1265 | + return 0; |
---|
| 1266 | +} |
---|
| 1267 | + |
---|
| 1268 | +static void devx_cleanup_subscription(struct mlx5_ib_dev *dev, |
---|
| 1269 | + struct devx_event_subscription *sub) |
---|
| 1270 | +{ |
---|
| 1271 | + struct devx_event *event; |
---|
| 1272 | + struct devx_obj_event *xa_val_level2; |
---|
| 1273 | + |
---|
| 1274 | + if (sub->is_cleaned) |
---|
| 1275 | + return; |
---|
| 1276 | + |
---|
| 1277 | + sub->is_cleaned = 1; |
---|
| 1278 | + list_del_rcu(&sub->xa_list); |
---|
| 1279 | + |
---|
| 1280 | + if (list_empty(&sub->obj_list)) |
---|
| 1281 | + return; |
---|
| 1282 | + |
---|
| 1283 | + list_del_rcu(&sub->obj_list); |
---|
| 1284 | + /* check whether key level 1 for this obj_sub_list is empty */ |
---|
| 1285 | + event = xa_load(&dev->devx_event_table.event_xa, |
---|
| 1286 | + sub->xa_key_level1); |
---|
| 1287 | + WARN_ON(!event); |
---|
| 1288 | + |
---|
| 1289 | + xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2); |
---|
| 1290 | + if (list_empty(&xa_val_level2->obj_sub_list)) { |
---|
| 1291 | + xa_erase(&event->object_ids, |
---|
| 1292 | + sub->xa_key_level2); |
---|
| 1293 | + kfree_rcu(xa_val_level2, rcu); |
---|
| 1294 | + } |
---|
| 1295 | +} |
---|
| 1296 | + |
---|
703 | 1297 | static int devx_obj_cleanup(struct ib_uobject *uobject, |
---|
704 | | - enum rdma_remove_reason why) |
---|
| 1298 | + enum rdma_remove_reason why, |
---|
| 1299 | + struct uverbs_attr_bundle *attrs) |
---|
705 | 1300 | { |
---|
706 | 1301 | u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; |
---|
| 1302 | + struct mlx5_devx_event_table *devx_event_table; |
---|
707 | 1303 | struct devx_obj *obj = uobject->object; |
---|
| 1304 | + struct devx_event_subscription *sub_entry, *tmp; |
---|
| 1305 | + struct mlx5_ib_dev *dev; |
---|
708 | 1306 | int ret; |
---|
709 | 1307 | |
---|
710 | | - ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); |
---|
| 1308 | + dev = mlx5_udata_to_mdev(&attrs->driver_udata); |
---|
| 1309 | + if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { |
---|
| 1310 | + /* |
---|
| 1311 | + * The pagefault_single_data_segment() does commands against |
---|
| 1312 | + * the mmkey, we must wait for that to stop before freeing the |
---|
| 1313 | + * mkey, as another allocation could get the same mkey #. |
---|
| 1314 | + */ |
---|
| 1315 | + xa_erase(&obj->ib_dev->odp_mkeys, |
---|
| 1316 | + mlx5_base_mkey(obj->devx_mr.mmkey.key)); |
---|
| 1317 | + synchronize_srcu(&dev->odp_srcu); |
---|
| 1318 | + } |
---|
| 1319 | + |
---|
| 1320 | + if (obj->flags & DEVX_OBJ_FLAGS_DCT) |
---|
| 1321 | + ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); |
---|
| 1322 | + else if (obj->flags & DEVX_OBJ_FLAGS_CQ) |
---|
| 1323 | + ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); |
---|
| 1324 | + else |
---|
| 1325 | + ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, |
---|
| 1326 | + obj->dinlen, out, sizeof(out)); |
---|
711 | 1327 | if (ib_is_destroy_retryable(ret, why, uobject)) |
---|
712 | 1328 | return ret; |
---|
| 1329 | + |
---|
| 1330 | + devx_event_table = &dev->devx_event_table; |
---|
| 1331 | + |
---|
| 1332 | + mutex_lock(&devx_event_table->event_xa_lock); |
---|
| 1333 | + list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list) |
---|
| 1334 | + devx_cleanup_subscription(dev, sub_entry); |
---|
| 1335 | + mutex_unlock(&devx_event_table->event_xa_lock); |
---|
713 | 1336 | |
---|
714 | 1337 | kfree(obj); |
---|
715 | 1338 | return ret; |
---|
716 | 1339 | } |
---|
717 | 1340 | |
---|
| 1341 | +static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe) |
---|
| 1342 | +{ |
---|
| 1343 | + struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq); |
---|
| 1344 | + struct mlx5_devx_event_table *table; |
---|
| 1345 | + struct devx_event *event; |
---|
| 1346 | + struct devx_obj_event *obj_event; |
---|
| 1347 | + u32 obj_id = mcq->cqn; |
---|
| 1348 | + |
---|
| 1349 | + table = &obj->ib_dev->devx_event_table; |
---|
| 1350 | + rcu_read_lock(); |
---|
| 1351 | + event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP); |
---|
| 1352 | + if (!event) |
---|
| 1353 | + goto out; |
---|
| 1354 | + |
---|
| 1355 | + obj_event = xa_load(&event->object_ids, obj_id); |
---|
| 1356 | + if (!obj_event) |
---|
| 1357 | + goto out; |
---|
| 1358 | + |
---|
| 1359 | + dispatch_event_fd(&obj_event->obj_sub_list, eqe); |
---|
| 1360 | +out: |
---|
| 1361 | + rcu_read_unlock(); |
---|
| 1362 | +} |
---|
| 1363 | + |
---|
718 | 1364 | static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( |
---|
719 | | - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) |
---|
| 1365 | + struct uverbs_attr_bundle *attrs) |
---|
720 | 1366 | { |
---|
721 | 1367 | void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN); |
---|
722 | 1368 | int cmd_out_len = uverbs_attr_get_len(attrs, |
---|
723 | 1369 | MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT); |
---|
| 1370 | + int cmd_in_len = uverbs_attr_get_len(attrs, |
---|
| 1371 | + MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN); |
---|
724 | 1372 | void *cmd_out; |
---|
725 | 1373 | struct ib_uobject *uobj = uverbs_attr_get_uobject( |
---|
726 | 1374 | attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE); |
---|
727 | | - struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); |
---|
| 1375 | + struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( |
---|
| 1376 | + &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); |
---|
728 | 1377 | struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); |
---|
729 | 1378 | u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; |
---|
730 | 1379 | struct devx_obj *obj; |
---|
| 1380 | + u16 obj_type = 0; |
---|
731 | 1381 | int err; |
---|
| 1382 | + int uid; |
---|
| 1383 | + u32 obj_id; |
---|
| 1384 | + u16 opcode; |
---|
732 | 1385 | |
---|
733 | | - if (!c->devx_uid) |
---|
734 | | - return -EPERM; |
---|
| 1386 | + if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id)) |
---|
| 1387 | + return -EINVAL; |
---|
735 | 1388 | |
---|
736 | | - if (!devx_is_obj_create_cmd(cmd_in)) |
---|
| 1389 | + uid = devx_get_uid(c, cmd_in); |
---|
| 1390 | + if (uid < 0) |
---|
| 1391 | + return uid; |
---|
| 1392 | + |
---|
| 1393 | + if (!devx_is_obj_create_cmd(cmd_in, &opcode)) |
---|
737 | 1394 | return -EINVAL; |
---|
738 | 1395 | |
---|
739 | 1396 | cmd_out = uverbs_zalloc(attrs, cmd_out_len); |
---|
.. | .. |
---|
744 | 1401 | if (!obj) |
---|
745 | 1402 | return -ENOMEM; |
---|
746 | 1403 | |
---|
747 | | - MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid); |
---|
748 | | - err = mlx5_cmd_exec(dev->mdev, cmd_in, |
---|
749 | | - uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN), |
---|
750 | | - cmd_out, cmd_out_len); |
---|
| 1404 | + MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid); |
---|
| 1405 | + if (opcode == MLX5_CMD_OP_CREATE_MKEY) { |
---|
| 1406 | + err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len); |
---|
| 1407 | + if (err) |
---|
| 1408 | + goto obj_free; |
---|
| 1409 | + } else { |
---|
| 1410 | + devx_set_umem_valid(cmd_in); |
---|
| 1411 | + } |
---|
| 1412 | + |
---|
| 1413 | + if (opcode == MLX5_CMD_OP_CREATE_DCT) { |
---|
| 1414 | + obj->flags |= DEVX_OBJ_FLAGS_DCT; |
---|
| 1415 | + err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in, |
---|
| 1416 | + cmd_in_len, cmd_out, cmd_out_len); |
---|
| 1417 | + } else if (opcode == MLX5_CMD_OP_CREATE_CQ) { |
---|
| 1418 | + obj->flags |= DEVX_OBJ_FLAGS_CQ; |
---|
| 1419 | + obj->core_cq.comp = devx_cq_comp; |
---|
| 1420 | + err = mlx5_core_create_cq(dev->mdev, &obj->core_cq, |
---|
| 1421 | + cmd_in, cmd_in_len, cmd_out, |
---|
| 1422 | + cmd_out_len); |
---|
| 1423 | + } else { |
---|
| 1424 | + err = mlx5_cmd_exec(dev->mdev, cmd_in, |
---|
| 1425 | + cmd_in_len, |
---|
| 1426 | + cmd_out, cmd_out_len); |
---|
| 1427 | + } |
---|
| 1428 | + |
---|
751 | 1429 | if (err) |
---|
752 | 1430 | goto obj_free; |
---|
753 | 1431 | |
---|
| 1432 | + if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) { |
---|
| 1433 | + u32 bulk = MLX5_GET(alloc_flow_counter_in, |
---|
| 1434 | + cmd_in, |
---|
| 1435 | + flow_counter_bulk_log_size); |
---|
| 1436 | + |
---|
| 1437 | + if (bulk) |
---|
| 1438 | + bulk = 1 << bulk; |
---|
| 1439 | + else |
---|
| 1440 | + bulk = 128UL * MLX5_GET(alloc_flow_counter_in, |
---|
| 1441 | + cmd_in, |
---|
| 1442 | + flow_counter_bulk); |
---|
| 1443 | + obj->flow_counter_bulk_size = bulk; |
---|
| 1444 | + } |
---|
| 1445 | + |
---|
754 | 1446 | uobj->object = obj; |
---|
755 | | - obj->mdev = dev->mdev; |
---|
756 | | - devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id); |
---|
| 1447 | + INIT_LIST_HEAD(&obj->event_sub); |
---|
| 1448 | + obj->ib_dev = dev; |
---|
| 1449 | + devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, |
---|
| 1450 | + &obj_id); |
---|
757 | 1451 | WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32)); |
---|
758 | 1452 | |
---|
759 | 1453 | err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len); |
---|
760 | 1454 | if (err) |
---|
761 | 1455 | goto obj_destroy; |
---|
762 | 1456 | |
---|
| 1457 | + if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT) |
---|
| 1458 | + obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type); |
---|
| 1459 | + obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id); |
---|
| 1460 | + |
---|
| 1461 | + if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { |
---|
| 1462 | + err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out); |
---|
| 1463 | + if (err) |
---|
| 1464 | + goto obj_destroy; |
---|
| 1465 | + } |
---|
763 | 1466 | return 0; |
---|
764 | 1467 | |
---|
765 | 1468 | obj_destroy: |
---|
766 | | - mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); |
---|
| 1469 | + if (obj->flags & DEVX_OBJ_FLAGS_DCT) |
---|
| 1470 | + mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); |
---|
| 1471 | + else if (obj->flags & DEVX_OBJ_FLAGS_CQ) |
---|
| 1472 | + mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); |
---|
| 1473 | + else |
---|
| 1474 | + mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out, |
---|
| 1475 | + sizeof(out)); |
---|
767 | 1476 | obj_free: |
---|
768 | 1477 | kfree(obj); |
---|
769 | 1478 | return err; |
---|
770 | 1479 | } |
---|
771 | 1480 | |
---|
772 | 1481 | static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)( |
---|
773 | | - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) |
---|
| 1482 | + struct uverbs_attr_bundle *attrs) |
---|
774 | 1483 | { |
---|
775 | 1484 | void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN); |
---|
776 | 1485 | int cmd_out_len = uverbs_attr_get_len(attrs, |
---|
777 | 1486 | MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT); |
---|
778 | 1487 | struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, |
---|
779 | 1488 | MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE); |
---|
780 | | - struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); |
---|
781 | | - struct devx_obj *obj = uobj->object; |
---|
| 1489 | + struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( |
---|
| 1490 | + &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); |
---|
| 1491 | + struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); |
---|
782 | 1492 | void *cmd_out; |
---|
783 | 1493 | int err; |
---|
| 1494 | + int uid; |
---|
784 | 1495 | |
---|
785 | | - if (!c->devx_uid) |
---|
786 | | - return -EPERM; |
---|
| 1496 | + if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id)) |
---|
| 1497 | + return -EINVAL; |
---|
| 1498 | + |
---|
| 1499 | + uid = devx_get_uid(c, cmd_in); |
---|
| 1500 | + if (uid < 0) |
---|
| 1501 | + return uid; |
---|
787 | 1502 | |
---|
788 | 1503 | if (!devx_is_obj_modify_cmd(cmd_in)) |
---|
789 | 1504 | return -EINVAL; |
---|
790 | 1505 | |
---|
791 | | - if (!devx_is_valid_obj_id(obj, cmd_in)) |
---|
| 1506 | + if (!devx_is_valid_obj_id(attrs, uobj, cmd_in)) |
---|
792 | 1507 | return -EINVAL; |
---|
793 | 1508 | |
---|
794 | 1509 | cmd_out = uverbs_zalloc(attrs, cmd_out_len); |
---|
795 | 1510 | if (IS_ERR(cmd_out)) |
---|
796 | 1511 | return PTR_ERR(cmd_out); |
---|
797 | 1512 | |
---|
798 | | - MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid); |
---|
799 | | - err = mlx5_cmd_exec(obj->mdev, cmd_in, |
---|
| 1513 | + MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid); |
---|
| 1514 | + devx_set_umem_valid(cmd_in); |
---|
| 1515 | + |
---|
| 1516 | + err = mlx5_cmd_exec(mdev->mdev, cmd_in, |
---|
800 | 1517 | uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN), |
---|
801 | 1518 | cmd_out, cmd_out_len); |
---|
802 | 1519 | if (err) |
---|
.. | .. |
---|
807 | 1524 | } |
---|
808 | 1525 | |
---|
809 | 1526 | static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)( |
---|
810 | | - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) |
---|
| 1527 | + struct uverbs_attr_bundle *attrs) |
---|
811 | 1528 | { |
---|
812 | 1529 | void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN); |
---|
813 | 1530 | int cmd_out_len = uverbs_attr_get_len(attrs, |
---|
814 | 1531 | MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT); |
---|
815 | 1532 | struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, |
---|
816 | 1533 | MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE); |
---|
817 | | - struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); |
---|
818 | | - struct devx_obj *obj = uobj->object; |
---|
| 1534 | + struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( |
---|
| 1535 | + &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); |
---|
819 | 1536 | void *cmd_out; |
---|
820 | 1537 | int err; |
---|
| 1538 | + int uid; |
---|
| 1539 | + struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); |
---|
821 | 1540 | |
---|
822 | | - if (!c->devx_uid) |
---|
823 | | - return -EPERM; |
---|
| 1541 | + if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id)) |
---|
| 1542 | + return -EINVAL; |
---|
| 1543 | + |
---|
| 1544 | + uid = devx_get_uid(c, cmd_in); |
---|
| 1545 | + if (uid < 0) |
---|
| 1546 | + return uid; |
---|
824 | 1547 | |
---|
825 | 1548 | if (!devx_is_obj_query_cmd(cmd_in)) |
---|
826 | 1549 | return -EINVAL; |
---|
827 | 1550 | |
---|
828 | | - if (!devx_is_valid_obj_id(obj, cmd_in)) |
---|
| 1551 | + if (!devx_is_valid_obj_id(attrs, uobj, cmd_in)) |
---|
829 | 1552 | return -EINVAL; |
---|
830 | 1553 | |
---|
831 | 1554 | cmd_out = uverbs_zalloc(attrs, cmd_out_len); |
---|
832 | 1555 | if (IS_ERR(cmd_out)) |
---|
833 | 1556 | return PTR_ERR(cmd_out); |
---|
834 | 1557 | |
---|
835 | | - MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid); |
---|
836 | | - err = mlx5_cmd_exec(obj->mdev, cmd_in, |
---|
| 1558 | + MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid); |
---|
| 1559 | + err = mlx5_cmd_exec(mdev->mdev, cmd_in, |
---|
837 | 1560 | uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN), |
---|
838 | 1561 | cmd_out, cmd_out_len); |
---|
839 | 1562 | if (err) |
---|
.. | .. |
---|
841 | 1564 | |
---|
842 | 1565 | return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT, |
---|
843 | 1566 | cmd_out, cmd_out_len); |
---|
| 1567 | +} |
---|
| 1568 | + |
---|
| 1569 | +struct devx_async_event_queue { |
---|
| 1570 | + spinlock_t lock; |
---|
| 1571 | + wait_queue_head_t poll_wait; |
---|
| 1572 | + struct list_head event_list; |
---|
| 1573 | + atomic_t bytes_in_use; |
---|
| 1574 | + u8 is_destroyed:1; |
---|
| 1575 | +}; |
---|
| 1576 | + |
---|
| 1577 | +struct devx_async_cmd_event_file { |
---|
| 1578 | + struct ib_uobject uobj; |
---|
| 1579 | + struct devx_async_event_queue ev_queue; |
---|
| 1580 | + struct mlx5_async_ctx async_ctx; |
---|
| 1581 | +}; |
---|
| 1582 | + |
---|
| 1583 | +static void devx_init_event_queue(struct devx_async_event_queue *ev_queue) |
---|
| 1584 | +{ |
---|
| 1585 | + spin_lock_init(&ev_queue->lock); |
---|
| 1586 | + INIT_LIST_HEAD(&ev_queue->event_list); |
---|
| 1587 | + init_waitqueue_head(&ev_queue->poll_wait); |
---|
| 1588 | + atomic_set(&ev_queue->bytes_in_use, 0); |
---|
| 1589 | + ev_queue->is_destroyed = 0; |
---|
| 1590 | +} |
---|
| 1591 | + |
---|
/*
 * Allocate the FD object used to retrieve asynchronous DEVX command
 * completions: initialize its event queue and bind an mlx5 async command
 * context to the device.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;

	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}
---|
| 1607 | + |
---|
/*
 * Allocate the FD object used to receive subscribed device events.
 * The only supported flag is OMIT_DATA, which makes the channel report
 * just the event occurrence (no EQE payload copy).
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	/* rejects any flag bit other than OMIT_DATA */
	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);

	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	/* hold a device reference for the lifetime of this event file */
	get_device(&dev->ib_dev.dev);
	return 0;
}
---|
| 1639 | + |
---|
/*
 * Async command completion callback: queue the finished query result on
 * the completion FD's event list and wake any reader/poller.
 * Runs from the mlx5 command completion path, hence the IRQ-safe locking.
 */
static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
	unsigned long flags;

	/*
	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
	 * routine returns, ensuring that it always remains valid here.
	 */
	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
}
---|
| 1659 | + |
---|
/* Cap on outstanding async reply buffer space per completion FD. */
#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */

/*
 * Issue a DEVX object query asynchronously. The command is validated and
 * fired via the completion FD's async context; the result is delivered to
 * that FD by devx_query_callback() together with the caller's wr_id.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	/* vhca tunneling is not supported on this path */
	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	/* the queried object id must match the handle's object */
	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	/* reserve reply space up-front; bail if the FD is over its budget */
	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	/* hdr.out_data is a flexible array sized to the requested reply */
	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->ev_file = ev_file;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	/* on success, ownership of async_data passes to devx_query_callback */
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
		    uverbs_attr_get_len(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
		    async_data->hdr.out_data,
		    async_data->cmd_out_len,
		    devx_query_callback, &async_data->cb_work);

	if (err)
		goto free_async;

	return 0;

free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}
---|
| 1747 | + |
---|
/*
 * Error-path undo for subscribe_event_xa_alloc(): drop the level-2
 * (per-object-id) XA entry if no subscription ended up using it.
 * Caller holds devx_event_table->event_xa_lock.
 */
static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 is valid for future use, no need to free */
	if (!is_level2)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		/* readers traverse under RCU; defer the free */
		kfree_rcu(xa_val_level2, rcu);
	}
}
---|
| 1772 | + |
---|
/*
 * Ensure the XA entries for a subscription key exist: level 1 is keyed by
 * (event type | obj_type << 16), level 2 (only for object-affiliated
 * subscriptions) by the object id. Existing entries are reused.
 * Caller holds devx_event_table->event_xa_lock.
 */
static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1,
				event,
				GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level1 is valid for future use, no need to free */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2,
				obj_event,
				GFP_KERNEL);
		if (err) {
			kfree(obj_event);
			return err;
		}
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}
---|
| 1825 | + |
---|
| 1826 | +static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list, |
---|
| 1827 | + struct devx_obj *obj) |
---|
| 1828 | +{ |
---|
| 1829 | + int i; |
---|
| 1830 | + |
---|
| 1831 | + for (i = 0; i < num_events; i++) { |
---|
| 1832 | + if (obj) { |
---|
| 1833 | + if (!is_legacy_obj_event_num(event_type_num_list[i])) |
---|
| 1834 | + return false; |
---|
| 1835 | + } else if (!is_legacy_unaffiliated_event_num( |
---|
| 1836 | + event_type_num_list[i])) { |
---|
| 1837 | + return false; |
---|
| 1838 | + } |
---|
| 1839 | + } |
---|
| 1840 | + |
---|
| 1841 | + return true; |
---|
| 1842 | +} |
---|
| 1843 | + |
---|
/* Highest event number representable in the 4x64-bit capability masks. */
#define MAX_SUPP_EVENT_NUM 255
/*
 * Validate the requested event numbers against the device's advertised
 * affiliated/unaffiliated event capability masks; falls back to the legacy
 * fixed lists when the device lacks event_cap.
 */
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		/* locate the event's bit within the big-endian mask words */
		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
			return false;
	}

	return true;
}
---|
| 1891 | + |
---|
/* Upper bound on event numbers per subscription request. */
#define MAX_NUM_EVENTS 16
/*
 * Subscribe an event FD to one or more device events, optionally scoped to
 * a DEVX object and optionally redirected to an eventfd (mutually exclusive
 * with a cookie). Works in two phases under event_xa_lock: first allocate
 * all XA entries and subscription structs on a private list (so failure can
 * be fully unwound), then publish them onto the RCU-traversed lists.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct ib_uobject *fd_uobj;
	struct devx_obj *obj = NULL;
	struct devx_async_event_file *ev_file;
	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
	u16 *event_type_num_list;
	struct devx_event_subscription *event_sub, *tmp_sub;
	struct list_head sub_list;
	int redirect_fd;
	bool use_eventfd = false;
	int num_events;
	int num_alloc_xa_entries = 0;
	u16 obj_type = 0;
	u64 cookie = 0;
	u32 obj_id = 0;
	int err;
	int i;

	if (!c->devx_uid)
		return -EINVAL;

	/* object handle is optional; absent means unaffiliated subscription */
	if (!IS_ERR(devx_uobj)) {
		obj = (struct devx_obj *)devx_uobj->object;
		if (obj)
			obj_id = get_dec_obj_id(obj->obj_id);
	}

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_event_file,
			       uobj);

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
		err = uverbs_copy_from(&redirect_fd, attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
		if (err)
			return err;

		use_eventfd = true;
	}

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
		/* a cookie and an eventfd redirect are mutually exclusive */
		if (use_eventfd)
			return -EINVAL;

		err = uverbs_copy_from(&cookie, attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
		if (err)
			return err;
	}

	num_events = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		sizeof(u16));

	if (num_events < 0)
		return num_events;

	if (num_events > MAX_NUM_EVENTS)
		return -EINVAL;

	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);

	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);

	/* Protect from concurrent subscriptions to same XA entries to allow
	 * both to succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;

		if (obj)
			obj_type = get_dec_obj_type(obj,
						    event_type_num_list[i]);
		key_level1 = event_type_num_list[i] | obj_type << 16;

		err = subscribe_event_xa_alloc(devx_event_table,
					       key_level1,
					       obj,
					       obj_id);
		if (err)
			goto err;

		num_alloc_xa_entries++;
		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}

		/* park on the private list until the publish phase below */
		list_add_tail(&event_sub->event_list, &sub_list);
		uverbs_uobject_get(&ev_file->uobj);
		if (use_eventfd) {
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
			}
		}

		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		/* May be needed upon cleanup the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}

	/* Once all the allocations and the XA data insertions were done we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without concern of a failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;

		list_del_init(&event_sub->event_list);

		spin_lock_irq(&ev_file->lock);
		list_add_tail_rcu(&event_sub->file_list,
				  &ev_file->subscribed_events_list);
		spin_unlock_irq(&ev_file->lock);

		event = xa_load(&devx_event_table->event_xa,
				event_sub->xa_key_level1);
		WARN_ON(!event);

		if (!obj) {
			list_add_tail_rcu(&event_sub->xa_list,
					  &event->unaffiliated_list);
			continue;
		}

		obj_event = xa_load(&event->object_ids, obj_id);
		WARN_ON(!obj_event);
		list_add_tail_rcu(&event_sub->xa_list,
				  &obj_event->obj_sub_list);
		list_add_tail_rcu(&event_sub->obj_list,
				  &obj->event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	/* unwind everything parked on sub_list plus the XA level-2 entries */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);
		uverbs_uobject_put(&event_sub->ev_file->uobj);
		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}
---|
845 | 2076 | |
---|
846 | 2077 | static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext, |
---|
.. | .. |
---|
870 | 2101 | if (err) |
---|
871 | 2102 | return err; |
---|
872 | 2103 | |
---|
873 | | - obj->umem = ib_umem_get(ucontext, addr, size, access, 0); |
---|
| 2104 | + obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access); |
---|
874 | 2105 | if (IS_ERR(obj->umem)) |
---|
875 | 2106 | return PTR_ERR(obj->umem); |
---|
876 | 2107 | |
---|
.. | .. |
---|
909 | 2140 | umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem); |
---|
910 | 2141 | mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt); |
---|
911 | 2142 | |
---|
912 | | - MLX5_SET(general_obj_in_cmd_hdr, cmd->in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); |
---|
913 | | - MLX5_SET(general_obj_in_cmd_hdr, cmd->in, obj_type, MLX5_OBJ_TYPE_UMEM); |
---|
| 2143 | + MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM); |
---|
914 | 2144 | MLX5_SET64(umem, umem, num_of_mtt, obj->ncont); |
---|
915 | 2145 | MLX5_SET(umem, umem, log_page_size, obj->page_shift - |
---|
916 | 2146 | MLX5_ADAPTER_PAGE_SHIFT); |
---|
.. | .. |
---|
921 | 2151 | } |
---|
922 | 2152 | |
---|
923 | 2153 | static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)( |
---|
924 | | - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) |
---|
| 2154 | + struct uverbs_attr_bundle *attrs) |
---|
925 | 2155 | { |
---|
926 | 2156 | struct devx_umem_reg_cmd cmd; |
---|
927 | 2157 | struct devx_umem *obj; |
---|
928 | 2158 | struct ib_uobject *uobj = uverbs_attr_get_uobject( |
---|
929 | 2159 | attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE); |
---|
930 | 2160 | u32 obj_id; |
---|
931 | | - struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); |
---|
| 2161 | + struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context( |
---|
| 2162 | + &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); |
---|
932 | 2163 | struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); |
---|
933 | 2164 | int err; |
---|
934 | 2165 | |
---|
935 | 2166 | if (!c->devx_uid) |
---|
936 | | - return -EPERM; |
---|
| 2167 | + return -EINVAL; |
---|
937 | 2168 | |
---|
938 | 2169 | obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL); |
---|
939 | 2170 | if (!obj) |
---|
.. | .. |
---|
949 | 2180 | |
---|
950 | 2181 | devx_umem_reg_cmd_build(dev, obj, &cmd); |
---|
951 | 2182 | |
---|
952 | | - MLX5_SET(general_obj_in_cmd_hdr, cmd.in, uid, c->devx_uid); |
---|
| 2183 | + MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid); |
---|
953 | 2184 | err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out, |
---|
954 | 2185 | sizeof(cmd.out)); |
---|
955 | 2186 | if (err) |
---|
.. | .. |
---|
958 | 2189 | obj->mdev = dev->mdev; |
---|
959 | 2190 | uobj->object = obj; |
---|
960 | 2191 | devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id); |
---|
961 | | - err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id)); |
---|
962 | | - if (err) |
---|
963 | | - goto err_umem_destroy; |
---|
| 2192 | + uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE); |
---|
964 | 2193 | |
---|
965 | | - return 0; |
---|
| 2194 | + err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, |
---|
| 2195 | + sizeof(obj_id)); |
---|
| 2196 | + return err; |
---|
966 | 2197 | |
---|
967 | | -err_umem_destroy: |
---|
968 | | - mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out)); |
---|
969 | 2198 | err_umem_release: |
---|
970 | 2199 | ib_umem_release(obj->umem); |
---|
971 | 2200 | err_obj_free: |
---|
.. | .. |
---|
974 | 2203 | } |
---|
975 | 2204 | |
---|
976 | 2205 | static int devx_umem_cleanup(struct ib_uobject *uobject, |
---|
977 | | - enum rdma_remove_reason why) |
---|
| 2206 | + enum rdma_remove_reason why, |
---|
| 2207 | + struct uverbs_attr_bundle *attrs) |
---|
978 | 2208 | { |
---|
979 | 2209 | struct devx_umem *obj = uobject->object; |
---|
980 | 2210 | u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; |
---|
.. | .. |
---|
988 | 2218 | kfree(obj); |
---|
989 | 2219 | return 0; |
---|
990 | 2220 | } |
---|
| 2221 | + |
---|
| 2222 | +static bool is_unaffiliated_event(struct mlx5_core_dev *dev, |
---|
| 2223 | + unsigned long event_type) |
---|
| 2224 | +{ |
---|
| 2225 | + __be64 *unaff_events; |
---|
| 2226 | + int mask_entry; |
---|
| 2227 | + int mask_bit; |
---|
| 2228 | + |
---|
| 2229 | + if (!MLX5_CAP_GEN(dev, event_cap)) |
---|
| 2230 | + return is_legacy_unaffiliated_event_num(event_type); |
---|
| 2231 | + |
---|
| 2232 | + unaff_events = MLX5_CAP_DEV_EVENT(dev, |
---|
| 2233 | + user_unaffiliated_events); |
---|
| 2234 | + WARN_ON(event_type > MAX_SUPP_EVENT_NUM); |
---|
| 2235 | + |
---|
| 2236 | + mask_entry = event_type / 64; |
---|
| 2237 | + mask_bit = event_type % 64; |
---|
| 2238 | + |
---|
| 2239 | + if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit))) |
---|
| 2240 | + return false; |
---|
| 2241 | + |
---|
| 2242 | + return true; |
---|
| 2243 | +} |
---|
| 2244 | + |
---|
/*
 * Extract the affected object's id from an EQE, using the per-event-type
 * payload layout; unknown types fall back to the generic affiliated event
 * header field.
 */
static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;

	switch (event_type) {
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		/* QP/SRQ numbers are 24 bits wide */
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}
---|
| 2280 | + |
---|
/*
 * Deliver one event to a subscription's event FD. In omit_data mode the
 * subscription itself is linked onto the file's event list (at most once);
 * otherwise a copy of the EQE plus the cookie is queued. Called from the
 * notifier path, so allocation is GFP_ATOMIC and locking is IRQ-safe.
 */
static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
	struct devx_async_event_file *ev_file;
	struct devx_async_event_data *event_data;
	unsigned long flags;

	ev_file = event_sub->ev_file;

	if (ev_file->omit_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		/* already queued, or the file is going away: nothing to do */
		if (!list_empty(&event_sub->event_list) ||
		    ev_file->is_destroyed) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}

	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		/* record the drop so the reader can learn events were lost */
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}

	event_data->hdr.cookie = event_sub->cookie;
	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));

	spin_lock_irqsave(&ev_file->lock, flags);
	if (!ev_file->is_destroyed)
		list_add_tail(&event_data->list, &ev_file->event_list);
	else
		kfree(event_data);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}
---|
| 2326 | + |
---|
/*
 * Fan an event out to every subscription on an RCU-protected list:
 * eventfd-redirected subscriptions are just signalled, others get the
 * event payload queued via deliver_event(). Caller holds rcu_read_lock().
 */
static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (item->eventfd)
			eventfd_signal(item->eventfd, 1);
		else
			deliver_event(item, data);
	}
}
---|
| 2339 | + |
---|
/*
 * EQ notifier: route a device event to the matching subscriptions.
 * Looks up the level-1 XA entry by (event type | obj_type << 16); for
 * affiliated events a second lookup by object id selects the per-object
 * subscriber list. Lookups and dispatch run under RCU.
 */
static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	struct mlx5_devx_event_table *table;
	struct mlx5_ib_dev *dev;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;

	/* Explicit filtering to kernel events which may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_OK;

	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);

	if (!is_unaffiliated)
		obj_type = get_event_obj_type(event_type, data);

	rcu_read_lock();
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);

	rcu_read_unlock();
	return NOTIFY_OK;
}
---|
| 2388 | + |
---|
/*
 * Set up DEVX support for a device: obtain a whitelist uid and, if one is
 * granted, initialize the event table and register the EQ notifier.
 * DEVX is optional, so this returns 0 even when no uid was obtained.
 */
int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	int uid;

	uid = mlx5_ib_devx_create(dev, false);
	if (uid > 0) {
		dev->devx_whitelist_uid = uid;
		xa_init(&table->event_xa);
		mutex_init(&table->event_xa_lock);
		MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
		mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
	}

	return 0;
}
---|
| 2405 | + |
---|
/*
 * Tear down DEVX support set up by mlx5_ib_devx_init().
 *
 * Acts only if a whitelist uid was actually allocated.  The EQ notifier
 * is unregistered first so no new events are dispatched, then the event
 * XArray is walked under event_xa_lock: every remaining unaffiliated
 * subscription is released and each per-event-type entry freed.
 * Finally the XArray itself is destroyed and the uid returned to
 * firmware.
 */
void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;

	if (dev->devx_whitelist_uid) {
		mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
		mutex_lock(&dev->devx_event_table.event_xa_lock);
		xa_for_each(&table->event_xa, id, entry) {
			event = entry;
			list_for_each_entry_safe(
				sub, tmp, &event->unaffiliated_list, xa_list)
				devx_cleanup_subscription(dev, sub);
			kfree(entry);
		}
		mutex_unlock(&dev->devx_event_table.event_xa_lock);
		xa_destroy(&table->event_xa);

		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
	}
}
---|
| 2430 | + |
---|
/*
 * read() handler for the async command-completion FD.
 *
 * Delivers exactly one queued command completion per call.  Blocks
 * (unless O_NONBLOCK) until a completion is queued or the FD is being
 * destroyed; destruction observed while waiting yields -EIO.  The whole
 * event (header plus FW output of cmd_out_len bytes) must fit in the
 * user buffer, otherwise -ENOSPC is returned and the event remains
 * queued.
 */
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		/* Re-check under the lock before consuming anything. */
		spin_lock_irq(&ev_queue->lock);
		if (ev_queue->is_destroyed) {
			spin_unlock_irq(&ev_queue->lock);
			return -EIO;
		}
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
			sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	/* Unlink first; once off the list the event is exclusively ours. */
	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	/*
	 * Release this event's share of the queue budget (presumably
	 * reserved when the async command was issued -- the matching
	 * atomic_add is not in this function).
	 */
	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}
---|
| 2484 | + |
---|
| 2485 | +static __poll_t devx_async_cmd_event_poll(struct file *filp, |
---|
| 2486 | + struct poll_table_struct *wait) |
---|
| 2487 | +{ |
---|
| 2488 | + struct devx_async_cmd_event_file *comp_ev_file = filp->private_data; |
---|
| 2489 | + struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; |
---|
| 2490 | + __poll_t pollflags = 0; |
---|
| 2491 | + |
---|
| 2492 | + poll_wait(filp, &ev_queue->poll_wait, wait); |
---|
| 2493 | + |
---|
| 2494 | + spin_lock_irq(&ev_queue->lock); |
---|
| 2495 | + if (ev_queue->is_destroyed) |
---|
| 2496 | + pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; |
---|
| 2497 | + else if (!list_empty(&ev_queue->event_list)) |
---|
| 2498 | + pollflags = EPOLLIN | EPOLLRDNORM; |
---|
| 2499 | + spin_unlock_irq(&ev_queue->lock); |
---|
| 2500 | + |
---|
| 2501 | + return pollflags; |
---|
| 2502 | +} |
---|
| 2503 | + |
---|
/* File operations backing the DEVX async command-completion FD. */
static const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll    = devx_async_cmd_event_poll,
	.release = uverbs_uobject_fd_release,
	.llseek	 = no_llseek,
};
---|
| 2511 | + |
---|
/*
 * read() handler for the async event FD.
 *
 * Two delivery modes, fixed when the FD was created:
 *  - omit_data: event_list links the subscriptions themselves and only
 *    the 64-bit user cookie is returned; the entry is re-armed with
 *    list_del_init() so it can be queued again.
 *  - full data: event_list holds heap-allocated devx_async_event_data
 *    entries carrying a complete EQE, freed after the copy-out.
 *
 * A previously recorded queue overflow is reported once as -EOVERFLOW.
 * Blocks (unless O_NONBLOCK) until an event arrives or the FD is
 * destroyed (-EIO).  A user buffer smaller than the event is -EINVAL.
 */
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub;
	struct devx_async_event_data *event;
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;

	omit_data = ev_file->omit_data;

	spin_lock_irq(&ev_file->lock);

	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}


	while (list_empty(&ev_file->event_list)) {
		spin_unlock_irq(&ev_file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_file->poll_wait,
			    (!list_empty(&ev_file->event_list) ||
			     ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		/* Re-check under the lock before consuming anything. */
		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}

	if (omit_data) {
		/* Only the subscription cookie is handed to userspace. */
		event_sub = list_first_entry(&ev_file->event_list,
					     struct devx_event_subscription,
					     event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		/* Every queued entry carries a full EQE: fixed size. */
		event = list_first_entry(&ev_file->event_list,
					 struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}

	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}

	if (omit_data)
		/* Re-initialize so the subscription can be queued again. */
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);

	spin_unlock_irq(&ev_file->lock);

	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	/* Only the full-data mode owns a heap allocation per event. */
	if (!omit_data)
		kfree(event);
	return ret;
}
---|
| 2589 | + |
---|
| 2590 | +static __poll_t devx_async_event_poll(struct file *filp, |
---|
| 2591 | + struct poll_table_struct *wait) |
---|
| 2592 | +{ |
---|
| 2593 | + struct devx_async_event_file *ev_file = filp->private_data; |
---|
| 2594 | + __poll_t pollflags = 0; |
---|
| 2595 | + |
---|
| 2596 | + poll_wait(filp, &ev_file->poll_wait, wait); |
---|
| 2597 | + |
---|
| 2598 | + spin_lock_irq(&ev_file->lock); |
---|
| 2599 | + if (ev_file->is_destroyed) |
---|
| 2600 | + pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; |
---|
| 2601 | + else if (!list_empty(&ev_file->event_list)) |
---|
| 2602 | + pollflags = EPOLLIN | EPOLLRDNORM; |
---|
| 2603 | + spin_unlock_irq(&ev_file->lock); |
---|
| 2604 | + |
---|
| 2605 | + return pollflags; |
---|
| 2606 | +} |
---|
| 2607 | + |
---|
| 2608 | +static void devx_free_subscription(struct rcu_head *rcu) |
---|
| 2609 | +{ |
---|
| 2610 | + struct devx_event_subscription *event_sub = |
---|
| 2611 | + container_of(rcu, struct devx_event_subscription, rcu); |
---|
| 2612 | + |
---|
| 2613 | + if (event_sub->eventfd) |
---|
| 2614 | + eventfd_ctx_put(event_sub->eventfd); |
---|
| 2615 | + uverbs_uobject_put(&event_sub->ev_file->uobj); |
---|
| 2616 | + kfree(event_sub); |
---|
| 2617 | +} |
---|
| 2618 | + |
---|
/* File operations backing the DEVX async event FD. */
static const struct file_operations devx_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_event_read,
	.poll    = devx_async_event_poll,
	.release = uverbs_uobject_fd_release,
	.llseek	 = no_llseek,
};
---|
| 2626 | + |
---|
| 2627 | +static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj, |
---|
| 2628 | + enum rdma_remove_reason why) |
---|
| 2629 | +{ |
---|
| 2630 | + struct devx_async_cmd_event_file *comp_ev_file = |
---|
| 2631 | + container_of(uobj, struct devx_async_cmd_event_file, |
---|
| 2632 | + uobj); |
---|
| 2633 | + struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; |
---|
| 2634 | + struct devx_async_data *entry, *tmp; |
---|
| 2635 | + |
---|
| 2636 | + spin_lock_irq(&ev_queue->lock); |
---|
| 2637 | + ev_queue->is_destroyed = 1; |
---|
| 2638 | + spin_unlock_irq(&ev_queue->lock); |
---|
| 2639 | + wake_up_interruptible(&ev_queue->poll_wait); |
---|
| 2640 | + |
---|
| 2641 | + mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx); |
---|
| 2642 | + |
---|
| 2643 | + spin_lock_irq(&comp_ev_file->ev_queue.lock); |
---|
| 2644 | + list_for_each_entry_safe(entry, tmp, |
---|
| 2645 | + &comp_ev_file->ev_queue.event_list, list) { |
---|
| 2646 | + list_del(&entry->list); |
---|
| 2647 | + kvfree(entry); |
---|
| 2648 | + } |
---|
| 2649 | + spin_unlock_irq(&comp_ev_file->ev_queue.lock); |
---|
| 2650 | + return 0; |
---|
| 2651 | +}; |
---|
| 2652 | + |
---|
| 2653 | +static int devx_async_event_destroy_uobj(struct ib_uobject *uobj, |
---|
| 2654 | + enum rdma_remove_reason why) |
---|
| 2655 | +{ |
---|
| 2656 | + struct devx_async_event_file *ev_file = |
---|
| 2657 | + container_of(uobj, struct devx_async_event_file, |
---|
| 2658 | + uobj); |
---|
| 2659 | + struct devx_event_subscription *event_sub, *event_sub_tmp; |
---|
| 2660 | + struct mlx5_ib_dev *dev = ev_file->dev; |
---|
| 2661 | + |
---|
| 2662 | + spin_lock_irq(&ev_file->lock); |
---|
| 2663 | + ev_file->is_destroyed = 1; |
---|
| 2664 | + |
---|
| 2665 | + /* free the pending events allocation */ |
---|
| 2666 | + if (ev_file->omit_data) { |
---|
| 2667 | + struct devx_event_subscription *event_sub, *tmp; |
---|
| 2668 | + |
---|
| 2669 | + list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list, |
---|
| 2670 | + event_list) |
---|
| 2671 | + list_del_init(&event_sub->event_list); |
---|
| 2672 | + |
---|
| 2673 | + } else { |
---|
| 2674 | + struct devx_async_event_data *entry, *tmp; |
---|
| 2675 | + |
---|
| 2676 | + list_for_each_entry_safe(entry, tmp, &ev_file->event_list, |
---|
| 2677 | + list) { |
---|
| 2678 | + list_del(&entry->list); |
---|
| 2679 | + kfree(entry); |
---|
| 2680 | + } |
---|
| 2681 | + } |
---|
| 2682 | + |
---|
| 2683 | + spin_unlock_irq(&ev_file->lock); |
---|
| 2684 | + wake_up_interruptible(&ev_file->poll_wait); |
---|
| 2685 | + |
---|
| 2686 | + mutex_lock(&dev->devx_event_table.event_xa_lock); |
---|
| 2687 | + /* delete the subscriptions which are related to this FD */ |
---|
| 2688 | + list_for_each_entry_safe(event_sub, event_sub_tmp, |
---|
| 2689 | + &ev_file->subscribed_events_list, file_list) { |
---|
| 2690 | + devx_cleanup_subscription(dev, event_sub); |
---|
| 2691 | + list_del_rcu(&event_sub->file_list); |
---|
| 2692 | + /* subscription may not be used by the read API any more */ |
---|
| 2693 | + call_rcu(&event_sub->rcu, devx_free_subscription); |
---|
| 2694 | + } |
---|
| 2695 | + mutex_unlock(&dev->devx_event_table.event_xa_lock); |
---|
| 2696 | + |
---|
| 2697 | + put_device(&dev->ib_dev.dev); |
---|
| 2698 | + return 0; |
---|
| 2699 | +}; |
---|
991 | 2700 | |
---|
992 | 2701 | DECLARE_UVERBS_NAMED_METHOD( |
---|
993 | 2702 | MLX5_IB_METHOD_DEVX_UMEM_REG, |
---|
.. | .. |
---|
1070 | 2779 | DECLARE_UVERBS_NAMED_METHOD( |
---|
1071 | 2780 | MLX5_IB_METHOD_DEVX_OBJ_MODIFY, |
---|
1072 | 2781 | UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE, |
---|
1073 | | - MLX5_IB_OBJECT_DEVX_OBJ, |
---|
| 2782 | + UVERBS_IDR_ANY_OBJECT, |
---|
1074 | 2783 | UVERBS_ACCESS_WRITE, |
---|
1075 | 2784 | UA_MANDATORY), |
---|
1076 | 2785 | UVERBS_ATTR_PTR_IN( |
---|
.. | .. |
---|
1086 | 2795 | DECLARE_UVERBS_NAMED_METHOD( |
---|
1087 | 2796 | MLX5_IB_METHOD_DEVX_OBJ_QUERY, |
---|
1088 | 2797 | UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE, |
---|
1089 | | - MLX5_IB_OBJECT_DEVX_OBJ, |
---|
| 2798 | + UVERBS_IDR_ANY_OBJECT, |
---|
1090 | 2799 | UVERBS_ACCESS_READ, |
---|
1091 | 2800 | UA_MANDATORY), |
---|
1092 | 2801 | UVERBS_ATTR_PTR_IN( |
---|
.. | .. |
---|
1099 | 2808 | UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)), |
---|
1100 | 2809 | UA_MANDATORY)); |
---|
1101 | 2810 | |
---|
/*
 * OBJ_ASYNC_QUERY: fire an object query whose FW output is delivered
 * later through an async command FD.  The handle may be any uverbs
 * object (UVERBS_IDR_ANY_OBJECT); the caller supplies the expected
 * output length, the destination FD and a wr_id echoed back in the
 * completion header.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
		u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
		UVERBS_ATTR_TYPE(u64),
		UA_MANDATORY));

/*
 * SUBSCRIBE_EVENT: attach a list of event type numbers -- and
 * optionally a specific DEVX object, a user cookie and an fd number --
 * to an async event FD.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UVERBS_ACCESS_READ,
		UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
		MLX5_IB_OBJECT_DEVX_OBJ,
		UVERBS_ACCESS_READ,
		UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
		UVERBS_ATTR_TYPE(u64),
		UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
		UVERBS_ATTR_TYPE(u32),
		UA_OPTIONAL));
---|
| 2852 | + |
---|
/* Device-global DEVX methods (not bound to an object instance). */
DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));

/* Generic DEVX object: create/destroy/modify/query (+ async query). */
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

/* User memory registration object used by DEVX commands. */
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
---|
1118 | 2871 | |
---|
1119 | | -DECLARE_UVERBS_OBJECT_TREE(devx_objects, |
---|
1120 | | - &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX), |
---|
1121 | | - &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ), |
---|
1122 | | - &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM)); |
---|
1123 | 2872 | |
---|
1124 | | -const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void) |
---|
/* Allocate an FD on which async command completions are collected. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY));

/* FD object backed by devx_async_cmd_event_fops, read-only. */
DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_async_cmd_event_destroy_uobj,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));

/* Allocate an FD on which subscribed device events are received. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
			MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
			enum mlx5_ib_uapi_devx_create_event_channel_flags,
			UA_MANDATORY));

/* FD object backed by devx_async_event_fops, read-only. */
DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
			     devx_async_event_destroy_uobj,
			     &devx_async_event_fops, "[devx_async_event]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
---|
| 2905 | + |
---|
| 2906 | +static bool devx_is_supported(struct ib_device *device) |
---|
1125 | 2907 | { |
---|
1126 | | - return &devx_objects; |
---|
| 2908 | + struct mlx5_ib_dev *dev = to_mdev(device); |
---|
| 2909 | + |
---|
| 2910 | + return MLX5_CAP_GEN(dev->mdev, log_max_uctx); |
---|
1127 | 2911 | } |
---|
| 2912 | + |
---|
/*
 * uapi definition chain for every DEVX object tree; each tree is
 * enabled per-device only when devx_is_supported() reports the
 * capability.
 */
const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};
---|