+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials
- *   provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef MLX5_IB_H
...
#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
+#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>

-#define mlx5_ib_dbg(dev, format, arg...) \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
-	 __LINE__, current->pid, ##arg)
+#include "srq.h"

-#define mlx5_ib_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
-	__LINE__, current->pid, ##arg)
+#define mlx5_ib_dbg(_dev, format, arg...) \
+	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+		__LINE__, current->pid, ##arg)

-#define mlx5_ib_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
-	__LINE__, current->pid, ##arg)
+#define mlx5_ib_err(_dev, format, arg...) \
+	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+		__LINE__, current->pid, ##arg)

-#define field_avail(type, fld, sz) (offsetof(type, fld) + \
-				    sizeof(((type *)0)->fld) <= (sz))
+#define mlx5_ib_warn(_dev, format, arg...) \
+	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+		 __LINE__, current->pid, ##arg)
+
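The reworked macros above route through dev_dbg()/dev_err()/dev_warn(), so every message is prefixed with the IB device name automatically, and mlx5_ib_dbg() participates in dynamic debug rather than plain pr_debug(). A minimal usage sketch (the caller and message are illustrative, not part of this patch):

```c
/* Illustrative caller: mlx5_ib_dbg() is compiled out unless dynamic
 * debug (or DEBUG) enables it; mlx5_ib_err() always prints.
 */
static void example_report_status(struct mlx5_ib_dev *dev, int err)
{
	if (err)
		mlx5_ib_err(dev, "command failed, err %d\n", err);
	else
		mlx5_ib_dbg(dev, "command completed\n");
}
```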
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
+
+enum {
+	MLX5_IB_MMAP_OFFSET_START = 9,
+	MLX5_IB_MMAP_OFFSET_END = 255,
+};

enum {
	MLX5_IB_MMAP_CMD_SHIFT = 8,
...
	MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
};

-struct mlx5_ib_vma_private_data {
-	struct list_head list;
-	struct vm_area_struct *vma;
-	/* protect vma_private_list add/del */
-	struct mutex *vma_private_list_mutex;
+enum mlx5_ib_mmap_type {
+	MLX5_IB_MMAP_TYPE_MEMIC = 1,
+	MLX5_IB_MMAP_TYPE_VAR = 2,
+	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
+	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
+};
+
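With MLX5_IB_MMAP_CMD_SHIFT = 8, the legacy mmap scheme keeps a command number in the bits of vm_pgoff above bit 8, while the new rdma_user_mmap_entry-based types above claim page offsets MLX5_IB_MMAP_OFFSET_START..MLX5_IB_MMAP_OFFSET_END. A sketch of the legacy decode, assuming a command mask of 0xff defined in the elided part of this enum:

```c
/* Assumed helper; mirrors the driver's get_command()-style decode. */
static unsigned long example_get_command(unsigned long pgoff)
{
	return (pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & 0xff; /* mask assumed */
}
```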
+struct mlx5_bfreg_info {
+	u32 *sys_pages;
+	int num_low_latency_bfregs;
+	unsigned int *count;
+
+	/*
+	 * protect bfreg allocation data structs
+	 */
+	struct mutex lock;
+	u32 ver;
+	u8 lib_uar_4k : 1;
+	u8 lib_uar_dyn : 1;
+	u32 num_sys_pages;
+	u32 num_static_sys_pages;
+	u32 total_num_bfregs;
+	u32 num_dyn_bfregs;
};

struct mlx5_ib_ucontext {
...
	u8 cqe_version;
	/* Transport Domain number */
	u32 tdn;
-	struct list_head vma_private_list;
-	/* protect vma_private_list add/del */
-	struct mutex vma_private_list_mutex;

	u64 lib_caps;
-	DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
	u16 devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t tx_port_affinity;
...
struct mlx5_ib_pd {
	struct ib_pd ibpd;
	u32 pdn;
+	u16 uid;
+};
+
+enum {
+	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
+	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
+	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
...
	struct mlx5_ib_match_params matcher_mask;
	int mask_len;
	enum mlx5_ib_flow_type flow_type;
+	enum mlx5_flow_namespace_type ns_type;
	u16 priority;
	struct mlx5_core_dev *mdev;
	atomic_t usecnt;
	u8 match_criteria_enable;
};

+struct mlx5_ib_pp {
+	u16 index;
+	struct mlx5_core_dev *mdev;
+};
+
struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
+	struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
+	struct mlx5_ib_flow_prio fdb;
+	struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
+	struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_flow_table *lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when adding/deleting flow rules.
...
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
-
-/* Create a UD QP whose source QP number is 1 */
-static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
-{
-	return IB_QP_CREATE_RESERVED_START;
-}
+#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
+#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)

struct wr_list {
	u16 opcode;
...
};

struct mlx5_ib_wq {
+	struct mlx5_frag_buf_ctrl fbc;
	u64 *wrid;
	u32 *wr_data;
	struct wr_list *w_list;
...
	unsigned tail;
	u16 cur_post;
	u16 last_poll;
-	void *qend;
+	void *cur_edge;
};

enum mlx5_ib_wq_flags {
...
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
+#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3

struct mlx5_ib_rwq {
	struct ib_wq ibwq;
...
	struct ib_umem *umem;
	size_t buf_size;
	unsigned int page_shift;
-	int create_type;
	struct mlx5_db db;
	u32 user_index;
	u32 wqe_count;
...
	u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};

-enum {
-	MLX5_QP_USER,
-	MLX5_QP_KERNEL,
-	MLX5_QP_EMPTY
-};
-
-enum {
-	MLX5_WQ_USER,
-	MLX5_WQ_KERNEL
-};
-
struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32 rqtn;
+	u16 uid;
};

struct mlx5_ib_ubuffer {
...
	u32 *in;
};

+struct mlx5_ib_gsi_qp {
+	struct ib_qp *rx_qp;
+	u8 port_num;
+	struct ib_qp_cap cap;
+	struct ib_cq *cq;
+	struct mlx5_ib_gsi_wr *outstanding_wrs;
+	u32 outstanding_pi, outstanding_ci;
+	int num_qps;
+	/* Protects access to the tx_qps. Post send operations synchronize
+	 * with tx_qp creation in setup_qp(). Also protects the
+	 * outstanding_wrs array and indices.
+	 */
+	spinlock_t lock;
+	struct ib_qp **tx_qps;
+};
+
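Per the comment in struct mlx5_ib_gsi_qp above, gsi->lock serializes post-send against tx_qp creation and guards the outstanding_wrs ring and its producer/consumer indices. A hedged sketch of that locking discipline (the helper is hypothetical; the real logic lives in the GSI code):

```c
/* Hypothetical: reserve a slot in the outstanding_wrs ring under the
 * lock that the struct's comment prescribes.
 */
static struct mlx5_ib_gsi_wr *example_claim_wr(struct mlx5_ib_gsi_qp *gsi)
{
	struct mlx5_ib_gsi_wr *wr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gsi->lock, flags);
	if (gsi->outstanding_pi - gsi->outstanding_ci < gsi->cap.max_send_wr)
		wr = &gsi->outstanding_wrs[gsi->outstanding_pi++ %
					   gsi->cap.max_send_wr];
	spin_unlock_irqrestore(&gsi->lock, flags);
	return wr;
}
```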
struct mlx5_ib_qp {
	struct ib_qp ibqp;
	union {
...
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
+		struct mlx5_ib_gsi_qp gsi;
	};
	struct mlx5_frag_buf buf;

...
	/* serialize qp state modifications
	 */
	struct mutex mutex;
+	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32 flags;
	u8 port;
	u8 state;
-	int wq_sig;
-	int scat_cqe;
	int max_inline_data;
	struct mlx5_bf bf;
-	int has_rq;
+	u8 has_rq:1;
+	u8 is_rss:1;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int bfregn;

-	int create_type;
-
-	/* Store signature errors */
-	bool signature_en;
-
	struct list_head qps_list;
	struct list_head cq_recv_list;
	struct list_head cq_send_list;
	struct mlx5_rate_limit rl;
	u32 underlay_qpn;
-	bool tunnel_offload_en;
-	/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
-	enum ib_qp_type qp_sub_type;
+	u32 flags_en;
+	/*
+	 * IB/core doesn't store low-level QP types, so
+	 * store both MLX and IBTA types in the field below.
+	 * IB_QPT_DRIVER is broken down into the DCI/DCT subtypes.
+	 */
+	enum ib_qp_type type;
+	/* A flag to indicate that a new counter is configured
+	 * but hasn't taken effect yet
+	 */
+	u32 counter_pending;
+	u16 gsi_lag_port;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
+	struct mlx5_frag_buf frag_buf;
	struct ib_umem *umem;
	int cqe_size;
	int nent;
-};
-
-enum mlx5_ib_qp_flags {
-	MLX5_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
-	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
-	MLX5_IB_QP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL,
-	MLX5_IB_QP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND,
-	MLX5_IB_QP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV,
-	MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
-	/* QP uses 1 as its source QP number */
-	MLX5_IB_QP_SQPN_QP1 = 1 << 6,
-	MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
-	MLX5_IB_QP_RSS = 1 << 8,
-	MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
-	MLX5_IB_QP_UNDERLAY = 1 << 10,
-	MLX5_IB_QP_PCI_WRITE_END_PADDING = 1 << 11,
-	MLX5_IB_QP_TUNNEL_OFFLOAD = 1 << 12,
};

struct mlx5_umr_wr {
...
	struct mlx5_core_srq msrq;
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
+	struct mlx5_frag_buf_ctrl fbc;
	u64 *wrid;
	/* protect SRQ handling
	 */
...
	MLX5_IB_MTT_WRITE = (1 << 1),
};

+struct mlx5_user_mmap_entry {
+	struct rdma_user_mmap_entry rdma_entry;
+	u8 mmap_flag;
+	u64 address;
+	u32 page_idx;
+};
+
struct mlx5_ib_dm {
	struct ib_dm ibdm;
	phys_addr_t dev_addr;
+	u32 type;
+	size_t size;
+	union {
+		struct {
+			u32 obj_id;
+		} icm_dm;
+		/* other dm types specific params should be added here */
+	};
+	struct mlx5_user_mmap_entry mentry;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

-#define MLX5_IB_DM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
-				   IB_ACCESS_REMOTE_WRITE |\
-				   IB_ACCESS_REMOTE_READ |\
-				   IB_ACCESS_REMOTE_ATOMIC |\
-				   IB_ZERO_BASED)
+#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
+					 IB_ACCESS_REMOTE_WRITE |\
+					 IB_ACCESS_REMOTE_READ |\
+					 IB_ACCESS_REMOTE_ATOMIC |\
+					 IB_ZERO_BASED)
+
+#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
+					  IB_ACCESS_REMOTE_WRITE |\
+					  IB_ACCESS_REMOTE_READ |\
+					  IB_ZERO_BASED)
+
+#define mlx5_update_odp_stats(mr, counter_name, value) \
+	atomic64_add(value, &((mr)->odp_stats.counter_name))

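The mlx5_update_odp_stats() macro added above is a thin wrapper around atomic64_add() on a counter embedded in mr->odp_stats (struct ib_odp_counters). For example, a page-fault path can account the pages it resolved like this (caller context is illustrative):

```c
/* e.g. after resolving an ODP fault that brought in `npages` pages */
mlx5_update_odp_stats(mr, faults, npages);
```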
struct mlx5_ib_mr {
	struct ib_mr ibmr;
	void *descs;
	dma_addr_t desc_map;
	int ndescs;
+	int data_length;
+	int meta_ndescs;
+	int meta_length;
	int max_descs;
	int desc_size;
	int access_mode;
...
	struct ib_umem *umem;
	struct mlx5_shared_mr_info *smr_info;
	struct list_head list;
-	int order;
-	bool allocated_from_cache;
+	unsigned int order;
+	struct mlx5_cache_ent *cache_ent;
	int npages;
	struct mlx5_ib_dev *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx *sig;
-	int live;
	void *descs_alloc;
	int access_flags; /* Needed for rereg MR */

	struct mlx5_ib_mr *parent;
-	atomic_t num_leaf_free;
-	wait_queue_head_t q_leaf_free;
+	/* Needed for IB_MR_TYPE_INTEGRITY */
+	struct mlx5_ib_mr *pi_mr;
+	struct mlx5_ib_mr *klm_mr;
+	struct mlx5_ib_mr *mtt_mr;
+	u64 data_iova;
+	u64 pi_iova;
+
+	/* For ODP and implicit */
+	atomic_t num_deferred_work;
+	wait_queue_head_t q_deferred_work;
+	struct xarray implicit_children;
+	union {
+		struct rcu_head rcu;
+		struct list_head elm;
+		struct work_struct work;
+	} odp_destroy;
+	struct ib_odp_counters odp_stats;
+	bool is_odp_implicit;
+
+	struct mlx5_async_work cb_work;
};
+
+static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
+{
+	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+	       mr->umem->is_odp;
+}

struct mlx5_ib_mw {
	struct ib_mw ibmw;
+	struct mlx5_core_mkey mmkey;
+	int ndescs;
+};
+
+struct mlx5_ib_devx_mr {
	struct mlx5_core_mkey mmkey;
	int ndescs;
};
...
	struct semaphore sem;
};

-enum {
-	MLX5_FMR_INVALID,
-	MLX5_FMR_VALID,
-	MLX5_FMR_BUSY,
-};
-
struct mlx5_cache_ent {
	struct list_head head;
	/* sync access to the cache entry
...
	spinlock_t lock;


-	struct dentry *dir;
	char name[4];
	u32 order;
	u32 xlt;
	u32 access_mode;
	u32 page;

-	u32 size;
-	u32 cur;
-	u32 miss;
-	u32 limit;
+	u8 disabled:1;
+	u8 fill_to_high_water:1;

-	struct dentry *fsize;
-	struct dentry *fcur;
-	struct dentry *fmiss;
-	struct dentry *flimit;
+	/*
+	 * - available_mrs is the length of the head list, i.e. the number of
+	 *   MRs available for immediate allocation.
+	 * - total_mrs is available_mrs plus all in-use MRs that could be
+	 *   returned to the cache.
+	 * - limit is the low water mark for available_mrs, 2 * limit is the
+	 *   upper water mark.
+	 * - pending is the number of MRs currently being created.
+	 */
+	u32 total_mrs;
+	u32 available_mrs;
+	u32 limit;
+	u32 pending;
+
+	/* Statistics */
+	u32 miss;

	struct mlx5_ib_dev *dev;
	struct work_struct work;
	struct delayed_work dwork;
-	int pending;
-	struct completion compl;
};
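The comment block above defines the regulation scheme: limit is the low water mark for available_mrs and 2 * limit the high water mark, with fill_to_high_water latching the refill direction. A simplified illustration of how those fields could drive a refill decision (a sketch of the policy only, not the driver's actual queue-adjust code):

```c
/* Illustrative policy check only. */
static bool example_ent_needs_mrs(struct mlx5_cache_ent *ent)
{
	if (ent->disabled || !ent->limit)
		return false;
	/* climb toward 2 * limit once we dip below the low water mark */
	if (ent->fill_to_high_water)
		return ent->available_mrs + ent->pending < 2 * ent->limit;
	return ent->available_mrs < ent->limit;
}
```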

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
-	int stopped;
	struct dentry *root;
	unsigned long last_add;
};

-struct mlx5_ib_gsi_qp;
-
struct mlx5_ib_port_resources {
-	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq *c0;
-	struct ib_xrcd *x0;
-	struct ib_xrcd *x1;
+	u32 xrcdn0;
+	u32 xrcdn1;
	struct ib_pd *p0;
	struct ib_srq *s0;
	struct ib_srq *s1;
...
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u16 set_id;
-	bool set_id_valid;
};

struct mlx5_ib_multiport_info;
...
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
-};
-
-struct mlx5_ib_port {
-	struct mlx5_ib_counters cnts;
-	struct mlx5_ib_multiport mp;
-	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
};

struct mlx5_roce {
...
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev *dev;
	u8 native_port_num;
+};
+
+struct mlx5_ib_port {
+	struct mlx5_ib_counters cnts;
+	struct mlx5_ib_multiport mp;
+	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+	struct mlx5_roce roce;
+	struct mlx5_eswitch_rep *rep;
};

struct mlx5_ib_dbg_param {
...
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
+	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
...
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
+	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
...
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

-struct mlx5_ib_dbg_delay_drop {
-	struct dentry *dir_debugfs;
-	struct dentry *rqs_cnt_debugfs;
-	struct dentry *events_cnt_debugfs;
-	struct dentry *timeout_debugfs;
-};
-
struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev *dev;
	struct work_struct delay_drop_work;
...
	bool activate;
	atomic_t events_cnt;
	atomic_t rqs_cnt;
-	struct mlx5_ib_dbg_delay_drop *dbg;
+	struct dentry *dir_debugfs;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
-	MLX5_IB_STAGE_FLOW_DB,
+	MLX5_IB_STAGE_FS,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
+	MLX5_IB_STAGE_QP,
+	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
+	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
-	MLX5_IB_STAGE_SPECS,
+	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
-	MLX5_IB_STAGE_CLASS_ATTR,
-	MLX5_IB_STAGE_REP_REG,
+	MLX5_IB_STAGE_RESTRACK,
	MLX5_IB_STAGE_MAX,
};

...
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
+	struct notifier_block mdev_events;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
...
		u64 ib_flags;
		struct mlx5_accel_esp_xfrm *ctx;
	} esp_aes_gcm;
+	struct {
+		struct mlx5_ib_dev *dev;
+		u32 sub_type;
+		union {
+			struct mlx5_modify_hdr *modify_hdr;
+			struct mlx5_pkt_reformat *pkt_reformat;
+		};
+	} flow_action_raw;
	};
};

-struct mlx5_memic {
+struct mlx5_dm {
	struct mlx5_core_dev *dev;
-	spinlock_t memic_lock;
+	/* This lock is used to protect the access to the shared
+	 * allocation map when concurrent requests by different
+	 * processes are handled.
+	 */
+	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};
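struct mlx5_dm's lock exists because MEMIC allocations from different processes race on the shared memic_alloc_pages bitmap. A sketch of claiming a contiguous run of pages under that lock, using the standard linux/bitmap.h helpers (the function itself is hypothetical):

```c
/* Hypothetical: reserve nr_pages contiguous MEMIC pages. */
static int example_alloc_memic_pages(struct mlx5_dm *dm, unsigned int nr_pages)
{
	unsigned long page_idx;

	spin_lock(&dm->lock);
	page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
					      MLX5_MAX_MEMIC_PAGES, 0,
					      nr_pages, 0);
	if (page_idx < MLX5_MAX_MEMIC_PAGES)
		bitmap_set(dm->memic_alloc_pages, page_idx, nr_pages);
	spin_unlock(&dm->lock);

	return page_idx < MLX5_MAX_MEMIC_PAGES ? (int)page_idx : -ENOMEM;
}
```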

...
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

+int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
+			   bool is_egress,
+			   struct mlx5_flow_act *action);
+struct mlx5_ib_lb_state {
+	/* protect the user_td */
+	struct mutex mutex;
+	u32 user_td;
+	int qps;
+	bool enabled;
+};
+
+struct mlx5_ib_pf_eq {
+	struct notifier_block irq_nb;
+	struct mlx5_ib_dev *dev;
+	struct mlx5_eq *core;
+	struct work_struct work;
+	spinlock_t lock; /* Pagefaults spinlock */
+	struct workqueue_struct *wq;
+	mempool_t *pool;
+};
+
+struct mlx5_devx_event_table {
+	struct mlx5_nb devx_nb;
+	/* serialize updating the event_xa */
+	struct mutex event_xa_lock;
+	struct xarray event_xa;
+};
+
+struct mlx5_var_table {
+	/* serialize updating the bitmap */
+	struct mutex bitmap_lock;
+	unsigned long *bitmap;
+	u64 hw_start_addr;
+	u32 stride_size;
+	u64 num_var_hw_entries;
+};
+
struct mlx5_ib_dev {
	struct ib_device ib_dev;
-	const struct uverbs_object_tree_def *driver_trees[6];
	struct mlx5_core_dev *mdev;
-	struct mlx5_roce roce[MLX5_MAX_PORTS];
+	struct notifier_block mdev_events;
	int num_ports;
	/* serialize update of capability mask
	 */
	struct mutex cap_mask_mutex;
-	bool ib_active;
+	u8 ib_active:1;
+	u8 is_rep:1;
+	u8 lag_active:1;
+	u8 wc_support:1;
+	u8 fill_delay;
	struct umr_common umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources devr;
+
+	atomic_t mkey_var;
	struct mlx5_mr_cache cache;
	struct timer_list delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex slow_path_mutex;
-	int fill_delay;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps odp_caps;
	u64 odp_max_size;
+	struct mlx5_ib_pf_eq odp_pf_eq;
+
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
-	struct srcu_struct mr_srcu;
+	struct srcu_struct odp_srcu;
+	struct xarray odp_mkeys;
+
	u32 null_mkey;
-#endif
	struct mlx5_ib_flow_db *flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t reset_flow_resource_lock;
...
	/* Array with num_ports elements */
	struct mlx5_ib_port *port;
	struct mlx5_sq_bfreg bfreg;
+	struct mlx5_sq_bfreg wc_bfreg;
	struct mlx5_sq_bfreg fp_bfreg;
	struct mlx5_ib_delay_drop delay_drop;
	const struct mlx5_ib_profile *profile;
-	struct mlx5_eswitch_rep *rep;

-	/* protect the user_td */
-	struct mutex lb_mutex;
-	u32 user_td;
+	struct mlx5_ib_lb_state lb;
	u8 umr_fence;
	struct list_head ib_dev_list;
	u64 sys_image_guid;
-	struct mlx5_memic memic;
+	struct mlx5_dm dm;
+	u16 devx_whitelist_uid;
+	struct mlx5_srq_table srq_table;
+	struct mlx5_qp_table qp_table;
+	struct mlx5_async_ctx async_ctx;
+	struct mlx5_devx_event_table devx_event_table;
+	struct mlx5_var_table var_table;
+
+	struct xarray sig_mrs;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
...
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

+static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
+{
+	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct mlx5_ib_ucontext, ibucontext);
+
+	return to_mdev(context->ibucontext.device);
+}
+
static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
...
static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
-}
-
-static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
-{
-	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
...
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

-int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
+static inline struct mlx5_user_mmap_entry *
+to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+	return container_of(rdma_entry,
+			    struct mlx5_user_mmap_entry, rdma_entry);
+}
+
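With mmaps now tracked as rdma_user_mmap_entry objects, to_mmmap() recovers the mlx5 wrapper so the mmap_flag stored in struct mlx5_user_mmap_entry can drive per-type behavior. A hedged sketch of such a dispatch (the function is illustrative; the real handling sits in the driver's mmap path):

```c
/* Illustrative: pick page protection from the entry's mmap type. */
static pgprot_t example_mmap_prot(struct rdma_user_mmap_entry *rdma_entry,
				  pgprot_t prot)
{
	switch (to_mmmap(rdma_entry)->mmap_flag) {
	case MLX5_IB_MMAP_TYPE_UAR_WC:
		return pgprot_writecombine(prot);
	case MLX5_IB_MMAP_TYPE_UAR_NC:
		return pgprot_noncached(prot);
	default:
		return prot;
	}
}
```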
+int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
+			struct ib_udata *udata, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
-int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
-		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-		 const void *in_mad, void *response_mad);
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
-				struct ib_udata *udata);
+int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-int mlx5_ib_destroy_ah(struct ib_ah *ah);
-struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
-				  struct ib_srq_init_attr *init_attr,
-				  struct ib_udata *udata);
+static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+	return 0;
+}
+int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
-int mlx5_ib_destroy_srq(struct ib_srq *srq);
+int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
...
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
-int mlx5_ib_destroy_qp(struct ib_qp *qp);
+int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
-int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
-		      const struct ib_send_wr **bad_wr);
-int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
-		      const struct ib_recv_wr **bad_wr);
-void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
-int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
-			  void *buffer, u32 length,
-			  struct mlx5_ib_qp_base *base);
-struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
-				const struct ib_cq_init_attr *attr,
-				struct ib_ucontext *context,
-				struct ib_udata *udata);
-int mlx5_ib_destroy_cq(struct ib_cq *cq);
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+			size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+			size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+			 size_t buflen, size_t *bc);
+int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		      struct ib_udata *udata);
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
...
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
-struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			       struct ib_udata *udata);
+int mlx5_ib_advise_mr(struct ib_pd *pd,
+		      enum ib_uverbs_advise_mr_advice advice,
+		      u32 flags,
+		      struct ib_sge *sg_list,
+		      u32 num_sge,
+		      struct uverbs_attr_bundle *attrs);
+int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
+					     struct ib_udata *udata,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
+void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
-int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
-struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
-			       enum ib_mr_type mr_type,
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
+struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
+					 u32 max_num_sg,
+					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
+int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+			 int data_sg_nents, unsigned int *data_sg_offset,
+			 struct scatterlist *meta_sg, int meta_sg_nents,
+			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-			const struct ib_mad_hdr *in, size_t in_mad_size,
-			struct ib_mad_hdr *out, size_t *out_mad_size,
-			u16 *out_mad_pkey_index);
-struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
-				   struct ib_ucontext *context,
-				   struct ib_udata *udata);
-int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
+			const struct ib_mad *in, struct ib_mad *out,
+			size_t *out_mad_size, u16 *out_mad_pkey_index);
+int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
...
			 struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
-int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
...
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

-struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+				       unsigned int entry, int access_flags);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
+
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
-int mlx5_ib_destroy_wq(struct ib_wq *wq);
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
-struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
-						      struct ib_rwq_ind_table_init_attr *init_attr,
-						      struct ib_udata *udata);
+int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+				 struct ib_rwq_ind_table_init_attr *init_attr,
+				 struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
-bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs);
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm);
+int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
-void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
-		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
+void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
-			      unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
-			   size_t nentries, struct mlx5_ib_mr *mr, int flags);
+void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+			   struct mlx5_ib_mr *mr, int flags);
+
+int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+			       enum ib_uverbs_advise_mr_advice advice,
+			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
...
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
+static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
-static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
-					 size_t nentries, struct mlx5_ib_mr *mr,
-					 int flags) {}
+static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+					 struct mlx5_ib_mr *mr, int flags) {}

+static inline int
+mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
+			   struct ib_sge *sg_list, u32 num_sge)
+{
+	return -EOPNOTSUPP;
+}
+static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
+{
+	return -EOPNOTSUPP;
+}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

+extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
+
/* Needed for rep profile */
-int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
...
			u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+			struct ifla_vf_guid *node_guid,
+			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
-			       const struct ib_gid_attr *attr);
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+				   const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
-int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
+void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);

/* GSI QP helper functions */
-struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
-				    struct ib_qp_init_attr *init_attr);
-int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
+int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
+		       struct ib_qp_init_attr *attr);
+int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
...
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u8 port_num);

-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
-			struct mlx5_ib_ucontext *context);
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
-			  struct mlx5_ib_ucontext *context);
-const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
-struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
-	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
-	void *cmd_in, int inlen, int dest_id, int dest_type);
-bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
-int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
-#else
-static inline int
-mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
-		    struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; };
-static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
-					struct mlx5_ib_ucontext *context) {}
-static inline const struct uverbs_object_tree_def *
-mlx5_ib_get_devx_tree(void) { return NULL; }
-static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
-					     int *dest_type)
-{
-	return false;
-}
-static inline int
-mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
-{
-	return 0;
-}
-#endif
+extern const struct uapi_definition mlx5_ib_devx_defs[];
+extern const struct uapi_definition mlx5_ib_flow_defs[];
+extern const struct uapi_definition mlx5_ib_qos_defs[];
+extern const struct uapi_definition mlx5_ib_std_types_defs[];
+
static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
...
	mad->method = IB_MGMT_METHOD_GET;
}

-static inline u8 convert_access(int acc)
-{
-	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
-	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
-	       (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
-	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
-	       MLX5_PERM_LOCAL_READ;
-}
-
static inline int is_qp1(enum ib_qp_type qp_type)
{
-	return qp_type == MLX5_IB_QPT_HW_GSI;
+	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
...
{
	u8 cqe_version = ucontext->cqe_version;

-	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
-	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
+	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

-	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
-	       !!cqe_version))
+	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
...
{
	u8 cqe_version = ucontext->cqe_version;

-	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
-	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
+	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

-	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
-	       !!cqe_version))
+	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
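The two uidx helpers above switched from the removed field_avail() macro to offsetofend(), but the ABI rule is unchanged: a user command struct may arrive truncated from older userspace, so a field may only be dereferenced when inlen covers it. The general pattern:

```c
/* Generic pattern: ucmd may be shorter than the kernel's current struct. */
if (offsetofend(typeof(*ucmd), uidx) <= inlen) {
	/* new-enough userspace: ucmd->uidx was actually copied in */
} else {
	/* old ABI: the field is absent, fall back to the default */
}
```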
...
int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);
+
+static inline bool mlx5_ib_can_load_pas_with_umr(struct mlx5_ib_dev *dev,
+						 size_t length)
+{
+	/*
+	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
+	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
+	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a mkey
+	 * can never be enabled without this capability. Simplify this weird
+	 * quirky hardware by just saying it can't use PAS lists with UMR at
+	 * all.
+	 */
+	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+		return false;
+
+	/*
+	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
+	 * used.
+	 */
+	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
+		return false;
+	return true;
+}
+
+/*
+ * true if an existing MR can be reconfigured to new access_flags using UMR.
+ * Older HW cannot use UMR to update certain elements of the MKC. See
+ * umr_check_mkey_mask() and get_umr_update_access_mask().
+ */
+static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
+						 unsigned int current_access_flags,
+						 unsigned int target_access_flags)
+{
+	unsigned int diffs = current_access_flags ^ target_access_flags;
+
+	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
+	    MLX5_CAP_GEN(dev->mdev, atomic) &&
+	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+		return false;
+
+	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
+	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+		return false;
+
+	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
+	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+		return false;
+
+	return true;
+}
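Together with mlx5_ib_can_load_pas_with_umr() above, this helper lets MR re-registration and cache-reuse paths decide between a cheap UMR update and recreating the mkey. A hedged sketch of that decision (the wrapper is hypothetical):

```c
/* Hypothetical policy: only take the UMR fast path when both the new
 * length and the access-flag delta are expressible via UMR.
 */
static bool example_can_use_umr(struct mlx5_ib_dev *dev,
				struct mlx5_ib_mr *mr,
				int new_access_flags, size_t new_length)
{
	return mlx5_ib_can_load_pas_with_umr(dev, new_length) &&
	       mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
					     new_access_flags);
}
```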
+
+int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
+
+static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
+{
+	return dev->lag_active ||
+		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
+		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
+}
#endif /* MLX5_IB_H */