@@ -41 +41 @@
 #include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 #include <linux/workqueue.h>
 #include <linux/mempool.h>
 #include <linux/interrupt.h>
 #include <linux/idr.h>
+#include <linux/notifier.h>
+#include <linux/refcount.h>

 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/srq.h>
+#include <linux/mlx5/eq.h>
 #include <linux/timecounter.h>
 #include <linux/ptp_clock_kernel.h>
+#include <net/devlink.h>

 enum {
	MLX5_BOARD_ID_LEN = 64,
-	MLX5_MAX_NAME_LEN = 16,
 };

 enum {
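The radix-tree include gives way to the XArray API (later in this diff the per-device page tree in struct mlx5_priv correspondingly becomes an xarray, page_root_xa). A minimal, illustrative sketch of the XArray calls such an ID-keyed table relies on; the names here are hypothetical, not driver code:

/* Example (not part of the patch): XArray replacing a radix-tree lookup table. */
#include <linux/xarray.h>

static DEFINE_XARRAY(example_page_xa);

static int example_store(unsigned long id, void *page)
{
	/* xa_store() returns the old entry or an xa_err()-encoded pointer */
	return xa_err(xa_store(&example_page_xa, id, page, GFP_KERNEL));
}

static void *example_lookup(unsigned long id)
{
	return xa_load(&example_page_xa, id);
}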
@@ -85 +87 @@
 };

 enum {
-	MLX5_EQ_VEC_PAGES = 0,
-	MLX5_EQ_VEC_CMD = 1,
-	MLX5_EQ_VEC_ASYNC = 2,
-	MLX5_EQ_VEC_PFAULT = 3,
-	MLX5_EQ_VEC_COMP_BASE,
-};
-
-enum {
-	MLX5_MAX_IRQ_NAME = 32
-};
-
-enum {
-	MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
-	MLX5_ATOMIC_MODE_CX = 2 << 16,
-	MLX5_ATOMIC_MODE_8B = 3 << 16,
-	MLX5_ATOMIC_MODE_16B = 4 << 16,
-	MLX5_ATOMIC_MODE_32B = 5 << 16,
-	MLX5_ATOMIC_MODE_64B = 6 << 16,
-	MLX5_ATOMIC_MODE_128B = 7 << 16,
-	MLX5_ATOMIC_MODE_256B = 8 << 16,
+	MLX5_ATOMIC_MODE_OFFSET = 16,
+	MLX5_ATOMIC_MODE_IB_COMP = 1,
+	MLX5_ATOMIC_MODE_CX = 2,
+	MLX5_ATOMIC_MODE_8B = 3,
+	MLX5_ATOMIC_MODE_16B = 4,
+	MLX5_ATOMIC_MODE_32B = 5,
+	MLX5_ATOMIC_MODE_64B = 6,
+	MLX5_ATOMIC_MODE_128B = 7,
+	MLX5_ATOMIC_MODE_256B = 8,
 };

 enum {
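The atomic-mode constants are no longer pre-shifted by 16 bits; MLX5_ATOMIC_MODE_OFFSET is exposed instead, so a caller that still needs the legacy packed encoding shifts explicitly. A one-line sketch, illustrative only:

/* Example (not part of the patch): compose the legacy shifted encoding. */
static inline u32 example_packed_atomic_mode(int mode)
{
	return (u32)mode << MLX5_ATOMIC_MODE_OFFSET;	/* e.g. mode = MLX5_ATOMIC_MODE_64B */
}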
@@ -118 +109 @@
	MLX5_REG_FPGA_CAP = 0x4022,
	MLX5_REG_FPGA_CTRL = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
+	MLX5_REG_CORE_DUMP = 0x402e,
	MLX5_REG_PCAP = 0x5001,
	MLX5_REG_PMTU = 0x5003,
	MLX5_REG_PTYS = 0x5004,
@@ -133 +125 @@
	MLX5_REG_PVLC = 0x500f,
	MLX5_REG_PCMR = 0x5041,
	MLX5_REG_PMLP = 0x5002,
+	MLX5_REG_PPLM = 0x5023,
	MLX5_REG_PCAM = 0x507f,
	MLX5_REG_NODE_DESC = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA = 0x9014,
+	MLX5_REG_MFRL = 0x9028,
	MLX5_REG_MLCR = 0x902b,
	MLX5_REG_MTRC_CAP = 0x9040,
	MLX5_REG_MTRC_CONF = 0x9041,
	MLX5_REG_MTRC_STDB = 0x9042,
	MLX5_REG_MTRC_CTRL = 0x9043,
+	MLX5_REG_MPEIN = 0x9050,
	MLX5_REG_MPCNT = 0x9051,
	MLX5_REG_MTPPS = 0x9053,
	MLX5_REG_MTPPSE = 0x9054,
	MLX5_REG_MPEGC = 0x9056,
+	MLX5_REG_MCQS = 0x9060,
	MLX5_REG_MCQI = 0x9061,
	MLX5_REG_MCC = 0x9062,
	MLX5_REG_MCDA = 0x9063,
	MLX5_REG_MCAM = 0x907f,
+	MLX5_REG_MIRC = 0x9162,
+	MLX5_REG_SBCAM = 0xB01F,
+	MLX5_REG_RESOURCE_DUMP = 0xC000,
 };

 enum mlx5_qpts_trust_state {
@@ -162 +161 @@
	MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
 };

-enum mlx5_dct_atomic_mode {
-	MLX5_ATOMIC_MODE_DCT_CX = 2,
-};
-
 enum {
	MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
+	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
+	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
 };

 enum mlx5_page_fault_resume_flags {
@@ -191 +188 @@
	MLX5_POLICY_INVALID = 0xffffffff
 };

+enum mlx5_coredev_type {
+	MLX5_COREDEV_PF,
+	MLX5_COREDEV_VF
+};
+
 struct mlx5_field_desc {
-	struct dentry *dent;
	int i;
 };

@@ -201 +202 @@
	void *object;
	enum dbg_rsc_type type;
	struct dentry *root;
-	struct mlx5_field_desc fields[0];
+	struct mlx5_field_desc fields[];
 };

 enum mlx5_dev_event {
-	MLX5_DEV_EVENT_SYS_ERROR,
-	MLX5_DEV_EVENT_PORT_UP,
-	MLX5_DEV_EVENT_PORT_DOWN,
-	MLX5_DEV_EVENT_PORT_INITIALIZED,
-	MLX5_DEV_EVENT_LID_CHANGE,
-	MLX5_DEV_EVENT_PKEY_CHANGE,
-	MLX5_DEV_EVENT_GUID_CHANGE,
-	MLX5_DEV_EVENT_CLIENT_REREG,
-	MLX5_DEV_EVENT_PPS,
-	MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
+	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
+	MLX5_DEV_EVENT_PORT_AFFINITY = 129,
 };

 enum mlx5_port_status {
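enum mlx5_dev_event shrinks to two driver-level events numbered above the 0-127 firmware event range; consumers now receive them through the notifier chain registered with mlx5_notifier_register(), declared further down in this diff, instead of the per-interface event callback that is removed there. A handler sketch, assuming nothing beyond the standard notifier_block contract (the function name is illustrative):

/* Example (not part of the patch): driver-event notifier handler. */
static int example_event_handler(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	switch (event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		/* device entered internal error state */
		break;
	case MLX5_DEV_EVENT_PORT_AFFINITY:
		/* LAG port affinity changed */
		break;
	default:
		/* 0 - 127 are forwarded firmware events */
		break;
	}
	return NOTIFY_OK;
}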
@@ -222 +215 @@
	MLX5_PORT_DOWN = 2,
 };

-enum mlx5_eq_type {
-	MLX5_EQ_TYPE_COMP,
-	MLX5_EQ_TYPE_ASYNC,
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	MLX5_EQ_TYPE_PF,
-#endif
-};
-
-struct mlx5_bfreg_info {
-	u32 *sys_pages;
-	int num_low_latency_bfregs;
-	unsigned int *count;
-
-	/*
-	 * protect bfreg allocation data structs
-	 */
-	struct mutex lock;
-	u32 ver;
-	bool lib_uar_4k;
-	u32 num_sys_pages;
-	u32 num_static_sys_pages;
-	u32 total_num_bfregs;
-	u32 num_dyn_bfregs;
+enum mlx5_cmdif_state {
+	MLX5_CMDIF_STATE_UNINITIALIZED,
+	MLX5_CMDIF_STATE_UP,
+	MLX5_CMDIF_STATE_DOWN,
 };

 struct mlx5_cmd_first {
@@ -261 +235 @@

 struct mlx5_cmd_debug {
	struct dentry *dbg_root;
-	struct dentry *dbg_in;
-	struct dentry *dbg_out;
-	struct dentry *dbg_outlen;
-	struct dentry *dbg_status;
-	struct dentry *dbg_run;
	void *in_msg;
	void *out_msg;
	u8 status;
@@ -290 +259 @@
	u64 sum;
	u64 n;
	struct dentry *root;
-	struct dentry *avg;
-	struct dentry *count;
	/* protect command average calculations */
	spinlock_t lock;
 };

 struct mlx5_cmd {
+	struct mlx5_nb nb;
+
+	enum mlx5_cmdif_state state;
	void *cmd_alloc_buf;
	dma_addr_t alloc_dma;
	int alloc_size;
@@ -323 +293 @@
	struct semaphore sem;
	struct semaphore pages_sem;
	int mode;
+	u16 allowed_opcode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int checksum_disabled;
-	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
+	struct mlx5_cmd_stats *stats;
 };

 struct mlx5_port_caps {
@@ -357 +328 @@
 };

 struct mlx5_frag_buf_ctrl {
-	struct mlx5_frag_buf frag_buf;
+	struct mlx5_buf_list *frags;
	u32 sz_m1;
	u16 frag_sz_m1;
	u16 strides_offset;
	u8 log_sz;
	u8 log_stride;
	u8 log_frag_strides;
-};
-
-struct mlx5_eq_tasklet {
-	struct list_head list;
-	struct list_head process_list;
-	struct tasklet_struct task;
-	/* lock on completion tasklet list */
-	spinlock_t lock;
-};
-
-struct mlx5_eq_pagefault {
-	struct work_struct work;
-	/* Pagefaults lock */
-	spinlock_t lock;
-	struct workqueue_struct *wq;
-	mempool_t *pool;
-};
-
-struct mlx5_cq_table {
-	/* protect radix tree */
-	spinlock_t lock;
-	struct radix_tree_root tree;
-};
-
-struct mlx5_eq {
-	struct mlx5_core_dev *dev;
-	struct mlx5_cq_table cq_table;
-	__be32 __iomem *doorbell;
-	u32 cons_index;
-	struct mlx5_frag_buf buf;
-	int size;
-	unsigned int irqn;
-	u8 eqn;
-	int nent;
-	u64 mask;
-	struct list_head list;
-	int index;
-	struct mlx5_rsc_debug *dbg;
-	enum mlx5_eq_type type;
-	union {
-		struct mlx5_eq_tasklet tasklet_ctx;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-		struct mlx5_eq_pagefault pf_ctx;
-#endif
-	};
 };

 struct mlx5_core_psv {
@@ -435 +361 @@
 enum {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
+	MLX5_MKEY_INDIRECT_DEVX,
 };

 struct mlx5_core_mkey {
@@ -459 +386 @@

 struct mlx5_core_rsc_common {
	enum mlx5_res_type res;
-	atomic_t refcount;
+	refcount_t refcount;
	struct completion free;
-};
-
-struct mlx5_core_srq {
-	struct mlx5_core_rsc_common common; /* must be first */
-	u32 srqn;
-	int max;
-	size_t max_gs;
-	size_t max_avail_gather;
-	int wqe_shift;
-	void (*event) (struct mlx5_core_srq *, enum mlx5_event);
-
-	atomic_t refcount;
-	struct completion free;
-};
-
-struct mlx5_eq_table {
-	void __iomem *update_ci;
-	void __iomem *update_arm_ci;
-	struct list_head comp_eqs_list;
-	struct mlx5_eq pages_eq;
-	struct mlx5_eq async_eq;
-	struct mlx5_eq cmd_eq;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	struct mlx5_eq pfault_eq;
-#endif
-	int num_comp_vectors;
-	/* protect EQs list
-	 */
-	spinlock_t lock;
 };

 struct mlx5_uars_page {
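mlx5_core_rsc_common moves from atomic_t to refcount_t, which saturates on overflow and refuses increments from zero. The usual lifecycle pattern the new type enables, as a self-contained sketch (the struct and function names are illustrative, not driver code):

/* Example (not part of the patch): refcount_t lifecycle for a shared resource. */
#include <linux/refcount.h>
#include <linux/completion.h>

struct example_rsc {
	refcount_t refcount;
	struct completion free;
};

static void example_rsc_init(struct example_rsc *r)
{
	refcount_set(&r->refcount, 1);
	init_completion(&r->free);
}

static bool example_rsc_get(struct example_rsc *r)
{
	/* refuses to resurrect an object whose count already hit zero */
	return refcount_inc_not_zero(&r->refcount);
}

static void example_rsc_put(struct example_rsc *r)
{
	if (refcount_dec_and_test(&r->refcount))
		complete(&r->free);
}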
@@ -531 +429 @@
	struct timer_list timer;
	u32 prev;
	int miss_counter;
-	bool sick;
+	u8 synd;
+	u32 fatal_error;
+	u32 crdump_size;
	/* wq spinlock to synchronize draining */
	spinlock_t wq_lock;
	struct workqueue_struct *wq;
	unsigned long flags;
-	struct work_struct work;
+	struct work_struct fatal_report_work;
+	struct work_struct report_work;
	struct delayed_work recover_work;
+	struct devlink_health_reporter *fw_reporter;
+	struct devlink_health_reporter *fw_fatal_reporter;
 };

 struct mlx5_qp_table {
+	struct notifier_block nb;
+
	/* protect radix tree
	 */
	spinlock_t lock;
-	struct radix_tree_root tree;
-};
-
-struct mlx5_srq_table {
-	/* protect radix tree
-	 */
-	spinlock_t lock;
-	struct radix_tree_root tree;
-};
-
-struct mlx5_mkey_table {
-	/* protect radix tree
-	 */
-	rwlock_t lock;
	struct radix_tree_root tree;
 };

@@ -565 +456 @@
	int enabled;
	u64 port_guid;
	u64 node_guid;
+	/* Valid bits are used to validate administrative guid only.
+	 * Enabled after ndo_set_vf_guid
+	 */
+	u8 port_guid_valid:1;
+	u8 node_guid_valid:1;
	enum port_state_policy policy;
 };

 struct mlx5_core_sriov {
	struct mlx5_vf_context *vfs_ctx;
	int num_vfs;
-	int enabled_vfs;
+	u16 max_vfs;
 };

-struct mlx5_irq_info {
-	cpumask_var_t mask;
-	char name[MLX5_MAX_IRQ_NAME];
+struct mlx5_fc_pool {
+	struct mlx5_core_dev *dev;
+	struct mutex pool_lock; /* protects pool lists */
+	struct list_head fully_used;
+	struct list_head partially_used;
+	struct list_head unused;
+	int available_fcs;
+	int used_fcs;
+	int threshold;
 };

 struct mlx5_fc_stats {
-	struct rb_root counters;
-	struct list_head addlist;
-	/* protect addlist add/splice operations */
-	spinlock_t addlist_lock;
+	spinlock_t counters_idr_lock; /* protects counters_idr */
+	struct idr counters_idr;
+	struct list_head counters;
+	struct llist_head addlist;
+	struct llist_head dellist;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
+	u32 *bulk_query_out;
+	struct mlx5_fc_pool fc_pool;
 };

+struct mlx5_events;
 struct mlx5_mpfs;
 struct mlx5_eswitch;
 struct mlx5_lag;
-struct mlx5_pagefault;
+struct mlx5_devcom;
+struct mlx5_fw_reset;
+struct mlx5_eq_table;
+struct mlx5_irq_table;

 struct mlx5_rate_limit {
	u32 rate;
@@ -603 +512 @@
 };

 struct mlx5_rl_entry {
-	struct mlx5_rate_limit rl;
-	u16 index;
-	u16 refcount;
+	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
+	u16 index;
+	u64 refcount;
+	u16 uid;
+	u8 dedicated : 1;
 };

 struct mlx5_rl_table {
@@ -617 +528 @@
	struct mlx5_rl_entry *rl_entry;
 };

-enum port_module_event_status_type {
-	MLX5_MODULE_STATUS_PLUGGED = 0x1,
-	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
-	MLX5_MODULE_STATUS_ERROR = 0x3,
-	MLX5_MODULE_STATUS_NUM = 0x3,
-};
-
-enum port_module_event_error_type {
-	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
-	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
-	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
-	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
-	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
-	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
-	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
-	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
-	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
-	MLX5_MODULE_EVENT_ERROR_NUM,
-};
-
-struct mlx5_port_module_event_stats {
-	u64 status_counters[MLX5_MODULE_STATUS_NUM];
-	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+struct mlx5_core_roce {
+	struct mlx5_flow_table *ft;
+	struct mlx5_flow_group *fg;
+	struct mlx5_flow_handle *allow_rule;
 };

 struct mlx5_priv {
-	char name[MLX5_MAX_NAME_LEN];
-	struct mlx5_eq_table eq_table;
-	struct mlx5_irq_info *irq_info;
+	/* IRQ table valid only for real pci devices PF or VF */
+	struct mlx5_irq_table *irq_table;
+	struct mlx5_eq_table *eq_table;

	/* pages stuff */
+	struct mlx5_nb pg_nb;
	struct workqueue_struct *pg_wq;
-	struct rb_root page_root;
+	struct xarray page_root_xa;
	int fw_pages;
	atomic_t reg_pages;
	struct list_head free_list;
	int vfs_pages;
+	int peer_pf_pages;

	struct mlx5_core_health health;

-	struct mlx5_srq_table srq_table;
-
	/* start: qp staff */
-	struct mlx5_qp_table qp_table;
	struct dentry *qp_debugfs;
	struct dentry *eq_debugfs;
	struct dentry *cq_debugfs;
	struct dentry *cmdif_debugfs;
	/* end: qp staff */
-
-	/* start: mkey staff */
-	struct mlx5_mkey_table mkey_table;
-	/* end: mkey staff */

	/* start: alloc staff */
	/* protect buffer alocation according to numa node */
@@ -681 +568 @@
	/* end: alloc staff */
	struct dentry *dbg_root;

-	/* protect mkey key part */
-	spinlock_t mkey_lock;
-	u8 mkey_key;
-
	struct list_head dev_list;
	struct list_head ctx_list;
	spinlock_t ctx_lock;
-
-	struct list_head waiting_events_list;
-	bool is_accum_events;
+	struct mlx5_events *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs *mpfs;
	struct mlx5_eswitch *eswitch;
	struct mlx5_core_sriov sriov;
	struct mlx5_lag *lag;
-	unsigned long pci_dev_data;
+	struct mlx5_devcom *devcom;
+	struct mlx5_fw_reset *fw_reset;
+	struct mlx5_core_roce roce;
	struct mlx5_fc_stats fc_stats;
	struct mlx5_rl_table rl_table;

-	struct mlx5_port_module_event_stats pme_stats;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	void (*pfault)(struct mlx5_core_dev *dev,
-		       void *context,
-		       struct mlx5_pagefault *pfault);
-	void *pfault_ctx;
-	struct srcu_struct pfault_srcu;
-#endif
	struct mlx5_bfreg_data bfregs;
	struct mlx5_uars_page *uar;
 };

 enum mlx5_device_state {
+	MLX5_DEVICE_STATE_UNINITIALIZED,
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
 };
@@ -734 +609 @@
	MLX5_PFAULT_RDMA = 1 << 2,
 };

-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
-	u32 bytes_committed;
-	u32 token;
-	u8 event_subtype;
-	u8 type;
-	union {
-		/* Initiator or send message responder pagefault details. */
-		struct {
-			/* Received packet size, only valid for responders. */
-			u32 packet_size;
-			/*
-			 * Number of resource holding WQE, depends on type.
-			 */
-			u32 wq_num;
-			/*
-			 * WQE index. Refers to either the send queue or
-			 * receive queue, according to event_subtype.
-			 */
-			u16 wqe_index;
-		} wqe;
-		/* RDMA responder pagefault details */
-		struct {
-			u32 r_key;
-			/*
-			 * Received packet size, minimal size page fault
-			 * resolution required for forward progress.
-			 */
-			u32 packet_size;
-			u32 rdma_op_len;
-			u64 rdma_va;
-		} rdma;
-	};
-
-	struct mlx5_eq *eq;
-	struct work_struct work;
-};
-
 struct mlx5_td {
	/* protects tirs list changes while tirs refresh */
	struct mutex list_lock;
@@ -784 +621 @@
	struct mlx5_td td;
	struct mlx5_core_mkey mkey;
	struct mlx5_sq_bfreg bfreg;
+};
+
+enum mlx5_sw_icm_type {
+	MLX5_SW_ICM_TYPE_STEERING,
+	MLX5_SW_ICM_TYPE_HEADER_MODIFY,
 };

 #define MLX5_MAX_RESERVED_GIDS 8
@@ -802 +644 @@
	u8 enabled;
 };

-struct mlx5_clock {
-	rwlock_t lock;
+struct mlx5_timer {
	struct cyclecounter cycles;
	struct timecounter tc;
-	struct hwtstamp_config hwtstamp_config;
	u32 nominal_c_mult;
	unsigned long overflow_period;
	struct delayed_work overflow_work;
-	struct mlx5_core_dev *mdev;
+};
+
+struct mlx5_clock {
+	struct mlx5_nb pps_nb;
+	seqlock_t lock;
+	struct hwtstamp_config hwtstamp_config;
	struct ptp_clock *ptp;
	struct ptp_clock_info ptp_info;
	struct mlx5_pps pps_info;
+	struct mlx5_timer timer;
 };

+struct mlx5_dm;
 struct mlx5_fw_tracer;
 struct mlx5_vxlan;
+struct mlx5_geneve;
+struct mlx5_hv_vhca;
+
+#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
+#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))

 struct mlx5_core_dev {
+	struct device *device;
+	enum mlx5_coredev_type coredev_type;
	struct pci_dev *pdev;
	/* sync pci state */
	struct mutex pci_status_mutex;
@@ -832 +686 @@
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
-		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+		u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
+		u8 embedded_cpu;
	} caps;
+	u64 sys_image_guid;
	phys_addr_t iseg_base;
	struct mlx5_init_seg __iomem *iseg;
+	phys_addr_t bar_addr;
	enum mlx5_device_state state;
	/* sync interface state */
	struct mutex intf_state_mutex;
	unsigned long intf_state;
-	void (*event) (struct mlx5_core_dev *dev,
-		       enum mlx5_dev_event event,
-		       unsigned long param);
	struct mlx5_priv priv;
	struct mlx5_profile *profile;
-	atomic_t num_qps;
	u32 issi;
	struct mlx5e_resources mlx5e_res;
+	struct mlx5_dm *dm;
	struct mlx5_vxlan *vxlan;
+	struct mlx5_geneve *geneve;
	struct {
		struct mlx5_rsvd_gids reserved_gids;
		u32 roce_en;
@@ -858 +713 @@
 #ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
 #endif
-#ifdef CONFIG_RFS_ACCEL
-	struct cpu_rmap *rmap;
+#ifdef CONFIG_MLX5_ACCEL
+	const struct mlx5_accel_ipsec_ops *ipsec_ops;
 #endif
	struct mlx5_clock clock;
	struct mlx5_ib_clock_info *clock_info;
-	struct page *clock_info_page;
	struct mlx5_fw_tracer *tracer;
+	struct mlx5_rsc_dump *rsc_dump;
+	u32 vsc_addr;
+	struct mlx5_hv_vhca *hv_vhca;
 };

 struct mlx5_db {
@@ -915 +772 @@
	u64 ts2;
	u16 op;
	bool polling;
+	/* Track the max comp handlers */
+	refcount_t refcnt;
 };

 struct mlx5_pas {
@@ -940 +799 @@
	u64 node_guid;
	u32 cap_mask1;
	u32 cap_mask1_perm;
-	u32 cap_mask2;
-	u32 cap_mask2_perm;
+	u16 cap_mask2;
+	u16 cap_mask2_perm;
	u16 lid;
	u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u8 lmc;
@@ -984 +843 @@
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
 }

-static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
-{
-	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
-}
-
 static inline u32 mlx5_base_mkey(const u32 key)
 {
	return key & 0xffffff00u;
 }

-static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
+					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
 {
+	fbc->frags = frags;
	fbc->log_stride = log_stride;
	fbc->log_sz = log_sz;
	fbc->sz_m1 = (1 << fbc->log_sz) - 1;
@@ -1006 +862 @@
	fbc->strides_offset = strides_offset;
 }

-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
+				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
 {
-	mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
-}
-
-static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
-					      void *cqc)
-{
-	mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
-		      MLX5_GET(cqc, cqc, log_cq_size),
-		      fbc);
+	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
 }

 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
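mlx5_fill_fbc*() becomes mlx5_init_fbc*() and the control block now holds only a pointer to the fragment list instead of embedding a whole mlx5_frag_buf. A sketch of how a queue owner wires the two together, assuming struct mlx5_frag_buf exposes its frags array (as the removed inline code above implies); the function name and parameter sourcing are illustrative:

/* Example (not part of the patch): init a frag-buffer control block, fetch a WQE. */
static void *example_get_wqe(struct mlx5_frag_buf *buf,
			     struct mlx5_frag_buf_ctrl *fbc,
			     u8 log_stride, u8 log_sz, u32 ix)
{
	/* log_stride/log_sz would come from the queue's real parameters */
	mlx5_init_fbc(buf->frags, log_stride, log_sz, fbc);
	return mlx5_frag_buf_get_wqe(fbc, ix);
}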
@@ -1028 +877 @@
	ix += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

-	return fbc->frag_buf.frags[frag].buf +
-		((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
 }

 static inline u32
@@ -1040 +888 @@
	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
 }

+enum {
+	CMD_ALLOWED_OPCODE_ALL,
+};
+
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
+			enum mlx5_cmdif_state cmdif_state);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
+
+struct mlx5_async_ctx {
+	struct mlx5_core_dev *dev;
+	atomic_t num_inflight;
+	struct completion inflight_done;
+};
+
+struct mlx5_async_work;
+
+typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
+
+struct mlx5_async_work {
+	struct mlx5_async_ctx *ctx;
+	mlx5_async_cbk_t user_callback;
+};
+
+void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
+			     struct mlx5_async_ctx *ctx);
+void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
+int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+		     void *out, int out_size, mlx5_async_cbk_t callback,
+		     struct mlx5_async_work *work);
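The old mlx5_cmd_exec_cb(dev, ...) with a bare callback/context pair (removed just below) is replaced by a per-user mlx5_async_ctx, whose num_inflight/inflight_done members suggest that cleanup waits for outstanding callbacks. A usage sketch built only from the declarations above; the request structure, buffer size and function names are illustrative:

/* Example (not part of the patch): submitting an async command. */
struct example_async_req {
	struct mlx5_async_work work;	/* recovered from the callback */
	u32 out[16];			/* illustrative output buffer */
};

static void example_done(int status, struct mlx5_async_work *work)
{
	struct example_async_req *req =
		container_of(work, struct example_async_req, work);

	/* status is the command completion status; req->out holds the output */
}

static int example_submit(struct mlx5_async_ctx *ctx, void *in, int in_size,
			  struct example_async_req *req)
{
	return mlx5_cmd_exec_cb(ctx, in, in_size, req->out, sizeof(req->out),
				example_done, &req->work);
}

/* Lifetime: mlx5_cmd_init_async_ctx(dev, &ctx) before use,
 * mlx5_cmd_cleanup_async_ctx(&ctx) on teardown. */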

 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
-int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
-		     void *out, int out_size, mlx5_cmd_cbk_t callback,
-		     void *context);
+
+#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                             \
+	({                                                                     \
+		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out,    \
+			      MLX5_ST_SZ_BYTES(ifc_cmd##_out));                \
+	})
+
+#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                                     \
+	({                                                                     \
+		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};                   \
+		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);                   \
+	})
+
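The new mlx5_cmd_exec_in()/mlx5_cmd_exec_inout() wrappers derive both buffer sizes from the mlx5_ifc structure name, removing the hand-passed lengths. A sketch of the intended call pattern, assuming the usual MLX5_ST_SZ_DW()/MLX5_SET() helpers from mlx5_ifc; DESTROY_TIR is used purely as an example command:

/* Example (not part of the patch): issuing a command through the new macro. */
static int example_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);

	/* the output buffer and both sizes come from the "destroy_tir" name */
	return mlx5_cmd_exec_in(dev, destroy_tir, in);
}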
 int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
 void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);

 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
+void mlx5_health_flush(struct mlx5_core_dev *dev);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
-void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-			struct mlx5_frag_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
@@ -1076 +962 @@
					    gfp_t flags, int npages);
 void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			 struct mlx5_srq_attr *in);
-int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
-int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			struct mlx5_srq_attr *out);
-int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-		      u16 lwm, int is_srq);
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
-int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
-			     struct mlx5_core_mkey *mkey,
-			     u32 *in, int inlen,
-			     u32 *out, int outlen,
-			     mlx5_cmd_cbk_t callback, void *context);
 int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
@@ -1099 +971 @@
			 u32 *out, int outlen);
 int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
 int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
-		      u16 opmod, u8 port);
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s32 npages);
+				 s32 npages, bool ec_function);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);

 void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
-struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-		    unsigned int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

-int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
@@ -1134 +1001 @@
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

 const char *mlx5_command_str(int command);
-int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
@@ -1144 +1011 @@
			     struct mlx5_odp_caps *odp_caps);
 int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
-				u32 wq_num, u8 type, int error);
-#endif

 int mlx5_init_rl_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
@@ -1155 +1018 @@
		     struct mlx5_rate_limit *rl);
 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
 bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+			 bool dedicated_entry, u16 *index);
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
 bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
 int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
 void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

+unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
+struct cpumask *
+mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
 unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
 int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
-
-static inline int fw_initializing(struct mlx5_core_dev *dev)
-{
-	return ioread32be(&dev->iseg->initializing) >> 31;
-}

 static inline u32 mlx5_mkey_to_idx(u32 mkey)
 {
@@ -1201 +1065 @@
 enum {
	MLX5_INTERFACE_PROTOCOL_IB = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
+	MLX5_INTERFACE_PROTOCOL_VDPA = 2,
 };

 struct mlx5_interface {
@@ -1208 +1073 @@
	void (*remove)(struct mlx5_core_dev *dev, void *context);
	int (*attach)(struct mlx5_core_dev *dev, void *context);
	void (*detach)(struct mlx5_core_dev *dev, void *context);
-	void (*event)(struct mlx5_core_dev *dev, void *context,
-		      enum mlx5_dev_event event, unsigned long param);
-	void (*pfault)(struct mlx5_core_dev *dev,
-		       void *context,
-		       struct mlx5_pagefault *pfault);
-	void * (*get_dev)(void *context);
	int protocol;
	struct list_head list;
 };

-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
 int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+
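With the per-interface event/pfault callbacks removed just above, consumers subscribe to device events through these notifier helpers instead. A registration sketch; example_event_handler is the illustrative handler from the note after the mlx5_dev_event hunk earlier in this diff, and example_nb is likewise hypothetical:

/* Example (not part of the patch): subscribing to driver-level events. */
static struct notifier_block example_nb = {
	.notifier_call = example_event_handler,
};

static int example_attach(struct mlx5_core_dev *dev)
{
	return mlx5_notifier_register(dev, &example_nb);
}

static void example_detach(struct mlx5_core_dev *dev)
{
	mlx5_notifier_unregister(dev, &example_nb);
}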
 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

 int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+			   struct net_device *slave);
 int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
 struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
 void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
+int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+			 u64 length, u32 log_alignment, u16 uid,
+			 phys_addr_t *addr, u32 *obj_id);
+int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+			   u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
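The SW ICM allocator pairs with the mlx5_sw_icm_type enum and the MLX5_SW_ICM_BLOCK_SIZE() macros introduced earlier in this diff. A hedged allocation/free sketch; passing the block size as the alignment and uid 0 for a kernel-owned allocation are assumptions, and the function names are illustrative:

/* Example (not part of the patch): allocate and free one steering ICM block. */
static int example_alloc_steering_icm(struct mlx5_core_dev *dev,
				      phys_addr_t *addr, u32 *obj_id)
{
	u64 length = MLX5_SW_ICM_BLOCK_SIZE(dev);	/* one block */

	return mlx5_dm_sw_icm_alloc(dev, MLX5_SW_ICM_TYPE_STEERING, length,
				    MLX5_LOG_SW_ICM_BLOCK_SIZE(dev), 0,
				    addr, obj_id);
}

static void example_free_steering_icm(struct mlx5_core_dev *dev,
				      phys_addr_t addr, u32 obj_id)
{
	mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
			       MLX5_SW_ICM_BLOCK_SIZE(dev), 0, addr, obj_id);
}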

-#ifndef CONFIG_MLX5_CORE_IPOIB
-static inline
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
-					  struct ib_device *ibdev,
-					  const char *name,
-					  void (*setup)(struct net_device *))
-{
-	return ERR_PTR(-EOPNOTSUPP);
-}
-#else
+#ifdef CONFIG_MLX5_CORE_IPOIB
 struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
 #endif /* CONFIG_MLX5_CORE_IPOIB */
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+			    struct ib_device *device,
+			    struct rdma_netdev_alloc_params *params);

 struct mlx5_profile {
	u64 mask;
@@ -1263 +1130 @@
	MLX5_PCI_DEV_IS_VF = 1 << 0,
 };

-static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
 {
-	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+	return dev->coredev_type == MLX5_COREDEV_PF;
 }

-#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
-#define MLX5_VPORT_MANAGER(mdev) \
-	(MLX5_CAP_GEN(mdev, vport_group_manager) && \
-	 (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
-	 mlx5_core_is_pf(mdev))
+static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
+{
+	return dev->coredev_type == MLX5_COREDEV_VF;
+}
+
+static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
+{
+	return dev->caps.embedded_cpu;
+}
+
+static inline bool
+mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
+{
+	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
+}
+
+static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
+{
+	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
+}
+
+static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
+{
+	return dev->priv.sriov.max_vfs;
+}

 static inline int mlx5_get_gid_table_len(u16 param)
 {
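The PF/VF test no longer derives from pci_dev_data; it reads coredev_type, and the SR-IOV VF count is exposed through mlx5_core_max_vfs(). A trivial usage sketch (the function name and loop body are illustrative):

/* Example (not part of the patch): iterate VF slots on the PF only. */
static void example_walk_vfs(struct mlx5_core_dev *dev)
{
	u16 vf;

	if (!mlx5_core_is_pf(dev))
		return;		/* only the PF owns the VF contexts */

	for (vf = 0; vf < mlx5_core_max_vfs(dev); vf++) {
		/* per-VF administration would go here */
	}
}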
@@ -1318 +1205 @@
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
 };

-static inline const struct cpumask *
-mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
+static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
 {
-	return dev->priv.irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
+	struct devlink *devlink = priv_to_devlink(dev);
+	union devlink_param_value val;
+
+	devlink_param_driverinit_value_get(devlink,
+					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+					   &val);
+	return val.vbool;
 }

 #endif /* MLX5_DRIVER_H */
---|