2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/include/linux/mlx5/driver.h
@@ -41,21 +41,23 @@
 #include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 #include <linux/workqueue.h>
 #include <linux/mempool.h>
 #include <linux/interrupt.h>
 #include <linux/idr.h>
+#include <linux/notifier.h>
+#include <linux/refcount.h>
 
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/srq.h>
+#include <linux/mlx5/eq.h>
 #include <linux/timecounter.h>
 #include <linux/ptp_clock_kernel.h>
+#include <net/devlink.h>
 
 enum {
 	MLX5_BOARD_ID_LEN = 64,
-	MLX5_MAX_NAME_LEN = 16,
 };
 
 enum {
@@ -85,26 +87,15 @@
 };
 
 enum {
-	MLX5_EQ_VEC_PAGES = 0,
-	MLX5_EQ_VEC_CMD = 1,
-	MLX5_EQ_VEC_ASYNC = 2,
-	MLX5_EQ_VEC_PFAULT = 3,
-	MLX5_EQ_VEC_COMP_BASE,
-};
-
-enum {
-	MLX5_MAX_IRQ_NAME = 32
-};
-
-enum {
-	MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
-	MLX5_ATOMIC_MODE_CX = 2 << 16,
-	MLX5_ATOMIC_MODE_8B = 3 << 16,
-	MLX5_ATOMIC_MODE_16B = 4 << 16,
-	MLX5_ATOMIC_MODE_32B = 5 << 16,
-	MLX5_ATOMIC_MODE_64B = 6 << 16,
-	MLX5_ATOMIC_MODE_128B = 7 << 16,
-	MLX5_ATOMIC_MODE_256B = 8 << 16,
+	MLX5_ATOMIC_MODE_OFFSET = 16,
+	MLX5_ATOMIC_MODE_IB_COMP = 1,
+	MLX5_ATOMIC_MODE_CX = 2,
+	MLX5_ATOMIC_MODE_8B = 3,
+	MLX5_ATOMIC_MODE_16B = 4,
+	MLX5_ATOMIC_MODE_32B = 5,
+	MLX5_ATOMIC_MODE_64B = 6,
+	MLX5_ATOMIC_MODE_128B = 7,
+	MLX5_ATOMIC_MODE_256B = 8,
 };
 
 enum {
@@ -118,6 +109,7 @@
 	MLX5_REG_FPGA_CAP = 0x4022,
 	MLX5_REG_FPGA_CTRL = 0x4023,
 	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
+	MLX5_REG_CORE_DUMP = 0x402e,
 	MLX5_REG_PCAP = 0x5001,
 	MLX5_REG_PMTU = 0x5003,
 	MLX5_REG_PTYS = 0x5004,
@@ -133,23 +125,30 @@
 	MLX5_REG_PVLC = 0x500f,
 	MLX5_REG_PCMR = 0x5041,
 	MLX5_REG_PMLP = 0x5002,
+	MLX5_REG_PPLM = 0x5023,
 	MLX5_REG_PCAM = 0x507f,
 	MLX5_REG_NODE_DESC = 0x6001,
 	MLX5_REG_HOST_ENDIANNESS = 0x7004,
 	MLX5_REG_MCIA = 0x9014,
+	MLX5_REG_MFRL = 0x9028,
 	MLX5_REG_MLCR = 0x902b,
 	MLX5_REG_MTRC_CAP = 0x9040,
 	MLX5_REG_MTRC_CONF = 0x9041,
 	MLX5_REG_MTRC_STDB = 0x9042,
 	MLX5_REG_MTRC_CTRL = 0x9043,
+	MLX5_REG_MPEIN = 0x9050,
 	MLX5_REG_MPCNT = 0x9051,
 	MLX5_REG_MTPPS = 0x9053,
 	MLX5_REG_MTPPSE = 0x9054,
 	MLX5_REG_MPEGC = 0x9056,
+	MLX5_REG_MCQS = 0x9060,
 	MLX5_REG_MCQI = 0x9061,
 	MLX5_REG_MCC = 0x9062,
 	MLX5_REG_MCDA = 0x9063,
 	MLX5_REG_MCAM = 0x907f,
+	MLX5_REG_MIRC = 0x9162,
+	MLX5_REG_SBCAM = 0xB01F,
+	MLX5_REG_RESOURCE_DUMP = 0xC000,
 };
 
 enum mlx5_qpts_trust_state {
@@ -162,13 +161,11 @@
 	MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
 };
 
-enum mlx5_dct_atomic_mode {
-	MLX5_ATOMIC_MODE_DCT_CX = 2,
-};
-
 enum {
 	MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
 	MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
+	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
+	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
 };
 
 enum mlx5_page_fault_resume_flags {
@@ -191,8 +188,12 @@
 	MLX5_POLICY_INVALID = 0xffffffff
 };
 
+enum mlx5_coredev_type {
+	MLX5_COREDEV_PF,
+	MLX5_COREDEV_VF
+};
+
 struct mlx5_field_desc {
-	struct dentry *dent;
 	int i;
 };
 
@@ -201,20 +202,12 @@
 	void *object;
 	enum dbg_rsc_type type;
 	struct dentry *root;
-	struct mlx5_field_desc fields[0];
+	struct mlx5_field_desc fields[];
 };
 
 enum mlx5_dev_event {
-	MLX5_DEV_EVENT_SYS_ERROR,
-	MLX5_DEV_EVENT_PORT_UP,
-	MLX5_DEV_EVENT_PORT_DOWN,
-	MLX5_DEV_EVENT_PORT_INITIALIZED,
-	MLX5_DEV_EVENT_LID_CHANGE,
-	MLX5_DEV_EVENT_PKEY_CHANGE,
-	MLX5_DEV_EVENT_GUID_CHANGE,
-	MLX5_DEV_EVENT_CLIENT_REREG,
-	MLX5_DEV_EVENT_PPS,
-	MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
+	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
+	MLX5_DEV_EVENT_PORT_AFFINITY = 129,
 };
 
 enum mlx5_port_status {
@@ -222,29 +215,10 @@
 	MLX5_PORT_DOWN = 2,
 };
 
-enum mlx5_eq_type {
-	MLX5_EQ_TYPE_COMP,
-	MLX5_EQ_TYPE_ASYNC,
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	MLX5_EQ_TYPE_PF,
-#endif
-};
-
-struct mlx5_bfreg_info {
-	u32 *sys_pages;
-	int num_low_latency_bfregs;
-	unsigned int *count;
-
-	/*
-	 * protect bfreg allocation data structs
-	 */
-	struct mutex lock;
-	u32 ver;
-	bool lib_uar_4k;
-	u32 num_sys_pages;
-	u32 num_static_sys_pages;
-	u32 total_num_bfregs;
-	u32 num_dyn_bfregs;
+enum mlx5_cmdif_state {
+	MLX5_CMDIF_STATE_UNINITIALIZED,
+	MLX5_CMDIF_STATE_UP,
+	MLX5_CMDIF_STATE_DOWN,
 };
 
 struct mlx5_cmd_first {
@@ -261,11 +235,6 @@
 
 struct mlx5_cmd_debug {
 	struct dentry *dbg_root;
-	struct dentry *dbg_in;
-	struct dentry *dbg_out;
-	struct dentry *dbg_outlen;
-	struct dentry *dbg_status;
-	struct dentry *dbg_run;
 	void *in_msg;
 	void *out_msg;
 	u8 status;
@@ -290,13 +259,14 @@
 	u64 sum;
 	u64 n;
 	struct dentry *root;
-	struct dentry *avg;
-	struct dentry *count;
 	/* protect command average calculations */
 	spinlock_t lock;
 };
 
 struct mlx5_cmd {
+	struct mlx5_nb nb;
+
+	enum mlx5_cmdif_state state;
 	void *cmd_alloc_buf;
 	dma_addr_t alloc_dma;
 	int alloc_size;
@@ -323,12 +293,13 @@
 	struct semaphore sem;
 	struct semaphore pages_sem;
 	int mode;
+	u16 allowed_opcode;
 	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
 	struct dma_pool *pool;
 	struct mlx5_cmd_debug dbg;
 	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
 	int checksum_disabled;
-	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
+	struct mlx5_cmd_stats *stats;
 };
 
 struct mlx5_port_caps {
@@ -357,58 +328,13 @@
 };
 
 struct mlx5_frag_buf_ctrl {
-	struct mlx5_frag_buf frag_buf;
+	struct mlx5_buf_list *frags;
 	u32 sz_m1;
 	u16 frag_sz_m1;
 	u16 strides_offset;
 	u8 log_sz;
 	u8 log_stride;
 	u8 log_frag_strides;
-};
-
-struct mlx5_eq_tasklet {
-	struct list_head list;
-	struct list_head process_list;
-	struct tasklet_struct task;
-	/* lock on completion tasklet list */
-	spinlock_t lock;
-};
-
-struct mlx5_eq_pagefault {
-	struct work_struct work;
-	/* Pagefaults lock */
-	spinlock_t lock;
-	struct workqueue_struct *wq;
-	mempool_t *pool;
-};
-
-struct mlx5_cq_table {
-	/* protect radix tree */
-	spinlock_t lock;
-	struct radix_tree_root tree;
-};
-
-struct mlx5_eq {
-	struct mlx5_core_dev *dev;
-	struct mlx5_cq_table cq_table;
-	__be32 __iomem *doorbell;
-	u32 cons_index;
-	struct mlx5_frag_buf buf;
-	int size;
-	unsigned int irqn;
-	u8 eqn;
-	int nent;
-	u64 mask;
-	struct list_head list;
-	int index;
-	struct mlx5_rsc_debug *dbg;
-	enum mlx5_eq_type type;
-	union {
-		struct mlx5_eq_tasklet tasklet_ctx;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-		struct mlx5_eq_pagefault pf_ctx;
-#endif
-	};
 };
 
 struct mlx5_core_psv {
@@ -435,6 +361,7 @@
 enum {
 	MLX5_MKEY_MR = 1,
 	MLX5_MKEY_MW,
+	MLX5_MKEY_INDIRECT_DEVX,
 };
 
 struct mlx5_core_mkey {
@@ -459,37 +386,8 @@
 
 struct mlx5_core_rsc_common {
 	enum mlx5_res_type res;
-	atomic_t refcount;
+	refcount_t refcount;
 	struct completion free;
-};
-
-struct mlx5_core_srq {
-	struct mlx5_core_rsc_common common; /* must be first */
-	u32 srqn;
-	int max;
-	size_t max_gs;
-	size_t max_avail_gather;
-	int wqe_shift;
-	void (*event) (struct mlx5_core_srq *, enum mlx5_event);
-
-	atomic_t refcount;
-	struct completion free;
-};
-
-struct mlx5_eq_table {
-	void __iomem *update_ci;
-	void __iomem *update_arm_ci;
-	struct list_head comp_eqs_list;
-	struct mlx5_eq pages_eq;
-	struct mlx5_eq async_eq;
-	struct mlx5_eq cmd_eq;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	struct mlx5_eq pfault_eq;
-#endif
-	int num_comp_vectors;
-	/* protect EQs list
-	 */
-	spinlock_t lock;
 };
 
 struct mlx5_uars_page {
@@ -531,33 +429,26 @@
 	struct timer_list timer;
 	u32 prev;
 	int miss_counter;
-	bool sick;
+	u8 synd;
+	u32 fatal_error;
+	u32 crdump_size;
 	/* wq spinlock to synchronize draining */
 	spinlock_t wq_lock;
 	struct workqueue_struct *wq;
 	unsigned long flags;
-	struct work_struct work;
+	struct work_struct fatal_report_work;
+	struct work_struct report_work;
 	struct delayed_work recover_work;
+	struct devlink_health_reporter *fw_reporter;
+	struct devlink_health_reporter *fw_fatal_reporter;
 };
 
 struct mlx5_qp_table {
+	struct notifier_block nb;
+
 	/* protect radix tree
 	 */
 	spinlock_t lock;
-	struct radix_tree_root tree;
-};
-
-struct mlx5_srq_table {
-	/* protect radix tree
-	 */
-	spinlock_t lock;
-	struct radix_tree_root tree;
-};
-
-struct mlx5_mkey_table {
-	/* protect radix tree
-	 */
-	rwlock_t lock;
 	struct radix_tree_root tree;
 };
 
@@ -565,36 +456,54 @@
 	int enabled;
 	u64 port_guid;
 	u64 node_guid;
+	/* Valid bits are used to validate administrative guid only.
+	 * Enabled after ndo_set_vf_guid
+	 */
+	u8 port_guid_valid:1;
+	u8 node_guid_valid:1;
 	enum port_state_policy policy;
 };
 
 struct mlx5_core_sriov {
 	struct mlx5_vf_context *vfs_ctx;
 	int num_vfs;
-	int enabled_vfs;
+	u16 max_vfs;
 };
 
-struct mlx5_irq_info {
-	cpumask_var_t mask;
-	char name[MLX5_MAX_IRQ_NAME];
+struct mlx5_fc_pool {
+	struct mlx5_core_dev *dev;
+	struct mutex pool_lock; /* protects pool lists */
+	struct list_head fully_used;
+	struct list_head partially_used;
+	struct list_head unused;
+	int available_fcs;
+	int used_fcs;
+	int threshold;
 };
 
 struct mlx5_fc_stats {
-	struct rb_root counters;
-	struct list_head addlist;
-	/* protect addlist add/splice operations */
-	spinlock_t addlist_lock;
+	spinlock_t counters_idr_lock; /* protects counters_idr */
+	struct idr counters_idr;
+	struct list_head counters;
+	struct llist_head addlist;
+	struct llist_head dellist;
 
 	struct workqueue_struct *wq;
 	struct delayed_work work;
 	unsigned long next_query;
 	unsigned long sampling_interval; /* jiffies */
+	u32 *bulk_query_out;
+	struct mlx5_fc_pool fc_pool;
 };
 
+struct mlx5_events;
 struct mlx5_mpfs;
 struct mlx5_eswitch;
 struct mlx5_lag;
-struct mlx5_pagefault;
+struct mlx5_devcom;
+struct mlx5_fw_reset;
+struct mlx5_eq_table;
+struct mlx5_irq_table;
 
 struct mlx5_rate_limit {
 	u32 rate;
@@ -603,9 +512,11 @@
 };
 
 struct mlx5_rl_entry {
-	struct mlx5_rate_limit rl;
-	u16 index;
-	u16 refcount;
+	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
+	u16 index;
+	u64 refcount;
+	u16 uid;
+	u8 dedicated : 1;
 };
 
 struct mlx5_rl_table {
@@ -617,59 +528,35 @@
 	struct mlx5_rl_entry *rl_entry;
 };
 
-enum port_module_event_status_type {
-	MLX5_MODULE_STATUS_PLUGGED = 0x1,
-	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
-	MLX5_MODULE_STATUS_ERROR = 0x3,
-	MLX5_MODULE_STATUS_NUM = 0x3,
-};
-
-enum port_module_event_error_type {
-	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
-	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
-	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
-	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
-	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
-	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
-	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
-	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
-	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
-	MLX5_MODULE_EVENT_ERROR_NUM,
-};
-
-struct mlx5_port_module_event_stats {
-	u64 status_counters[MLX5_MODULE_STATUS_NUM];
-	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+struct mlx5_core_roce {
+	struct mlx5_flow_table *ft;
+	struct mlx5_flow_group *fg;
+	struct mlx5_flow_handle *allow_rule;
 };
 
 struct mlx5_priv {
-	char name[MLX5_MAX_NAME_LEN];
-	struct mlx5_eq_table eq_table;
-	struct mlx5_irq_info *irq_info;
+	/* IRQ table valid only for real pci devices PF or VF */
+	struct mlx5_irq_table *irq_table;
+	struct mlx5_eq_table *eq_table;
 
 	/* pages stuff */
+	struct mlx5_nb pg_nb;
 	struct workqueue_struct *pg_wq;
-	struct rb_root page_root;
+	struct xarray page_root_xa;
 	int fw_pages;
 	atomic_t reg_pages;
 	struct list_head free_list;
 	int vfs_pages;
+	int peer_pf_pages;
 
 	struct mlx5_core_health health;
 
-	struct mlx5_srq_table srq_table;
-
 	/* start: qp staff */
-	struct mlx5_qp_table qp_table;
 	struct dentry *qp_debugfs;
 	struct dentry *eq_debugfs;
 	struct dentry *cq_debugfs;
 	struct dentry *cmdif_debugfs;
 	/* end: qp staff */
-
-	/* start: mkey staff */
-	struct mlx5_mkey_table mkey_table;
-	/* end: mkey staff */
 
 	/* start: alloc staff */
 	/* protect buffer alocation according to numa node */
@@ -681,40 +568,28 @@
 	/* end: alloc staff */
 	struct dentry *dbg_root;
 
-	/* protect mkey key part */
-	spinlock_t mkey_lock;
-	u8 mkey_key;
-
 	struct list_head dev_list;
 	struct list_head ctx_list;
 	spinlock_t ctx_lock;
-
-	struct list_head waiting_events_list;
-	bool is_accum_events;
+	struct mlx5_events *events;
 
 	struct mlx5_flow_steering *steering;
 	struct mlx5_mpfs *mpfs;
 	struct mlx5_eswitch *eswitch;
 	struct mlx5_core_sriov sriov;
 	struct mlx5_lag *lag;
-	unsigned long pci_dev_data;
+	struct mlx5_devcom *devcom;
+	struct mlx5_fw_reset *fw_reset;
+	struct mlx5_core_roce roce;
 	struct mlx5_fc_stats fc_stats;
 	struct mlx5_rl_table rl_table;
 
-	struct mlx5_port_module_event_stats pme_stats;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	void (*pfault)(struct mlx5_core_dev *dev,
-		       void *context,
-		       struct mlx5_pagefault *pfault);
-	void *pfault_ctx;
-	struct srcu_struct pfault_srcu;
-#endif
 	struct mlx5_bfreg_data bfregs;
 	struct mlx5_uars_page *uar;
 };
 
 enum mlx5_device_state {
+	MLX5_DEVICE_STATE_UNINITIALIZED,
 	MLX5_DEVICE_STATE_UP,
 	MLX5_DEVICE_STATE_INTERNAL_ERROR,
 };
@@ -734,44 +609,6 @@
 	MLX5_PFAULT_RDMA = 1 << 2,
 };
 
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
-	u32 bytes_committed;
-	u32 token;
-	u8 event_subtype;
-	u8 type;
-	union {
-		/* Initiator or send message responder pagefault details. */
-		struct {
-			/* Received packet size, only valid for responders. */
-			u32 packet_size;
-			/*
-			 * Number of resource holding WQE, depends on type.
-			 */
-			u32 wq_num;
-			/*
-			 * WQE index. Refers to either the send queue or
-			 * receive queue, according to event_subtype.
-			 */
-			u16 wqe_index;
-		} wqe;
-		/* RDMA responder pagefault details */
-		struct {
-			u32 r_key;
-			/*
-			 * Received packet size, minimal size page fault
-			 * resolution required for forward progress.
-			 */
-			u32 packet_size;
-			u32 rdma_op_len;
-			u64 rdma_va;
-		} rdma;
-	};
-
-	struct mlx5_eq *eq;
-	struct work_struct work;
-};
-
 struct mlx5_td {
 	/* protects tirs list changes while tirs refresh */
 	struct mutex list_lock;
@@ -784,6 +621,11 @@
 	struct mlx5_td td;
 	struct mlx5_core_mkey mkey;
 	struct mlx5_sq_bfreg bfreg;
+};
+
+enum mlx5_sw_icm_type {
+	MLX5_SW_ICM_TYPE_STEERING,
+	MLX5_SW_ICM_TYPE_HEADER_MODIFY,
 };
 
 #define MLX5_MAX_RESERVED_GIDS 8
@@ -802,24 +644,36 @@
 	u8 enabled;
 };
 
-struct mlx5_clock {
-	rwlock_t lock;
+struct mlx5_timer {
 	struct cyclecounter cycles;
 	struct timecounter tc;
-	struct hwtstamp_config hwtstamp_config;
 	u32 nominal_c_mult;
 	unsigned long overflow_period;
 	struct delayed_work overflow_work;
-	struct mlx5_core_dev *mdev;
+};
+
+struct mlx5_clock {
+	struct mlx5_nb pps_nb;
+	seqlock_t lock;
+	struct hwtstamp_config hwtstamp_config;
 	struct ptp_clock *ptp;
 	struct ptp_clock_info ptp_info;
 	struct mlx5_pps pps_info;
+	struct mlx5_timer timer;
 };
 
+struct mlx5_dm;
 struct mlx5_fw_tracer;
 struct mlx5_vxlan;
+struct mlx5_geneve;
+struct mlx5_hv_vhca;
+
+#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
+#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
 
 struct mlx5_core_dev {
+	struct device *device;
+	enum mlx5_coredev_type coredev_type;
 	struct pci_dev *pdev;
 	/* sync pci state */
 	struct mutex pci_status_mutex;
@@ -832,25 +686,26 @@
 		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
 		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
 		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
-		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+		u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
 		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
 		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
+		u8 embedded_cpu;
 	} caps;
+	u64 sys_image_guid;
 	phys_addr_t iseg_base;
 	struct mlx5_init_seg __iomem *iseg;
+	phys_addr_t bar_addr;
 	enum mlx5_device_state state;
 	/* sync interface state */
 	struct mutex intf_state_mutex;
 	unsigned long intf_state;
-	void (*event) (struct mlx5_core_dev *dev,
-		       enum mlx5_dev_event event,
-		       unsigned long param);
 	struct mlx5_priv priv;
 	struct mlx5_profile *profile;
-	atomic_t num_qps;
 	u32 issi;
 	struct mlx5e_resources mlx5e_res;
+	struct mlx5_dm *dm;
 	struct mlx5_vxlan *vxlan;
+	struct mlx5_geneve *geneve;
 	struct {
 		struct mlx5_rsvd_gids reserved_gids;
 		u32 roce_en;
@@ -858,13 +713,15 @@
 #ifdef CONFIG_MLX5_FPGA
 	struct mlx5_fpga_device *fpga;
 #endif
-#ifdef CONFIG_RFS_ACCEL
-	struct cpu_rmap *rmap;
+#ifdef CONFIG_MLX5_ACCEL
+	const struct mlx5_accel_ipsec_ops *ipsec_ops;
 #endif
 	struct mlx5_clock clock;
 	struct mlx5_ib_clock_info *clock_info;
-	struct page *clock_info_page;
 	struct mlx5_fw_tracer *tracer;
+	struct mlx5_rsc_dump *rsc_dump;
+	u32 vsc_addr;
+	struct mlx5_hv_vhca *hv_vhca;
 };
 
 struct mlx5_db {
@@ -915,6 +772,8 @@
 	u64 ts2;
 	u16 op;
 	bool polling;
+	/* Track the max comp handlers */
+	refcount_t refcnt;
 };
 
 struct mlx5_pas {
@@ -940,8 +799,8 @@
 	u64 node_guid;
 	u32 cap_mask1;
 	u32 cap_mask1_perm;
-	u32 cap_mask2;
-	u32 cap_mask2_perm;
+	u16 cap_mask2;
+	u16 cap_mask2_perm;
 	u16 lid;
 	u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
 	u8 lmc;
@@ -984,20 +843,17 @@
 	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
 }
 
-static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
-{
-	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
-}
-
 static inline u32 mlx5_base_mkey(const u32 key)
 {
	return key & 0xffffff00u;
 }
 
-static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
+					u8 log_stride, u8 log_sz,
 					u16 strides_offset,
 					struct mlx5_frag_buf_ctrl *fbc)
 {
+	fbc->frags = frags;
 	fbc->log_stride = log_stride;
 	fbc->log_sz = log_sz;
 	fbc->sz_m1 = (1 << fbc->log_sz) - 1;
@@ -1006,18 +862,11 @@
 	fbc->strides_offset = strides_offset;
 }
 
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
+				 u8 log_stride, u8 log_sz,
 				 struct mlx5_frag_buf_ctrl *fbc)
 {
-	mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
-}
-
-static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
-					      void *cqc)
-{
-	mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
-		      MLX5_GET(cqc, cqc, log_cq_size),
-		      fbc);
+	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
 }
 
 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
@@ -1028,8 +877,7 @@
 	ix += fbc->strides_offset;
 	frag = ix >> fbc->log_frag_strides;
 
-	return fbc->frag_buf.frags[frag].buf +
-	       ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
 }
 
 static inline u32
@@ -1040,32 +888,70 @@
 	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
 }
 
+enum {
+	CMD_ALLOWED_OPCODE_ALL,
+};
+
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
+			enum mlx5_cmdif_state cmdif_state);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
+
+struct mlx5_async_ctx {
+	struct mlx5_core_dev *dev;
+	atomic_t num_inflight;
+	struct completion inflight_done;
+};
+
+struct mlx5_async_work;
+
+typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
+
+struct mlx5_async_work {
+	struct mlx5_async_ctx *ctx;
+	mlx5_async_cbk_t user_callback;
+};
+
+void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
+			     struct mlx5_async_ctx *ctx);
+void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
+int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
+		     void *out, int out_size, mlx5_async_cbk_t callback,
+		     struct mlx5_async_work *work);
 
 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 		  int out_size);
-int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
-		     void *out, int out_size, mlx5_cmd_cbk_t callback,
-		     void *context);
+
+#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \
+	({ \
+		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \
+			      MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \
+	})
+
+#define mlx5_cmd_exec_in(dev, ifc_cmd, in) \
+	({ \
+		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \
+		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \
+	})
+
 int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
 			  void *out, int out_size);
 void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
 
 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
+void mlx5_health_flush(struct mlx5_core_dev *dev);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
-void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-			struct mlx5_frag_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev,
 		   int size, struct mlx5_frag_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
@@ -1076,20 +962,6 @@
 			 gfp_t flags, int npages);
 void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
 				 struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			 struct mlx5_srq_attr *in);
-int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
-int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			struct mlx5_srq_attr *out);
-int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-		      u16 lwm, int is_srq);
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
-int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
-			     struct mlx5_core_mkey *mkey,
-			     u32 *in, int inlen,
-			     u32 *out, int outlen,
-			     mlx5_cmd_cbk_t callback, void *context);
 int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
 			  struct mlx5_core_mkey *mkey,
 			  u32 *in, int inlen);
@@ -1099,30 +971,25 @@
 			  u32 *out, int outlen);
 int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
 int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
-		      u16 opmod, u8 port);
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s32 npages);
+				 s32 npages, bool ec_function);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);
 
 void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
-struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-		    unsigned int *irqn);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 
-int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
 			 int size_in, void *data_out, int size_out,
@@ -1134,7 +1001,7 @@
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 
 const char *mlx5_command_str(int command);
-int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
 			 int npsvs, u32 *sig_index);
@@ -1144,10 +1011,6 @@
 			    struct mlx5_odp_caps *odp_caps);
 int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
 			     u8 port_num, void *out, size_t sz);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
-				u32 wq_num, u8 type, int error);
-#endif
 
 int mlx5_init_rl_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
@@ -1155,21 +1018,22 @@
 			struct mlx5_rate_limit *rl);
 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
 bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
+			 bool dedicated_entry, u16 *index);
+void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
 bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
 		       struct mlx5_rate_limit *rl_1);
 int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
 		     bool map_wc, bool fast_path);
 void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
 
+unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
+struct cpumask *
+mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
 unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
 int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
 			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
 			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
-
-static inline int fw_initializing(struct mlx5_core_dev *dev)
-{
-	return ioread32be(&dev->iseg->initializing) >> 31;
-}
 
 static inline u32 mlx5_mkey_to_idx(u32 mkey)
 {
@@ -1201,6 +1065,7 @@
 enum {
 	MLX5_INTERFACE_PROTOCOL_IB = 0,
 	MLX5_INTERFACE_PROTOCOL_ETH = 1,
+	MLX5_INTERFACE_PROTOCOL_VDPA = 2,
 };
 
 struct mlx5_interface {
@@ -1208,47 +1073,49 @@
 	void (*remove)(struct mlx5_core_dev *dev, void *context);
 	int (*attach)(struct mlx5_core_dev *dev, void *context);
 	void (*detach)(struct mlx5_core_dev *dev, void *context);
-	void (*event)(struct mlx5_core_dev *dev, void *context,
-		      enum mlx5_dev_event event, unsigned long param);
-	void (*pfault)(struct mlx5_core_dev *dev,
-		       void *context,
-		       struct mlx5_pagefault *pfault);
-	void * (*get_dev)(void *context);
 	int protocol;
 	struct list_head list;
 };
 
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
 int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+
 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
 
 int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
+			   struct net_device *slave);
 int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
 				 u64 *values,
 				 int num_counters,
 				 size_t *offsets);
 struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
 void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
+int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+			 u64 length, u32 log_alignment, u16 uid,
+			 phys_addr_t *addr, u32 *obj_id);
+int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
+			   u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
 
-#ifndef CONFIG_MLX5_CORE_IPOIB
-static inline
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
-					  struct ib_device *ibdev,
-					  const char *name,
-					  void (*setup)(struct net_device *))
-{
-	return ERR_PTR(-EOPNOTSUPP);
-}
-#else
+#ifdef CONFIG_MLX5_CORE_IPOIB
 struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
 					  struct ib_device *ibdev,
 					  const char *name,
 					  void (*setup)(struct net_device *));
 #endif /* CONFIG_MLX5_CORE_IPOIB */
+int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
+			    struct ib_device *device,
+			    struct rdma_netdev_alloc_params *params);
 
 struct mlx5_profile {
 	u64 mask;
@@ -1263,16 +1130,36 @@
 	MLX5_PCI_DEV_IS_VF = 1 << 0,
 };
 
-static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
 {
-	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+	return dev->coredev_type == MLX5_COREDEV_PF;
 }
 
-#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
-#define MLX5_VPORT_MANAGER(mdev) \
-	(MLX5_CAP_GEN(mdev, vport_group_manager) && \
-	 (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
-	 mlx5_core_is_pf(mdev))
+static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
+{
+	return dev->coredev_type == MLX5_COREDEV_VF;
+}
+
+static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
+{
+	return dev->caps.embedded_cpu;
+}
+
+static inline bool
+mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
+{
+	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
+}
+
+static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
+{
+	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
+}
+
+static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
+{
+	return dev->priv.sriov.max_vfs;
+}
 
 static inline int mlx5_get_gid_table_len(u16 param)
 {
@@ -1318,10 +1205,15 @@
 	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
 };
 
-static inline const struct cpumask *
-mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
+static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
 {
-	return dev->priv.irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
+	struct devlink *devlink = priv_to_devlink(dev);
+	union devlink_param_value val;
+
+	devlink_param_driverinit_value_get(devlink,
+					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+					   &val);
+	return val.vbool;
 }
 
 #endif /* MLX5_DRIVER_H */