2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -80,16 +80,11 @@
         HW_BAR_COUNT
 };
 
-struct mlx4_ib_vma_private_data {
-        struct vm_area_struct *vma;
-};
-
 struct mlx4_ib_ucontext {
         struct ib_ucontext ibucontext;
         struct mlx4_uar uar;
         struct list_head db_page_list;
         struct mutex db_page_mutex;
-        struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
         struct list_head wqn_ranges_list;
         struct mutex wqn_ranges_mutex; /* protect wqn_ranges_list */
 };
@@ -149,11 +144,6 @@
 struct mlx4_ib_mw {
         struct ib_mw ibmw;
         struct mlx4_mw mmw;
-};
-
-struct mlx4_ib_fmr {
-        struct ib_fmr ibfmr;
-        struct mlx4_fmr mfmr;
 };
 
 #define MAX_REGS_PER_FLOW 2
@@ -243,7 +233,8 @@
 };
 
 enum {
-        MLX4_NUM_TUNNEL_BUFS = 256,
+        MLX4_NUM_TUNNEL_BUFS = 512,
+        MLX4_NUM_WIRE_BUFS = 2048,
 };
 
 struct mlx4_ib_tunnel_header {
@@ -308,6 +299,26 @@
         u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
 };
 
+enum {
+        /*
+         * Largest possible UD header: send with GRH and immediate
+         * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
+         * tag. (LRH would only use 8 bytes, so Ethernet is the
+         * biggest case)
+         */
+        MLX4_IB_UD_HEADER_SIZE = 82,
+        MLX4_IB_LSO_HEADER_SPARE = 128,
+};
+
+struct mlx4_ib_sqp {
+        int pkey_index;
+        u32 qkey;
+        u32 send_psn;
+        struct ib_ud_header ud_header;
+        u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
+        struct ib_qp *roce_v2_gsi;
+};
+
 struct mlx4_ib_qp {
         union {
                 struct ib_qp ibqp;
@@ -353,7 +364,10 @@
         struct mlx4_wqn_range *wqn_range;
         /* Number of RSS QP parents that uses this WQ */
         u32 rss_usecnt;
-        struct mlx4_ib_rss *rss_ctx;
+        union {
+                struct mlx4_ib_rss *rss_ctx;
+                struct mlx4_ib_sqp *sqp;
+        };
 };
 
 struct mlx4_ib_srq {
@@ -374,6 +388,10 @@
 struct mlx4_ib_ah {
         struct ib_ah ibah;
         union mlx4_ext_av av;
+};
+
+struct mlx4_ib_rwq_ind_table {
+        struct ib_rwq_ind_table ib_rwq_ind_tbl;
 };
 
 /****************************************/
@@ -499,10 +517,12 @@
         struct mlx4_sriov_alias_guid alias_guid;
 
         /* CM paravirtualization fields */
-        struct list_head cm_list;
+        struct xarray pv_id_table;
+        u32 pv_id_next;
         spinlock_t id_map_lock;
         struct rb_root sl_id_map;
-        struct idr pv_id_table;
+        struct list_head cm_list;
+        struct xarray xa_rej_tmout;
 };
 
 struct gid_cache_context {
@@ -514,6 +534,7 @@
         union ib_gid gid;
         enum ib_gid_type gid_type;
         struct gid_cache_context *ctx;
+        u16 vlan_id;
 };
 
 struct mlx4_port_gid_table {
@@ -526,6 +547,7 @@
         atomic64_t mac[MLX4_MAX_PORTS];
         struct notifier_block nb;
         struct mlx4_port_gid_table gids[MLX4_MAX_PORTS];
+        enum ib_port_state last_port_state[MLX4_MAX_PORTS];
 };
 
 struct pkey_mgt {
@@ -683,11 +705,6 @@
         return container_of(ibmw, struct mlx4_ib_mw, ibmw);
 }
 
-static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
-{
-        return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
-}
-
 static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
 {
         return container_of(ibflow, struct mlx4_ib_flow, ibflow);
@@ -728,7 +745,7 @@
 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
 
-int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
+int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
         struct mlx4_db *db);
 void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
 
@@ -738,43 +755,39 @@
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         u64 virt_addr, int access_flags,
         struct ib_udata *udata);
-int mlx4_ib_dereg_mr(struct ib_mr *mr);
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-        struct ib_udata *udata);
+int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
+int mlx4_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
 int mlx4_ib_dealloc_mw(struct ib_mw *mw);
-struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
-        enum ib_mr_type mr_type,
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
         u32 max_num_sg);
 int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
         unsigned int *sg_offset);
 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
-struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
-        const struct ib_cq_init_attr *attr,
-        struct ib_ucontext *context,
-        struct ib_udata *udata);
-int mlx4_ib_destroy_cq(struct ib_cq *cq);
+int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+        struct ib_udata *udata);
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
-        struct ib_udata *udata);
-struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
-        struct rdma_ah_attr *ah_attr,
-        int slave_sgid_index, u8 *s_mac,
-        u16 vlan_tag);
+int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+        struct ib_udata *udata);
+int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
+        int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
 int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-int mlx4_ib_destroy_ah(struct ib_ah *ah);
+static inline int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+        return 0;
+}
 
-struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
-        struct ib_srq_init_attr *init_attr,
-        struct ib_udata *udata);
+int mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+        struct ib_udata *udata);
 int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
         enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int mlx4_ib_destroy_srq(struct ib_srq *srq);
+int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
 int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
         const struct ib_recv_wr **bad_wr);
@@ -782,7 +795,7 @@
 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
         struct ib_qp_init_attr *init_attr,
         struct ib_udata *udata);
-int mlx4_ib_destroy_qp(struct ib_qp *qp);
+int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
 void mlx4_ib_drain_sq(struct ib_qp *qp);
 void mlx4_ib_drain_rq(struct ib_qp *qp);
 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -797,20 +810,13 @@
 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
         int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
         const void *in_mad, void *response_mad);
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,  u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
         const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-        const struct ib_mad_hdr *in, size_t in_mad_size,
-        struct ib_mad_hdr *out, size_t *out_mad_size,
-        u16 *out_mad_pkey_index);
+        const struct ib_mad *in, struct ib_mad *out,
+        size_t *out_mad_size, u16 *out_mad_pkey_index);
 int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
 void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
 
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
-        struct ib_fmr_attr *fmr_attr);
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
-        u64 iova);
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
-int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
         struct ib_port_attr *props, int netw_view);
 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
@@ -917,15 +923,18 @@
 struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
         struct ib_wq_init_attr *init_attr,
         struct ib_udata *udata);
-int mlx4_ib_destroy_wq(struct ib_wq *wq);
+int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
         u32 wq_attr_mask, struct ib_udata *udata);
 
-struct ib_rwq_ind_table
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
-        struct ib_rwq_ind_table_init_attr *init_attr,
-        struct ib_udata *udata);
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl,
+        struct ib_rwq_ind_table_init_attr *init_attr,
+        struct ib_udata *udata);
+static inline int
+mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
+{
+        return 0;
+}
 int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
         int *num_of_mtts);
 
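Note on the pv_id_table hunk (not part of the patch above): replacing the IDR with a struct xarray and adding the pv_id_next cursor implies cyclic ID allocation through the xarray API on the CM paravirtualization path. The sketch below is a hedged illustration of that pattern only; mlx4_cm_entry and mlx4_pv_id_alloc are hypothetical names, and the xarray is assumed to have been initialized with XA_FLAGS_ALLOC so that xa_alloc_cyclic() may be used.

/* Hedged illustration only -- not taken from this patch. */
#include <linux/xarray.h>

struct mlx4_cm_entry {                  /* hypothetical payload type */
        u32 pv_cm_id;
};

static int mlx4_pv_id_alloc(struct xarray *pv_id_table, u32 *pv_id_next,
                            struct mlx4_cm_entry *ent)
{
        int ret;

        /*
         * Allocate the next free 32-bit ID, cycling through the range and
         * remembering the position in *pv_id_next (the cursor field added
         * by this patch).  xa_alloc_cyclic() returns 0 or 1 on success and
         * a negative errno on failure.
         */
        ret = xa_alloc_cyclic(pv_id_table, &ent->pv_cm_id, ent,
                              xa_limit_32b, pv_id_next, GFP_KERNEL);
        return ret < 0 ? ret : 0;
}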