@@ -80,16 +80,11 @@
 	HW_BAR_COUNT
 };
 
-struct mlx4_ib_vma_private_data {
-	struct vm_area_struct *vma;
-};
-
 struct mlx4_ib_ucontext {
 	struct ib_ucontext ibucontext;
 	struct mlx4_uar uar;
 	struct list_head db_page_list;
 	struct mutex db_page_mutex;
-	struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
 	struct list_head wqn_ranges_list;
 	struct mutex wqn_ranges_mutex; /* protect wqn_ranges_list */
 };
@@ -149,11 +144,6 @@
 struct mlx4_ib_mw {
 	struct ib_mw ibmw;
 	struct mlx4_mw mmw;
-};
-
-struct mlx4_ib_fmr {
-	struct ib_fmr ibfmr;
-	struct mlx4_fmr mfmr;
 };
 
 #define MAX_REGS_PER_FLOW 2
@@ -243,7 +233,8 @@
 };
 
 enum {
-	MLX4_NUM_TUNNEL_BUFS = 256,
+	MLX4_NUM_TUNNEL_BUFS = 512,
+	MLX4_NUM_WIRE_BUFS = 2048,
 };
 
 struct mlx4_ib_tunnel_header {
@@ -308,6 +299,26 @@
 	u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
 };
 
+enum {
+	/*
+	 * Largest possible UD header: send with GRH and immediate
+	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
+	 * tag. (LRH would only use 8 bytes, so Ethernet is the
+	 * biggest case)
+	 */
+	MLX4_IB_UD_HEADER_SIZE = 82,
+	MLX4_IB_LSO_HEADER_SPARE = 128,
+};
+
+struct mlx4_ib_sqp {
+	int pkey_index;
+	u32 qkey;
+	u32 send_psn;
+	struct ib_ud_header ud_header;
+	u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
+	struct ib_qp *roce_v2_gsi;
+};
+
 struct mlx4_ib_qp {
 	union {
 		struct ib_qp ibqp;
@@ -353,7 +364,10 @@
 	struct mlx4_wqn_range *wqn_range;
 	/* Number of RSS QP parents that uses this WQ */
 	u32 rss_usecnt;
-	struct mlx4_ib_rss *rss_ctx;
+	union {
+		struct mlx4_ib_rss *rss_ctx;
+		struct mlx4_ib_sqp *sqp;
+	};
 };
 
 struct mlx4_ib_srq {
@@ -374,6 +388,10 @@
 struct mlx4_ib_ah {
 	struct ib_ah ibah;
 	union mlx4_ext_av av;
+};
+
+struct mlx4_ib_rwq_ind_table {
+	struct ib_rwq_ind_table ib_rwq_ind_tbl;
 };
 
 /****************************************/
@@ -499,10 +517,12 @@
 	struct mlx4_sriov_alias_guid alias_guid;
 
 	/* CM paravirtualization fields */
-	struct list_head cm_list;
+	struct xarray pv_id_table;
+	u32 pv_id_next;
 	spinlock_t id_map_lock;
 	struct rb_root sl_id_map;
-	struct idr pv_id_table;
+	struct list_head cm_list;
+	struct xarray xa_rej_tmout;
 };
 
 struct gid_cache_context {
@@ -514,6 +534,7 @@
 	union ib_gid gid;
 	enum ib_gid_type gid_type;
 	struct gid_cache_context *ctx;
+	u16 vlan_id;
 };
 
 struct mlx4_port_gid_table {
@@ -526,6 +547,7 @@
 	atomic64_t mac[MLX4_MAX_PORTS];
 	struct notifier_block nb;
 	struct mlx4_port_gid_table gids[MLX4_MAX_PORTS];
+	enum ib_port_state last_port_state[MLX4_MAX_PORTS];
 };
 
 struct pkey_mgt {
@@ -683,11 +705,6 @@
 	return container_of(ibmw, struct mlx4_ib_mw, ibmw);
 }
 
-static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
-{
-	return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
-}
-
 static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
 {
 	return container_of(ibflow, struct mlx4_ib_flow, ibflow);
@@ -728,7 +745,7 @@
 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
 
-int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
+int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
 			struct mlx4_db *db);
 void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
 
@@ -738,43 +755,39 @@
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		u64 virt_addr, int access_flags,
 		struct ib_udata *udata);
-int mlx4_ib_dereg_mr(struct ib_mr *mr);
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-		struct ib_udata *udata);
+int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
+int mlx4_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
 int mlx4_ib_dealloc_mw(struct ib_mw *mw);
-struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
-		enum ib_mr_type mr_type,
+struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
 		u32 max_num_sg);
 int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 		unsigned int *sg_offset);
 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
-struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
-		const struct ib_cq_init_attr *attr,
-		struct ib_ucontext *context,
-		struct ib_udata *udata);
-int mlx4_ib_destroy_cq(struct ib_cq *cq);
+int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		struct ib_udata *udata);
+int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
-		struct ib_udata *udata);
-struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
-		struct rdma_ah_attr *ah_attr,
-		int slave_sgid_index, u8 *s_mac,
-		u16 vlan_tag);
+int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+		struct ib_udata *udata);
+int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
+		int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
 int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-int mlx4_ib_destroy_ah(struct ib_ah *ah);
+static inline int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+	return 0;
+}
 
-struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
-		struct ib_srq_init_attr *init_attr,
-		struct ib_udata *udata);
+int mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+		struct ib_udata *udata);
 int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int mlx4_ib_destroy_srq(struct ib_srq *srq);
+int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
 int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 		const struct ib_recv_wr **bad_wr);
@@ -782,7 +795,7 @@
 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 		struct ib_qp_init_attr *init_attr,
 		struct ib_udata *udata);
-int mlx4_ib_destroy_qp(struct ib_qp *qp);
+int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
 void mlx4_ib_drain_sq(struct ib_qp *qp);
 void mlx4_ib_drain_rq(struct ib_qp *qp);
 int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -797,20 +810,13 @@
 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
 		int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 		const void *in_mad, void *response_mad);
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 		const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-		const struct ib_mad_hdr *in, size_t in_mad_size,
-		struct ib_mad_hdr *out, size_t *out_mad_size,
-		u16 *out_mad_pkey_index);
+		const struct ib_mad *in, struct ib_mad *out,
+		size_t *out_mad_size, u16 *out_mad_pkey_index);
 int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
 void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
 
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
-		struct ib_fmr_attr *fmr_attr);
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
-		u64 iova);
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
-int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
 		struct ib_port_attr *props, int netw_view);
 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
@@ -917,15 +923,18 @@
 struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
 		struct ib_wq_init_attr *init_attr,
 		struct ib_udata *udata);
-int mlx4_ib_destroy_wq(struct ib_wq *wq);
+int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		u32 wq_attr_mask, struct ib_udata *udata);
 
-struct ib_rwq_ind_table
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
-		struct ib_rwq_ind_table_init_attr *init_attr,
-		struct ib_udata *udata);
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl,
+		struct ib_rwq_ind_table_init_attr *init_attr,
+		struct ib_udata *udata);
+static inline int
+mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table)
+{
+	return 0;
+}
 int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
 		int *num_of_mtts);
 
---|