@@ -56,22 +56,22 @@
 };
 
 struct bnxt_re_pd {
+	struct ib_pd ib_pd;
 	struct bnxt_re_dev *rdev;
-	struct ib_pd ib_pd;
 	struct bnxt_qplib_pd qplib_pd;
 	struct bnxt_re_fence_data fence;
 };
 
 struct bnxt_re_ah {
-	struct bnxt_re_dev *rdev;
 	struct ib_ah ib_ah;
+	struct bnxt_re_dev *rdev;
 	struct bnxt_qplib_ah qplib_ah;
 };
 
 struct bnxt_re_srq {
+	struct ib_srq ib_srq;
 	struct bnxt_re_dev *rdev;
 	u32 srq_limit;
-	struct ib_srq ib_srq;
 	struct bnxt_qplib_srq qplib_srq;
 	struct ib_umem *umem;
 	spinlock_t lock; /* protect srq */
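The reordering above puts the embedded ib_* object at the top of each driver structure, matching the RDMA core model in which the core allocates these containers and the driver recovers its private struct from the embedded member with container_of(). A minimal sketch of that conversion, assuming a helper of this shape (the helper name is illustrative, not necessarily what the driver defines):

static inline struct bnxt_re_pd *to_bnxt_re_pd(struct ib_pd *ib_pd)
{
	/* Recover the driver PD that embeds the core-allocated ib_pd.
	 * container_of() works for any member position; placing ib_pd
	 * first simply keeps the core object at the top of the layout.
	 */
	return container_of(ib_pd, struct bnxt_re_pd, ib_pd);
}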
@@ -94,11 +94,11 @@
 };
 
 struct bnxt_re_cq {
+	struct ib_cq ib_cq;
 	struct bnxt_re_dev *rdev;
 	spinlock_t cq_lock; /* protect cq */
 	u16 cq_count;
 	u16 cq_period;
-	struct ib_cq ib_cq;
 	struct bnxt_qplib_cq qplib_cq;
 	struct bnxt_qplib_cqe *cql;
 #define MAX_CQL_PER_POLL 1024
@@ -122,12 +122,6 @@
 	u64 *page_list;
 };
 
-struct bnxt_re_fmr {
-	struct bnxt_re_dev *rdev;
-	struct ib_fmr ib_fmr;
-	struct bnxt_qplib_mrw qplib_fmr;
-};
-
 struct bnxt_re_mw {
 	struct bnxt_re_dev *rdev;
 	struct ib_mw ib_mw;
@@ -135,21 +129,26 @@
 };
 
 struct bnxt_re_ucontext {
+	struct ib_ucontext ib_uctx;
 	struct bnxt_re_dev *rdev;
-	struct ib_ucontext ib_uctx;
 	struct bnxt_qplib_dpi dpi;
 	void *shpg;
 	spinlock_t sh_lock; /* protect shpg */
 };
 
-struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num);
+static inline u16 bnxt_re_get_swqe_size(int nsge)
+{
+	return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
+}
+
+static inline u16 bnxt_re_get_rwqe_size(int nsge)
+{
+	return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
+}
 
 int bnxt_re_query_device(struct ib_device *ibdev,
 			 struct ib_device_attr *ib_attr,
 			 struct ib_udata *udata);
-int bnxt_re_modify_device(struct ib_device *ibdev,
-			  int device_modify_mask,
-			  struct ib_device_modify *device_modify);
 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
 		       struct ib_port_attr *port_attr);
 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
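The two new inline helpers size a send or receive WQE from its SGE count. A small usage sketch under assumed layouts (the authoritative struct sq_send_hdr, sq_sge, and rq_wqe_hdr definitions live in roce_hsi.h; the 32-byte and 16-byte figures below are assumptions for illustration only):

static inline u16 example_swqe_size_for_six_sges(void)
{
	/* If sq_send_hdr is 32 bytes and each sq_sge is 16 bytes, a
	 * 6-SGE send WQE comes out to 32 + 6 * 16 = 128 bytes; the
	 * receive-side helper follows the same arithmetic with
	 * rq_wqe_hdr.
	 */
	return bnxt_re_get_swqe_size(6);
}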
@@ -163,24 +162,21 @@
 			       int index, union ib_gid *gid);
 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
 					    u8 port_num);
-struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
-			       struct ib_ucontext *context,
-			       struct ib_udata *udata);
-int bnxt_re_dealloc_pd(struct ib_pd *pd);
-struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
-				struct rdma_ah_attr *ah_attr,
-				struct ib_udata *udata);
+int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+		      struct ib_udata *udata);
 int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
 int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-int bnxt_re_destroy_ah(struct ib_ah *ah);
-struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd,
-				  struct ib_srq_init_attr *srq_init_attr,
-				  struct ib_udata *udata);
+int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
+int bnxt_re_create_srq(struct ib_srq *srq,
+		       struct ib_srq_init_attr *srq_init_attr,
+		       struct ib_udata *udata);
 int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
 		       enum ib_srq_attr_mask srq_attr_mask,
 		       struct ib_udata *udata);
 int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int bnxt_re_destroy_srq(struct ib_srq *srq);
+int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
 			  const struct ib_recv_wr **bad_recv_wr);
 struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
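The new prototypes assume the core-allocation model: the verbs layer allocates each container based on sizes the driver advertises in its ib_device_ops and hands the embedded object to the callback. A hedged sketch of the matching ops-table declarations (the real initializer for bnxt_re lives in main.c and lists many more callbacks; this only shows the pattern):

static const struct ib_device_ops bnxt_re_dev_ops_sketch = {
	/* callbacks that receive core-allocated objects */
	.alloc_pd = bnxt_re_alloc_pd,
	.create_ah = bnxt_re_create_ah,
	.create_srq = bnxt_re_create_srq,
	.alloc_ucontext = bnxt_re_alloc_ucontext,

	/* tell the core how large each driver container is and where
	 * the embedded ib_* member sits inside it
	 */
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};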
@@ -190,16 +186,14 @@
 			int qp_attr_mask, struct ib_udata *udata);
 int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-int bnxt_re_destroy_qp(struct ib_qp *qp);
+int bnxt_re_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
 int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
 		      const struct ib_send_wr **bad_send_wr);
 int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
 		      const struct ib_recv_wr **bad_recv_wr);
-struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
-				const struct ib_cq_init_attr *attr,
-				struct ib_ucontext *context,
-				struct ib_udata *udata);
-int bnxt_re_destroy_cq(struct ib_cq *cq);
+int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+		      struct ib_udata *udata);
+int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
 int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
@@ -208,16 +202,15 @@
 		      unsigned int *sg_offset);
 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
 			       u32 max_num_sg);
-int bnxt_re_dereg_mr(struct ib_mr *mr);
+int bnxt_re_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
 			       struct ib_udata *udata);
 int bnxt_re_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int mr_access_flags,
 				  struct ib_udata *udata);
-struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
-					   struct ib_udata *udata);
-int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
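With the ucontext prototypes above, setup failures are reported through the int return of bnxt_re_alloc_ucontext(), and dealloc becomes void because the core frees the container and teardown cannot fail. A minimal skeleton of that shape (the body here is illustrative only, not the driver's actual implementation):

int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
{
	struct bnxt_re_ucontext *uctx =
		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);

	spin_lock_init(&uctx->sh_lock);
	/* allocate the shared page, set up the DPI, reply via udata;
	 * on failure return -errno and the core frees ctx for us
	 */
	return 0;
}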
---|