forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -34,7 +34,7 @@
 #include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
 #include <linux/completion.h>
 #include <linux/netdevice.h>
 #include <linux/sched/mm.h>
@@ -315,16 +315,15 @@
         struct ib_device ibdev;
         struct c4iw_rdev rdev;
         u32 device_cap_flags;
-        struct idr cqidr;
-        struct idr qpidr;
-        struct idr mmidr;
-        spinlock_t lock;
+        struct xarray cqs;
+        struct xarray qps;
+        struct xarray mrs;
         struct mutex db_mutex;
         struct dentry *debugfs_root;
         enum db_state db_state;
-        struct idr hwtid_idr;
-        struct idr atid_idr;
-        struct idr stid_idr;
+        struct xarray hwtids;
+        struct xarray atids;
+        struct xarray stids;
         struct list_head db_fc_list;
         u32 avail_ird;
         wait_queue_head_t wait;
@@ -349,70 +348,12 @@
 
 static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
 {
-        return idr_find(&rhp->cqidr, cqid);
+        return xa_load(&rhp->cqs, cqid);
 }
 
 static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
 {
-        return idr_find(&rhp->qpidr, qpid);
-}
-
-static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
-{
-        return idr_find(&rhp->mmidr, mmid);
-}
-
-static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
-                                 void *handle, u32 id, int lock)
-{
-        int ret;
-
-        if (lock) {
-                idr_preload(GFP_KERNEL);
-                spin_lock_irq(&rhp->lock);
-        }
-
-        ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);
-
-        if (lock) {
-                spin_unlock_irq(&rhp->lock);
-                idr_preload_end();
-        }
-
-        return ret < 0 ? ret : 0;
-}
-
-static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
-                                void *handle, u32 id)
-{
-        return _insert_handle(rhp, idr, handle, id, 1);
-}
-
-static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
-                                       void *handle, u32 id)
-{
-        return _insert_handle(rhp, idr, handle, id, 0);
-}
-
-static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
-                                  u32 id, int lock)
-{
-        if (lock)
-                spin_lock_irq(&rhp->lock);
-        idr_remove(idr, id);
-        if (lock)
-                spin_unlock_irq(&rhp->lock);
-}
-
-static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
-{
-        _remove_handle(rhp, idr, id, 1);
-}
-
-static inline void remove_handle_nolock(struct c4iw_dev *rhp,
-                                        struct idr *idr, u32 id)
-{
-        _remove_handle(rhp, idr, id, 0);
+        return xa_load(&rhp->qps, qpid);
 }
 
 extern uint c4iw_max_read_depth;
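
The insert_handle()/remove_handle() wrappers (and the per-device spinlock they guarded) disappear entirely: an XArray carries its own internal lock, so callers in the .c files can use the XArray API directly. A minimal sketch of the equivalent operations, assuming the arrays are initialized for IRQ-safe locking to match the old spin_lock_irq() usage (the helper names here are illustrative, not part of this patch):

/* Sketch only. At device-init time each array presumably gets an
 * IRQ-safe internal lock, matching the old spin_lock_irq() use:
 *
 *        xa_init_flags(&rhp->qps, XA_FLAGS_LOCK_IRQ);
 */

/* insert_handle(rhp, &rhp->qpidr, qhp, qpid) maps onto: */
static int example_track_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                            u32 qpid)
{
        /* xa_insert_irq() returns -EBUSY if qpid is already present,
         * mirroring the old single-slot idr_alloc(id, id + 1, ...). */
        return xa_insert_irq(&rhp->qps, qpid, qhp, GFP_KERNEL);
}

/* remove_handle(rhp, &rhp->qpidr, qpid) maps onto: */
static void example_untrack_qp(struct c4iw_dev *rhp, u32 qpid)
{
        xa_erase_irq(&rhp->qps, qpid);
}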
@@ -549,13 +490,13 @@
         struct t4_wq wq;
         spinlock_t lock;
         struct mutex mutex;
-        struct kref kref;
         wait_queue_head_t wait;
         int sq_sig_all;
         struct c4iw_srq *srq;
-        struct work_struct free_work;
         struct c4iw_ucontext *ucontext;
         struct c4iw_wr_wait *wr_waitp;
+        struct completion qp_rel_comp;
+        refcount_t qp_refcnt;
 };
 
 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
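
In struct c4iw_qp, the kref plus free_work pair gives way to a plain refcount_t and a completion: the final reference drop can wake the thread blocked in destroy directly instead of bouncing through a workqueue. A hedged sketch of how the declared c4iw_qp_add_ref()/c4iw_qp_rem_ref() pair would cooperate with the destroy path under this scheme (the bodies are illustrative, not lifted from the patch):

void c4iw_qp_add_ref(struct ib_qp *qp)
{
        refcount_inc(&to_c4iw_qp(qp)->qp_refcnt);
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
        struct c4iw_qp *qhp = to_c4iw_qp(qp);

        /* The last put signals whoever is blocked in destroy. */
        if (refcount_dec_and_test(&qhp->qp_refcnt))
                complete(&qhp->qp_rel_comp);
}

/* Create would pair refcount_set(&qhp->qp_refcnt, 1) with
 * init_completion(&qhp->qp_rel_comp); destroy then drops its
 * reference and waits out any remaining users: */
static void example_destroy_wait(struct c4iw_qp *qhp)
{
        c4iw_qp_rem_ref(&qhp->ibqp);
        wait_for_completion(&qhp->qp_rel_comp);
}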
@@ -589,25 +530,12 @@
         u32 key;
         spinlock_t mmap_lock;
         struct list_head mmaps;
-        struct kref kref;
         bool is_32b_cqe;
 };
 
 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
 {
         return container_of(c, struct c4iw_ucontext, ibucontext);
-}
-
-void _c4iw_free_ucontext(struct kref *kref);
-
-static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
-{
-        kref_put(&ucontext->kref, _c4iw_free_ucontext);
-}
-
-static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
-{
-        kref_get(&ucontext->kref);
 }
 
 struct c4iw_mm_entry {
@@ -779,7 +707,7 @@
         u8 flags;
         u8 revision;
         __be16 private_data_size;
-        u8 private_data[0];
+        u8 private_data[];
 };
 
 struct mpa_v2_conn_params {
@@ -791,7 +719,7 @@
         u8 layer_etype;
         u8 ecode;
         __be16 hdrct_rsvd;
-        u8 len_hdrs[0];
+        u8 len_hdrs[];
 };
 
 #define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
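
The private_data[0] and len_hdrs[0] changes replace the GNU zero-length-array idiom with a C99 flexible array member. Struct layout and sizeof are unchanged (the flexible tail contributes no size), but the compiler and fortify checks can now flag out-of-bounds accesses. An illustrative trailing-data allocation, with hypothetical pdata/plen inputs:

static struct mpa_message *example_alloc_mpa(const void *pdata, u16 plen)
{
        struct mpa_message *mpa;

        /* sizeof(*mpa) excludes the flexible private_data[] tail,
         * so the payload length is simply added on top. */
        mpa = kmalloc(sizeof(*mpa) + plen, GFP_KERNEL);
        if (!mpa)
                return NULL;
        mpa->private_data_size = cpu_to_be16(plen);
        memcpy(mpa->private_data, pdata, plen);
        return mpa;
}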
@@ -982,6 +910,9 @@
         int rcv_win;
         u32 snd_wscale;
         struct c4iw_ep_stats stats;
+        u32 srqe_idx;
+        u32 rx_pdu_out_cnt;
+        struct sk_buff *peer_abort_skb;
 };
 
 static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
@@ -1048,34 +979,29 @@
 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
 void c4iw_qp_add_ref(struct ib_qp *qp);
 void c4iw_qp_rem_ref(struct ib_qp *qp);
-struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
-                            enum ib_mr_type mr_type,
+struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                             u32 max_num_sg);
 int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                    unsigned int *sg_offset);
 int c4iw_dealloc_mw(struct ib_mw *mw);
 void c4iw_dealloc(struct uld_ctx *ctx);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-                            struct ib_udata *udata);
+int c4iw_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
 struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
                                u64 length, u64 virt, int acc,
                                struct ib_udata *udata);
 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
-int c4iw_dereg_mr(struct ib_mr *ib_mr);
-int c4iw_destroy_cq(struct ib_cq *ib_cq);
-struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
-                             const struct ib_cq_init_attr *attr,
-                             struct ib_ucontext *ib_context,
-                             struct ib_udata *udata);
+int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
+int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+                   struct ib_udata *udata);
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask srq_attr_mask,
                     struct ib_udata *udata);
-int c4iw_destroy_srq(struct ib_srq *ib_srq);
-struct ib_srq *c4iw_create_srq(struct ib_pd *pd,
-                               struct ib_srq_init_attr *attrs,
-                               struct ib_udata *udata);
-int c4iw_destroy_qp(struct ib_qp *ib_qp);
+int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
+int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
+                    struct ib_udata *udata);
+int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
 struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                              struct ib_qp_init_attr *attrs,
                              struct ib_udata *udata);
@@ -1126,8 +1052,9 @@
                       const struct ib_recv_wr **bad_wr);
 struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
 
-typedef int c4iw_restrack_func(struct sk_buff *msg,
-                               struct rdma_restrack_entry *res);
-extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];
+int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr);
+int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq);
+int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp);
+int c4iw_fill_res_cm_id_entry(struct sk_buff *msg, struct rdma_cm_id *cm_id);
 
 #endif
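
The RDMA_RESTRACK_MAX-indexed function-pointer table is replaced by one typed fill prototype per resource. In kernels of this vintage these hooks are wired up through the driver's ib_device_ops table rather than a driver-private array; a plausible sketch of that hookup (the ops-table fragment below is an assumption about the matching provider.c change, not part of this header):

static const struct ib_device_ops c4iw_dev_ops = {
        /* ... existing verb entries ... */
        .fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry,
        .fill_res_cq_entry = c4iw_fill_res_cq_entry,
        .fill_res_mr_entry = c4iw_fill_res_mr_entry,
        .fill_res_qp_entry = c4iw_fill_res_qp_entry,
};

/* ...and during device registration:
 *
 *        ib_set_device_ops(&dev->ibdev, &c4iw_dev_ops);
 */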