forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -53,6 +53,8 @@
 #include "hfi.h"
 #include "mad.h"
 #include "sdma.h"
+#include "ipoib.h"
+#include "user_sdma.h"
 
 const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
 
@@ -114,19 +116,27 @@
 		__field(u32, qpn)
 		__field(u32, flags)
 		__field(u32, s_flags)
+		__field(u32, ps_flags)
+		__field(unsigned long, iow_flags)
 	),
 	TP_fast_assign(
 		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
 		__entry->flags = flags;
 		__entry->qpn = qp->ibqp.qp_num;
 		__entry->s_flags = qp->s_flags;
+		__entry->ps_flags =
+			((struct hfi1_qp_priv *)qp->priv)->s_flags;
+		__entry->iow_flags =
+			((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
 	),
 	TP_printk(
-		"[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
+		"[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
 		__get_str(dev),
 		__entry->qpn,
 		__entry->flags,
-		__entry->s_flags
+		__entry->s_flags,
+		__entry->ps_flags,
+		__entry->iow_flags
 	)
 );
 
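This hunk widens a QP sleep/wakeup event class so it also captures the driver-private s_flags and the iowait flags reachable through qp->priv. A minimal sketch of firing an event built on such a class, assuming an event name like hfi1_qpwakeup is defined from it with DEFINE_EVENT() (every TRACE_EVENT/DEFINE_EVENT generates a trace_<name>() inline taking the TP_PROTO() arguments):

/*
 * Illustrative only: the event name and call site are assumptions,
 * not code from this patch.
 */
static void wakeup_and_trace(struct rvt_qp *qp, u32 flag)
{
	qp->s_flags |= flag;
	/* Field capture, including the new ps_flags/iow_flags reads
	 * through qp->priv, happens inside TP_fast_assign() and only
	 * when the event is enabled. */
	trace_hfi1_qpwakeup(qp, flag);
}
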
@@ -580,7 +590,7 @@
 	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
 	TP_ARGS(dd, ctxt, subctxt, i),
 	TP_STRUCT__entry(
-		DD_DEV_ENTRY(dd);
+		DD_DEV_ENTRY(dd)
 		__field(u16, ctxt)
 		__field(u8, subctxt)
 		__field(u8, ver_opcode)
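The only change here drops a stray semicolon after DD_DEV_ENTRY(dd). A sketch of why that matters, assuming DD_DEV_ENTRY() wraps __string() as in the other hfi1 trace headers: TP_STRUCT__entry() receives macro fragments, not C statements, and the tracing core pastes each fragment into a generated struct, roughly:

struct trace_event_raw_example {	/* sketch, not the exact expansion */
	struct trace_entry ent;
	u32 __data_loc_dev;		/* DD_DEV_ENTRY() via __string() */
	u16 ctxt;			/* __field(u16, ctxt) */
	u8 subctxt;			/* __field(u8, subctxt) */
};

A ';' written inside the argument list lands in that struct body as an empty declaration, so entries must not be terminated like statements.
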
@@ -643,6 +653,80 @@
 		__get_str(dev), __entry->ctxt, __entry->subctxt,
 		__entry->idx, show_usdma_complete_state(__entry->state),
 		__entry->code)
+);
+
+TRACE_EVENT(hfi1_usdma_defer,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     struct sdma_engine *sde,
+		     struct iowait *wait),
+	    TP_ARGS(pq, sde, wait),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(struct sdma_engine *, sde)
+			     __field(struct iowait *, wait)
+			     __field(int, engine)
+			     __field(int, empty)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->sde = sde;
+			   __entry->wait = wait;
+			   __entry->engine = sde->this_idx;
+			   __entry->empty = list_empty(&__entry->wait->list);
+			   ),
+	    TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      (unsigned long long)__entry->sde,
+		      (unsigned long long)__entry->wait,
+		      __entry->engine,
+		      __entry->empty
+		      )
+);
+
+TRACE_EVENT(hfi1_usdma_activate,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     struct iowait *wait,
+		     int reason),
+	    TP_ARGS(pq, wait, reason),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(struct iowait *, wait)
+			     __field(int, reason)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->wait = wait;
+			   __entry->reason = reason;
+			   ),
+	    TP_printk("[%s] pq %llx wait %llx reason %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      (unsigned long long)__entry->wait,
+		      __entry->reason
+		      )
+);
+
+TRACE_EVENT(hfi1_usdma_we,
+	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
+		     int we_ret),
+	    TP_ARGS(pq, we_ret),
+	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
+			     __field(struct hfi1_user_sdma_pkt_q *, pq)
+			     __field(int, state)
+			     __field(int, we_ret)
+			     ),
+	    TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
+			   __entry->pq = pq;
+			   __entry->state = pq->state;
+			   __entry->we_ret = we_ret;
+			   ),
+	    TP_printk("[%s] pq %llx state %d we_ret %d",
+		      __get_str(dev),
+		      (unsigned long long)__entry->pq,
+		      __entry->state,
+		      __entry->we_ret
+		      )
 );
 
 const char *print_u32_array(struct trace_seq *, u32 *, int);
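The three new events trace the user-SDMA back-pressure path: a request deferred onto an engine's iowait list, a queue being (re)activated, and the result of a wait-event on the queue. Hypothetical call sites, for illustration only (the real callers presumably live in user_sdma.c, whose header this file now includes); each TRACE_EVENT(name, ...) above generates a trace_<name>() static inline taking the TP_PROTO() arguments:

/* Park a request that could not get descriptors, tracing engine and
 * iowait-list state at the moment of deferral. */
static int defer_example(struct hfi1_user_sdma_pkt_q *pq,
			 struct sdma_engine *sde, struct iowait *wait)
{
	trace_hfi1_usdma_defer(pq, sde, wait);
	return -EBUSY;
}

/* Record why a parked queue is being made runnable again. */
static void activate_example(struct hfi1_user_sdma_pkt_q *pq,
			     struct iowait *wait, int reason)
{
	trace_hfi1_usdma_activate(pq, wait, reason);
}

/* we_ret is the return value of a wait_event_*() call on the queue;
 * tracing it alongside pq->state shows why a waiter woke up. */
static void wait_example(struct hfi1_user_sdma_pkt_q *pq, int we_ret)
{
	trace_hfi1_usdma_we(pq, we_ret);
}
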
@@ -838,12 +922,121 @@
 	TP_ARGS(qp, flag)
 );
 
+DEFINE_EVENT(/* event */
+	hfi1_do_send_template, hfi1_rc_do_tid_send,
+	TP_PROTO(struct rvt_qp *qp, bool flag),
+	TP_ARGS(qp, flag)
+);
+
 DEFINE_EVENT(
 	hfi1_do_send_template, hfi1_rc_expired_time_slice,
 	TP_PROTO(struct rvt_qp *qp, bool flag),
 	TP_ARGS(qp, flag)
 );
 
+DECLARE_EVENT_CLASS(/* AIP */
+	hfi1_ipoib_txq_template,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq),
+	TP_STRUCT__entry(/* entry */
+		DD_DEV_ENTRY(txq->priv->dd)
+		__field(struct hfi1_ipoib_txq *, txq)
+		__field(struct sdma_engine *, sde)
+		__field(ulong, head)
+		__field(ulong, tail)
+		__field(uint, used)
+		__field(uint, flow)
+		__field(int, stops)
+		__field(int, no_desc)
+		__field(u8, idx)
+		__field(u8, stopped)
+	),
+	TP_fast_assign(/* assign */
+		DD_DEV_ASSIGN(txq->priv->dd)
+		__entry->txq = txq;
+		__entry->sde = txq->sde;
+		__entry->head = txq->tx_ring.head;
+		__entry->tail = txq->tx_ring.tail;
+		__entry->idx = txq->q_idx;
+		__entry->used =
+			txq->sent_txreqs -
+			atomic64_read(&txq->complete_txreqs);
+		__entry->flow = txq->flow.as_int;
+		__entry->stops = atomic_read(&txq->stops);
+		__entry->no_desc = atomic_read(&txq->no_desc);
+		__entry->stopped =
+			__netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
+	),
+	TP_printk(/* print */
+		"[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
+		__get_str(dev),
+		(unsigned long long)__entry->txq,
+		__entry->idx,
+		(unsigned long long)__entry->sde,
+		__entry->head,
+		__entry->tail,
+		__entry->flow,
+		__entry->used,
+		__entry->stops,
+		__entry->no_desc,
+		__entry->stopped
+	)
+);
+
+DEFINE_EVENT(/* queue stop */
+	hfi1_ipoib_txq_template, hfi1_txq_stop,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queue wake */
+	hfi1_ipoib_txq_template, hfi1_txq_wake,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow flush */
+	hfi1_ipoib_txq_template, hfi1_flow_flush,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* flow switch */
+	hfi1_ipoib_txq_template, hfi1_flow_switch,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* wakeup */
+	hfi1_ipoib_txq_template, hfi1_txq_wakeup,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* full */
+	hfi1_ipoib_txq_template, hfi1_txq_full,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* queued */
+	hfi1_ipoib_txq_template, hfi1_txq_queued,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_stopped */
+	hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* xmit_unstopped */
+	hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
+	TP_PROTO(struct hfi1_ipoib_txq *txq),
+	TP_ARGS(txq)
+);
+
 #endif /* __HFI1_TRACE_TX_H */
 
 #undef TRACE_INCLUDE_PATH
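The AIP hunk declares one event class that snapshots an IPoIB tx queue (ring head/tail, outstanding txreqs, flow, stop counters, subqueue state) and stamps out nine variants with DEFINE_EVENT(), so every lifecycle point prints the identical format string. A sketch of the intended pairing, with illustrative call sites (netif_stop_subqueue()/netif_wake_subqueue() are standard netdev calls, but the surrounding functions are assumptions, not this patch's code):

static void stop_txq_example(struct hfi1_ipoib_txq *txq)
{
	trace_hfi1_txq_stop(txq);	/* snapshot before stopping */
	netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}

static void wake_txq_example(struct hfi1_ipoib_txq *txq)
{
	trace_hfi1_txq_wake(txq);	/* matched snapshot on wake */
	netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
}

Because all nine events share the class's TP_printk(), enabling any pair of them under this header's trace system directory in tracefs yields directly comparable before/after lines for the same queue.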