From f9004dbfff8a3fbbd7e2a88c8a4327c7f2f8e5b2 Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Wed, 31 Jan 2024 01:04:47 +0000 Subject: [PATCH] add driver 5G --- kernel/include/trace/events/rpcrdma.h | 1204 +++++++++++++++++++++++++++++++++++++------------------- 1 files changed, 797 insertions(+), 407 deletions(-) diff --git a/kernel/include/trace/events/rpcrdma.h b/kernel/include/trace/events/rpcrdma.h index 4a0b189..bf10657 100644 --- a/kernel/include/trace/events/rpcrdma.h +++ b/kernel/include/trace/events/rpcrdma.h @@ -10,12 +10,55 @@ #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_RPCRDMA_H +#include <linux/scatterlist.h> +#include <linux/sunrpc/rpc_rdma_cid.h> #include <linux/tracepoint.h> +#include <rdma/ib_cm.h> #include <trace/events/rdma.h> /** ** Event classes **/ + +DECLARE_EVENT_CLASS(rpcrdma_completion_class, + TP_PROTO( + const struct ib_wc *wc, + const struct rpc_rdma_cid *cid + ), + + TP_ARGS(wc, cid), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + __field(unsigned long, status) + __field(unsigned int, vendor_err) + ), + + TP_fast_assign( + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; + __entry->status = wc->status; + if (wc->status) + __entry->vendor_err = wc->vendor_err; + else + __entry->vendor_err = 0; + ), + + TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)", + __entry->cq_id, __entry->completion_id, + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err + ) +); + +#define DEFINE_COMPLETION_EVENT(name) \ + DEFINE_EVENT(rpcrdma_completion_class, name, \ + TP_PROTO( \ + const struct ib_wc *wc, \ + const struct rpc_rdma_cid *cid \ + ), \ + TP_ARGS(wc, cid)) DECLARE_EVENT_CLASS(xprtrdma_reply_event, TP_PROTO( @@ -84,6 +127,44 @@ ), \ TP_ARGS(r_xprt)) +DECLARE_EVENT_CLASS(xprtrdma_connect_class, + TP_PROTO( + const struct rpcrdma_xprt *r_xprt, + int rc + ), + + TP_ARGS(r_xprt, rc), + + TP_STRUCT__entry( + __field(const void *, r_xprt) + __field(int, rc) + __field(int, connect_status) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) + ), + + TP_fast_assign( + __entry->r_xprt = r_xprt; + __entry->rc = rc; + __entry->connect_status = r_xprt->rx_ep->re_connect_status; + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); + ), + + TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d", + __get_str(addr), __get_str(port), __entry->r_xprt, + __entry->rc, __entry->connect_status + ) +); + +#define DEFINE_CONN_EVENT(name) \ + DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name, \ + TP_PROTO( \ + const struct rpcrdma_xprt *r_xprt, \ + int rc \ + ), \ + TP_ARGS(r_xprt, rc)) + DECLARE_EVENT_CLASS(xprtrdma_rdch_event, TP_PROTO( const struct rpc_task *task, @@ -97,7 +178,6 @@ TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(const void *, mr) __field(unsigned int, pos) __field(int, nents) __field(u32, handle) @@ -109,7 +189,6 @@ TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; - __entry->mr = mr; __entry->pos = pos; __entry->nents = mr->mr_nents; __entry->handle = mr->mr_handle; @@ -118,8 +197,8 @@ __entry->nsegs = nsegs; ), - TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)", - __entry->task_id, __entry->client_id, __entry->mr, + TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)", + __entry->task_id, __entry->client_id, __entry->pos, __entry->length, (unsigned long 
long)__entry->offset, __entry->handle, __entry->nents < __entry->nsegs ? "more" : "last" @@ -127,7 +206,7 @@ ); #define DEFINE_RDCH_EVENT(name) \ - DEFINE_EVENT(xprtrdma_rdch_event, name, \ + DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\ TP_PROTO( \ const struct rpc_task *task, \ unsigned int pos, \ @@ -148,7 +227,6 @@ TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(const void *, mr) __field(int, nents) __field(u32, handle) __field(u32, length) @@ -159,7 +237,6 @@ TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; - __entry->mr = mr; __entry->nents = mr->mr_nents; __entry->handle = mr->mr_handle; __entry->length = mr->mr_length; @@ -167,8 +244,8 @@ __entry->nsegs = nsegs; ), - TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)", - __entry->task_id, __entry->client_id, __entry->mr, + TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)", + __entry->task_id, __entry->client_id, __entry->length, (unsigned long long)__entry->offset, __entry->handle, __entry->nents < __entry->nsegs ? "more" : "last" @@ -176,25 +253,13 @@ ); #define DEFINE_WRCH_EVENT(name) \ - DEFINE_EVENT(xprtrdma_wrch_event, name, \ + DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\ TP_PROTO( \ const struct rpc_task *task, \ struct rpcrdma_mr *mr, \ int nsegs \ ), \ TP_ARGS(task, mr, nsegs)) - -TRACE_DEFINE_ENUM(FRWR_IS_INVALID); -TRACE_DEFINE_ENUM(FRWR_IS_VALID); -TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR); -TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI); - -#define xprtrdma_show_frwr_state(x) \ - __print_symbolic(x, \ - { FRWR_IS_INVALID, "INVALID" }, \ - { FRWR_IS_VALID, "VALID" }, \ - { FRWR_FLUSHED_FR, "FLUSHED_FR" }, \ - { FRWR_FLUSHED_LI, "FLUSHED_LI" }) DECLARE_EVENT_CLASS(xprtrdma_frwr_done, TP_PROTO( @@ -205,23 +270,20 @@ TP_ARGS(wc, frwr), TP_STRUCT__entry( - __field(const void *, mr) - __field(unsigned int, state) + __field(u32, mr_id) __field(unsigned int, status) __field(unsigned int, vendor_err) ), TP_fast_assign( - __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr); - __entry->state = frwr->fr_state; + __entry->mr_id = frwr->fr_mr->res.id; __entry->status = wc->status; __entry->vendor_err = __entry->status ? 
wc->vendor_err : 0; ), TP_printk( - "mr=%p state=%s: %s (%u/0x%x)", - __entry->mr, xprtrdma_show_frwr_state(__entry->state), - rdma_show_wc_status(__entry->status), + "mr.id=%u: %s (%u/0x%x)", + __entry->mr_id, rdma_show_wc_status(__entry->status), __entry->status, __entry->vendor_err ) ); @@ -234,6 +296,18 @@ ), \ TP_ARGS(wc, frwr)) +TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL); +TRACE_DEFINE_ENUM(DMA_TO_DEVICE); +TRACE_DEFINE_ENUM(DMA_FROM_DEVICE); +TRACE_DEFINE_ENUM(DMA_NONE); + +#define xprtrdma_show_direction(x) \ + __print_symbolic(x, \ + { DMA_BIDIRECTIONAL, "BIDIR" }, \ + { DMA_TO_DEVICE, "TO_DEVICE" }, \ + { DMA_FROM_DEVICE, "FROM_DEVICE" }, \ + { DMA_NONE, "NONE" }) + DECLARE_EVENT_CLASS(xprtrdma_mr, TP_PROTO( const struct rpcrdma_mr *mr @@ -242,28 +316,32 @@ TP_ARGS(mr), TP_STRUCT__entry( - __field(const void *, mr) + __field(u32, mr_id) + __field(int, nents) __field(u32, handle) __field(u32, length) __field(u64, offset) + __field(u32, dir) ), TP_fast_assign( - __entry->mr = mr; + __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->nents = mr->mr_nents; __entry->handle = mr->mr_handle; __entry->length = mr->mr_length; __entry->offset = mr->mr_offset; + __entry->dir = mr->mr_dir; ), - TP_printk("mr=%p %u@0x%016llx:0x%08x", - __entry->mr, __entry->length, - (unsigned long long)__entry->offset, - __entry->handle + TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)", + __entry->mr_id, __entry->nents, __entry->length, + (unsigned long long)__entry->offset, __entry->handle, + xprtrdma_show_direction(__entry->dir) ) ); #define DEFINE_MR_EVENT(name) \ - DEFINE_EVENT(xprtrdma_mr, name, \ + DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \ TP_PROTO( \ const struct rpcrdma_mr *mr \ ), \ @@ -306,105 +384,136 @@ ** Connection events **/ -TRACE_EVENT(xprtrdma_conn_upcall, +TRACE_EVENT(xprtrdma_inline_thresh, TP_PROTO( - const struct rpcrdma_xprt *r_xprt, - struct rdma_cm_event *event + const struct rpcrdma_ep *ep ), - TP_ARGS(r_xprt, event), + TP_ARGS(ep), + + TP_STRUCT__entry( + __field(unsigned int, inline_send) + __field(unsigned int, inline_recv) + __field(unsigned int, max_send) + __field(unsigned int, max_recv) + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) + __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) + ), + + TP_fast_assign( + const struct rdma_cm_id *id = ep->re_id; + + __entry->inline_send = ep->re_inline_send; + __entry->inline_recv = ep->re_inline_recv; + __entry->max_send = ep->re_max_inline_send; + __entry->max_recv = ep->re_max_inline_recv; + memcpy(__entry->srcaddr, &id->route.addr.src_addr, + sizeof(struct sockaddr_in6)); + memcpy(__entry->dstaddr, &id->route.addr.dst_addr, + sizeof(struct sockaddr_in6)); + ), + + TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u", + __entry->srcaddr, __entry->dstaddr, + __entry->inline_send, __entry->inline_recv, + __entry->max_send, __entry->max_recv + ) +); + +DEFINE_CONN_EVENT(connect); +DEFINE_CONN_EVENT(disconnect); + +DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); + +TRACE_EVENT(xprtrdma_op_connect, + TP_PROTO( + const struct rpcrdma_xprt *r_xprt, + unsigned long delay + ), + + TP_ARGS(r_xprt, delay), TP_STRUCT__entry( __field(const void *, r_xprt) - __field(unsigned int, event) - __field(int, status) + __field(unsigned long, delay) __string(addr, rpcrdma_addrstr(r_xprt)) __string(port, rpcrdma_portstr(r_xprt)) ), TP_fast_assign( __entry->r_xprt = r_xprt; - __entry->event = event->event; - __entry->status = event->status; + __entry->delay = delay; __assign_str(addr, rpcrdma_addrstr(r_xprt)); 
__assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)", - __get_str(addr), __get_str(port), - __entry->r_xprt, rdma_show_cm_event(__entry->event), - __entry->event, __entry->status + TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu", + __get_str(addr), __get_str(port), __entry->r_xprt, + __entry->delay ) ); -TRACE_EVENT(xprtrdma_disconnect, + +TRACE_EVENT(xprtrdma_op_set_cto, TP_PROTO( const struct rpcrdma_xprt *r_xprt, - int status + unsigned long connect, + unsigned long reconnect ), - TP_ARGS(r_xprt, status), + TP_ARGS(r_xprt, connect, reconnect), TP_STRUCT__entry( __field(const void *, r_xprt) - __field(int, status) - __field(int, connected) + __field(unsigned long, connect) + __field(unsigned long, reconnect) __string(addr, rpcrdma_addrstr(r_xprt)) __string(port, rpcrdma_portstr(r_xprt)) ), TP_fast_assign( __entry->r_xprt = r_xprt; - __entry->status = status; - __entry->connected = r_xprt->rx_ep.rep_connected; + __entry->connect = connect; + __entry->reconnect = reconnect; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected", - __get_str(addr), __get_str(port), - __entry->r_xprt, __entry->status, - __entry->connected == 1 ? "still " : "dis" + TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu", + __get_str(addr), __get_str(port), __entry->r_xprt, + __entry->connect / HZ, __entry->reconnect / HZ ) ); -DEFINE_RXPRT_EVENT(xprtrdma_conn_start); -DEFINE_RXPRT_EVENT(xprtrdma_conn_tout); -DEFINE_RXPRT_EVENT(xprtrdma_create); -DEFINE_RXPRT_EVENT(xprtrdma_destroy); -DEFINE_RXPRT_EVENT(xprtrdma_remove); -DEFINE_RXPRT_EVENT(xprtrdma_reinsert); -DEFINE_RXPRT_EVENT(xprtrdma_reconnect); -DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc); - -TRACE_EVENT(xprtrdma_qp_error, +TRACE_EVENT(xprtrdma_qp_event, TP_PROTO( - const struct rpcrdma_xprt *r_xprt, + const struct rpcrdma_ep *ep, const struct ib_event *event ), - TP_ARGS(r_xprt, event), + TP_ARGS(ep, event), TP_STRUCT__entry( - __field(const void *, r_xprt) - __field(unsigned int, event) + __field(unsigned long, event) __string(name, event->device->name) - __string(addr, rpcrdma_addrstr(r_xprt)) - __string(port, rpcrdma_portstr(r_xprt)) + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) + __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) ), TP_fast_assign( - __entry->r_xprt = r_xprt; + const struct rdma_cm_id *id = ep->re_id; + __entry->event = event->event; __assign_str(name, event->device->name); - __assign_str(addr, rpcrdma_addrstr(r_xprt)); - __assign_str(port, rpcrdma_portstr(r_xprt)); + memcpy(__entry->srcaddr, &id->route.addr.src_addr, + sizeof(struct sockaddr_in6)); + memcpy(__entry->dstaddr, &id->route.addr.dst_addr, + sizeof(struct sockaddr_in6)); ), - TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)", - __get_str(addr), __get_str(port), __entry->r_xprt, - __get_str(name), rdma_show_ib_event(__entry->event), - __entry->event + TP_printk("%pISpc -> %pISpc device=%s %s (%lu)", + __entry->srcaddr, __entry->dstaddr, __get_str(name), + rdma_show_ib_event(__entry->event), __entry->event ) ); @@ -422,26 +531,89 @@ TP_STRUCT__entry( __field(const void *, r_xprt) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) __field(unsigned int, count) ), TP_fast_assign( __entry->r_xprt = r_xprt; __entry->count = count; + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); ), - TP_printk("r_xprt=%p: created %u MRs", - 
__entry->r_xprt, __entry->count + TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs", + __get_str(addr), __get_str(port), __entry->r_xprt, + __entry->count ) ); -DEFINE_RXPRT_EVENT(xprtrdma_nomrs); +TRACE_EVENT(xprtrdma_mr_get, + TP_PROTO( + const struct rpcrdma_req *req + ), -DEFINE_RDCH_EVENT(xprtrdma_read_chunk); -DEFINE_WRCH_EVENT(xprtrdma_write_chunk); -DEFINE_WRCH_EVENT(xprtrdma_reply_chunk); + TP_ARGS(req), + + TP_STRUCT__entry( + __field(const void *, req) + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + ), + + TP_fast_assign( + const struct rpc_rqst *rqst = &req->rl_slot; + + __entry->req = req; + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + ), + + TP_printk("task:%u@%u xid=0x%08x req=%p", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->req + ) +); + +TRACE_EVENT(xprtrdma_nomrs, + TP_PROTO( + const struct rpcrdma_req *req + ), + + TP_ARGS(req), + + TP_STRUCT__entry( + __field(const void *, req) + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + ), + + TP_fast_assign( + const struct rpc_rqst *rqst = &req->rl_slot; + + __entry->req = req; + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + ), + + TP_printk("task:%u@%u xid=0x%08x req=%p", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->req + ) +); + +DEFINE_RDCH_EVENT(read); +DEFINE_WRCH_EVENT(write); +DEFINE_WRCH_EVENT(reply); TRACE_DEFINE_ENUM(rpcrdma_noch); +TRACE_DEFINE_ENUM(rpcrdma_noch_pullup); +TRACE_DEFINE_ENUM(rpcrdma_noch_mapped); TRACE_DEFINE_ENUM(rpcrdma_readch); TRACE_DEFINE_ENUM(rpcrdma_areadch); TRACE_DEFINE_ENUM(rpcrdma_writech); @@ -450,6 +622,8 @@ #define xprtrdma_show_chunktype(x) \ __print_symbolic(x, \ { rpcrdma_noch, "inline" }, \ + { rpcrdma_noch_pullup, "pullup" }, \ + { rpcrdma_noch_mapped, "mapped" }, \ { rpcrdma_readch, "read list" }, \ { rpcrdma_areadch, "*read list" }, \ { rpcrdma_writech, "write list" }, \ @@ -457,13 +631,12 @@ TRACE_EVENT(xprtrdma_marshal, TP_PROTO( - const struct rpc_rqst *rqst, - unsigned int hdrlen, + const struct rpcrdma_req *req, unsigned int rtype, unsigned int wtype ), - TP_ARGS(rqst, hdrlen, rtype, wtype), + TP_ARGS(req, rtype, wtype), TP_STRUCT__entry( __field(unsigned int, task_id) @@ -478,10 +651,12 @@ ), TP_fast_assign( + const struct rpc_rqst *rqst = &req->rl_slot; + __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client->cl_clid; __entry->xid = be32_to_cpu(rqst->rq_xid); - __entry->hdrlen = hdrlen; + __entry->hdrlen = req->rl_hdrbuf.len; __entry->headlen = rqst->rq_snd_buf.head[0].iov_len; __entry->pagelen = rqst->rq_snd_buf.page_len; __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len; @@ -498,53 +673,113 @@ ) ); -TRACE_EVENT(xprtrdma_post_send, - TP_PROTO( - const struct rpcrdma_req *req, - int status +TRACE_EVENT(xprtrdma_marshal_failed, + TP_PROTO(const struct rpc_rqst *rqst, + int ret ), - TP_ARGS(req, status), + TP_ARGS(rqst, ret), TP_STRUCT__entry( - __field(const void *, req) - __field(int, num_sge) - __field(bool, signaled) - __field(int, status) + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + __field(int, ret) ), TP_fast_assign( - __entry->req = req; - __entry->num_sge = req->rl_sendctx->sc_wr.num_sge; - __entry->signaled = req->rl_sendctx->sc_wr.send_flags & - IB_SEND_SIGNALED; - 
__entry->status = status; + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->ret = ret; ), - TP_printk("req=%p, %d SGEs%s, status=%d", - __entry->req, __entry->num_sge, - (__entry->signaled ? ", signaled" : ""), - __entry->status + TP_printk("task:%u@%u xid=0x%08x: ret=%d", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->ret + ) +); + +TRACE_EVENT(xprtrdma_prepsend_failed, + TP_PROTO(const struct rpc_rqst *rqst, + int ret + ), + + TP_ARGS(rqst, ret), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + __field(int, ret) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->ret = ret; + ), + + TP_printk("task:%u@%u xid=0x%08x: ret=%d", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->ret + ) +); + +TRACE_EVENT(xprtrdma_post_send, + TP_PROTO( + const struct rpcrdma_req *req + ), + + TP_ARGS(req), + + TP_STRUCT__entry( + __field(const void *, req) + __field(const void *, sc) + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(int, num_sge) + __field(int, signaled) + ), + + TP_fast_assign( + const struct rpc_rqst *rqst = &req->rl_slot; + + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client ? + rqst->rq_task->tk_client->cl_clid : -1; + __entry->req = req; + __entry->sc = req->rl_sendctx; + __entry->num_sge = req->rl_wr.num_sge; + __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED; + ), + + TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s", + __entry->task_id, __entry->client_id, + __entry->req, __entry->sc, __entry->num_sge, + (__entry->num_sge == 1 ? "" : "s"), + (__entry->signaled ? 
"signaled" : "") ) ); TRACE_EVENT(xprtrdma_post_recv, TP_PROTO( - const struct ib_cqe *cqe + const struct rpcrdma_rep *rep ), - TP_ARGS(cqe), + TP_ARGS(rep), TP_STRUCT__entry( - __field(const void *, cqe) + __field(const void *, rep) ), TP_fast_assign( - __entry->cqe = cqe; + __entry->rep = rep; ), - TP_printk("cqe=%p", - __entry->cqe + TP_printk("rep=%p", + __entry->rep ) ); @@ -570,7 +805,7 @@ __entry->r_xprt = r_xprt; __entry->count = count; __entry->status = status; - __entry->posted = r_xprt->rx_buf.rb_posted_receives; + __entry->posted = r_xprt->rx_ep->re_receive_count; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), @@ -578,6 +813,31 @@ TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)", __get_str(addr), __get_str(port), __entry->r_xprt, __entry->count, __entry->posted, __entry->status + ) +); + +TRACE_EVENT(xprtrdma_post_linv, + TP_PROTO( + const struct rpcrdma_req *req, + int status + ), + + TP_ARGS(req, status), + + TP_STRUCT__entry( + __field(const void *, req) + __field(int, status) + __field(u32, xid) + ), + + TP_fast_assign( + __entry->req = req; + __entry->status = status; + __entry->xid = be32_to_cpu(req->rl_slot.rq_xid); + ), + + TP_printk("req=%p xid=0x%08x status=%d", + __entry->req, __entry->xid, __entry->status ) ); @@ -595,6 +855,7 @@ TP_STRUCT__entry( __field(const void *, req) + __field(const void *, sc) __field(unsigned int, unmap_count) __field(unsigned int, status) __field(unsigned int, vendor_err) @@ -602,13 +863,14 @@ TP_fast_assign( __entry->req = sc->sc_req; + __entry->sc = sc; __entry->unmap_count = sc->sc_unmap_count; __entry->status = wc->status; __entry->vendor_err = __entry->status ? wc->vendor_err : 0; ), - TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)", - __entry->req, __entry->unmap_count, + TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)", + __entry->req, __entry->sc, __entry->unmap_count, rdma_show_wc_status(__entry->status), __entry->status, __entry->vendor_err ) @@ -622,14 +884,15 @@ TP_ARGS(wc), TP_STRUCT__entry( - __field(const void *, cqe) + __field(const void *, rep) __field(u32, byte_len) __field(unsigned int, status) __field(u32, vendor_err) ), TP_fast_assign( - __entry->cqe = wc->wr_cqe; + __entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep, + rr_cqe); __entry->status = wc->status; if (wc->status) { __entry->byte_len = 0; @@ -640,8 +903,8 @@ } ), - TP_printk("cqe=%p %u bytes: %s (%u/0x%x)", - __entry->cqe, __entry->byte_len, + TP_printk("rep=%p %u bytes: %s (%u/0x%x)", + __entry->rep, __entry->byte_len, rdma_show_wc_status(__entry->status), __entry->status, __entry->vendor_err ) @@ -650,12 +913,150 @@ DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake); +DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done); -DEFINE_MR_EVENT(xprtrdma_localinv); -DEFINE_MR_EVENT(xprtrdma_dma_map); -DEFINE_MR_EVENT(xprtrdma_dma_unmap); -DEFINE_MR_EVENT(xprtrdma_remoteinv); -DEFINE_MR_EVENT(xprtrdma_recover_mr); +TRACE_EVENT(xprtrdma_frwr_alloc, + TP_PROTO( + const struct rpcrdma_mr *mr, + int rc + ), + + TP_ARGS(mr, rc), + + TP_STRUCT__entry( + __field(u32, mr_id) + __field(int, rc) + ), + + TP_fast_assign( + __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->rc = rc; + ), + + TP_printk("mr.id=%u: rc=%d", + __entry->mr_id, __entry->rc + ) +); + +TRACE_EVENT(xprtrdma_frwr_dereg, + TP_PROTO( + const struct rpcrdma_mr *mr, + int rc + ), + + TP_ARGS(mr, rc), + + TP_STRUCT__entry( + __field(u32, mr_id) + __field(int, 
nents) + __field(u32, handle) + __field(u32, length) + __field(u64, offset) + __field(u32, dir) + __field(int, rc) + ), + + TP_fast_assign( + __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->nents = mr->mr_nents; + __entry->handle = mr->mr_handle; + __entry->length = mr->mr_length; + __entry->offset = mr->mr_offset; + __entry->dir = mr->mr_dir; + __entry->rc = rc; + ), + + TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d", + __entry->mr_id, __entry->nents, __entry->length, + (unsigned long long)__entry->offset, __entry->handle, + xprtrdma_show_direction(__entry->dir), + __entry->rc + ) +); + +TRACE_EVENT(xprtrdma_frwr_sgerr, + TP_PROTO( + const struct rpcrdma_mr *mr, + int sg_nents + ), + + TP_ARGS(mr, sg_nents), + + TP_STRUCT__entry( + __field(u32, mr_id) + __field(u64, addr) + __field(u32, dir) + __field(int, nents) + ), + + TP_fast_assign( + __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->addr = mr->mr_sg->dma_address; + __entry->dir = mr->mr_dir; + __entry->nents = sg_nents; + ), + + TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d", + __entry->mr_id, __entry->addr, + xprtrdma_show_direction(__entry->dir), + __entry->nents + ) +); + +TRACE_EVENT(xprtrdma_frwr_maperr, + TP_PROTO( + const struct rpcrdma_mr *mr, + int num_mapped + ), + + TP_ARGS(mr, num_mapped), + + TP_STRUCT__entry( + __field(u32, mr_id) + __field(u64, addr) + __field(u32, dir) + __field(int, num_mapped) + __field(int, nents) + ), + + TP_fast_assign( + __entry->mr_id = mr->frwr.fr_mr->res.id; + __entry->addr = mr->mr_sg->dma_address; + __entry->dir = mr->mr_dir; + __entry->num_mapped = num_mapped; + __entry->nents = mr->mr_nents; + ), + + TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d", + __entry->mr_id, __entry->addr, + xprtrdma_show_direction(__entry->dir), + __entry->num_mapped, __entry->nents + ) +); + +DEFINE_MR_EVENT(localinv); +DEFINE_MR_EVENT(map); +DEFINE_MR_EVENT(unmap); +DEFINE_MR_EVENT(reminv); +DEFINE_MR_EVENT(recycle); + +TRACE_EVENT(xprtrdma_dma_maperr, + TP_PROTO( + u64 addr + ), + + TP_ARGS(addr), + + TP_STRUCT__entry( + __field(u64, addr) + ), + + TP_fast_assign( + __entry->addr = addr; + ), + + TP_printk("dma addr=0x%llx\n", __entry->addr) +); /** ** Reply events @@ -730,66 +1131,32 @@ TRACE_EVENT(xprtrdma_fixup, TP_PROTO( const struct rpc_rqst *rqst, - int len, - int hdrlen + unsigned long fixup ), - TP_ARGS(rqst, len, hdrlen), + TP_ARGS(rqst, fixup), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(const void *, base) - __field(int, len) - __field(int, hdrlen) + __field(unsigned long, fixup) + __field(size_t, headlen) + __field(unsigned int, pagelen) + __field(size_t, taillen) ), TP_fast_assign( __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __entry->base = rqst->rq_rcv_buf.head[0].iov_base; - __entry->len = len; - __entry->hdrlen = hdrlen; + __entry->fixup = fixup; + __entry->headlen = rqst->rq_rcv_buf.head[0].iov_len; + __entry->pagelen = rqst->rq_rcv_buf.page_len; + __entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len; ), - TP_printk("task:%u@%u base=%p len=%d hdrlen=%d", - __entry->task_id, __entry->client_id, - __entry->base, __entry->len, __entry->hdrlen - ) -); - -TRACE_EVENT(xprtrdma_fixup_pg, - TP_PROTO( - const struct rpc_rqst *rqst, - int pageno, - const void *pos, - int len, - int curlen - ), - - TP_ARGS(rqst, pageno, pos, len, curlen), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(const void *, pos) - 
__field(int, pageno) - __field(int, len) - __field(int, curlen) - ), - - TP_fast_assign( - __entry->task_id = rqst->rq_task->tk_pid; - __entry->client_id = rqst->rq_task->tk_client->cl_clid; - __entry->pos = pos; - __entry->pageno = pageno; - __entry->len = len; - __entry->curlen = curlen; - ), - - TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d", - __entry->task_id, __entry->client_id, - __entry->pageno, __entry->pos, __entry->len, __entry->curlen + TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu", + __entry->task_id, __entry->client_id, __entry->fixup, + __entry->headlen, __entry->pagelen, __entry->taillen ) ); @@ -817,68 +1184,6 @@ TP_printk("%u@0x%016llx:0x%08x", __entry->length, (unsigned long long)__entry->offset, __entry->handle - ) -); - -/** - ** Allocation/release of rpcrdma_reqs and rpcrdma_reps - **/ - -TRACE_EVENT(xprtrdma_allocate, - TP_PROTO( - const struct rpc_task *task, - const struct rpcrdma_req *req - ), - - TP_ARGS(task, req), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(const void *, req) - __field(size_t, callsize) - __field(size_t, rcvsize) - ), - - TP_fast_assign( - __entry->task_id = task->tk_pid; - __entry->client_id = task->tk_client->cl_clid; - __entry->req = req; - __entry->callsize = task->tk_rqstp->rq_callsize; - __entry->rcvsize = task->tk_rqstp->rq_rcvsize; - ), - - TP_printk("task:%u@%u req=%p (%zu, %zu)", - __entry->task_id, __entry->client_id, - __entry->req, __entry->callsize, __entry->rcvsize - ) -); - -TRACE_EVENT(xprtrdma_rpc_done, - TP_PROTO( - const struct rpc_task *task, - const struct rpcrdma_req *req - ), - - TP_ARGS(task, req), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(const void *, req) - __field(const void *, rep) - ), - - TP_fast_assign( - __entry->task_id = task->tk_pid; - __entry->client_id = task->tk_client->cl_clid; - __entry->req = req; - __entry->rep = req->rl_reply; - ), - - TP_printk("task:%u@%u req=%p rep=%p", - __entry->task_id, __entry->client_id, - __entry->req, __entry->rep ) ); @@ -949,38 +1254,42 @@ ** Server-side RPC/RDMA events **/ -DECLARE_EVENT_CLASS(svcrdma_xprt_event, +DECLARE_EVENT_CLASS(svcrdma_accept_class, TP_PROTO( - const struct svc_xprt *xprt + const struct svcxprt_rdma *rdma, + long status ), - TP_ARGS(xprt), + TP_ARGS(rdma, status), TP_STRUCT__entry( - __field(const void *, xprt) - __string(addr, xprt->xpt_remotebuf) + __field(long, status) + __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( - __entry->xprt = xprt; - __assign_str(addr, xprt->xpt_remotebuf); + __entry->status = status; + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s", - __entry->xprt, __get_str(addr) + TP_printk("addr=%s status=%ld", + __get_str(addr), __entry->status ) ); -#define DEFINE_XPRT_EVENT(name) \ - DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \ - TP_PROTO( \ - const struct svc_xprt *xprt \ - ), \ - TP_ARGS(xprt)) +#define DEFINE_ACCEPT_EVENT(name) \ + DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \ + TP_PROTO( \ + const struct svcxprt_rdma *rdma, \ + long status \ + ), \ + TP_ARGS(rdma, status)) -DEFINE_XPRT_EVENT(accept); -DEFINE_XPRT_EVENT(fail); -DEFINE_XPRT_EVENT(free); +DEFINE_ACCEPT_EVENT(pd); +DEFINE_ACCEPT_EVENT(qp); +DEFINE_ACCEPT_EVENT(fabric); +DEFINE_ACCEPT_EVENT(initdepth); +DEFINE_ACCEPT_EVENT(accept); TRACE_DEFINE_ENUM(RDMA_MSG); TRACE_DEFINE_ENUM(RDMA_NOMSG); @@ -998,13 +1307,16 @@ TRACE_EVENT(svcrdma_decode_rqst, TP_PROTO( + const 
struct svc_rdma_recv_ctxt *ctxt, __be32 *p, unsigned int hdrlen ), - TP_ARGS(p, hdrlen), + TP_ARGS(ctxt, p, hdrlen), TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) __field(u32, xid) __field(u32, vers) __field(u32, proc) @@ -1013,6 +1325,8 @@ ), TP_fast_assign( + __entry->cq_id = ctxt->rc_cid.ci_queue_id; + __entry->completion_id = ctxt->rc_cid.ci_completion_id; __entry->xid = be32_to_cpup(p++); __entry->vers = be32_to_cpup(p++); __entry->credits = be32_to_cpup(p++); @@ -1020,37 +1334,48 @@ __entry->hdrlen = hdrlen; ), - TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u", + TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u", + __entry->cq_id, __entry->completion_id, __entry->xid, __entry->vers, __entry->credits, show_rpcrdma_proc(__entry->proc), __entry->hdrlen) ); -TRACE_EVENT(svcrdma_decode_short, +TRACE_EVENT(svcrdma_decode_short_err, TP_PROTO( + const struct svc_rdma_recv_ctxt *ctxt, unsigned int hdrlen ), - TP_ARGS(hdrlen), + TP_ARGS(ctxt, hdrlen), TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) __field(unsigned int, hdrlen) ), TP_fast_assign( + __entry->cq_id = ctxt->rc_cid.ci_queue_id; + __entry->completion_id = ctxt->rc_cid.ci_completion_id; __entry->hdrlen = hdrlen; ), - TP_printk("hdrlen=%u", __entry->hdrlen) + TP_printk("cq.id=%u cid=%d hdrlen=%u", + __entry->cq_id, __entry->completion_id, + __entry->hdrlen) ); DECLARE_EVENT_CLASS(svcrdma_badreq_event, TP_PROTO( + const struct svc_rdma_recv_ctxt *ctxt, __be32 *p ), - TP_ARGS(p), + TP_ARGS(ctxt, p), TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) __field(u32, xid) __field(u32, vers) __field(u32, proc) @@ -1058,22 +1383,27 @@ ), TP_fast_assign( + __entry->cq_id = ctxt->rc_cid.ci_queue_id; + __entry->completion_id = ctxt->rc_cid.ci_completion_id; __entry->xid = be32_to_cpup(p++); __entry->vers = be32_to_cpup(p++); __entry->credits = be32_to_cpup(p++); __entry->proc = be32_to_cpup(p); ), - TP_printk("xid=0x%08x vers=%u credits=%u proc=%u", + TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u", + __entry->cq_id, __entry->completion_id, __entry->xid, __entry->vers, __entry->credits, __entry->proc) ); #define DEFINE_BADREQ_EVENT(name) \ - DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\ + DEFINE_EVENT(svcrdma_badreq_event, \ + svcrdma_decode_##name##_err, \ TP_PROTO( \ + const struct svc_rdma_recv_ctxt *ctxt, \ __be32 *p \ ), \ - TP_ARGS(p)) + TP_ARGS(ctxt, p)) DEFINE_BADREQ_EVENT(badvers); DEFINE_BADREQ_EVENT(drop); @@ -1108,7 +1438,7 @@ ); #define DEFINE_SEGMENT_EVENT(name) \ - DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\ + DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\ TP_PROTO( \ u32 handle, \ u32 length, \ @@ -1116,8 +1446,11 @@ ), \ TP_ARGS(handle, length, offset)) -DEFINE_SEGMENT_EVENT(rseg); -DEFINE_SEGMENT_EVENT(wseg); +DEFINE_SEGMENT_EVENT(decode_wseg); +DEFINE_SEGMENT_EVENT(encode_rseg); +DEFINE_SEGMENT_EVENT(send_rseg); +DEFINE_SEGMENT_EVENT(encode_wseg); +DEFINE_SEGMENT_EVENT(send_wseg); DECLARE_EVENT_CLASS(svcrdma_chunk_event, TP_PROTO( @@ -1140,17 +1473,19 @@ ); #define DEFINE_CHUNK_EVENT(name) \ - DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\ + DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name, \ TP_PROTO( \ u32 length \ ), \ TP_ARGS(length)) -DEFINE_CHUNK_EVENT(pzr); -DEFINE_CHUNK_EVENT(write); -DEFINE_CHUNK_EVENT(reply); +DEFINE_CHUNK_EVENT(send_pzr); +DEFINE_CHUNK_EVENT(encode_write_chunk); +DEFINE_CHUNK_EVENT(send_write_chunk); 
+DEFINE_CHUNK_EVENT(encode_read_chunk); +DEFINE_CHUNK_EVENT(send_reply_chunk); -TRACE_EVENT(svcrdma_encode_read, +TRACE_EVENT(svcrdma_send_read_chunk, TP_PROTO( u32 length, u32 position @@ -1207,57 +1542,180 @@ ** Server-side RDMA API events **/ -TRACE_EVENT(svcrdma_dma_map_page, +DECLARE_EVENT_CLASS(svcrdma_dma_map_class, TP_PROTO( const struct svcxprt_rdma *rdma, - const void *page + u64 dma_addr, + u32 length ), - TP_ARGS(rdma, page), + TP_ARGS(rdma, dma_addr, length), TP_STRUCT__entry( - __field(const void *, page); + __field(u64, dma_addr) + __field(u32, length) __string(device, rdma->sc_cm_id->device->name) __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( - __entry->page = page; + __entry->dma_addr = dma_addr; + __entry->length = length; __assign_str(device, rdma->sc_cm_id->device->name); __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("addr=%s device=%s page=%p", - __get_str(addr), __get_str(device), __entry->page + TP_printk("addr=%s device=%s dma_addr=%llu length=%u", + __get_str(addr), __get_str(device), + __entry->dma_addr, __entry->length ) ); -TRACE_EVENT(svcrdma_dma_map_rwctx, +#define DEFINE_SVC_DMA_EVENT(name) \ + DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \ + TP_PROTO( \ + const struct svcxprt_rdma *rdma,\ + u64 dma_addr, \ + u32 length \ + ), \ + TP_ARGS(rdma, dma_addr, length)) + +DEFINE_SVC_DMA_EVENT(dma_map_page); +DEFINE_SVC_DMA_EVENT(dma_unmap_page); + +TRACE_EVENT(svcrdma_dma_map_rw_err, TP_PROTO( const struct svcxprt_rdma *rdma, + unsigned int nents, int status ), - TP_ARGS(rdma, status), + TP_ARGS(rdma, nents, status), TP_STRUCT__entry( __field(int, status) + __field(unsigned int, nents) __string(device, rdma->sc_cm_id->device->name) __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( __entry->status = status; + __entry->nents = nents; __assign_str(device, rdma->sc_cm_id->device->name); __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("addr=%s device=%s status=%d", - __get_str(addr), __get_str(device), __entry->status + TP_printk("addr=%s device=%s nents=%u status=%d", + __get_str(addr), __get_str(device), __entry->nents, + __entry->status ) ); -TRACE_EVENT(svcrdma_send_failed, +TRACE_EVENT(svcrdma_no_rwctx_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + unsigned int num_sges + ), + + TP_ARGS(rdma, num_sges), + + TP_STRUCT__entry( + __field(unsigned int, num_sges) + __string(device, rdma->sc_cm_id->device->name) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->num_sges = num_sges; + __assign_str(device, rdma->sc_cm_id->device->name); + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s device=%s num_sges=%d", + __get_str(addr), __get_str(device), __entry->num_sges + ) +); + +TRACE_EVENT(svcrdma_page_overrun_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + const struct svc_rqst *rqst, + unsigned int pageno + ), + + TP_ARGS(rdma, rqst, pageno), + + TP_STRUCT__entry( + __field(unsigned int, pageno) + __field(u32, xid) + __string(device, rdma->sc_cm_id->device->name) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->pageno = pageno; + __entry->xid = __be32_to_cpu(rqst->rq_xid); + __assign_str(device, rdma->sc_cm_id->device->name); + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr), + __get_str(device), __entry->xid, __entry->pageno + ) +); + +TRACE_EVENT(svcrdma_small_wrch_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + 
unsigned int remaining, + unsigned int seg_no, + unsigned int num_segs + ), + + TP_ARGS(rdma, remaining, seg_no, num_segs), + + TP_STRUCT__entry( + __field(unsigned int, remaining) + __field(unsigned int, seg_no) + __field(unsigned int, num_segs) + __string(device, rdma->sc_cm_id->device->name) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->remaining = remaining; + __entry->seg_no = seg_no; + __entry->num_segs = num_segs; + __assign_str(device, rdma->sc_cm_id->device->name); + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u", + __get_str(addr), __get_str(device), __entry->remaining, + __entry->seg_no, __entry->num_segs + ) +); + +TRACE_EVENT(svcrdma_send_pullup, + TP_PROTO( + unsigned int len + ), + + TP_ARGS(len), + + TP_STRUCT__entry( + __field(unsigned int, len) + ), + + TP_fast_assign( + __entry->len = len; + ), + + TP_printk("len=%u", __entry->len) +); + +TRACE_EVENT(svcrdma_send_err, TP_PROTO( const struct svc_rqst *rqst, int status @@ -1268,195 +1726,127 @@ TP_STRUCT__entry( __field(int, status) __field(u32, xid) - __field(const void *, xprt) __string(addr, rqst->rq_xprt->xpt_remotebuf) ), TP_fast_assign( __entry->status = status; __entry->xid = __be32_to_cpu(rqst->rq_xid); - __entry->xprt = rqst->rq_xprt; __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s xid=0x%08x status=%d", - __entry->xprt, __get_str(addr), + TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr), __entry->xid, __entry->status ) ); -DECLARE_EVENT_CLASS(svcrdma_sendcomp_event, - TP_PROTO( - const struct ib_wc *wc - ), - - TP_ARGS(wc), - - TP_STRUCT__entry( - __field(const void *, cqe) - __field(unsigned int, status) - __field(unsigned int, vendor_err) - ), - - TP_fast_assign( - __entry->cqe = wc->wr_cqe; - __entry->status = wc->status; - if (wc->status) - __entry->vendor_err = wc->vendor_err; - else - __entry->vendor_err = 0; - ), - - TP_printk("cqe=%p status=%s (%u/0x%x)", - __entry->cqe, rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err - ) -); - -#define DEFINE_SENDCOMP_EVENT(name) \ - DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \ - TP_PROTO( \ - const struct ib_wc *wc \ - ), \ - TP_ARGS(wc)) - TRACE_EVENT(svcrdma_post_send, TP_PROTO( - const struct ib_send_wr *wr + const struct svc_rdma_send_ctxt *ctxt ), - TP_ARGS(wr), + TP_ARGS(ctxt), TP_STRUCT__entry( - __field(const void *, cqe) + __field(u32, cq_id) + __field(int, completion_id) __field(unsigned int, num_sge) __field(u32, inv_rkey) ), TP_fast_assign( - __entry->cqe = wr->wr_cqe; + const struct ib_send_wr *wr = &ctxt->sc_send_wr; + + __entry->cq_id = ctxt->sc_cid.ci_queue_id; + __entry->completion_id = ctxt->sc_cid.ci_completion_id; __entry->num_sge = wr->num_sge; __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? 
wr->ex.invalidate_rkey : 0; ), - TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x", - __entry->cqe, __entry->num_sge, - __entry->inv_rkey + TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x", + __entry->cq_id, __entry->completion_id, + __entry->num_sge, __entry->inv_rkey ) ); -DEFINE_SENDCOMP_EVENT(send); +DEFINE_COMPLETION_EVENT(svcrdma_wc_send); TRACE_EVENT(svcrdma_post_recv, TP_PROTO( - const struct ib_recv_wr *wr, + const struct svc_rdma_recv_ctxt *ctxt + ), + + TP_ARGS(ctxt), + + TP_STRUCT__entry( + __field(u32, cq_id) + __field(int, completion_id) + ), + + TP_fast_assign( + __entry->cq_id = ctxt->rc_cid.ci_queue_id; + __entry->completion_id = ctxt->rc_cid.ci_completion_id; + ), + + TP_printk("cq.id=%d cid=%d", + __entry->cq_id, __entry->completion_id + ) +); + +DEFINE_COMPLETION_EVENT(svcrdma_wc_receive); + +TRACE_EVENT(svcrdma_rq_post_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, int status ), - TP_ARGS(wr, status), + TP_ARGS(rdma, status), TP_STRUCT__entry( - __field(const void *, cqe) __field(int, status) + __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( - __entry->cqe = wr->wr_cqe; __entry->status = status; + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("cqe=%p status=%d", - __entry->cqe, __entry->status + TP_printk("addr=%s status=%d", + __get_str(addr), __entry->status ) ); -TRACE_EVENT(svcrdma_wc_receive, +TRACE_EVENT(svcrdma_post_chunk, TP_PROTO( - const struct ib_wc *wc - ), - - TP_ARGS(wc), - - TP_STRUCT__entry( - __field(const void *, cqe) - __field(u32, byte_len) - __field(unsigned int, status) - __field(u32, vendor_err) - ), - - TP_fast_assign( - __entry->cqe = wc->wr_cqe; - __entry->status = wc->status; - if (wc->status) { - __entry->byte_len = 0; - __entry->vendor_err = wc->vendor_err; - } else { - __entry->byte_len = wc->byte_len; - __entry->vendor_err = 0; - } - ), - - TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)", - __entry->cqe, __entry->byte_len, - rdma_show_wc_status(__entry->status), - __entry->status, __entry->vendor_err - ) -); - -TRACE_EVENT(svcrdma_post_rw, - TP_PROTO( - const void *cqe, + const struct rpc_rdma_cid *cid, int sqecount ), - TP_ARGS(cqe, sqecount), + TP_ARGS(cid, sqecount), TP_STRUCT__entry( - __field(const void *, cqe) + __field(u32, cq_id) + __field(int, completion_id) __field(int, sqecount) ), TP_fast_assign( - __entry->cqe = cqe; + __entry->cq_id = cid->ci_queue_id; + __entry->completion_id = cid->ci_completion_id; __entry->sqecount = sqecount; ), - TP_printk("cqe=%p sqecount=%d", - __entry->cqe, __entry->sqecount + TP_printk("cq.id=%u cid=%d sqecount=%d", + __entry->cq_id, __entry->completion_id, + __entry->sqecount ) ); -DEFINE_SENDCOMP_EVENT(read); -DEFINE_SENDCOMP_EVENT(write); - -TRACE_EVENT(svcrdma_cm_event, - TP_PROTO( - const struct rdma_cm_event *event, - const struct sockaddr *sap - ), - - TP_ARGS(event, sap), - - TP_STRUCT__entry( - __field(unsigned int, event) - __field(int, status) - __array(__u8, addr, INET6_ADDRSTRLEN + 10) - ), - - TP_fast_assign( - __entry->event = event->event; - __entry->status = event->status; - snprintf(__entry->addr, sizeof(__entry->addr) - 1, - "%pISpc", sap); - ), - - TP_printk("addr=%s event=%s (%u/%d)", - __entry->addr, - rdma_show_cm_event(__entry->event), - __entry->event, __entry->status - ) -); +DEFINE_COMPLETION_EVENT(svcrdma_wc_read); +DEFINE_COMPLETION_EVENT(svcrdma_wc_write); TRACE_EVENT(svcrdma_qp_error, TP_PROTO( -- Gitblit v1.6.2
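
For context, a tracepoint declared through the DEFINE_COMPLETION_EVENT() wrapper added in this patch is fired from driver code via the generated trace_<name>() call. The sketch below is illustrative only and is not part of the patch: the handler name and the way the rpc_rdma_cid is obtained are assumptions, while the trace call signature follows directly from rpcrdma_completion_class (a struct ib_wc plus a struct rpc_rdma_cid).

/* Illustrative sketch, not part of the patch: emitting the
 * svcrdma_wc_send completion event from a hypothetical send
 * completion handler.  The ci_queue_id/ci_completion_id fields
 * come from struct rpc_rdma_cid as used by the event class above.
 */
#define CREATE_TRACE_POINTS		/* in exactly one .c file */
#include <trace/events/rpcrdma.h>	/* plus the sunrpc/rdma private
					 * headers this trace header
					 * relies on (e.g. svc_rdma.h) */

static void example_svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	/* In the real code the cid lives in the per-send context that
	 * was posted with the WR; a local one is filled in here only
	 * to keep the example self-contained.
	 */
	struct rpc_rdma_cid cid = {
		.ci_queue_id	  = 0,	/* normally the CQ's resource id */
		.ci_completion_id = 0,	/* normally a per-context counter */
	};

	/* Generated by DEFINE_COMPLETION_EVENT(svcrdma_wc_send) above */
	trace_svcrdma_wc_send(wc, &cid);
}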