1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/tracepoint.h>
14 #include <trace/events/rdma.h>
15 
16 /**
17  ** Event classes
18  **/
19 
/*
 * Event class for reply-processing trace points. Captures the rep and
 * its owning transport, plus the XID, version, and procedure values
 * carried in the rep (rr_xid/rr_vers/rr_proc, converted from
 * wire byte order).
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_event,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(const void *, r_xprt)
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
	),

	TP_fast_assign(
		__entry->rep = rep;
		__entry->r_xprt = rep->rr_rxprt;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
	),

	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
		__entry->r_xprt, __entry->xid, __entry->rep,
		__entry->version, __entry->proc
	)
);

/* Instantiate one trace point from the xprtrdma_reply_event class. */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_event, name,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))
55 
/*
 * Event class for transport-scoped trace points: records the transport
 * pointer along with the peer's address and port strings.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

/* Instantiate one trace point from the xprtrdma_rxprt class. */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
86 
/*
 * Event class for Read chunk registration. Records the RPC task, the
 * chunk position, the MR's sg count/handle/length/offset, and the
 * remaining segment count. The "(more)"/"(last)" annotation reflects
 * whether mr_nents covered fewer SG entries than nsegs requested.
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, mr)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr = mr;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id, __entry->mr,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate one trace point from the xprtrdma_rdch_event class. */
#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, name,			\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
138 
/*
 * Event class for Write/Reply chunk registration. Same fields as the
 * Read chunk class, minus the chunk position (Write and Reply chunks
 * carry no position).
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, mr)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr = mr;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id, __entry->mr,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate one trace point from the xprtrdma_wrch_event class. */
#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, name,			\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
186 
/*
 * Export the FRWR state enum values so user space tools can decode
 * the symbolic names emitted by xprtrdma_show_frwr_state().
 */
TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
TRACE_DEFINE_ENUM(FRWR_IS_VALID);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);

#define xprtrdma_show_frwr_state(x)					\
		__print_symbolic(x,					\
				{ FRWR_IS_INVALID, "INVALID" },		\
				{ FRWR_IS_VALID, "VALID" },		\
				{ FRWR_FLUSHED_FR, "FLUSHED_FR" },	\
				{ FRWR_FLUSHED_LI, "FLUSHED_LI" })

/*
 * Event class for FRWR completion events. Recovers the owning MR via
 * container_of() and records the FRWR state plus the work completion
 * status (vendor_err is captured only when status is non-zero).
 */
DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpcrdma_frwr *frwr
	),

	TP_ARGS(wc, frwr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(unsigned int, state)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
		__entry->state = frwr->fr_state;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk(
		"mr=%p state=%s: %s (%u/0x%x)",
		__entry->mr, xprtrdma_show_frwr_state(__entry->state),
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate one trace point from the xprtrdma_frwr_done class. */
#define DEFINE_FRWR_DONE_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpcrdma_frwr *frwr	\
				),					\
				TP_ARGS(wc, frwr))
236 
/*
 * Event class for MR life-cycle trace points: records the MR pointer
 * and its handle/length/offset.
 */
DECLARE_EVENT_CLASS(xprtrdma_mr,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset,
		__entry->handle
	)
);

/* Instantiate one trace point from the xprtrdma_mr class. */
#define DEFINE_MR_EVENT(name) \
		DEFINE_EVENT(xprtrdma_mr, name, \
				TP_PROTO( \
					const struct rpcrdma_mr *mr \
				), \
				TP_ARGS(mr))
271 
/*
 * Event class for backchannel (callback) trace points: records the
 * rqst, the rpcrdma_req derived from it, that req's cached reply,
 * and the XID.
 */
DECLARE_EVENT_CLASS(xprtrdma_cb_event,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(const void *, rqst)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->rqst = rqst;
		__entry->req = rpcr_to_rdmar(rqst);
		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
		__entry->xid, __entry->rqst, __entry->req, __entry->rep
	)
);

/* Instantiate one trace point from the xprtrdma_cb_event class. */
#define DEFINE_CB_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_cb_event, name,			\
				TP_PROTO(				\
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(rqst))
304 
305 /**
306  ** Connection events
307  **/
308 
/*
 * Fires on an RDMA CM event upcall: records the transport, the CM
 * event code and status, and the peer's address/port.
 */
TRACE_EVENT(xprtrdma_conn_upcall,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		struct rdma_cm_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__entry->status = event->status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);
339 
/*
 * Fires when a transport disconnects: records the completion status
 * and the connection state sampled from rx_ep.rep_connected at the
 * time of the event ("still connected" only when that value is 1).
 */
TRACE_EVENT(xprtrdma_disconnect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int status
	),

	TP_ARGS(r_xprt, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, status)
		__field(int, connected)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->status = status;
		__entry->connected = r_xprt->rx_ep.rep_connected;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->status,
		__entry->connected == 1 ? "still " : "dis"
	)
);

/* Transport-scoped events sharing the xprtrdma_rxprt class. */
DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
379 
/*
 * Fires on an IB QP event: records the transport, the device name,
 * and the IB event code (also shown symbolically).
 */
TRACE_EVENT(xprtrdma_qp_error,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct ib_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__string(name, event->device->name)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__assign_str(name, event->device->name);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__get_str(name), rdma_show_ib_event(__entry->event),
		__entry->event
	)
);
410 
411 /**
412  ** Call events
413  **/
414 
/*
 * Fires after a batch of MRs is created for a transport: records how
 * many were created.
 */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
	),

	TP_printk("r_xprt=%p: created %u MRs",
		__entry->r_xprt, __entry->count
	)
);

/* Transport ran out of MRs (xprtrdma_rxprt class). */
DEFINE_RXPRT_EVENT(xprtrdma_nomrs);

/* Chunk registration events (rdch/wrch classes above). */
DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);
443 
/*
 * Export the chunk-type enum values so user space tools can decode
 * the symbolic names emitted by xprtrdma_show_chunktype().
 */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })

/*
 * Fires when an RPC Call is marshaled: records the transport header
 * length, the head/page/tail lengths of the send buffer, and the
 * chosen Read (rtype) and Write/Reply (wtype) chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned int hdrlen,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(rqst, hdrlen, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = hdrlen;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
500 
/*
 * Fires when a Send WR is posted: records the req, the SGE count of
 * the send context's WR, whether the WR was signaled, and the post
 * status.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(int, num_sge)
		__field(bool, signaled)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->req = req;
		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
				    IB_SEND_SIGNALED;
		__entry->status = status;
	),

	TP_printk("req=%p, %d SGEs%s, status=%d",
		__entry->req, __entry->num_sge,
		(__entry->signaled ? ", signaled" : ""),
		__entry->status
	)
);

/* Fires when a single Receive WR is posted: records only its cqe. */
TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct ib_cqe *cqe
	),

	TP_ARGS(cqe),

	TP_STRUCT__entry(
		__field(const void *, cqe)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
	),

	TP_printk("cqe=%p",
		__entry->cqe
	)
);
550 
/*
 * Fires after a batch of Receive WRs is posted: records how many new
 * receives were requested, the total posted count sampled from
 * rb_posted_receives, and the post return code.
 */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_buf.rb_posted_receives;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);
583 
584 /**
585  ** Completion events
586  **/
587 
/*
 * Fires on Send completion: records the owning req, the number of
 * pages unmapped by the send context, and the wc status (vendor_err
 * captured only on failure).
 */
TRACE_EVENT(xprtrdma_wc_send,
	TP_PROTO(
		const struct rpcrdma_sendctx *sc,
		const struct ib_wc *wc
	),

	TP_ARGS(sc, wc),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, unmap_count)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->req = sc->sc_req;
		__entry->unmap_count = sc->sc_unmap_count;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
		__entry->req, __entry->unmap_count,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/*
 * Fires on Receive completion: records the cqe and, on success, the
 * received byte count; on failure, byte_len is zeroed and vendor_err
 * is captured instead.
 */
TRACE_EVENT(xprtrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* FRWR completion events (xprtrdma_frwr_done class). */
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);

/* MR life-cycle events (xprtrdma_mr class). */
DEFINE_MR_EVENT(xprtrdma_localinv);
DEFINE_MR_EVENT(xprtrdma_dma_map);
DEFINE_MR_EVENT(xprtrdma_dma_unmap);
DEFINE_MR_EVENT(xprtrdma_remoteinv);
DEFINE_MR_EVENT(xprtrdma_recover_mr);
659 
660 /**
661  ** Reply events
662  **/
663 
/*
 * Fires when a reply is matched to its request: records the task,
 * the rep and req pointers, the rep's XID, and the credit grant.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		const struct rpcrdma_req *req,
		unsigned int credits
	),

	TP_ARGS(task, rep, req, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->req = req;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits, __entry->rep, __entry->req
	)
);
697 
/*
 * Fires when reply completion is deferred: records the task owning
 * the rep's matched rqst, the rep, and its XID.
 */
TRACE_EVENT(xprtrdma_defer_cmp,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->xid = be32_to_cpu(rep->rr_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

/* Reply sanity-check events (xprtrdma_reply_event class). */
DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
DEFINE_REPLY_EVENT(xprtrdma_reply_short);
DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
729 
/*
 * Fires while fixing up the receive buffer: records the rcv_buf head
 * base address plus the len and hdrlen values passed by the caller.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int len,
		int hdrlen
	),

	TP_ARGS(rqst, len, hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, base)
		__field(int, len)
		__field(int, hdrlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
		__entry->len = len;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->base, __entry->len, __entry->hdrlen
	)
);
760 
/*
 * Per-page variant of xprtrdma_fixup: records the page number, the
 * current position pointer, and the len/curlen counters supplied by
 * the caller.
 */
TRACE_EVENT(xprtrdma_fixup_pg,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int pageno,
		const void *pos,
		int len,
		int curlen
	),

	TP_ARGS(rqst, pageno, pos, len, curlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, pos)
		__field(int, pageno)
		__field(int, len)
		__field(int, curlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->pageno = pageno;
		__entry->len = len;
		__entry->curlen = curlen;
	),

	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
	)
);
795 
/*
 * Fires when a chunk segment is decoded: records its RDMA handle,
 * length, and offset (already in host byte order).
 */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
822 
823 /**
824  ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
825  **/
826 
/*
 * Fires when buffers are allocated for an RPC: records the req and
 * the call/receive buffer sizes from the task's rqst.
 */
TRACE_EVENT(xprtrdma_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(size_t, callsize)
		__field(size_t, rcvsize)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->callsize = task->tk_rqstp->rq_callsize;
		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
	),

	TP_printk("task:%u@%u req=%p (%zu, %zu)",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->callsize, __entry->rcvsize
	)
);
856 
/*
 * Fires when an RPC finishes: records the task, the req, and the
 * req's cached reply.
 */
TRACE_EVENT(xprtrdma_rpc_done,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->rep = req->rl_reply;
	),

	TP_printk("task:%u@%u req=%p rep=%p",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->rep
	)
);
884 
885 /**
886  ** Callback events
887  **/
888 
/*
 * Fires when backchannel resources are set up: records how many
 * callback requests were provisioned for the transport.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);

/* Backchannel call/reply events (xprtrdma_cb_event class). */
DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);
919 
/*
 * Fires when a rep cannot be returned to the pool: records the task,
 * the rqst's XID, and the leaked rep pointer.
 */
TRACE_EVENT(xprtrdma_leaked_rep,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rqst, rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->rep = rep;
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);
947 
948 /**
949  ** Server-side RPC/RDMA events
950  **/
951 
/*
 * Event class for server-side transport life-cycle trace points:
 * records the xprt pointer and its remote address string.
 */
DECLARE_EVENT_CLASS(svcrdma_xprt_event,
	TP_PROTO(
		const struct svc_xprt *xprt
	),

	TP_ARGS(xprt),

	TP_STRUCT__entry(
		__field(const void *, xprt)
		__string(addr, xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->xprt = xprt;
		__assign_str(addr, xprt->xpt_remotebuf);
	),

	TP_printk("xprt=%p addr=%s",
		__entry->xprt, __get_str(addr)
	)
);

/* Instantiate svcrdma_xprt_<name> from the svcrdma_xprt_event class. */
#define DEFINE_XPRT_EVENT(name)						\
		DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name,	\
				TP_PROTO(				\
					const struct svc_xprt *xprt	\
				),					\
				TP_ARGS(xprt))

DEFINE_XPRT_EVENT(accept);
DEFINE_XPRT_EVENT(fail);
DEFINE_XPRT_EVENT(free);
984 
/*
 * Export the RPC-over-RDMA procedure values so user space tools can
 * decode the symbolic names emitted by show_rpcrdma_proc().
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })

/*
 * Fires when a transport header is successfully decoded. The first
 * four words at @p are read in wire order: XID, version, credits,
 * then procedure.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1027 
/*
 * Fires when an incoming message is too short to decode: records only
 * the received header length.
 */
TRACE_EVENT(svcrdma_decode_short,
	TP_PROTO(
		unsigned int hdrlen
	),

	TP_ARGS(hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->hdrlen = hdrlen;
	),

	TP_printk("hdrlen=%u", __entry->hdrlen)
);
1045 
/*
 * Event class for malformed requests. Reads the first four header
 * words at @p (XID, version, credits, procedure) the same way as
 * svcrdma_decode_rqst, but prints the procedure numerically since it
 * may not be a known value.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		__be32 *p
	),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

/* Instantiate svcrdma_decode_<name> from the svcrdma_badreq_event class. */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
				TP_PROTO(				\
					__be32 *p			\
				),					\
				TP_ARGS(p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1082 
/*
 * Event class for encoding one chunk segment: records its RDMA
 * handle, length, and offset.
 */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

/* Instantiate svcrdma_encode_<name> from the svcrdma_segment_event class. */
#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(rseg);
DEFINE_SEGMENT_EVENT(wseg);
1121 
/*
 * Event class for encoding a whole chunk: records only its total
 * length.
 */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);

/* Instantiate svcrdma_encode_<name> from the svcrdma_chunk_event class. */
#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(pzr);
DEFINE_CHUNK_EVENT(write);
DEFINE_CHUNK_EVENT(reply);
1152 
/*
 * Fires when a Read chunk is encoded: like the chunk class, but also
 * records the chunk's position.
 */
TRACE_EVENT(svcrdma_encode_read,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);
1175 
/*
 * Event class for sending an RDMA_ERROR response: records the XID of
 * the failed request (converted from wire byte order).
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

/* Instantiate svcrdma_err_<name> from the svcrdma_error_event class. */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1205 
1206 /**
1207  ** Server-side RDMA API events
1208  **/
1209 
/*
 * Fires when DMA-mapping a page fails: records the page pointer, the
 * device name, and the remote address.
 */
TRACE_EVENT(svcrdma_dma_map_page,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const void *page
	),

	TP_ARGS(rdma, page),

	TP_STRUCT__entry(
		__field(const void *, page);
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->page = page;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s page=%p",
		__get_str(addr), __get_str(device), __entry->page
	)
);
1234 
/*
 * Fires when DMA-mapping an R/W context fails: records the status
 * code, the device name, and the remote address.
 */
TRACE_EVENT(svcrdma_dma_map_rwctx,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s status=%d",
		__get_str(addr), __get_str(device), __entry->status
	)
);
1259 
1260 TRACE_EVENT(svcrdma_send_failed,
1261 	TP_PROTO(
1262 		const struct svc_rqst *rqst,
1263 		int status
1264 	),
1265 
1266 	TP_ARGS(rqst, status),
1267 
1268 	TP_STRUCT__entry(
1269 		__field(int, status)
1270 		__field(u32, xid)
1271 		__field(const void *, xprt)
1272 		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1273 	),
1274 
1275 	TP_fast_assign(
1276 		__entry->status = status;
1277 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1278 		__entry->xprt = rqst->rq_xprt;
1279 		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1280 	),
1281 
1282 	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
1283 		__entry->xprt, __get_str(addr),
1284 		__entry->xid, __entry->status
1285 	)
1286 );
1287 
/*
 * Event class for Send-side work completions: captures the wr_cqe
 * pointer and completion status. vendor_err is recorded only when
 * the completion failed (it is zeroed on success, since the hardware
 * value is meaningful only for errors).
 */
DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cqe=%p status=%s (%u/0x%x)",
		__entry->cqe, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1315 
/*
 * Generator for Send-completion events: each use instantiates the
 * svcrdma_sendcomp_event class as a trace_svcrdma_wc_<name>
 * tracepoint taking an ib_wc.
 */
#define DEFINE_SENDCOMP_EVENT(name)					\
		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name,	\
				TP_PROTO(				\
					const struct ib_wc *wc		\
				),					\
				TP_ARGS(wc))
1322 
/*
 * Records a posted Send WR: its wr_cqe, the number of SGEs, and the
 * rkey being remotely invalidated. inv_rkey is zero unless the WR's
 * opcode is IB_WR_SEND_WITH_INV.
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct ib_send_wr *wr
	),

	TP_ARGS(wr),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x",
		__entry->cqe, __entry->num_sge,
		__entry->inv_rkey
	)
);
1348 
DEFINE_SENDCOMP_EVENT(send);	/* trace_svcrdma_wc_send */
1350 
/*
 * Records a posted Receive WR: its wr_cqe and the integer status
 * returned by the post operation.
 */
TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct ib_recv_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->status = status;
	),

	TP_printk("cqe=%p status=%d",
		__entry->cqe, __entry->status
	)
);
1373 
/*
 * Records a Receive completion: wr_cqe, status, and either the
 * received byte count (on success) or the vendor error code (on
 * failure) — whichever of the two is not meaningful is zeroed.
 */
TRACE_EVENT(svcrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1406 
/*
 * Records a posted RDMA Read/Write chain: the cqe that will signal
 * its completion and the number of SQEs consumed by the chain.
 */
TRACE_EVENT(svcrdma_post_rw,
	TP_PROTO(
		const void *cqe,
		int sqecount
	),

	TP_ARGS(cqe, sqecount),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
		__entry->sqecount = sqecount;
	),

	TP_printk("cqe=%p sqecount=%d",
		__entry->cqe, __entry->sqecount
	)
);
1429 
DEFINE_SENDCOMP_EVENT(read);	/* trace_svcrdma_wc_read */
DEFINE_SENDCOMP_EVENT(write);	/* trace_svcrdma_wc_write */
1432 
1433 TRACE_EVENT(svcrdma_cm_event,
1434 	TP_PROTO(
1435 		const struct rdma_cm_event *event,
1436 		const struct sockaddr *sap
1437 	),
1438 
1439 	TP_ARGS(event, sap),
1440 
1441 	TP_STRUCT__entry(
1442 		__field(unsigned int, event)
1443 		__field(int, status)
1444 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1445 	),
1446 
1447 	TP_fast_assign(
1448 		__entry->event = event->event;
1449 		__entry->status = event->status;
1450 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1451 			 "%pISpc", sap);
1452 	),
1453 
1454 	TP_printk("addr=%s event=%s (%u/%d)",
1455 		__entry->addr,
1456 		rdma_show_cm_event(__entry->event),
1457 		__entry->event, __entry->status
1458 	)
1459 );
1460 
1461 TRACE_EVENT(svcrdma_qp_error,
1462 	TP_PROTO(
1463 		const struct ib_event *event,
1464 		const struct sockaddr *sap
1465 	),
1466 
1467 	TP_ARGS(event, sap),
1468 
1469 	TP_STRUCT__entry(
1470 		__field(unsigned int, event)
1471 		__string(device, event->device->name)
1472 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1473 	),
1474 
1475 	TP_fast_assign(
1476 		__entry->event = event->event;
1477 		__assign_str(device, event->device->name);
1478 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1479 			 "%pISpc", sap);
1480 	),
1481 
1482 	TP_printk("addr=%s dev=%s event=%s (%u)",
1483 		__entry->addr, __get_str(device),
1484 		rdma_show_ib_event(__entry->event), __entry->event
1485 	)
1486 );
1487 
/*
 * Event class for Send Queue accounting: snapshots the available SQ
 * credit count (sc_sq_avail) against the queue depth (sc_sq_depth),
 * plus the peer address.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
1511 
/*
 * Generator for Send Queue events: each use instantiates the
 * svcrdma_sendqueue_event class as a trace_svcrdma_sq_<name>
 * tracepoint.
 */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))
1518 
DEFINE_SQ_EVENT(full);	/* trace_svcrdma_sq_full */
DEFINE_SQ_EVENT(retry);	/* trace_svcrdma_sq_retry */
1521 
/*
 * Records a Send Queue post failure: the failing status code along
 * with the SQ credit snapshot (sc_sq_avail / sc_sq_depth) and the
 * peer address.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
1549 
1550 #endif /* _TRACE_RPCRDMA_H */
1551 
1552 #include <trace/define_trace.h>
1553