xref: /linux/include/trace/events/rpcrdma.h (revision 2c92ca84)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 
18 #include <trace/misc/rdma.h>
19 #include <trace/misc/sunrpc.h>
20 
21 /**
22  ** Event classes
23  **/
24 
/*
 * rpcrdma_simple_cid_class - record only a completion ID.
 *
 * Captures the completion queue ID and completion ID carried in @cid.
 * No work-completion status is available at these call sites.
 * Instantiate events of this class with DEFINE_SIMPLE_CID_EVENT().
 */
25 DECLARE_EVENT_CLASS(rpcrdma_simple_cid_class,
26 	TP_PROTO(
27 		const struct rpc_rdma_cid *cid
28 	),
29 
30 	TP_ARGS(cid),
31 
32 	TP_STRUCT__entry(
33 		__field(u32, cq_id)
34 		__field(int, completion_id)
35 	),
36 
37 	TP_fast_assign(
38 		__entry->cq_id = cid->ci_queue_id;
39 		__entry->completion_id = cid->ci_completion_id;
40 	),
41 
	/* cq_id is a u32: print it with %u, matching the other CID classes */
42 	TP_printk("cq.id=%u cid=%d",
43 		__entry->cq_id, __entry->completion_id
44 	)
45 );
46 
47 #define DEFINE_SIMPLE_CID_EVENT(name)					\
48 		DEFINE_EVENT(rpcrdma_simple_cid_class, name,		\
49 				TP_PROTO(				\
50 					const struct rpc_rdma_cid *cid	\
51 				),					\
52 				TP_ARGS(cid)				\
53 		)
54 
/*
 * rpcrdma_completion_class - generic work-completion event.
 *
 * Records CQ/completion IDs plus the WC status; the vendor error code
 * is captured only when the completion failed (wc->status != 0).
 */
55 DECLARE_EVENT_CLASS(rpcrdma_completion_class,
56 	TP_PROTO(
57 		const struct ib_wc *wc,
58 		const struct rpc_rdma_cid *cid
59 	),
60 
61 	TP_ARGS(wc, cid),
62 
63 	TP_STRUCT__entry(
64 		__field(u32, cq_id)
65 		__field(int, completion_id)
66 		__field(unsigned long, status)
67 		__field(unsigned int, vendor_err)
68 	),
69 
70 	TP_fast_assign(
71 		__entry->cq_id = cid->ci_queue_id;
72 		__entry->completion_id = cid->ci_completion_id;
73 		__entry->status = wc->status;
74 		if (wc->status)
75 			__entry->vendor_err = wc->vendor_err;
76 		else
77 			__entry->vendor_err = 0;
78 	),
79 
80 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
81 		__entry->cq_id, __entry->completion_id,
82 		rdma_show_wc_status(__entry->status),
83 		__entry->status, __entry->vendor_err
84 	)
85 );
86 
87 #define DEFINE_COMPLETION_EVENT(name)					\
88 		DEFINE_EVENT(rpcrdma_completion_class, name,		\
89 				TP_PROTO(				\
90 					const struct ib_wc *wc,		\
91 					const struct rpc_rdma_cid *cid	\
92 				),					\
93 				TP_ARGS(wc, cid))
94 
/*
 * rpcrdma_send_flush_class - Send completion on an error/flush path.
 *
 * Unlike rpcrdma_completion_class, vendor_err is recorded
 * unconditionally because these events fire only on failure.
 */
95 DECLARE_EVENT_CLASS(rpcrdma_send_flush_class,
96 	TP_PROTO(
97 		const struct ib_wc *wc,
98 		const struct rpc_rdma_cid *cid
99 	),
100 
101 	TP_ARGS(wc, cid),
102 
103 	TP_STRUCT__entry(
104 		__field(u32, cq_id)
105 		__field(int, completion_id)
106 		__field(unsigned long, status)
107 		__field(unsigned int, vendor_err)
108 	),
109 
110 	TP_fast_assign(
111 		__entry->cq_id = cid->ci_queue_id;
112 		__entry->completion_id = cid->ci_completion_id;
113 		__entry->status = wc->status;
114 		__entry->vendor_err = wc->vendor_err;
115 	),
116 
117 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
118 		__entry->cq_id, __entry->completion_id,
119 		rdma_show_wc_status(__entry->status),
120 		__entry->status, __entry->vendor_err
121 	)
122 );
123 
124 #define DEFINE_SEND_FLUSH_EVENT(name)					\
125 		DEFINE_EVENT(rpcrdma_send_flush_class, name,		\
126 				TP_PROTO(				\
127 					const struct ib_wc *wc,		\
128 					const struct rpc_rdma_cid *cid	\
129 				),					\
130 				TP_ARGS(wc, cid))
131 
/*
 * rpcrdma_mr_completion_class - completion of an MR-related WR
 * (FastReg / LocalInv).  Same capture rules as the generic completion
 * class; the completion ID is displayed as "mr.id=" in the output
 * (NOTE(review): presumably the cid is derived from the MR at these
 * call sites -- confirm against the callers).
 */
132 DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
133 	TP_PROTO(
134 		const struct ib_wc *wc,
135 		const struct rpc_rdma_cid *cid
136 	),
137 
138 	TP_ARGS(wc, cid),
139 
140 	TP_STRUCT__entry(
141 		__field(u32, cq_id)
142 		__field(int, completion_id)
143 		__field(unsigned long, status)
144 		__field(unsigned int, vendor_err)
145 	),
146 
147 	TP_fast_assign(
148 		__entry->cq_id = cid->ci_queue_id;
149 		__entry->completion_id = cid->ci_completion_id;
150 		__entry->status = wc->status;
151 		if (wc->status)
152 			__entry->vendor_err = wc->vendor_err;
153 		else
154 			__entry->vendor_err = 0;
155 	),
156 
157 	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
158 		__entry->cq_id, __entry->completion_id,
159 		rdma_show_wc_status(__entry->status),
160 		__entry->status, __entry->vendor_err
161 	)
162 );
163 
164 #define DEFINE_MR_COMPLETION_EVENT(name)				\
165 		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
166 				TP_PROTO(				\
167 					const struct ib_wc *wc,		\
168 					const struct rpc_rdma_cid *cid	\
169 				),					\
170 				TP_ARGS(wc, cid))
171 
/*
 * rpcrdma_receive_completion_class - Receive completion.
 *
 * wc->byte_len is valid only on success, so "received" is zeroed when
 * the WC carries an error status, and vendor_err is zeroed otherwise.
 */
172 DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
173 	TP_PROTO(
174 		const struct ib_wc *wc,
175 		const struct rpc_rdma_cid *cid
176 	),
177 
178 	TP_ARGS(wc, cid),
179 
180 	TP_STRUCT__entry(
181 		__field(u32, cq_id)
182 		__field(int, completion_id)
183 		__field(u32, received)
184 		__field(unsigned long, status)
185 		__field(unsigned int, vendor_err)
186 	),
187 
188 	TP_fast_assign(
189 		__entry->cq_id = cid->ci_queue_id;
190 		__entry->completion_id = cid->ci_completion_id;
191 		__entry->status = wc->status;
192 		if (wc->status) {
193 			__entry->received = 0;
194 			__entry->vendor_err = wc->vendor_err;
195 		} else {
196 			__entry->received = wc->byte_len;
197 			__entry->vendor_err = 0;
198 		}
199 	),
200 
201 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
202 		__entry->cq_id, __entry->completion_id,
203 		rdma_show_wc_status(__entry->status),
204 		__entry->status, __entry->vendor_err,
205 		__entry->received
206 	)
207 );
208 
209 #define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
210 		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
211 				TP_PROTO(				\
212 					const struct ib_wc *wc,		\
213 					const struct rpc_rdma_cid *cid	\
214 				),					\
215 				TP_ARGS(wc, cid))
216 
/*
 * rpcrdma_receive_success_class - successful Receive only: records the
 * byte count but no WC status (callers fire it on the success path).
 */
217 DECLARE_EVENT_CLASS(rpcrdma_receive_success_class,
218 	TP_PROTO(
219 		const struct ib_wc *wc,
220 		const struct rpc_rdma_cid *cid
221 	),
222 
223 	TP_ARGS(wc, cid),
224 
225 	TP_STRUCT__entry(
226 		__field(u32, cq_id)
227 		__field(int, completion_id)
228 		__field(u32, received)
229 	),
230 
231 	TP_fast_assign(
232 		__entry->cq_id = cid->ci_queue_id;
233 		__entry->completion_id = cid->ci_completion_id;
234 		__entry->received = wc->byte_len;
235 	),
236 
237 	TP_printk("cq.id=%u cid=%d received=%u",
238 		__entry->cq_id, __entry->completion_id,
239 		__entry->received
240 	)
241 );
242 
243 #define DEFINE_RECEIVE_SUCCESS_EVENT(name)				\
244 		DEFINE_EVENT(rpcrdma_receive_success_class, name,	\
245 				TP_PROTO(				\
246 					const struct ib_wc *wc,		\
247 					const struct rpc_rdma_cid *cid	\
248 				),					\
249 				TP_ARGS(wc, cid))
250 
/*
 * rpcrdma_receive_flush_class - failed Receive: status and vendor_err
 * are recorded unconditionally (mirrors rpcrdma_send_flush_class).
 */
251 DECLARE_EVENT_CLASS(rpcrdma_receive_flush_class,
252 	TP_PROTO(
253 		const struct ib_wc *wc,
254 		const struct rpc_rdma_cid *cid
255 	),
256 
257 	TP_ARGS(wc, cid),
258 
259 	TP_STRUCT__entry(
260 		__field(u32, cq_id)
261 		__field(int, completion_id)
262 		__field(unsigned long, status)
263 		__field(unsigned int, vendor_err)
264 	),
265 
266 	TP_fast_assign(
267 		__entry->cq_id = cid->ci_queue_id;
268 		__entry->completion_id = cid->ci_completion_id;
269 		__entry->status = wc->status;
270 		__entry->vendor_err = wc->vendor_err;
271 	),
272 
273 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
274 		__entry->cq_id, __entry->completion_id,
275 		rdma_show_wc_status(__entry->status),
276 		__entry->status, __entry->vendor_err
277 	)
278 );
279 
280 #define DEFINE_RECEIVE_FLUSH_EVENT(name)				\
281 		DEFINE_EVENT(rpcrdma_receive_flush_class, name,		\
282 				TP_PROTO(				\
283 					const struct ib_wc *wc,		\
284 					const struct rpc_rdma_cid *cid	\
285 				),					\
286 				TP_ARGS(wc, cid))
287 
/*
 * xprtrdma_reply_class - malformed-reply events: peer address plus the
 * XID, RPC/RDMA version, and procedure pulled from the reply header.
 * DEFINE_REPLY_EVENT(foo) generates trace_xprtrdma_reply_foo_err().
 */
288 DECLARE_EVENT_CLASS(xprtrdma_reply_class,
289 	TP_PROTO(
290 		const struct rpcrdma_rep *rep
291 	),
292 
293 	TP_ARGS(rep),
294 
295 	TP_STRUCT__entry(
296 		__field(u32, xid)
297 		__field(u32, version)
298 		__field(u32, proc)
299 		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
300 		__string(port, rpcrdma_portstr(rep->rr_rxprt))
301 	),
302 
303 	TP_fast_assign(
304 		__entry->xid = be32_to_cpu(rep->rr_xid);
305 		__entry->version = be32_to_cpu(rep->rr_vers);
306 		__entry->proc = be32_to_cpu(rep->rr_proc);
307 		__assign_str(addr);
308 		__assign_str(port);
309 	),
310 
311 	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
312 		__get_str(addr), __get_str(port),
313 		__entry->xid, __entry->version, __entry->proc
314 	)
315 );
316 
317 #define DEFINE_REPLY_EVENT(name)					\
318 		DEFINE_EVENT(xprtrdma_reply_class,			\
319 				xprtrdma_reply_##name##_err,		\
320 				TP_PROTO(				\
321 					const struct rpcrdma_rep *rep	\
322 				),					\
323 				TP_ARGS(rep))
324 
/* xprtrdma_rxprt - transport-scoped event carrying only the peer address. */
325 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
326 	TP_PROTO(
327 		const struct rpcrdma_xprt *r_xprt
328 	),
329 
330 	TP_ARGS(r_xprt),
331 
332 	TP_STRUCT__entry(
333 		__string(addr, rpcrdma_addrstr(r_xprt))
334 		__string(port, rpcrdma_portstr(r_xprt))
335 	),
336 
337 	TP_fast_assign(
338 		__assign_str(addr);
339 		__assign_str(port);
340 	),
341 
342 	TP_printk("peer=[%s]:%s",
343 		__get_str(addr), __get_str(port)
344 	)
345 );
346 
347 #define DEFINE_RXPRT_EVENT(name)					\
348 		DEFINE_EVENT(xprtrdma_rxprt, name,			\
349 				TP_PROTO(				\
350 					const struct rpcrdma_xprt *r_xprt \
351 				),					\
352 				TP_ARGS(r_xprt))
353 
/*
 * xprtrdma_connect_class - connection state change: records the caller-
 * supplied return code and the endpoint's current re_connect_status.
 * DEFINE_CONN_EVENT(foo) generates trace_xprtrdma_foo().
 */
354 DECLARE_EVENT_CLASS(xprtrdma_connect_class,
355 	TP_PROTO(
356 		const struct rpcrdma_xprt *r_xprt,
357 		int rc
358 	),
359 
360 	TP_ARGS(r_xprt, rc),
361 
362 	TP_STRUCT__entry(
363 		__field(int, rc)
364 		__field(int, connect_status)
365 		__string(addr, rpcrdma_addrstr(r_xprt))
366 		__string(port, rpcrdma_portstr(r_xprt))
367 	),
368 
369 	TP_fast_assign(
370 		__entry->rc = rc;
371 		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
372 		__assign_str(addr);
373 		__assign_str(port);
374 	),
375 
376 	TP_printk("peer=[%s]:%s rc=%d connection status=%d",
377 		__get_str(addr), __get_str(port),
378 		__entry->rc, __entry->connect_status
379 	)
380 );
381 
382 #define DEFINE_CONN_EVENT(name)						\
383 		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
384 				TP_PROTO(				\
385 					const struct rpcrdma_xprt *r_xprt, \
386 					int rc				\
387 				),					\
388 				TP_ARGS(r_xprt, rc))
389 
/*
 * xprtrdma_rdch_event - Read chunk registration: records the task, the
 * chunk's XDR position, and the MR's geometry (nents, handle, length,
 * offset).  "(more)" vs "(last)" indicates whether additional segments
 * remain beyond this MR (nents < nsegs).
 */
390 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
391 	TP_PROTO(
392 		const struct rpc_task *task,
393 		unsigned int pos,
394 		struct rpcrdma_mr *mr,
395 		int nsegs
396 	),
397 
398 	TP_ARGS(task, pos, mr, nsegs),
399 
400 	TP_STRUCT__entry(
401 		__field(unsigned int, task_id)
402 		__field(unsigned int, client_id)
403 		__field(unsigned int, pos)
404 		__field(int, nents)
405 		__field(u32, handle)
406 		__field(u32, length)
407 		__field(u64, offset)
408 		__field(int, nsegs)
409 	),
410 
411 	TP_fast_assign(
412 		__entry->task_id = task->tk_pid;
413 		__entry->client_id = task->tk_client->cl_clid;
414 		__entry->pos = pos;
415 		__entry->nents = mr->mr_nents;
416 		__entry->handle = mr->mr_handle;
417 		__entry->length = mr->mr_length;
418 		__entry->offset = mr->mr_offset;
419 		__entry->nsegs = nsegs;
420 	),
421 
422 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
423 		  " pos=%u %u@0x%016llx:0x%08x (%s)",
424 		__entry->task_id, __entry->client_id,
425 		__entry->pos, __entry->length,
426 		(unsigned long long)__entry->offset, __entry->handle,
427 		__entry->nents < __entry->nsegs ? "more" : "last"
428 	)
429 );
430 
431 #define DEFINE_RDCH_EVENT(name)						\
432 		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
433 				TP_PROTO(				\
434 					const struct rpc_task *task,	\
435 					unsigned int pos,		\
436 					struct rpcrdma_mr *mr,		\
437 					int nsegs			\
438 				),					\
439 				TP_ARGS(task, pos, mr, nsegs))
440 
/*
 * xprtrdma_wrch_event - Write/Reply chunk registration.  Identical to
 * xprtrdma_rdch_event except that Write chunks carry no XDR position.
 */
441 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
442 	TP_PROTO(
443 		const struct rpc_task *task,
444 		struct rpcrdma_mr *mr,
445 		int nsegs
446 	),
447 
448 	TP_ARGS(task, mr, nsegs),
449 
450 	TP_STRUCT__entry(
451 		__field(unsigned int, task_id)
452 		__field(unsigned int, client_id)
453 		__field(int, nents)
454 		__field(u32, handle)
455 		__field(u32, length)
456 		__field(u64, offset)
457 		__field(int, nsegs)
458 	),
459 
460 	TP_fast_assign(
461 		__entry->task_id = task->tk_pid;
462 		__entry->client_id = task->tk_client->cl_clid;
463 		__entry->nents = mr->mr_nents;
464 		__entry->handle = mr->mr_handle;
465 		__entry->length = mr->mr_length;
466 		__entry->offset = mr->mr_offset;
467 		__entry->nsegs = nsegs;
468 	),
469 
470 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
471 		  " %u@0x%016llx:0x%08x (%s)",
472 		__entry->task_id, __entry->client_id,
473 		__entry->length, (unsigned long long)__entry->offset,
474 		__entry->handle,
475 		__entry->nents < __entry->nsegs ? "more" : "last"
476 	)
477 );
478 
479 #define DEFINE_WRCH_EVENT(name)						\
480 		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
481 				TP_PROTO(				\
482 					const struct rpc_task *task,	\
483 					struct rpcrdma_mr *mr,		\
484 					int nsegs			\
485 				),					\
486 				TP_ARGS(task, mr, nsegs))
487 
/* Export the DMA direction values so user space can decode them. */
488 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
489 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
490 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
491 TRACE_DEFINE_ENUM(DMA_NONE);
492 
493 #define xprtrdma_show_direction(x)					\
494 		__print_symbolic(x,					\
495 				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
496 				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
497 				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
498 				{ DMA_NONE, "NONE" })
499 
/*
 * xprtrdma_mr_class - MR life-cycle event with task context.
 *
 * The MR may not be attached to a request at the time of the event;
 * in that case task_id is recorded as 0 and client_id as -1.
 */
500 DECLARE_EVENT_CLASS(xprtrdma_mr_class,
501 	TP_PROTO(
502 		const struct rpcrdma_mr *mr
503 	),
504 
505 	TP_ARGS(mr),
506 
507 	TP_STRUCT__entry(
508 		__field(unsigned int, task_id)
509 		__field(unsigned int, client_id)
510 		__field(u32, mr_id)
511 		__field(int, nents)
512 		__field(u32, handle)
513 		__field(u32, length)
514 		__field(u64, offset)
515 		__field(u32, dir)
516 	),
517 
518 	TP_fast_assign(
519 		const struct rpcrdma_req *req = mr->mr_req;
520 
521 		if (req) {
522 			const struct rpc_task *task = req->rl_slot.rq_task;
523 
524 			__entry->task_id = task->tk_pid;
525 			__entry->client_id = task->tk_client->cl_clid;
526 		} else {
			/* MR not bound to a request: no task context */
527 			__entry->task_id = 0;
528 			__entry->client_id = -1;
529 		}
530 		__entry->mr_id  = mr->mr_ibmr->res.id;
531 		__entry->nents  = mr->mr_nents;
532 		__entry->handle = mr->mr_handle;
533 		__entry->length = mr->mr_length;
534 		__entry->offset = mr->mr_offset;
535 		__entry->dir    = mr->mr_dir;
536 	),
537 
538 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
539 		  " mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
540 		__entry->task_id, __entry->client_id,
541 		__entry->mr_id, __entry->nents, __entry->length,
542 		(unsigned long long)__entry->offset, __entry->handle,
543 		xprtrdma_show_direction(__entry->dir)
544 	)
545 );
546 
547 #define DEFINE_MR_EVENT(name)						\
548 		DEFINE_EVENT(xprtrdma_mr_class,				\
549 				xprtrdma_mr_##name,			\
550 				TP_PROTO(				\
551 					const struct rpcrdma_mr *mr	\
552 				),					\
553 				TP_ARGS(mr))
554 
/*
 * xprtrdma_anonymous_mr_class - MR event without task context, for
 * call sites where no rpc_task is available.  Same MR geometry fields
 * as xprtrdma_mr_class.
 */
555 DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
556 	TP_PROTO(
557 		const struct rpcrdma_mr *mr
558 	),
559 
560 	TP_ARGS(mr),
561 
562 	TP_STRUCT__entry(
563 		__field(u32, mr_id)
564 		__field(int, nents)
565 		__field(u32, handle)
566 		__field(u32, length)
567 		__field(u64, offset)
568 		__field(u32, dir)
569 	),
570 
571 	TP_fast_assign(
572 		__entry->mr_id  = mr->mr_ibmr->res.id;
573 		__entry->nents  = mr->mr_nents;
574 		__entry->handle = mr->mr_handle;
575 		__entry->length = mr->mr_length;
576 		__entry->offset = mr->mr_offset;
577 		__entry->dir    = mr->mr_dir;
578 	),
579 
580 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
581 		__entry->mr_id, __entry->nents, __entry->length,
582 		(unsigned long long)__entry->offset, __entry->handle,
583 		xprtrdma_show_direction(__entry->dir)
584 	)
585 );
586 
587 #define DEFINE_ANON_MR_EVENT(name)					\
588 		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
589 				xprtrdma_mr_##name,			\
590 				TP_PROTO(				\
591 					const struct rpcrdma_mr *mr	\
592 				),					\
593 				TP_ARGS(mr))
594 
/*
 * xprtrdma_callback_class - per-request event recording the peer and
 * the request's XID.  DEFINE_CALLBACK_EVENT(foo) generates
 * trace_xprtrdma_cb_foo().
 */
595 DECLARE_EVENT_CLASS(xprtrdma_callback_class,
596 	TP_PROTO(
597 		const struct rpcrdma_xprt *r_xprt,
598 		const struct rpc_rqst *rqst
599 	),
600 
601 	TP_ARGS(r_xprt, rqst),
602 
603 	TP_STRUCT__entry(
604 		__field(u32, xid)
605 		__string(addr, rpcrdma_addrstr(r_xprt))
606 		__string(port, rpcrdma_portstr(r_xprt))
607 	),
608 
609 	TP_fast_assign(
610 		__entry->xid = be32_to_cpu(rqst->rq_xid);
611 		__assign_str(addr);
612 		__assign_str(port);
613 	),
614 
615 	TP_printk("peer=[%s]:%s xid=0x%08x",
616 		__get_str(addr), __get_str(port), __entry->xid
617 	)
618 );
619 
620 #define DEFINE_CALLBACK_EVENT(name)					\
621 		DEFINE_EVENT(xprtrdma_callback_class,			\
622 				xprtrdma_cb_##name,			\
623 				TP_PROTO(				\
624 					const struct rpcrdma_xprt *r_xprt, \
625 					const struct rpc_rqst *rqst	\
626 				),					\
627 				TP_ARGS(r_xprt, rqst))
628 
629 /**
630  ** Connection events
631  **/
632 
/*
 * xprtrdma_inline_thresh - fires when inline thresholds are set up:
 * records the negotiated send/recv inline sizes, the calculated
 * maximums, and both endpoint addresses (copied as raw sockaddr_in6-
 * sized blobs and rendered with %pISpc).
 */
633 TRACE_EVENT(xprtrdma_inline_thresh,
634 	TP_PROTO(
635 		const struct rpcrdma_ep *ep
636 	),
637 
638 	TP_ARGS(ep),
639 
640 	TP_STRUCT__entry(
641 		__field(unsigned int, inline_send)
642 		__field(unsigned int, inline_recv)
643 		__field(unsigned int, max_send)
644 		__field(unsigned int, max_recv)
645 		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
646 		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
647 	),
648 
649 	TP_fast_assign(
650 		const struct rdma_cm_id *id = ep->re_id;
651 
652 		__entry->inline_send = ep->re_inline_send;
653 		__entry->inline_recv = ep->re_inline_recv;
654 		__entry->max_send = ep->re_max_inline_send;
655 		__entry->max_recv = ep->re_max_inline_recv;
656 		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
657 		       sizeof(struct sockaddr_in6));
658 		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
659 		       sizeof(struct sockaddr_in6));
660 	),
661 
662 	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
663 		__entry->srcaddr, __entry->dstaddr,
664 		__entry->inline_send, __entry->inline_recv,
665 		__entry->max_send, __entry->max_recv
666 	)
667 );
668 
/* Connection state events built on xprtrdma_connect_class */
669 DEFINE_CONN_EVENT(connect);
670 DEFINE_CONN_EVENT(disconnect);
671 
672 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
673 
/*
 * xprtrdma_op_connect - a connect worker was scheduled; "delay" is the
 * caller-supplied delay (in jiffies, raw value is printed).
 */
674 TRACE_EVENT(xprtrdma_op_connect,
675 	TP_PROTO(
676 		const struct rpcrdma_xprt *r_xprt,
677 		unsigned long delay
678 	),
679 
680 	TP_ARGS(r_xprt, delay),
681 
682 	TP_STRUCT__entry(
683 		__field(unsigned long, delay)
684 		__string(addr, rpcrdma_addrstr(r_xprt))
685 		__string(port, rpcrdma_portstr(r_xprt))
686 	),
687 
688 	TP_fast_assign(
689 		__entry->delay = delay;
690 		__assign_str(addr);
691 		__assign_str(port);
692 	),
693 
694 	TP_printk("peer=[%s]:%s delay=%lu",
695 		__get_str(addr), __get_str(port), __entry->delay
696 	)
697 );
698 
699 
/*
 * xprtrdma_op_set_cto - connect/reconnect timeouts were updated.
 * Values are stored in jiffies and printed in seconds (divided by HZ).
 */
700 TRACE_EVENT(xprtrdma_op_set_cto,
701 	TP_PROTO(
702 		const struct rpcrdma_xprt *r_xprt,
703 		unsigned long connect,
704 		unsigned long reconnect
705 	),
706 
707 	TP_ARGS(r_xprt, connect, reconnect),
708 
709 	TP_STRUCT__entry(
710 		__field(unsigned long, connect)
711 		__field(unsigned long, reconnect)
712 		__string(addr, rpcrdma_addrstr(r_xprt))
713 		__string(port, rpcrdma_portstr(r_xprt))
714 	),
715 
716 	TP_fast_assign(
717 		__entry->connect = connect;
718 		__entry->reconnect = reconnect;
719 		__assign_str(addr);
720 		__assign_str(port);
721 	),
722 
723 	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
724 		__get_str(addr), __get_str(port),
725 		__entry->connect / HZ, __entry->reconnect / HZ
726 	)
727 );
728 
729 /**
730  ** Call events
731  **/
732 
/* xprtrdma_createmrs - a batch of @count MRs was allocated for the peer. */
733 TRACE_EVENT(xprtrdma_createmrs,
734 	TP_PROTO(
735 		const struct rpcrdma_xprt *r_xprt,
736 		unsigned int count
737 	),
738 
739 	TP_ARGS(r_xprt, count),
740 
741 	TP_STRUCT__entry(
742 		__string(addr, rpcrdma_addrstr(r_xprt))
743 		__string(port, rpcrdma_portstr(r_xprt))
744 		__field(unsigned int, count)
745 	),
746 
747 	TP_fast_assign(
748 		__entry->count = count;
749 		__assign_str(addr);
750 		__assign_str(port);
751 	),
752 
753 	TP_printk("peer=[%s]:%s created %u MRs",
754 		__get_str(addr), __get_str(port), __entry->count
755 	)
756 );
757 
/*
 * xprtrdma_nomrs_err - a request could not obtain MRs; records the
 * owning task and the peer address.
 */
758 TRACE_EVENT(xprtrdma_nomrs_err,
759 	TP_PROTO(
760 		const struct rpcrdma_xprt *r_xprt,
761 		const struct rpcrdma_req *req
762 	),
763 
764 	TP_ARGS(r_xprt, req),
765 
766 	TP_STRUCT__entry(
767 		__field(unsigned int, task_id)
768 		__field(unsigned int, client_id)
769 		__string(addr, rpcrdma_addrstr(r_xprt))
770 		__string(port, rpcrdma_portstr(r_xprt))
771 	),
772 
773 	TP_fast_assign(
774 		const struct rpc_rqst *rqst = &req->rl_slot;
775 
776 		__entry->task_id = rqst->rq_task->tk_pid;
777 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
778 		__assign_str(addr);
779 		__assign_str(port);
780 	),
781 
782 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s",
783 		__entry->task_id, __entry->client_id,
784 		__get_str(addr), __get_str(port)
785 	)
786 );
787 
/* Chunk registration events: trace_xprtrdma_chunk_{read,write,reply,wp}() */
788 DEFINE_RDCH_EVENT(read);
789 DEFINE_WRCH_EVENT(write);
790 DEFINE_WRCH_EVENT(reply);
791 DEFINE_WRCH_EVENT(wp);
792 
/* Export the chunk-type enum values so user space can decode them. */
793 TRACE_DEFINE_ENUM(rpcrdma_noch);
794 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
795 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
796 TRACE_DEFINE_ENUM(rpcrdma_readch);
797 TRACE_DEFINE_ENUM(rpcrdma_areadch);
798 TRACE_DEFINE_ENUM(rpcrdma_writech);
799 TRACE_DEFINE_ENUM(rpcrdma_replych);
800 
801 #define xprtrdma_show_chunktype(x)					\
802 		__print_symbolic(x,					\
803 				{ rpcrdma_noch, "inline" },		\
804 				{ rpcrdma_noch_pullup, "pullup" },	\
805 				{ rpcrdma_noch_mapped, "mapped" },	\
806 				{ rpcrdma_readch, "read list" },	\
807 				{ rpcrdma_areadch, "*read list" },	\
808 				{ rpcrdma_writech, "write list" },	\
809 				{ rpcrdma_replych, "reply chunk" })
810 
/*
 * xprtrdma_marshal - a request was marshaled: records the transport
 * header length, the send buffer's head/page/tail lengths, and the
 * chosen read/write chunk types.
 */
811 TRACE_EVENT(xprtrdma_marshal,
812 	TP_PROTO(
813 		const struct rpcrdma_req *req,
814 		unsigned int rtype,
815 		unsigned int wtype
816 	),
817 
818 	TP_ARGS(req, rtype, wtype),
819 
820 	TP_STRUCT__entry(
821 		__field(unsigned int, task_id)
822 		__field(unsigned int, client_id)
823 		__field(u32, xid)
824 		__field(unsigned int, hdrlen)
825 		__field(unsigned int, headlen)
826 		__field(unsigned int, pagelen)
827 		__field(unsigned int, taillen)
828 		__field(unsigned int, rtype)
829 		__field(unsigned int, wtype)
830 	),
831 
832 	TP_fast_assign(
833 		const struct rpc_rqst *rqst = &req->rl_slot;
834 
835 		__entry->task_id = rqst->rq_task->tk_pid;
836 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
837 		__entry->xid = be32_to_cpu(rqst->rq_xid);
838 		__entry->hdrlen = req->rl_hdrbuf.len;
839 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
840 		__entry->pagelen = rqst->rq_snd_buf.page_len;
841 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
842 		__entry->rtype = rtype;
843 		__entry->wtype = wtype;
844 	),
845 
846 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
847 		  " xid=0x%08x hdr=%u xdr=%u/%u/%u %s/%s",
848 		__entry->task_id, __entry->client_id, __entry->xid,
849 		__entry->hdrlen,
850 		__entry->headlen, __entry->pagelen, __entry->taillen,
851 		xprtrdma_show_chunktype(__entry->rtype),
852 		xprtrdma_show_chunktype(__entry->wtype)
853 	)
854 );
855 
/* xprtrdma_marshal_failed - marshaling a request failed with @ret. */
856 TRACE_EVENT(xprtrdma_marshal_failed,
857 	TP_PROTO(const struct rpc_rqst *rqst,
858 		 int ret
859 	),
860 
861 	TP_ARGS(rqst, ret),
862 
863 	TP_STRUCT__entry(
864 		__field(unsigned int, task_id)
865 		__field(unsigned int, client_id)
866 		__field(u32, xid)
867 		__field(int, ret)
868 	),
869 
870 	TP_fast_assign(
871 		__entry->task_id = rqst->rq_task->tk_pid;
872 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
873 		__entry->xid = be32_to_cpu(rqst->rq_xid);
874 		__entry->ret = ret;
875 	),
876 
877 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
878 		__entry->task_id, __entry->client_id, __entry->xid,
879 		__entry->ret
880 	)
881 );
882 
/*
 * xprtrdma_prepsend_failed - preparing the Send WR failed with @ret.
 * Same record layout as xprtrdma_marshal_failed.
 */
883 TRACE_EVENT(xprtrdma_prepsend_failed,
884 	TP_PROTO(const struct rpc_rqst *rqst,
885 		 int ret
886 	),
887 
888 	TP_ARGS(rqst, ret),
889 
890 	TP_STRUCT__entry(
891 		__field(unsigned int, task_id)
892 		__field(unsigned int, client_id)
893 		__field(u32, xid)
894 		__field(int, ret)
895 	),
896 
897 	TP_fast_assign(
898 		__entry->task_id = rqst->rq_task->tk_pid;
899 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
900 		__entry->xid = be32_to_cpu(rqst->rq_xid);
901 		__entry->ret = ret;
902 	),
903 
904 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
905 		__entry->task_id, __entry->client_id, __entry->xid,
906 		__entry->ret
907 	)
908 );
909 
/*
 * xprtrdma_post_send - a Send WR was posted: records the sendctx's
 * completion ID, the SGE count, and whether the WR was signaled.
 * A backchannel task may have no rpc_clnt, hence the NULL check.
 */
910 TRACE_EVENT(xprtrdma_post_send,
911 	TP_PROTO(
912 		const struct rpcrdma_req *req
913 	),
914 
915 	TP_ARGS(req),
916 
917 	TP_STRUCT__entry(
918 		__field(u32, cq_id)
919 		__field(int, completion_id)
920 		__field(unsigned int, task_id)
921 		__field(unsigned int, client_id)
922 		__field(int, num_sge)
923 		__field(int, signaled)
924 	),
925 
926 	TP_fast_assign(
927 		const struct rpc_rqst *rqst = &req->rl_slot;
928 		const struct rpcrdma_sendctx *sc = req->rl_sendctx;
929 
930 		__entry->cq_id = sc->sc_cid.ci_queue_id;
931 		__entry->completion_id = sc->sc_cid.ci_completion_id;
932 		__entry->task_id = rqst->rq_task->tk_pid;
933 		__entry->client_id = rqst->rq_task->tk_client ?
934 				     rqst->rq_task->tk_client->cl_clid : -1;
935 		__entry->num_sge = req->rl_wr.num_sge;
936 		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
937 	),
938 
939 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u cid=%d (%d SGE%s) %s",
940 		__entry->task_id, __entry->client_id,
941 		__entry->cq_id, __entry->completion_id,
942 		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
943 		(__entry->signaled ? "signaled" : "")
944 	)
945 );
946 
/*
 * xprtrdma_post_send_err - posting a Send WR failed with @rc.
 *
 * NOTE(review): the CQ id reported here is the endpoint's *recv* CQ
 * (re_attr.recv_cq) even though this is a Send error -- confirm
 * whether send_cq was intended.
 */
947 TRACE_EVENT(xprtrdma_post_send_err,
948 	TP_PROTO(
949 		const struct rpcrdma_xprt *r_xprt,
950 		const struct rpcrdma_req *req,
951 		int rc
952 	),
953 
954 	TP_ARGS(r_xprt, req, rc),
955 
956 	TP_STRUCT__entry(
957 		__field(u32, cq_id)
958 		__field(unsigned int, task_id)
959 		__field(unsigned int, client_id)
960 		__field(int, rc)
961 	),
962 
963 	TP_fast_assign(
964 		const struct rpc_rqst *rqst = &req->rl_slot;
965 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
966 
967 		__entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
968 		__entry->task_id = rqst->rq_task->tk_pid;
969 		__entry->client_id = rqst->rq_task->tk_client ?
970 				     rqst->rq_task->tk_client->cl_clid : -1;
971 		__entry->rc = rc;
972 	),
973 
974 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u rc=%d",
975 		__entry->task_id, __entry->client_id,
976 		__entry->cq_id, __entry->rc
977 	)
978 );
979 
980 DEFINE_SIMPLE_CID_EVENT(xprtrdma_post_recv);
981 
/*
 * xprtrdma_post_recvs - a batch of Receive WRs was posted: records how
 * many new Receives were posted (@count) and the endpoint's current
 * active Receive count (re_receive_count).
 */
982 TRACE_EVENT(xprtrdma_post_recvs,
983 	TP_PROTO(
984 		const struct rpcrdma_xprt *r_xprt,
985 		unsigned int count
986 	),
987 
988 	TP_ARGS(r_xprt, count),
989 
990 	TP_STRUCT__entry(
991 		__field(u32, cq_id)
992 		__field(unsigned int, count)
993 		__field(int, posted)
994 		__string(addr, rpcrdma_addrstr(r_xprt))
995 		__string(port, rpcrdma_portstr(r_xprt))
996 	),
997 
998 	TP_fast_assign(
999 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
1000 
1001 		__entry->cq_id = ep->re_attr.recv_cq->res.id;
1002 		__entry->count = count;
1003 		__entry->posted = ep->re_receive_count;
1004 		__assign_str(addr);
1005 		__assign_str(port);
1006 	),
1007 
	/* cq_id is a u32: print with %u, matching the completion classes */
1008 	TP_printk("peer=[%s]:%s cq.id=%u %u new recvs, %d active",
1009 		__get_str(addr), __get_str(port), __entry->cq_id,
1010 		__entry->count, __entry->posted
1011 	)
1012 );
1013 
/*
 * xprtrdma_post_recvs_err - posting Receive WRs failed with @status;
 * records the recv CQ id and the peer address.
 */
1014 TRACE_EVENT(xprtrdma_post_recvs_err,
1015 	TP_PROTO(
1016 		const struct rpcrdma_xprt *r_xprt,
1017 		int status
1018 	),
1019 
1020 	TP_ARGS(r_xprt, status),
1021 
1022 	TP_STRUCT__entry(
1023 		__field(u32, cq_id)
1024 		__field(int, status)
1025 		__string(addr, rpcrdma_addrstr(r_xprt))
1026 		__string(port, rpcrdma_portstr(r_xprt))
1027 	),
1028 
1029 	TP_fast_assign(
1030 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
1031 
1032 		__entry->cq_id = ep->re_attr.recv_cq->res.id;
1033 		__entry->status = status;
1034 		__assign_str(addr);
1035 		__assign_str(port);
1036 	),
1037 
	/* cq_id is a u32: print with %u, matching the completion classes */
1038 	TP_printk("peer=[%s]:%s cq.id=%u rc=%d",
1039 		__get_str(addr), __get_str(port), __entry->cq_id,
1040 		__entry->status
1041 	)
1042 );
1043 
/*
 * xprtrdma_post_linv_err - posting a LocalInv WR failed with @status;
 * records the owning task.
 */
1044 TRACE_EVENT(xprtrdma_post_linv_err,
1045 	TP_PROTO(
1046 		const struct rpcrdma_req *req,
1047 		int status
1048 	),
1049 
1050 	TP_ARGS(req, status),
1051 
1052 	TP_STRUCT__entry(
1053 		__field(unsigned int, task_id)
1054 		__field(unsigned int, client_id)
1055 		__field(int, status)
1056 	),
1057 
1058 	TP_fast_assign(
1059 		const struct rpc_task *task = req->rl_slot.rq_task;
1060 
1061 		__entry->task_id = task->tk_pid;
1062 		__entry->client_id = task->tk_client->cl_clid;
1063 		__entry->status = status;
1064 	),
1065 
1066 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
1067 		__entry->task_id, __entry->client_id, __entry->status
1068 	)
1069 );
1070 
1071 /**
1072  ** Completion events
1073  **/
1074 
/* Receive and Send completions, plus FRWR FastReg/LocalInv completions */
1075 DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);
1076 
1077 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
1078 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
1079 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
1080 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
1081 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
1082 
/* xprtrdma_frwr_alloc - allocating an FRWR MR completed with @rc. */
1083 TRACE_EVENT(xprtrdma_frwr_alloc,
1084 	TP_PROTO(
1085 		const struct rpcrdma_mr *mr,
1086 		int rc
1087 	),
1088 
1089 	TP_ARGS(mr, rc),
1090 
1091 	TP_STRUCT__entry(
1092 		__field(u32, mr_id)
1093 		__field(int, rc)
1094 	),
1095 
1096 	TP_fast_assign(
1097 		__entry->mr_id = mr->mr_ibmr->res.id;
1098 		__entry->rc = rc;
1099 	),
1100 
1101 	TP_printk("mr.id=%u: rc=%d",
1102 		__entry->mr_id, __entry->rc
1103 	)
1104 );
1105 
/*
 * xprtrdma_frwr_dereg - deregistering an FRWR MR completed with @rc;
 * records the MR's full geometry and DMA direction.
 */
1106 TRACE_EVENT(xprtrdma_frwr_dereg,
1107 	TP_PROTO(
1108 		const struct rpcrdma_mr *mr,
1109 		int rc
1110 	),
1111 
1112 	TP_ARGS(mr, rc),
1113 
1114 	TP_STRUCT__entry(
1115 		__field(u32, mr_id)
1116 		__field(int, nents)
1117 		__field(u32, handle)
1118 		__field(u32, length)
1119 		__field(u64, offset)
1120 		__field(u32, dir)
1121 		__field(int, rc)
1122 	),
1123 
1124 	TP_fast_assign(
1125 		__entry->mr_id  = mr->mr_ibmr->res.id;
1126 		__entry->nents  = mr->mr_nents;
1127 		__entry->handle = mr->mr_handle;
1128 		__entry->length = mr->mr_length;
1129 		__entry->offset = mr->mr_offset;
1130 		__entry->dir    = mr->mr_dir;
1131 		__entry->rc	= rc;
1132 	),
1133 
1134 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
1135 		__entry->mr_id, __entry->nents, __entry->length,
1136 		(unsigned long long)__entry->offset, __entry->handle,
1137 		xprtrdma_show_direction(__entry->dir),
1138 		__entry->rc
1139 	)
1140 );
1141 
1142 TRACE_EVENT(xprtrdma_frwr_sgerr,
1143 	TP_PROTO(
1144 		const struct rpcrdma_mr *mr,
1145 		int sg_nents
1146 	),
1147 
1148 	TP_ARGS(mr, sg_nents),
1149 
1150 	TP_STRUCT__entry(
1151 		__field(u32, mr_id)
1152 		__field(u64, addr)
1153 		__field(u32, dir)
1154 		__field(int, nents)
1155 	),
1156 
1157 	TP_fast_assign(
1158 		__entry->mr_id = mr->mr_ibmr->res.id;
1159 		__entry->addr = mr->mr_sg->dma_address;
1160 		__entry->dir = mr->mr_dir;
1161 		__entry->nents = sg_nents;
1162 	),
1163 
1164 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
1165 		__entry->mr_id, __entry->addr,
1166 		xprtrdma_show_direction(__entry->dir),
1167 		__entry->nents
1168 	)
1169 );
1170 
/* Records an FRWR MR mapping shortfall: how many segments were mapped
 * (num_mapped) versus the MR's total mr_nents, plus the MR's resource
 * id, first DMA address, and DMA direction.
 */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->mr_ibmr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);
1201 
/* Per-MR lifecycle events instantiated from the MR event classes
 * (defined earlier in this file).
 */
DEFINE_MR_EVENT(fastreg);
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(reminv);
DEFINE_MR_EVENT(map);

DEFINE_ANON_MR_EVENT(unmap);
1208 
1209 TRACE_EVENT(xprtrdma_dma_maperr,
1210 	TP_PROTO(
1211 		u64 addr
1212 	),
1213 
1214 	TP_ARGS(addr),
1215 
1216 	TP_STRUCT__entry(
1217 		__field(u64, addr)
1218 	),
1219 
1220 	TP_fast_assign(
1221 		__entry->addr = addr;
1222 	),
1223 
1224 	TP_printk("dma addr=0x%llx\n", __entry->addr)
1225 );
1226 
1227 /**
1228  ** Reply events
1229  **/
1230 
/* Records receipt of an RPC/RDMA reply: the owning task and client
 * ids, the reply's XID (from rep->rr_xid), and the credit value the
 * caller passed in.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		unsigned int credits
	),

	TP_ARGS(task, rep, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x credits=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits
	)
);
1259 
/* Reply-processing events instantiated from the reply event class
 * (defined earlier in this file).
 */
DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);
1264 
/* Records an RPC/RDMA version mismatch error: the request's task,
 * client id and XID, and the [min, max] version range decoded from
 * the peer's error body.
 */
TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);
1295 
/* Records a chunk-related error for a request: the request's task and
 * client ids plus its XID.
 */
TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);
1319 
1320 TRACE_EVENT(xprtrdma_err_unrecognized,
1321 	TP_PROTO(
1322 		const struct rpc_rqst *rqst,
1323 		__be32 *procedure
1324 	),
1325 
1326 	TP_ARGS(rqst, procedure),
1327 
1328 	TP_STRUCT__entry(
1329 		__field(unsigned int, task_id)
1330 		__field(unsigned int, client_id)
1331 		__field(u32, xid)
1332 		__field(u32, procedure)
1333 	),
1334 
1335 	TP_fast_assign(
1336 		__entry->task_id = rqst->rq_task->tk_pid;
1337 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1338 		__entry->procedure = be32_to_cpup(procedure);
1339 	),
1340 
1341 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x procedure=%u",
1342 		__entry->task_id, __entry->client_id, __entry->xid,
1343 		__entry->procedure
1344 	)
1345 );
1346 
/* Records reply-buffer fixup work: the fixup count supplied by the
 * caller plus the head/page/tail lengths of the request's receive
 * xdr_buf.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);
1378 
/* Records one decoded RDMA segment as length@offset:handle. */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1405 
/* Records only the task and client ids of the task on whose behalf
 * MRs are being zapped.
 */
TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
		__entry->task_id, __entry->client_id
	)
);
1427 
1428 /**
1429  ** Callback events
1430  **/
1431 
/* Records backchannel setup: the peer address/port strings and how
 * many callback requests were provisioned.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->reqs = reqs;
		__assign_str(addr);
		__assign_str(port);
	),

	TP_printk("peer=[%s]:%s %u reqs",
		__get_str(addr), __get_str(port), __entry->reqs
	)
);
1456 
/* Backchannel call/reply events from the callback event class. */
DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);
1459 
1460 /**
1461  ** Server-side RPC/RDMA events
1462  **/
1463 
/* Event class for server-side accept-path failures: captures the
 * transport's remote address string and a long status code.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);
1486 
/* Instantiates an svcrdma_<name>_err event from svcrdma_accept_class. */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
1500 
/* Export the RPC/RDMA procedure constants to user space and map them
 * to symbolic names for trace output.
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1514 
/* Records a successfully decoded transport header: completion ids
 * plus the xid, version, credits and procedure words decoded in
 * order from *p, and the total header length.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* Header layout: xid, version, credits, procedure */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1549 
/* Records a too-short transport header: completion ids and the
 * received header length.
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
1574 
/* Event class for malformed incoming requests: completion ids plus
 * the xid, version, credits and procedure words decoded from *p.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		/* Header layout: xid, version, credits, procedure */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
1605 
/* Instantiates an svcrdma_decode_<name>_err event from
 * svcrdma_badreq_event.
 */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1619 
/* Records one encoded write segment: completion ids, the segment
 * index, and the segment's length@offset:handle triple.
 */
TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1655 
/* Records one decoded read segment: completion ids, the chunk's
 * segment count and position, and the segment's
 * length@offset:handle triple.
 */
TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		/* segno records the chunk's running segment count */
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq.id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1691 
/* Records one decoded write segment, looked up by index segno in the
 * chunk's segment array: completion ids, segment index, and the
 * segment's length@offset:handle triple.
 */
TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1728 
/* Event class for server-side error replies, keyed only by XID
 * (converted from wire byte order).
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1748 
/* Instantiates an svcrdma_err_<name> event from svcrdma_error_event. */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1758 
1759 /**
1760  ** Server-side RDMA API events
1761  **/
1762 
/* Event class for server-side DMA map/unmap events: completion ids
 * plus the DMA address and length involved.
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(cid, dma_addr, length),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u64, dma_addr)
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->dma_addr = dma_addr;
		__entry->length = length;
	),

	TP_printk("cq.id=%u cid=%d dma_addr=%llu length=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->dma_addr, __entry->length
	)
);
1791 
/* Instantiates an svcrdma_<name> event from svcrdma_dma_map_class. */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid, \
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(cid, dma_addr, length)		\
		)

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1805 
/* Records an RDMA read/write DMA mapping failure: the send CQ's
 * resource id, the offset:handle pair, entry count, and status code.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 offset,
		u32 handle,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, offset, handle, nents, status),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(u32, handle)
		__field(u64, offset)
		__field(unsigned int, nents)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cq_id = rdma->sc_sq_cq->res.id;
		__entry->handle = handle;
		__entry->offset = offset;
		__entry->nents = nents;
		__entry->status = status;
	),

	TP_printk("cq.id=%u 0x%016llx:0x%08x nents=%u status=%d",
		__entry->cq_id, (unsigned long long)__entry->offset,
		__entry->handle, __entry->nents, __entry->status
	)
);
1838 
1839 TRACE_EVENT(svcrdma_rwctx_empty,
1840 	TP_PROTO(
1841 		const struct svcxprt_rdma *rdma,
1842 		unsigned int num_sges
1843 	),
1844 
1845 	TP_ARGS(rdma, num_sges),
1846 
1847 	TP_STRUCT__entry(
1848 		__field(u32, cq_id)
1849 		__field(unsigned int, num_sges)
1850 	),
1851 
1852 	TP_fast_assign(
1853 		__entry->cq_id = rdma->sc_sq_cq->res.id;
1854 		__entry->num_sges = num_sges;
1855 	),
1856 
1857 	TP_printk("cq.id=%u num_sges=%d",
1858 		__entry->cq_id, __entry->num_sges
1859 	)
1860 );
1861 
/* Records a page-array overrun: completion ids and the offending
 * page index.
 */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		unsigned int pageno
	),

	TP_ARGS(cid, pageno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, pageno)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->pageno = pageno;
	),

	TP_printk("cq.id=%u cid=%d pageno=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->pageno
	)
);
1887 
/* Records a write chunk that is too small for the payload: completion
 * ids, remaining byte count, current segment index, and total
 * segment count.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(cid, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
	),

	TP_printk("cq.id=%u cid=%d remaining=%u seg_no=%u num_segs=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->remaining, __entry->seg_no, __entry->num_segs
	)
);
1919 
1920 TRACE_EVENT(svcrdma_send_pullup,
1921 	TP_PROTO(
1922 		const struct svc_rdma_send_ctxt *ctxt,
1923 		unsigned int msglen
1924 	),
1925 
1926 	TP_ARGS(ctxt, msglen),
1927 
1928 	TP_STRUCT__entry(
1929 		__field(u32, cq_id)
1930 		__field(int, completion_id)
1931 		__field(unsigned int, hdrlen)
1932 		__field(unsigned int, msglen)
1933 	),
1934 
1935 	TP_fast_assign(
1936 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1937 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1938 		__entry->hdrlen = ctxt->sc_hdrbuf.len,
1939 		__entry->msglen = msglen;
1940 	),
1941 
1942 	TP_printk("cq.id=%u cid=%d hdr=%u msg=%u (total %u)",
1943 		__entry->cq_id, __entry->completion_id,
1944 		__entry->hdrlen, __entry->msglen,
1945 		__entry->hdrlen + __entry->msglen)
1946 );
1947 
/* Records a send failure for an RPC: the remote address string, the
 * request's XID, and the error status.
 */
TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);
1972 
/* Records posting of a Send WR: completion ids, the SGE count, and
 * the rkey being remotely invalidated (zero unless the opcode is
 * IB_WR_SEND_WITH_INV).
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);
2002 
/* Send/Receive completion events instantiated from the CID and
 * flush event classes.
 */
DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_send);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);

DEFINE_SIMPLE_CID_EVENT(svcrdma_post_recv);

DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_err);
2012 
/* Records a Receive posting failure: the transport's remote address
 * string and the error status.
 */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
2035 
/* Event class for chunk posting: completion ids and the number of
 * send queue entries consumed.
 */
DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);
2061 
/* Instantiates svcrdma_post_<name>_chunk events, plus the release
 * event, from svcrdma_post_chunk_class.
 */
#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);

DEFINE_EVENT(svcrdma_post_chunk_class, svcrdma_cc_release,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),
	TP_ARGS(cid, sqecount)
);
2082 
/* Records completion of an RDMA Read: completion ids, total bytes
 * transferred, and the microseconds elapsed since posttime
 * (computed at trace time via ktime_us_delta()).
 */
TRACE_EVENT(svcrdma_wc_read,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid,
		unsigned int totalbytes,
		const ktime_t posttime
	),

	TP_ARGS(wc, cid, totalbytes, posttime),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(s64, read_latency)
		__field(unsigned int, totalbytes)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->totalbytes = totalbytes;
		__entry->read_latency = ktime_us_delta(ktime_get(), posttime);
	),

	TP_printk("cq.id=%u cid=%d totalbytes=%u latency-us=%lld",
		__entry->cq_id, __entry->completion_id,
		__entry->totalbytes, __entry->read_latency
	)
);
2112 
/* Read/Write/Reply completion events instantiated from the CID and
 * flush event classes.
 */
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
DEFINE_SIMPLE_CID_EVENT(svcrdma_read_finished);

DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_write);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);

DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_reply);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_reply_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_reply_err);
2124 
/* Records an asynchronous QP event: the device name, the event code
 * (shown both symbolically and numerically), and the peer address
 * formatted with %pISpc.
 */
TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device);
		/* NOTE(review): snprintf() already NUL-terminates within
		 * the given size, so the "- 1" is not strictly needed;
		 * harmless, kept as-is.
		 */
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
2151 
/* Event class for send-queue accounting: completion ids plus the
 * current sc_sq_avail count and total sc_sq_depth.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(rdma, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, avail)
		__field(int, depth)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
	),

	TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d",
		__entry->cq_id, __entry->completion_id,
		__entry->avail, __entry->depth
	)
);
2179 
/* Instantiates a send-queue event from svcrdma_sendqueue_class. */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_class, name,		\
			TP_PROTO(					\
				const struct svcxprt_rdma *rdma,	\
				const struct rpc_rdma_cid *cid		\
			),						\
			TP_ARGS(rdma, cid)				\
		)

DEFINE_SQ_EVENT(svcrdma_sq_full);
DEFINE_SQ_EVENT(svcrdma_sq_retry);
2191 
/* Records a send-queue posting failure: completion ids, the current
 * sc_sq_avail/sc_sq_depth counters, and the error status.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct rpc_rdma_cid *cid,
		int status
	),

	TP_ARGS(rdma, cid, status),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
	),

	TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d status=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->avail, __entry->depth, __entry->status
	)
);
2222 
2223 #endif /* _TRACE_RPCRDMA_H */
2224 
2225 #include <trace/define_trace.h>
2226