1 /*
2  * Copyright(c) 2015 - 2017 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 #if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
48 #define __HFI1_TRACE_TX_H
49 
50 #include <linux/tracepoint.h>
51 #include <linux/trace_seq.h>
52 
53 #include "hfi.h"
54 #include "mad.h"
55 #include "sdma.h"
56 #include "ipoib.h"
57 #include "user_sdma.h"
58 
/*
 * Decode the flag bits of an SDMA descriptor pair into a printable string;
 * defined elsewhere in the driver (takes the per-event trace_seq).
 */
const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);

/* Wrapper supplying the trace_seq 'p' that is in scope inside TP_printk(). */
#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
62 
63 #undef TRACE_SYSTEM
64 #define TRACE_SYSTEM hfi1_tx
65 
/*
 * PIO credit-return event for a send context: records the context's
 * software/hardware indices and the caller-supplied 'extra' value.
 */
TRACE_EVENT(hfi1_piofree,
	    TP_PROTO(struct send_context *sc, int extra),
	    TP_ARGS(sc, extra),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
	    __field(u32, sw_index)
	    __field(u32, hw_context)
	    __field(int, extra)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
	    __entry->sw_index = sc->sw_index;
	    __entry->hw_context = sc->hw_context;
	    __entry->extra = extra;
	    ),
	    TP_printk("[%s] ctxt %u(%u) extra %d",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->extra
	    )
);
86 
/*
 * Logged when a send context requests (or drops) a PIO credit-return
 * interrupt; captures 'needint' and the credit_ctrl value passed by the
 * caller alongside the context indices.
 */
TRACE_EVENT(hfi1_wantpiointr,
	    TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
	    TP_ARGS(sc, needint, credit_ctrl),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
			__field(u32, sw_index)
			__field(u32, hw_context)
			__field(u32, needint)
			__field(u64, credit_ctrl)
			),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
			__entry->sw_index = sc->sw_index;
			__entry->hw_context = sc->hw_context;
			__entry->needint = needint;
			__entry->credit_ctrl = credit_ctrl;
			),
	    TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->needint,
		      (unsigned long long)__entry->credit_ctrl
		      )
);
110 
/*
 * Shared template for QP sleep/wakeup events: snapshots the QP number,
 * the caller's flags, the rvt-level qp->s_flags, and the hfi1-private
 * s_flags and iowait flags at the time of the event.
 */
DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
		    TP_PROTO(struct rvt_qp *qp, u32 flags),
		    TP_ARGS(qp, flags),
		    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		    __field(u32, qpn)
		    __field(u32, flags)
		    __field(u32, s_flags)
		    __field(u32, ps_flags)
		    __field(unsigned long, iow_flags)
		    ),
		    TP_fast_assign(
		    DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		    __entry->flags = flags;
		    __entry->qpn = qp->ibqp.qp_num;
		    __entry->s_flags = qp->s_flags;
		    __entry->ps_flags =
			((struct hfi1_qp_priv *)qp->priv)->s_flags;
		    __entry->iow_flags =
			((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
		    ),
		    TP_printk(
		    "[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
		    __get_str(dev),
		    __entry->qpn,
		    __entry->flags,
		    __entry->s_flags,
		    __entry->ps_flags,
		    __entry->iow_flags
		    )
);
142 
/* QP wakeup / sleep instances of the template above. */
DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));
150 
/*
 * Dump one SDMA hardware descriptor: the raw desc0/desc1 qwords plus the
 * physical address, generation and byte count decoded from them via the
 * SDMA_DESC* shift/mask macros, the descriptor slot 'e', and descp.
 */
TRACE_EVENT(hfi1_sdma_descriptor,
	    TP_PROTO(struct sdma_engine *sde,
		     u64 desc0,
		     u64 desc1,
		     u16 e,
		     void *descp),
		     TP_ARGS(sde, desc0, desc1, e, descp),
		     TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		     __field(void *, descp)
		     __field(u64, desc0)
		     __field(u64, desc1)
		     __field(u16, e)
		     __field(u8, idx)
		     ),
		     TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		     __entry->desc0 = desc0;
		     __entry->desc1 = desc1;
		     __entry->idx = sde->this_idx;
		     __entry->descp = descp;
		     __entry->e = e;
		     ),
	    TP_printk(
	    "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
	    __get_str(dev),
	    __entry->idx,
	    __parse_sdma_flags(__entry->desc0, __entry->desc1),
	    (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
	    SDMA_DESC0_PHY_ADDR_MASK,
	    (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
	    SDMA_DESC1_GENERATION_MASK),
	    (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
	    SDMA_DESC0_BYTE_COUNT_MASK),
	    __entry->desc0,
	    __entry->desc1,
	    __entry->descp,
	    __entry->e
	    )
);
189 
/* Records which SDMA engine (idx) was chosen for a given selector/VL. */
TRACE_EVENT(hfi1_sdma_engine_select,
	    TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
	    TP_ARGS(dd, sel, vl, idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
	    __field(u32, sel)
	    __field(u8, vl)
	    __field(u8, idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
	    __entry->sel = sel;
	    __entry->vl = vl;
	    __entry->idx = idx;
	    ),
	    TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
		      __get_str(dev),
		      __entry->idx,
		      __entry->sel,
		      __entry->vl
		      )
);
210 
/* Marks teardown of a user-SDMA queue pair, keyed by ctxt:subctxt. */
TRACE_EVENT(hfi1_sdma_user_free_queues,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
	    TP_ARGS(dd, ctxt, subctxt),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   ),
	    TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt
		      )
);
228 
/*
 * A user SDMA request has been accepted for processing; comp_idx is the
 * request/completion ring slot it will use.
 */
TRACE_EVENT(hfi1_sdma_user_process_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
			   ),
	    TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx
		      )
);
250 
/*
 * Generic template for user-SDMA events that report a single u32 value
 * (tid offset, data length, ...) against a ctxt:subctxt:comp_idx tuple.
 */
DECLARE_EVENT_CLASS(
	hfi1_sdma_value_template,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
		 u32 value),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			 __field(u16, ctxt)
			 __field(u16, subctxt)
			 __field(u16, comp_idx)
			 __field(u32, value)
		),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		       __entry->ctxt = ctxt;
		       __entry->subctxt = subctxt;
		       __entry->comp_idx = comp_idx;
		       __entry->value = value;
		),
	TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
		  __get_str(dev),
		  __entry->ctxt,
		  __entry->subctxt,
		  __entry->comp_idx,
		  __entry->value
		)
);
276 
/* Single-value instances: initial TID offset, and two data-length events. */
DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 tidoffset),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
291 
/*
 * Per-request TID bookkeeping: byte offset, unit count, and the offset
 * multiplier shift ("om") for a user SDMA request.
 */
TRACE_EVENT(hfi1_sdma_user_tid_info,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
			     __field(u32, tidoffset)
			     __field(u32, units)
			     __field(u8, shift)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
			   __entry->tidoffset = tidoffset;
			   __entry->units = units;
			   __entry->shift = shift;
			   ),
	    TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx,
		      __entry->tidoffset,
		      __entry->units,
		      __entry->shift
		      )
);
322 
/*
 * Entry point of a user SDMA write: 'dim' is the iovec count supplied by
 * userspace for this ctxt:subctxt.
 */
TRACE_EVENT(hfi1_sdma_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     unsigned long dim),
	    TP_ARGS(dd, ctxt, subctxt, dim),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(unsigned long, dim)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->dim = dim;
			   ),
	    TP_printk("[%s] SDMA from %u:%u (%lu)",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->dim
		      )
);
344 
/* Template for engine-level events carrying a raw 64-bit status word. */
DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
		    TP_PROTO(struct sdma_engine *sde, u64 status),
		    TP_ARGS(sde, status),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(u64, status)
		    __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->status = status;
		    __entry->idx = sde->this_idx;
		    ),
		    TP_printk("[%s] SDE(%u) status %llx",
			      __get_str(dev),
			      __entry->idx,
			      (unsigned long long)__entry->status
			      )
);
362 
/* Engine-status instances: interrupt entry and progress polling. */
DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status)
);

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status)
);
372 
/* Template for AHG (automatic header generation) index alloc/dealloc. */
DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
		    TP_PROTO(struct sdma_engine *sde, int aidx),
		    TP_ARGS(sde, aidx),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(int, aidx)
		    __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->idx = sde->this_idx;
		    __entry->aidx = aidx;
		    ),
		    TP_printk("[%s] SDE(%u) aidx %d",
			      __get_str(dev),
			      __entry->idx,
			      __entry->aidx
			      )
);
390 
/* AHG index lifecycle instances. */
DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));
398 
/*
 * SDMA ring-progress snapshot: hardware/software heads, the engine's
 * tx_head/tx_tail, and the next descriptor index of the pending txreq
 * (~0 when txp is NULL).  The CONFIG_HFI1_DEBUG_SDMA_ORDER variant
 * additionally records the txreq sequence number 'sn'.
 */
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead,
		     u16 swhead,
		     struct sdma_txreq *txp
		     ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
	    __field(u64, sn)
	    __field(u16, hwhead)
	    __field(u16, swhead)
	    __field(u16, txnext)
	    __field(u16, tx_tail)
	    __field(u16, tx_head)
	    __field(u8, idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
	    __entry->hwhead = hwhead;
	    __entry->swhead = swhead;
	    __entry->tx_tail = sde->tx_tail;
	    __entry->tx_head = sde->tx_head;
	    __entry->txnext = txp ? txp->next_descq_idx : ~0;
	    __entry->idx = sde->this_idx;
	    __entry->sn = txp ? txp->sn : ~0;
	    ),
	    TP_printk(
	    "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
	    __get_str(dev),
	    __entry->idx,
	    __entry->sn,
	    __entry->hwhead,
	    __entry->swhead,
	    __entry->txnext,
	    __entry->tx_head,
	    __entry->tx_tail
	    )
);
#else
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead, u16 swhead,
		     struct sdma_txreq *txp
		     ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(u16, hwhead)
		    __field(u16, swhead)
		    __field(u16, txnext)
		    __field(u16, tx_tail)
		    __field(u16, tx_head)
		    __field(u8, idx)
		    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->hwhead = hwhead;
		    __entry->swhead = swhead;
		    __entry->tx_tail = sde->tx_tail;
		    __entry->tx_head = sde->tx_head;
		    __entry->txnext = txp ? txp->next_descq_idx : ~0;
		    __entry->idx = sde->this_idx;
		    ),
	    TP_printk(
		    "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
		    __get_str(dev),
		    __entry->idx,
		    __entry->hwhead,
		    __entry->swhead,
		    __entry->txnext,
		    __entry->tx_head,
		    __entry->tx_tail
	    )
);
#endif
472 
/* Template for tracking txreq sequence numbers through an engine. */
DECLARE_EVENT_CLASS(hfi1_sdma_sn,
		    TP_PROTO(struct sdma_engine *sde, u64 sn),
		    TP_ARGS(sde, sn),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(u64, sn)
		    __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->sn = sn;
		    __entry->idx = sde->this_idx;
		    ),
		    TP_printk("[%s] SDE(%u) sn %llu",
			      __get_str(dev),
			      __entry->idx,
			      __entry->sn
			      )
);
490 
/* Sequence-number instances: submitted (out) and completed (in). */
DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
	     TP_PROTO(
	     struct sdma_engine *sde,
	     u64 sn
	     ),
	     TP_ARGS(sde, sn)
);

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
	     TP_PROTO(struct sdma_engine *sde, u64 sn),
	     TP_ARGS(sde, sn)
);
503 
/* Format for dumping a full user-SDMA packet header (PBC/LRH/BTH/KDETH). */
#define USDMA_HDR_FORMAT \
	"[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"

/*
 * Snapshot of the complete header built for one user SDMA packet,
 * byte-swapped to CPU order per field (PBC/KDETH little-endian,
 * LRH/BTH big-endian).
 * NOTE(review): TP_printk emits pbc1 before pbc0 — appears deliberate,
 * but confirm against the tooling that parses this format.
 */
TRACE_EVENT(hfi1_sdma_user_header,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     struct hfi1_pkt_header *hdr, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, req)
		    __field(u32, pbc0)
		    __field(u32, pbc1)
		    __field(u32, lrh0)
		    __field(u32, lrh1)
		    __field(u32, bth0)
		    __field(u32, bth1)
		    __field(u32, bth2)
		    __field(u32, kdeth0)
		    __field(u32, kdeth1)
		    __field(u32, kdeth2)
		    __field(u32, kdeth3)
		    __field(u32, kdeth4)
		    __field(u32, kdeth5)
		    __field(u32, kdeth6)
		    __field(u32, kdeth7)
		    __field(u32, kdeth8)
		    __field(u32, tidval)
		    ),
		    TP_fast_assign(
		    __le32 *pbc = (__le32 *)hdr->pbc;
		    __be32 *lrh = (__be32 *)hdr->lrh;
		    __be32 *bth = (__be32 *)hdr->bth;
		    __le32 *kdeth = (__le32 *)&hdr->kdeth;

		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->req = req;
		    __entry->pbc0 = le32_to_cpu(pbc[0]);
		    __entry->pbc1 = le32_to_cpu(pbc[1]);
		    __entry->lrh0 = be32_to_cpu(lrh[0]);
		    __entry->lrh1 = be32_to_cpu(lrh[1]);
		    __entry->bth0 = be32_to_cpu(bth[0]);
		    __entry->bth1 = be32_to_cpu(bth[1]);
		    __entry->bth2 = be32_to_cpu(bth[2]);
		    __entry->kdeth0 = le32_to_cpu(kdeth[0]);
		    __entry->kdeth1 = le32_to_cpu(kdeth[1]);
		    __entry->kdeth2 = le32_to_cpu(kdeth[2]);
		    __entry->kdeth3 = le32_to_cpu(kdeth[3]);
		    __entry->kdeth4 = le32_to_cpu(kdeth[4]);
		    __entry->kdeth5 = le32_to_cpu(kdeth[5]);
		    __entry->kdeth6 = le32_to_cpu(kdeth[6]);
		    __entry->kdeth7 = le32_to_cpu(kdeth[7]);
		    __entry->kdeth8 = le32_to_cpu(kdeth[8]);
		    __entry->tidval = tidval;
	    ),
	    TP_printk(USDMA_HDR_FORMAT,
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->req,
		      __entry->pbc1,
		      __entry->pbc0,
		      __entry->lrh0,
		      __entry->lrh1,
		      __entry->bth0,
		      __entry->bth1,
		      __entry->bth2,
		      __entry->kdeth0,
		      __entry->kdeth1,
		      __entry->kdeth2,
		      __entry->kdeth3,
		      __entry->kdeth4,
		      __entry->kdeth5,
		      __entry->kdeth6,
		      __entry->kdeth7,
		      __entry->kdeth8,
		      __entry->tidval
	    )
);
586 
#define SDMA_UREQ_FMT \
	"[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
/*
 * Unpack the four 16-bit words of a userspace SDMA request header:
 * i[0] = version/opcode (low byte) + iovcnt (high byte),
 * i[1] = packet count, i[2] = fragment size, i[3] = completion index.
 */
TRACE_EVENT(hfi1_sdma_user_reqinfo,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
	    TP_ARGS(dd, ctxt, subctxt, i),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u8, ver_opcode)
		    __field(u8, iovcnt)
		    __field(u16, npkts)
		    __field(u16, fragsize)
		    __field(u16, comp_idx)
	    ),
	    TP_fast_assign(
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->ver_opcode = i[0] & 0xff;
		    __entry->iovcnt = (i[0] >> 8) & 0xff;
		    __entry->npkts = i[1];
		    __entry->fragsize = i[2];
		    __entry->comp_idx = i[3];
	    ),
	    TP_printk(SDMA_UREQ_FMT,
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->ver_opcode,
		      __entry->iovcnt,
		      __entry->npkts,
		      __entry->fragsize,
		      __entry->comp_idx
		      )
);
623 
/* Map completion-state values to their symbolic names in trace output. */
#define usdma_complete_name(st) { st, #st }
#define show_usdma_complete_state(st)			\
	__print_symbolic(st,				\
			usdma_complete_name(FREE),	\
			usdma_complete_name(QUEUED),	\
			usdma_complete_name(COMPLETE), \
			usdma_complete_name(ERROR))

/*
 * Final state of a user SDMA request: ring slot 'idx', symbolic state,
 * and the integer completion code.
 */
TRACE_EVENT(hfi1_sdma_user_completion,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
		     u8 state, int code),
	    TP_ARGS(dd, ctxt, subctxt, idx, state, code),
	    TP_STRUCT__entry(
	    DD_DEV_ENTRY(dd)
	    __field(u16, ctxt)
	    __field(u8, subctxt)
	    __field(u16, idx)
	    __field(u8, state)
	    __field(int, code)
	    ),
	    TP_fast_assign(
	    DD_DEV_ASSIGN(dd);
	    __entry->ctxt = ctxt;
	    __entry->subctxt = subctxt;
	    __entry->idx = idx;
	    __entry->state = state;
	    __entry->code = code;
	    ),
	    TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
		      __get_str(dev), __entry->ctxt, __entry->subctxt,
		      __entry->idx, show_usdma_complete_state(__entry->state),
		      __entry->code)
);
657 
/*
 * A user SDMA packet queue deferred onto an engine's iowait list;
 * 'empty' records whether the wait list was empty at capture time.
 * Pointers are logged as opaque identifiers only.
 */
TRACE_EVENT(hfi1_usdma_defer,
	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		     struct sdma_engine *sde,
		     struct iowait *wait),
	    TP_ARGS(pq, sde, wait),
	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
			     __field(struct hfi1_user_sdma_pkt_q *, pq)
			     __field(struct sdma_engine *, sde)
			     __field(struct iowait *, wait)
			     __field(int, engine)
			     __field(int, empty)
			     ),
	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
			    __entry->pq = pq;
			    __entry->sde = sde;
			    __entry->wait = wait;
			    __entry->engine = sde->this_idx;
			    __entry->empty = list_empty(&__entry->wait->list);
			    ),
	     TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
		       __get_str(dev),
		       (unsigned long long)__entry->pq,
		       (unsigned long long)__entry->sde,
		       (unsigned long long)__entry->wait,
		       __entry->engine,
		       __entry->empty
		)
);
686 
/* A deferred packet queue is reactivated; 'reason' is caller-defined. */
TRACE_EVENT(hfi1_usdma_activate,
	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		     struct iowait *wait,
		     int reason),
	    TP_ARGS(pq, wait, reason),
	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
			     __field(struct hfi1_user_sdma_pkt_q *, pq)
			     __field(struct iowait *, wait)
			     __field(int, reason)
			     ),
	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
			    __entry->pq = pq;
			    __entry->wait = wait;
			    __entry->reason = reason;
			    ),
	     TP_printk("[%s] pq %llx wait %llx reason %d",
		       __get_str(dev),
		       (unsigned long long)__entry->pq,
		       (unsigned long long)__entry->wait,
		       __entry->reason
		)
);
709 
/*
 * Wait-event outcome for a packet queue: captures pq->state and the
 * return value 'we_ret' supplied by the caller.
 */
TRACE_EVENT(hfi1_usdma_we,
	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		     int we_ret),
	    TP_ARGS(pq, we_ret),
	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
			     __field(struct hfi1_user_sdma_pkt_q *, pq)
			     __field(int, state)
			     __field(int, we_ret)
			     ),
	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
			    __entry->pq = pq;
			    __entry->state = pq->state;
			    __entry->we_ret = we_ret;
			    ),
	     TP_printk("[%s] pq %llx state %d we_ret %d",
		       __get_str(dev),
		       (unsigned long long)__entry->pq,
		       __entry->state,
		       __entry->we_ret
		)
);
731 
/* Render an array of u32s as hex; defined elsewhere in the driver. */
const char *print_u32_array(struct trace_seq *, u32 *, int);
/* Wrapper supplying the trace_seq 'p' that is in scope inside TP_printk(). */
#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
734 
/*
 * AHG (automatic header generation) update for a user SDMA request:
 * records the engine, AHG index, and the raw AHG word array.
 * NOTE(review): __entry->ahg holds at most 10 u32s but the memcpy below
 * copies 'len' words without a bound check here — callers must guarantee
 * len <= 10; confirm at the call sites.
 */
TRACE_EVENT(hfi1_sdma_user_header_ahg,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
	    TP_STRUCT__entry(
	    DD_DEV_ENTRY(dd)
	    __field(u16, ctxt)
	    __field(u8, subctxt)
	    __field(u16, req)
	    __field(u8, sde)
	    __field(u8, idx)
	    __field(int, len)
	    __field(u32, tidval)
	    __array(u32, ahg, 10)
	    ),
	    TP_fast_assign(
	    DD_DEV_ASSIGN(dd);
	    __entry->ctxt = ctxt;
	    __entry->subctxt = subctxt;
	    __entry->req = req;
	    __entry->sde = sde;
	    __entry->idx = ahgidx;
	    __entry->len = len;
	    __entry->tidval = tidval;
	    memcpy(__entry->ahg, ahg, len * sizeof(u32));
	    ),
	    TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->req,
		      __entry->sde,
		      __entry->idx,
		      __entry->len - 1,
		      __print_u32_hex(__entry->ahg, __entry->len),
		      __entry->tidval
		      )
);
773 
/* SDMA engine state-machine transition: current and next state names. */
TRACE_EVENT(hfi1_sdma_state,
	    TP_PROTO(struct sdma_engine *sde,
		     const char *cstate,
		     const char *nstate
		     ),
	    TP_ARGS(sde, cstate, nstate),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__string(curstate, cstate)
		__string(newstate, nstate)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__assign_str(curstate, cstate);
		__assign_str(newstate, nstate);
	    ),
	    TP_printk("[%s] current state %s new state %s",
		      __get_str(dev),
		      __get_str(curstate),
		      __get_str(newstate)
	    )
);
794 
/* Buffer-control table dump: shared limit plus per-VL dedicated/shared. */
#define BCT_FORMAT \
	"shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"

/* Read one big-endian u16 field out of the captured buffer_control copy. */
#define BCT(field) \
	be16_to_cpu( \
	((struct buffer_control *)__get_dynamic_array(bct))->field \
	)

/*
 * Template that snapshots an entire struct buffer_control into the
 * trace buffer and formats it per-VL at print time.
 */
DECLARE_EVENT_CLASS(hfi1_bct_template,
		    TP_PROTO(struct hfi1_devdata *dd,
			     struct buffer_control *bc),
		    TP_ARGS(dd, bc),
		    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		    __dynamic_array(u8, bct, sizeof(*bc))
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(dd);
				   memcpy(__get_dynamic_array(bct), bc,
					  sizeof(*bc));
		    ),
		    TP_printk(BCT_FORMAT,
			      BCT(overall_shared_limit),

			      BCT(vl[0].dedicated),
			      BCT(vl[0].shared),

			      BCT(vl[1].dedicated),
			      BCT(vl[1].shared),

			      BCT(vl[2].dedicated),
			      BCT(vl[2].shared),

			      BCT(vl[3].dedicated),
			      BCT(vl[3].shared),

			      BCT(vl[4].dedicated),
			      BCT(vl[4].shared),

			      BCT(vl[5].dedicated),
			      BCT(vl[5].shared),

			      BCT(vl[6].dedicated),
			      BCT(vl[6].shared),

			      BCT(vl[7].dedicated),
			      BCT(vl[7].shared),

			      BCT(vl[15].dedicated),
			      BCT(vl[15].shared)
		    )
);
845 
/* Buffer-control table write and read instances. */
DEFINE_EVENT(hfi1_bct_template, bct_set,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));

DEFINE_EVENT(hfi1_bct_template, bct_get,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));
853 
/*
 * Send-WQE completion: QP number/type plus the work request's id, ssn,
 * length, opcode and send flags; 'idx' is the caller-supplied slot index.
 */
TRACE_EVENT(
	hfi1_qp_send_completion,
	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
	TP_ARGS(qp, wqe, idx),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(struct rvt_swqe *, wqe)
		__field(u64, wr_id)
		__field(u32, qpn)
		__field(u32, qpt)
		__field(u32, length)
		__field(u32, idx)
		__field(u32, ssn)
		__field(enum ib_wr_opcode, opcode)
		__field(int, send_flags)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->wqe = wqe;
		__entry->wr_id = wqe->wr.wr_id;
		__entry->qpn = qp->ibqp.qp_num;
		__entry->qpt = qp->ibqp.qp_type;
		__entry->length = wqe->length;
		__entry->idx = idx;
		__entry->ssn = wqe->ssn;
		__entry->opcode = wqe->wr.opcode;
		__entry->send_flags = wqe->wr.send_flags;
	),
	TP_printk(
		"[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
		__get_str(dev),
		__entry->qpn,
		__entry->qpt,
		__entry->wqe,
		__entry->idx,
		__entry->wr_id,
		__entry->length,
		__entry->ssn,
		__entry->opcode,
		__entry->send_flags
	)
);
896 
/* Template for do-send path events: QP number plus a caller boolean. */
DECLARE_EVENT_CLASS(
	hfi1_do_send_template,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(bool, flag)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->qpn = qp->ibqp.qp_num;
		__entry->flag = flag;
	),
	TP_printk(
		"[%s] qpn %x flag %d",
		__get_str(dev),
		__entry->qpn,
		__entry->flag
	)
);
918 
/* Do-send instances: RC send, RC TID send, and time-slice expiry. */
DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_do_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(/* event */
	hfi1_do_send_template, hfi1_rc_do_tid_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_expired_time_slice,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);
936 
/*
 * Template for AIP (accelerated IPoIB) TX queue events: ring head/tail,
 * in-flight count ('used' = sent minus completed txreqs), flow word,
 * stop/no-descriptor counters, and the netdev subqueue stopped state.
 */
DECLARE_EVENT_CLASS(/* AIP  */
	hfi1_ipoib_txq_template,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(txq->priv->dd)
		__field(struct hfi1_ipoib_txq *, txq)
		__field(struct sdma_engine *, sde)
		__field(ulong, head)
		__field(ulong, tail)
		__field(uint, used)
		__field(uint, flow)
		__field(int, stops)
		__field(int, no_desc)
		__field(u8, idx)
		__field(u8, stopped)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(txq->priv->dd)
		__entry->txq = txq;
		__entry->sde = txq->sde;
		__entry->head = txq->tx_ring.head;
		__entry->tail = txq->tx_ring.tail;
		__entry->idx = txq->q_idx;
		__entry->used =
			txq->sent_txreqs -
			atomic64_read(&txq->complete_txreqs);
		__entry->flow = txq->flow.as_int;
		__entry->stops = atomic_read(&txq->stops);
		__entry->no_desc = atomic_read(&txq->no_desc);
		__entry->stopped =
		 __netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
	),
	TP_printk(/* print  */
		"[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
		__get_str(dev),
		(unsigned long long)__entry->txq,
		__entry->idx,
		(unsigned long long)__entry->sde,
		__entry->head,
		__entry->tail,
		__entry->flow,
		__entry->used,
		__entry->stops,
		__entry->no_desc,
		__entry->stopped
	)
);
985 
/* IPoIB TX queue lifecycle instances, all sharing the template above. */
DEFINE_EVENT(/* queue stop */
	hfi1_ipoib_txq_template, hfi1_txq_stop,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* queue wake */
	hfi1_ipoib_txq_template, hfi1_txq_wake,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* flow flush */
	hfi1_ipoib_txq_template, hfi1_flow_flush,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* flow switch */
	hfi1_ipoib_txq_template, hfi1_flow_switch,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* wakeup */
	hfi1_ipoib_txq_template, hfi1_txq_wakeup,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* full */
	hfi1_ipoib_txq_template, hfi1_txq_full,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* queued */
	hfi1_ipoib_txq_template, hfi1_txq_queued,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* xmit_stopped */
	hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* xmit_unstopped */
	hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);
1039 
1040 #endif /* __HFI1_TRACE_TX_H */
1041 
1042 #undef TRACE_INCLUDE_PATH
1043 #undef TRACE_INCLUDE_FILE
1044 #define TRACE_INCLUDE_PATH .
1045 #define TRACE_INCLUDE_FILE trace_tx
1046 #include <trace/define_trace.h>
1047