xref: /linux/drivers/infiniband/hw/hfi1/trace_tx.h (revision 0be3ff0c)
1 /* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
2 /*
3  * Copyright(c) 2015 - 2017 Intel Corporation.
4  */
5 #if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define __HFI1_TRACE_TX_H
7 
8 #include <linux/tracepoint.h>
9 #include <linux/trace_seq.h>
10 
11 #include "hfi.h"
12 #include "mad.h"
13 #include "sdma.h"
14 #include "ipoib.h"
15 #include "user_sdma.h"
16 
/*
 * Decode the flag bits of an SDMA descriptor pair into a printable
 * string (implementation lives outside this header).  The wrapper macro
 * supplies 'p', the struct trace_seq pointer that the TRACE_EVENT
 * machinery makes available inside TP_printk().
 */
const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);

#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)

/* All events in this header are grouped under the "hfi1_tx" subsystem. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_tx
23 
/*
 * Per-send-context tracepoint: records the context's software index,
 * hardware context number and a caller-supplied 'extra' value.
 * (Name suggests it fires on the PIO buffer-free/credit-return path.)
 */
TRACE_EVENT(hfi1_piofree,
	    TP_PROTO(struct send_context *sc, int extra),
	    TP_ARGS(sc, extra),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
	    __field(u32, sw_index)
	    __field(u32, hw_context)
	    __field(int, extra)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
	    __entry->sw_index = sc->sw_index;
	    __entry->hw_context = sc->hw_context;
	    __entry->extra = extra;
	    ),
	    TP_printk("[%s] ctxt %u(%u) extra %d",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->extra
	    )
);
44 
/*
 * Records a request to enable/adjust the PIO credit-return interrupt
 * for a send context: the context identifiers, the 'needint' request
 * value and the resulting credit control CSR value.
 */
TRACE_EVENT(hfi1_wantpiointr,
	    TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
	    TP_ARGS(sc, needint, credit_ctrl),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
			__field(u32, sw_index)
			__field(u32, hw_context)
			__field(u32, needint)
			__field(u64, credit_ctrl)
			),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
			__entry->sw_index = sc->sw_index;
			__entry->hw_context = sc->hw_context;
			__entry->needint = needint;
			__entry->credit_ctrl = credit_ctrl;
			),
	    TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->needint,
		      (unsigned long long)__entry->credit_ctrl
		      )
);
68 
/*
 * Common template for QP sleep/wakeup events: captures the QP number,
 * the caller-supplied flags, the rvt-level s_flags, and the hfi1
 * private s_flags and iowait flags at the moment of the event.
 */
DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
		    TP_PROTO(struct rvt_qp *qp, u32 flags),
		    TP_ARGS(qp, flags),
		    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		    __field(u32, qpn)
		    __field(u32, flags)
		    __field(u32, s_flags)
		    __field(u32, ps_flags)
		    __field(unsigned long, iow_flags)
		    ),
		    TP_fast_assign(
		    DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		    __entry->flags = flags;
		    __entry->qpn = qp->ibqp.qp_num;
		    __entry->s_flags = qp->s_flags;
		    /* qp->priv is the driver-private hfi1_qp_priv */
		    __entry->ps_flags =
			((struct hfi1_qp_priv *)qp->priv)->s_flags;
		    __entry->iow_flags =
			((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
		    ),
		    TP_printk(
		    "[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
		    __get_str(dev),
		    __entry->qpn,
		    __entry->flags,
		    __entry->s_flags,
		    __entry->ps_flags,
		    __entry->iow_flags
		    )
);

/* QP transitioning to runnable (wakeup) */
DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));

/* QP blocking/going to sleep */
DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));
108 
/*
 * Records one hardware SDMA descriptor as submitted to engine 'sde':
 * the raw desc0/desc1 quadwords, the descriptor queue index 'e' and
 * the descriptor shadow pointer.  TP_printk decodes the flag, physical
 * address, generation and byte-count subfields from the raw words.
 */
TRACE_EVENT(hfi1_sdma_descriptor,
	    TP_PROTO(struct sdma_engine *sde,
		     u64 desc0,
		     u64 desc1,
		     u16 e,
		     void *descp),
		     TP_ARGS(sde, desc0, desc1, e, descp),
		     TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		     __field(void *, descp)
		     __field(u64, desc0)
		     __field(u64, desc1)
		     __field(u16, e)
		     __field(u8, idx)
		     ),
		     TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		     __entry->desc0 = desc0;
		     __entry->desc1 = desc1;
		     __entry->idx = sde->this_idx;
		     __entry->descp = descp;
		     __entry->e = e;
		     ),
	    TP_printk(
	    "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
	    __get_str(dev),
	    __entry->idx,
	    __parse_sdma_flags(__entry->desc0, __entry->desc1),
	    (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
	    SDMA_DESC0_PHY_ADDR_MASK,
	    (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
	    SDMA_DESC1_GENERATION_MASK),
	    (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
	    SDMA_DESC0_BYTE_COUNT_MASK),
	    __entry->desc0,
	    __entry->desc1,
	    __entry->descp,
	    __entry->e
	    )
);
147 
/*
 * Records the outcome of SDMA engine selection: selector value 'sel',
 * virtual lane 'vl' and the chosen engine index 'idx'.
 */
TRACE_EVENT(hfi1_sdma_engine_select,
	    TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
	    TP_ARGS(dd, sel, vl, idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
	    __field(u32, sel)
	    __field(u8, vl)
	    __field(u8, idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
	    __entry->sel = sel;
	    __entry->vl = vl;
	    __entry->idx = idx;
	    ),
	    TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
		      __get_str(dev),
		      __entry->idx,
		      __entry->sel,
		      __entry->vl
		      )
);
168 
/*
 * Marks teardown of a user context's SDMA queues, identified by
 * context and subcontext number.
 */
TRACE_EVENT(hfi1_sdma_user_free_queues,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
	    TP_ARGS(dd, ctxt, subctxt),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   ),
	    TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt
		      )
);
186 
/*
 * Marks the start of processing a user SDMA request; 'comp_idx' is the
 * request/completion ring entry being used.
 */
TRACE_EVENT(hfi1_sdma_user_process_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
			   ),
	    TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx
		      )
);
208 
/*
 * Generic template for user-SDMA events that log a single u32 value
 * keyed by (ctxt, subctxt, comp_idx).  The meaning of 'value' is
 * defined by each DEFINE_EVENT below.
 */
DECLARE_EVENT_CLASS(
	hfi1_sdma_value_template,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
		 u32 value),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			 __field(u16, ctxt)
			 __field(u16, subctxt)
			 __field(u16, comp_idx)
			 __field(u32, value)
		),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		       __entry->ctxt = ctxt;
		       __entry->subctxt = subctxt;
		       __entry->comp_idx = comp_idx;
		       __entry->value = value;
		),
	TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
		  __get_str(dev),
		  __entry->ctxt,
		  __entry->subctxt,
		  __entry->comp_idx,
		  __entry->value
		)
);

/* value = initial TID offset of the request */
DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 tidoffset),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));

/* value = total data length of the request */
DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

/* value = computed per-packet data length */
DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
249 
/*
 * Records the expected-TID parameters of a user SDMA request: byte
 * offset into the TID buffer, offset in units, and the offset-mode
 * shift ("om") used to scale it.
 */
TRACE_EVENT(hfi1_sdma_user_tid_info,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
			     __field(u32, tidoffset)
			     __field(u32, units)
			     __field(u8, shift)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
			   __entry->tidoffset = tidoffset;
			   __entry->units = units;
			   __entry->shift = shift;
			   ),
	    TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx,
		      __entry->tidoffset,
		      __entry->units,
		      __entry->shift
		      )
);
280 
/*
 * Logs arrival of a user SDMA write: 'dim' is the iovec count of the
 * incoming request.
 */
TRACE_EVENT(hfi1_sdma_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     unsigned long dim),
	    TP_ARGS(dd, ctxt, subctxt, dim),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(unsigned long, dim)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->dim = dim;
			   ),
	    TP_printk("[%s] SDMA from %u:%u (%lu)",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->dim
		      )
);
302 
/*
 * Template for per-SDMA-engine events carrying a raw 64-bit status
 * word alongside the engine index.
 */
DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
		    TP_PROTO(struct sdma_engine *sde, u64 status),
		    TP_ARGS(sde, status),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(u64, status)
		    __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->status = status;
		    __entry->idx = sde->this_idx;
		    ),
		    TP_printk("[%s] SDE(%u) status %llx",
			      __get_str(dev),
			      __entry->idx,
			      (unsigned long long)__entry->status
			      )
);

/* engine interrupt with the interrupt status */
DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status)
);

/* engine progress check with the current status */
DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status)
);
330 
/*
 * Template for AHG (Automatic Header Generation) index allocate /
 * deallocate events on an SDMA engine.
 */
DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
		    TP_PROTO(struct sdma_engine *sde, int aidx),
		    TP_ARGS(sde, aidx),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(int, aidx)
		    __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->idx = sde->this_idx;
		    __entry->aidx = aidx;
		    ),
		    TP_printk("[%s] SDE(%u) aidx %d",
			      __get_str(dev),
			      __entry->idx,
			      __entry->aidx
			      )
);

/* AHG index allocated */
DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));

/* AHG index released */
DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));
356 
/*
 * SDMA progress snapshot: hardware vs software head, next descriptor
 * queue index of the pending txreq (~0 when none), and the engine's
 * tx ring head/tail.  When CONFIG_HFI1_DEBUG_SDMA_ORDER is set, the
 * event additionally records the txreq sequence number 'sn' so
 * completion ordering can be debugged; both variants must keep the
 * same name so callers compile either way.
 */
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead,
		     u16 swhead,
		     struct sdma_txreq *txp
		     ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
	    __field(u64, sn)
	    __field(u16, hwhead)
	    __field(u16, swhead)
	    __field(u16, txnext)
	    __field(u16, tx_tail)
	    __field(u16, tx_head)
	    __field(u8, idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
	    __entry->hwhead = hwhead;
	    __entry->swhead = swhead;
	    __entry->tx_tail = sde->tx_tail;
	    __entry->tx_head = sde->tx_head;
	    __entry->txnext = txp ? txp->next_descq_idx : ~0;
	    __entry->idx = sde->this_idx;
	    __entry->sn = txp ? txp->sn : ~0;
	    ),
	    TP_printk(
	    "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
	    __get_str(dev),
	    __entry->idx,
	    __entry->sn,
	    __entry->hwhead,
	    __entry->swhead,
	    __entry->txnext,
	    __entry->tx_head,
	    __entry->tx_tail
	    )
);
#else
/* Same event without the per-txreq sequence number. */
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead, u16 swhead,
		     struct sdma_txreq *txp
		     ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(u16, hwhead)
		    __field(u16, swhead)
		    __field(u16, txnext)
		    __field(u16, tx_tail)
		    __field(u16, tx_head)
		    __field(u8, idx)
		    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->hwhead = hwhead;
		    __entry->swhead = swhead;
		    __entry->tx_tail = sde->tx_tail;
		    __entry->tx_head = sde->tx_head;
		    __entry->txnext = txp ? txp->next_descq_idx : ~0;
		    __entry->idx = sde->this_idx;
		    ),
	    TP_printk(
		    "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
		    __get_str(dev),
		    __entry->idx,
		    __entry->hwhead,
		    __entry->swhead,
		    __entry->txnext,
		    __entry->tx_head,
		    __entry->tx_tail
	    )
);
#endif
430 
/*
 * Template for events that record an SDMA txreq sequence number on a
 * given engine (used to track in/out ordering).
 */
DECLARE_EVENT_CLASS(hfi1_sdma_sn,
		    TP_PROTO(struct sdma_engine *sde, u64 sn),
		    TP_ARGS(sde, sn),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(u64, sn)
		    __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->sn = sn;
		    __entry->idx = sde->this_idx;
		    ),
		    TP_printk("[%s] SDE(%u) sn %llu",
			      __get_str(dev),
			      __entry->idx,
			      __entry->sn
			      )
);

/* sequence number leaving the engine */
DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
	     TP_PROTO(
	     struct sdma_engine *sde,
	     u64 sn
	     ),
	     TP_ARGS(sde, sn)
);

/* sequence number entering the engine */
DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
	     TP_PROTO(struct sdma_engine *sde, u64 sn),
	     TP_ARGS(sde, sn)
);
461 
/* Format for a full user-SDMA packet header dump (PBC/LRH/BTH/KDETH). */
#define USDMA_HDR_FORMAT \
	"[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"

/*
 * Dumps every word of a user SDMA packet header, byte-swapped to host
 * order per field endianness (PBC/KDETH little-endian, LRH/BTH
 * big-endian).  Note: TP_printk deliberately prints pbc1 before pbc0
 * to match the USDMA_HDR_FORMAT layout.
 */
TRACE_EVENT(hfi1_sdma_user_header,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     struct hfi1_pkt_header *hdr, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, req)
		    __field(u32, pbc0)
		    __field(u32, pbc1)
		    __field(u32, lrh0)
		    __field(u32, lrh1)
		    __field(u32, bth0)
		    __field(u32, bth1)
		    __field(u32, bth2)
		    __field(u32, kdeth0)
		    __field(u32, kdeth1)
		    __field(u32, kdeth2)
		    __field(u32, kdeth3)
		    __field(u32, kdeth4)
		    __field(u32, kdeth5)
		    __field(u32, kdeth6)
		    __field(u32, kdeth7)
		    __field(u32, kdeth8)
		    __field(u32, tidval)
		    ),
		    TP_fast_assign(
		    __le32 *pbc = (__le32 *)hdr->pbc;
		    __be32 *lrh = (__be32 *)hdr->lrh;
		    __be32 *bth = (__be32 *)hdr->bth;
		    __le32 *kdeth = (__le32 *)&hdr->kdeth;

		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->req = req;
		    __entry->pbc0 = le32_to_cpu(pbc[0]);
		    __entry->pbc1 = le32_to_cpu(pbc[1]);
		    __entry->lrh0 = be32_to_cpu(lrh[0]);
		    __entry->lrh1 = be32_to_cpu(lrh[1]);
		    __entry->bth0 = be32_to_cpu(bth[0]);
		    __entry->bth1 = be32_to_cpu(bth[1]);
		    __entry->bth2 = be32_to_cpu(bth[2]);
		    __entry->kdeth0 = le32_to_cpu(kdeth[0]);
		    __entry->kdeth1 = le32_to_cpu(kdeth[1]);
		    __entry->kdeth2 = le32_to_cpu(kdeth[2]);
		    __entry->kdeth3 = le32_to_cpu(kdeth[3]);
		    __entry->kdeth4 = le32_to_cpu(kdeth[4]);
		    __entry->kdeth5 = le32_to_cpu(kdeth[5]);
		    __entry->kdeth6 = le32_to_cpu(kdeth[6]);
		    __entry->kdeth7 = le32_to_cpu(kdeth[7]);
		    __entry->kdeth8 = le32_to_cpu(kdeth[8]);
		    __entry->tidval = tidval;
	    ),
	    TP_printk(USDMA_HDR_FORMAT,
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->req,
		      __entry->pbc1,
		      __entry->pbc0,
		      __entry->lrh0,
		      __entry->lrh1,
		      __entry->bth0,
		      __entry->bth1,
		      __entry->bth2,
		      __entry->kdeth0,
		      __entry->kdeth1,
		      __entry->kdeth2,
		      __entry->kdeth3,
		      __entry->kdeth4,
		      __entry->kdeth5,
		      __entry->kdeth6,
		      __entry->kdeth7,
		      __entry->kdeth8,
		      __entry->tidval
	    )
);
544 
#define SDMA_UREQ_FMT \
	"[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
/*
 * Decodes the userspace request-info header, passed in as an array of
 * four u16 words: word 0 packs version/opcode (low byte) and iovec
 * count (high byte); words 1-3 are packet count, fragment size and
 * completion index.
 */
TRACE_EVENT(hfi1_sdma_user_reqinfo,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
	    TP_ARGS(dd, ctxt, subctxt, i),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u8, ver_opcode)
		    __field(u8, iovcnt)
		    __field(u16, npkts)
		    __field(u16, fragsize)
		    __field(u16, comp_idx)
	    ),
	    TP_fast_assign(
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->ver_opcode = i[0] & 0xff;
		    __entry->iovcnt = (i[0] >> 8) & 0xff;
		    __entry->npkts = i[1];
		    __entry->fragsize = i[2];
		    __entry->comp_idx = i[3];
	    ),
	    TP_printk(SDMA_UREQ_FMT,
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->ver_opcode,
		      __entry->iovcnt,
		      __entry->npkts,
		      __entry->fragsize,
		      __entry->comp_idx
		      )
);
581 
/* Map the numeric completion-state enum values to their names. */
#define usdma_complete_name(st) { st, #st }
#define show_usdma_complete_state(st)			\
	__print_symbolic(st,				\
			usdma_complete_name(FREE),	\
			usdma_complete_name(QUEUED),	\
			usdma_complete_name(COMPLETE), \
			usdma_complete_name(ERROR))

/*
 * Logs completion of a user SDMA request: ring index, symbolic state
 * (FREE/QUEUED/COMPLETE/ERROR) and the status/error code.
 */
TRACE_EVENT(hfi1_sdma_user_completion,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
		     u8 state, int code),
	    TP_ARGS(dd, ctxt, subctxt, idx, state, code),
	    TP_STRUCT__entry(
	    DD_DEV_ENTRY(dd)
	    __field(u16, ctxt)
	    __field(u8, subctxt)
	    __field(u16, idx)
	    __field(u8, state)
	    __field(int, code)
	    ),
	    TP_fast_assign(
	    DD_DEV_ASSIGN(dd);
	    __entry->ctxt = ctxt;
	    __entry->subctxt = subctxt;
	    __entry->idx = idx;
	    __entry->state = state;
	    __entry->code = code;
	    ),
	    TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
		      __get_str(dev), __entry->ctxt, __entry->subctxt,
		      __entry->idx, show_usdma_complete_state(__entry->state),
		      __entry->code)
);
615 
/*
 * Records a user SDMA request being deferred onto an engine's iowait
 * queue: the packet queue, engine, wait object, engine index and
 * whether the wait list is currently empty.
 */
TRACE_EVENT(hfi1_usdma_defer,
	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		     struct sdma_engine *sde,
		     struct iowait *wait),
	    TP_ARGS(pq, sde, wait),
	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
			     __field(struct hfi1_user_sdma_pkt_q *, pq)
			     __field(struct sdma_engine *, sde)
			     __field(struct iowait *, wait)
			     __field(int, engine)
			     __field(int, empty)
			     ),
	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
			    __entry->pq = pq;
			    __entry->sde = sde;
			    __entry->wait = wait;
			    __entry->engine = sde->this_idx;
			    __entry->empty = list_empty(&__entry->wait->list);
			    ),
	     TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
		       __get_str(dev),
		       (unsigned long long)__entry->pq,
		       (unsigned long long)__entry->sde,
		       (unsigned long long)__entry->wait,
		       __entry->engine,
		       __entry->empty
		)
);
644 
/*
 * Records (re)activation of a deferred user SDMA packet queue, with a
 * caller-supplied reason code.
 */
TRACE_EVENT(hfi1_usdma_activate,
	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		     struct iowait *wait,
		     int reason),
	    TP_ARGS(pq, wait, reason),
	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
			     __field(struct hfi1_user_sdma_pkt_q *, pq)
			     __field(struct iowait *, wait)
			     __field(int, reason)
			     ),
	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
			    __entry->pq = pq;
			    __entry->wait = wait;
			    __entry->reason = reason;
			    ),
	     TP_printk("[%s] pq %llx wait %llx reason %d",
		       __get_str(dev),
		       (unsigned long long)__entry->pq,
		       (unsigned long long)__entry->wait,
		       __entry->reason
		)
);
667 
/*
 * Records the result of a wait-event check on a user SDMA packet
 * queue: the queue's current state and the wait_event return value.
 */
TRACE_EVENT(hfi1_usdma_we,
	    TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
		     int we_ret),
	    TP_ARGS(pq, we_ret),
	    TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
			     __field(struct hfi1_user_sdma_pkt_q *, pq)
			     __field(int, state)
			     __field(int, we_ret)
			     ),
	     TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
			    __entry->pq = pq;
			    __entry->state = pq->state;
			    __entry->we_ret = we_ret;
			    ),
	     TP_printk("[%s] pq %llx state %d we_ret %d",
		       __get_str(dev),
		       (unsigned long long)__entry->pq,
		       __entry->state,
		       __entry->we_ret
		)
);
689 
/* Pretty-printer for a u32 array; implemented outside this header. */
const char *print_u32_array(struct trace_seq *, u32 *, int);
#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)

/*
 * Dumps the AHG (Automatic Header Generation) update words for a user
 * SDMA request.  Up to 10 u32 words are copied into the entry; 'len'
 * is the number actually valid (caller must keep len <= 10 or the
 * memcpy overruns the fixed __array).
 */
TRACE_EVENT(hfi1_sdma_user_header_ahg,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
	    TP_STRUCT__entry(
	    DD_DEV_ENTRY(dd)
	    __field(u16, ctxt)
	    __field(u8, subctxt)
	    __field(u16, req)
	    __field(u8, sde)
	    __field(u8, idx)
	    __field(int, len)
	    __field(u32, tidval)
	    __array(u32, ahg, 10)
	    ),
	    TP_fast_assign(
	    DD_DEV_ASSIGN(dd);
	    __entry->ctxt = ctxt;
	    __entry->subctxt = subctxt;
	    __entry->req = req;
	    __entry->sde = sde;
	    __entry->idx = ahgidx;
	    __entry->len = len;
	    __entry->tidval = tidval;
	    memcpy(__entry->ahg, ahg, len * sizeof(u32));
	    ),
	    TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->req,
		      __entry->sde,
		      __entry->idx,
		      __entry->len - 1,
		      __print_u32_hex(__entry->ahg, __entry->len),
		      __entry->tidval
		      )
);
731 
/*
 * Records an SDMA engine state-machine transition as two strings:
 * the current state and the new state.
 */
TRACE_EVENT(hfi1_sdma_state,
	    TP_PROTO(struct sdma_engine *sde,
		     const char *cstate,
		     const char *nstate
		     ),
	    TP_ARGS(sde, cstate, nstate),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__string(curstate, cstate)
		__string(newstate, nstate)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__assign_str(curstate, cstate);
		__assign_str(newstate, nstate);
	    ),
	    TP_printk("[%s] current state %s new state %s",
		      __get_str(dev),
		      __get_str(curstate),
		      __get_str(newstate)
	    )
);
752 
/* Buffer-control table dump: shared limit, per-VL 0-7 pairs, then VL15. */
#define BCT_FORMAT \
	"shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"

/* Fetch one big-endian field out of the copied buffer_control blob. */
#define BCT(field) \
	be16_to_cpu( \
	((struct buffer_control *)__get_dynamic_array(bct))->field \
	)

/*
 * Template for dumping a full struct buffer_control: the raw struct
 * is copied into a dynamic array and decoded field-by-field at print
 * time via the BCT() accessor.
 */
DECLARE_EVENT_CLASS(hfi1_bct_template,
		    TP_PROTO(struct hfi1_devdata *dd,
			     struct buffer_control *bc),
		    TP_ARGS(dd, bc),
		    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		    __dynamic_array(u8, bct, sizeof(*bc))
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(dd);
				   memcpy(__get_dynamic_array(bct), bc,
					  sizeof(*bc));
		    ),
		    TP_printk(BCT_FORMAT,
			      BCT(overall_shared_limit),

			      BCT(vl[0].dedicated),
			      BCT(vl[0].shared),

			      BCT(vl[1].dedicated),
			      BCT(vl[1].shared),

			      BCT(vl[2].dedicated),
			      BCT(vl[2].shared),

			      BCT(vl[3].dedicated),
			      BCT(vl[3].shared),

			      BCT(vl[4].dedicated),
			      BCT(vl[4].shared),

			      BCT(vl[5].dedicated),
			      BCT(vl[5].shared),

			      BCT(vl[6].dedicated),
			      BCT(vl[6].shared),

			      BCT(vl[7].dedicated),
			      BCT(vl[7].shared),

			      BCT(vl[15].dedicated),
			      BCT(vl[15].shared)
		    )
);

/* buffer-control table written to hardware */
DEFINE_EVENT(hfi1_bct_template, bct_set,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));

/* buffer-control table read back from hardware */
DEFINE_EVENT(hfi1_bct_template, bct_get,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));
811 
/*
 * Records completion of a send work queue entry on a QP: WQE pointer
 * and index, wr_id, QP number/type, payload length, send sequence
 * number, opcode and send flags.
 */
TRACE_EVENT(
	hfi1_qp_send_completion,
	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
	TP_ARGS(qp, wqe, idx),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(struct rvt_swqe *, wqe)
		__field(u64, wr_id)
		__field(u32, qpn)
		__field(u32, qpt)
		__field(u32, length)
		__field(u32, idx)
		__field(u32, ssn)
		__field(enum ib_wr_opcode, opcode)
		__field(int, send_flags)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->wqe = wqe;
		__entry->wr_id = wqe->wr.wr_id;
		__entry->qpn = qp->ibqp.qp_num;
		__entry->qpt = qp->ibqp.qp_type;
		__entry->length = wqe->length;
		__entry->idx = idx;
		__entry->ssn = wqe->ssn;
		__entry->opcode = wqe->wr.opcode;
		__entry->send_flags = wqe->wr.send_flags;
	),
	TP_printk(
		"[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
		__get_str(dev),
		__entry->qpn,
		__entry->qpt,
		__entry->wqe,
		__entry->idx,
		__entry->wr_id,
		__entry->length,
		__entry->ssn,
		__entry->opcode,
		__entry->send_flags
	)
);
854 
/*
 * Template for send-path events that log just a QP number and a
 * boolean flag (the flag's meaning depends on the specific event).
 */
DECLARE_EVENT_CLASS(
	hfi1_do_send_template,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(bool, flag)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->flag = flag;
	),
	TP_printk(
		"[%s] qpn %x flag %d",
		__get_str(dev),
		__entry->qpn,
		__entry->flag
	)
);

/* RC send engine invocation */
DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_do_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(/* event */
	hfi1_do_send_template, hfi1_rc_do_tid_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

/* send engine time slice expired */
DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_expired_time_slice,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);
894 
/*
 * Template snapshotting an AIP (accelerated IPoIB) tx queue: ring
 * head/tail, in-flight count (sent - complete), flow word, stop and
 * no-descriptor counters, queue index and netdev subqueue stopped
 * state.
 */
DECLARE_EVENT_CLASS(/* AIP  */
	hfi1_ipoib_txq_template,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(txq->priv->dd)
		__field(struct hfi1_ipoib_txq *, txq)
		__field(struct sdma_engine *, sde)
		__field(ulong, head)
		__field(ulong, tail)
		__field(uint, used)
		__field(uint, flow)
		__field(int, stops)
		__field(int, no_desc)
		__field(u8, idx)
		__field(u8, stopped)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(txq->priv->dd);
		__entry->txq = txq;
		__entry->sde = txq->sde;
		__entry->head = txq->tx_ring.head;
		__entry->tail = txq->tx_ring.tail;
		__entry->idx = txq->q_idx;
		/* outstanding txreqs not yet completed */
		__entry->used =
			txq->tx_ring.sent_txreqs -
			txq->tx_ring.complete_txreqs;
		__entry->flow = txq->flow.as_int;
		__entry->stops = atomic_read(&txq->tx_ring.stops);
		__entry->no_desc = atomic_read(&txq->tx_ring.no_desc);
		__entry->stopped =
		 __netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
	),
	TP_printk(/* print  */
		"[%s] txq %llx idx %u sde %llx:%u cpu %d head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
		__get_str(dev),
		(unsigned long long)__entry->txq,
		__entry->idx,
		(unsigned long long)__entry->sde,
		__entry->sde ? __entry->sde->this_idx : 0,
		__entry->sde ? __entry->sde->cpu : 0,
		__entry->head,
		__entry->tail,
		__entry->flow,
		__entry->used,
		__entry->stops,
		__entry->no_desc,
		__entry->stopped
	)
);

DEFINE_EVENT(/* queue stop */
	hfi1_ipoib_txq_template, hfi1_txq_stop,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* queue wake */
	hfi1_ipoib_txq_template, hfi1_txq_wake,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* flow flush */
	hfi1_ipoib_txq_template, hfi1_flow_flush,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* flow switch */
	hfi1_ipoib_txq_template, hfi1_flow_switch,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* wakeup */
	hfi1_ipoib_txq_template, hfi1_txq_wakeup,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* full */
	hfi1_ipoib_txq_template, hfi1_txq_full,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* queued */
	hfi1_ipoib_txq_template, hfi1_txq_queued,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* xmit_stopped */
	hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* xmit_unstopped */
	hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);
999 
/*
 * Template for per-txreq AIP events: records the txreq, its queue and
 * queue index, the associated skb, and the caller-supplied ring index.
 */
DECLARE_EVENT_CLASS(/* AIP  */
	hfi1_ipoib_tx_template,
	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
	TP_ARGS(tx, idx),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(tx->txq->priv->dd)
		__field(struct ipoib_txreq *, tx)
		__field(struct hfi1_ipoib_txq *, txq)
		__field(struct sk_buff *, skb)
		__field(ulong, idx)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(tx->txq->priv->dd);
		__entry->tx = tx;
		__entry->skb = tx->skb;
		__entry->txq = tx->txq;
		__entry->idx = idx;
	),
	TP_printk(/* print  */
		"[%s] tx %llx txq %llx,%u skb %llx idx %lu",
		__get_str(dev),
		(unsigned long long)__entry->tx,
		(unsigned long long)__entry->txq,
		__entry->txq ? __entry->txq->q_idx : 0,
		(unsigned long long)__entry->skb,
		__entry->idx
	)
);

DEFINE_EVENT(/* produce */
	hfi1_ipoib_tx_template, hfi1_tx_produce,
	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
	TP_ARGS(tx, idx)
);

DEFINE_EVENT(/* consume */
	hfi1_ipoib_tx_template, hfi1_tx_consume,
	TP_PROTO(struct ipoib_txreq *tx, u32 idx),
	TP_ARGS(tx, idx)
);

DEFINE_EVENT(/* alloc_tx */
	hfi1_ipoib_txq_template, hfi1_txq_alloc_tx,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* poll */
	hfi1_ipoib_txq_template, hfi1_txq_poll,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);

DEFINE_EVENT(/* complete */
	hfi1_ipoib_txq_template, hfi1_txq_complete,
	TP_PROTO(struct hfi1_ipoib_txq *txq),
	TP_ARGS(txq)
);
1058 
1059 #endif /* __HFI1_TRACE_TX_H */
1060 
/*
 * Standard tracepoint boilerplate: point define_trace.h back at this
 * header (relative path) so it can re-read it under
 * TRACE_HEADER_MULTI_READ and instantiate the event code.
 */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_tx
#include <trace/define_trace.h>
1066