drivers/infiniband/hw/hfi1/trace_tx.h
/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_TX_H

#include <linux/tracepoint.h>
#include <linux/trace_seq.h>

#include "hfi.h"
#include "mad.h"
#include "sdma.h"

const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);

#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)

#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_tx

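/*
 * hfi1_piofree - PIO send context credit return.  Records the send
 * context's software index and hardware context along with the "extra"
 * value passed by the caller.
 */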
TRACE_EVENT(hfi1_piofree,
	TP_PROTO(struct send_context *sc, int extra),
	TP_ARGS(sc, extra),
	TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
		__field(u32, sw_index)
		__field(u32, hw_context)
		__field(int, extra)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
		__entry->sw_index = sc->sw_index;
		__entry->hw_context = sc->hw_context;
		__entry->extra = extra;
	),
	TP_printk("[%s] ctxt %u(%u) extra %d",
		__get_str(dev),
		__entry->sw_index,
		__entry->hw_context,
		__entry->extra
	)
);

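/*
 * hfi1_wantpiointr - PIO credit-return interrupt request.  Records the
 * send context, whether the interrupt is being requested (needint), and
 * the resulting credit_ctrl value.
 */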
TRACE_EVENT(hfi1_wantpiointr,
	TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
	TP_ARGS(sc, needint, credit_ctrl),
	TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
		__field(u32, sw_index)
		__field(u32, hw_context)
		__field(u32, needint)
		__field(u64, credit_ctrl)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
		__entry->sw_index = sc->sw_index;
		__entry->hw_context = sc->hw_context;
		__entry->needint = needint;
		__entry->credit_ctrl = credit_ctrl;
	),
	TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
		__get_str(dev),
		__entry->sw_index,
		__entry->hw_context,
		__entry->needint,
		(unsigned long long)__entry->credit_ctrl
	)
);

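/*
 * QP sleep/wakeup events: record the QP number, the flag bits passed by
 * the caller, and the QP's current s_flags.
 */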
DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
	TP_PROTO(struct rvt_qp *qp, u32 flags),
	TP_ARGS(qp, flags),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u32, flags)
		__field(u32, s_flags)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->flags = flags;
		__entry->qpn = qp->ibqp.qp_num;
		__entry->s_flags = qp->s_flags;
	),
	TP_printk(
		"[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
		__get_str(dev),
		__entry->qpn,
		__entry->flags,
		__entry->s_flags
	)
);

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
	TP_PROTO(struct rvt_qp *qp, u32 flags),
	TP_ARGS(qp, flags));

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
	TP_PROTO(struct rvt_qp *qp, u32 flags),
	TP_ARGS(qp, flags));

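/*
 * hfi1_sdma_descriptor - dump of a hardware SDMA descriptor pair.  The
 * flags, physical address, generation, and byte count are decoded from
 * desc0/desc1; descp and e are the in-memory descriptor location and
 * index reported by the caller.
 */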
TRACE_EVENT(hfi1_sdma_descriptor,
	TP_PROTO(struct sdma_engine *sde,
		u64 desc0,
		u64 desc1,
		u16 e,
		void *descp),
	TP_ARGS(sde, desc0, desc1, e, descp),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(void *, descp)
		__field(u64, desc0)
		__field(u64, desc1)
		__field(u16, e)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->desc0 = desc0;
		__entry->desc1 = desc1;
		__entry->idx = sde->this_idx;
		__entry->descp = descp;
		__entry->e = e;
	),
	TP_printk(
		"[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
		__get_str(dev),
		__entry->idx,
		__parse_sdma_flags(__entry->desc0, __entry->desc1),
		(__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
			SDMA_DESC0_PHY_ADDR_MASK,
		(u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
			SDMA_DESC1_GENERATION_MASK),
		(u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
			SDMA_DESC0_BYTE_COUNT_MASK),
		__entry->desc0,
		__entry->desc1,
		__entry->descp,
		__entry->e
	)
);

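/*
 * hfi1_sdma_engine_select - which SDMA engine (idx) was chosen for a
 * given selector value and VL.
 */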
TRACE_EVENT(hfi1_sdma_engine_select,
	TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
	TP_ARGS(dd, sel, vl, idx),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u32, sel)
		__field(u8, vl)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->sel = sel;
		__entry->vl = vl;
		__entry->idx = idx;
	),
	TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
		__get_str(dev),
		__entry->idx,
		__entry->sel,
		__entry->vl
	)
);

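/*
 * User SDMA tracepoints: per-context/subcontext events for the user
 * SDMA request path.
 */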
TRACE_EVENT(hfi1_sdma_user_free_queues,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
	TP_ARGS(dd, ctxt, subctxt),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u16, subctxt)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
	),
	TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt
	)
);

TRACE_EVENT(hfi1_sdma_user_process_request,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		u16 comp_idx),
	TP_ARGS(dd, ctxt, subctxt, comp_idx),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u16, subctxt)
		__field(u16, comp_idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->comp_idx = comp_idx;
	),
	TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->comp_idx
	)
);

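/*
 * hfi1_sdma_value_template - generic [ctxt:subctxt:comp_idx] -> value
 * event, reused for the initial TID offset, the data length, and the
 * computed length of a user SDMA request.
 */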
DECLARE_EVENT_CLASS(
	hfi1_sdma_value_template,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
		u32 value),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u16, subctxt)
		__field(u16, comp_idx)
		__field(u32, value)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->comp_idx = comp_idx;
		__entry->value = value;
	),
	TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->comp_idx,
		__entry->value
	)
);

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		u16 comp_idx, u32 tidoffset),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		u16 comp_idx, u32 data_len),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		u16 comp_idx, u32 data_len),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

TRACE_EVENT(hfi1_sdma_user_tid_info,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u16, subctxt)
		__field(u16, comp_idx)
		__field(u32, tidoffset)
		__field(u32, units)
		__field(u8, shift)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->comp_idx = comp_idx;
		__entry->tidoffset = tidoffset;
		__entry->units = units;
		__entry->shift = shift;
	),
	TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->comp_idx,
		__entry->tidoffset,
		__entry->units,
		__entry->shift
	)
);

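/*
 * hfi1_sdma_request - entry to the user SDMA request path; dim is the
 * iovec count supplied with the request.
 */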
TRACE_EVENT(hfi1_sdma_request,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		unsigned long dim),
	TP_ARGS(dd, ctxt, subctxt, dim),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u16, subctxt)
		__field(unsigned long, dim)
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->dim = dim;
	),
	TP_printk("[%s] SDMA from %u:%u (%lu)",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->dim
	)
);

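/*
 * SDMA engine status events (interrupt and progress), keyed by engine
 * index and the raw status value.
 */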
DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
	TP_PROTO(struct sdma_engine *sde, u64 status),
	TP_ARGS(sde, status),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(u64, status)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->status = status;
		__entry->idx = sde->this_idx;
	),
	TP_printk("[%s] SDE(%u) status %llx",
		__get_str(dev),
		__entry->idx,
		(unsigned long long)__entry->status
	)
);

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
	TP_PROTO(struct sdma_engine *sde, u64 status),
	TP_ARGS(sde, status)
);

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
	TP_PROTO(struct sdma_engine *sde, u64 status),
	TP_ARGS(sde, status)
);

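/*
 * AHG (automatic header generation) index allocation and deallocation
 * per SDMA engine.
 */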
DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
	TP_PROTO(struct sdma_engine *sde, int aidx),
	TP_ARGS(sde, aidx),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(int, aidx)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->idx = sde->this_idx;
		__entry->aidx = aidx;
	),
	TP_printk("[%s] SDE(%u) aidx %d",
		__get_str(dev),
		__entry->idx,
		__entry->aidx
	)
);

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
	TP_PROTO(struct sdma_engine *sde, int aidx),
	TP_ARGS(sde, aidx));

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
	TP_PROTO(struct sdma_engine *sde, int aidx),
	TP_ARGS(sde, aidx));

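/*
 * hfi1_sdma_progress - descriptor ring progress: hardware and software
 * head indices plus the next tx request.  The
 * CONFIG_HFI1_DEBUG_SDMA_ORDER variant also records the tx sequence
 * number.
 */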
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
TRACE_EVENT(hfi1_sdma_progress,
	TP_PROTO(struct sdma_engine *sde,
		u16 hwhead,
		u16 swhead,
		struct sdma_txreq *txp
	),
	TP_ARGS(sde, hwhead, swhead, txp),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(u64, sn)
		__field(u16, hwhead)
		__field(u16, swhead)
		__field(u16, txnext)
		__field(u16, tx_tail)
		__field(u16, tx_head)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->hwhead = hwhead;
		__entry->swhead = swhead;
		__entry->tx_tail = sde->tx_tail;
		__entry->tx_head = sde->tx_head;
		__entry->txnext = txp ? txp->next_descq_idx : ~0;
		__entry->idx = sde->this_idx;
		__entry->sn = txp ? txp->sn : ~0;
	),
	TP_printk(
		"[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
		__get_str(dev),
		__entry->idx,
		__entry->sn,
		__entry->hwhead,
		__entry->swhead,
		__entry->txnext,
		__entry->tx_head,
		__entry->tx_tail
	)
);
#else
TRACE_EVENT(hfi1_sdma_progress,
	TP_PROTO(struct sdma_engine *sde,
		u16 hwhead, u16 swhead,
		struct sdma_txreq *txp
	),
	TP_ARGS(sde, hwhead, swhead, txp),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(u16, hwhead)
		__field(u16, swhead)
		__field(u16, txnext)
		__field(u16, tx_tail)
		__field(u16, tx_head)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->hwhead = hwhead;
		__entry->swhead = swhead;
		__entry->tx_tail = sde->tx_tail;
		__entry->tx_head = sde->tx_head;
		__entry->txnext = txp ? txp->next_descq_idx : ~0;
		__entry->idx = sde->this_idx;
	),
	TP_printk(
		"[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
		__get_str(dev),
		__entry->idx,
		__entry->hwhead,
		__entry->swhead,
		__entry->txnext,
		__entry->tx_head,
		__entry->tx_tail
	)
);
#endif

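/*
 * SDMA tx sequence number events (hfi1_sdma_out_sn / hfi1_sdma_in_sn),
 * logged per engine when debugging descriptor ordering.
 */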
DECLARE_EVENT_CLASS(hfi1_sdma_sn,
	TP_PROTO(struct sdma_engine *sde, u64 sn),
	TP_ARGS(sde, sn),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__field(u64, sn)
		__field(u8, idx)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__entry->sn = sn;
		__entry->idx = sde->this_idx;
	),
	TP_printk("[%s] SDE(%u) sn %llu",
		__get_str(dev),
		__entry->idx,
		__entry->sn
	)
);

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
	TP_PROTO(
		struct sdma_engine *sde,
		u64 sn
	),
	TP_ARGS(sde, sn)
);

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
	TP_PROTO(struct sdma_engine *sde, u64 sn),
	TP_ARGS(sde, sn)
);

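/*
 * hfi1_sdma_user_header - full dump of a user SDMA packet header: PBC,
 * LRH, BTH, and KDETH words plus the TID value used for the packet.
 */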
#define USDMA_HDR_FORMAT \
	"[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"

TRACE_EVENT(hfi1_sdma_user_header,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		struct hfi1_pkt_header *hdr, u32 tidval),
	TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u8, subctxt)
		__field(u16, req)
		__field(u32, pbc0)
		__field(u32, pbc1)
		__field(u32, lrh0)
		__field(u32, lrh1)
		__field(u32, bth0)
		__field(u32, bth1)
		__field(u32, bth2)
		__field(u32, kdeth0)
		__field(u32, kdeth1)
		__field(u32, kdeth2)
		__field(u32, kdeth3)
		__field(u32, kdeth4)
		__field(u32, kdeth5)
		__field(u32, kdeth6)
		__field(u32, kdeth7)
		__field(u32, kdeth8)
		__field(u32, tidval)
	),
	TP_fast_assign(
		__le32 *pbc = (__le32 *)hdr->pbc;
		__be32 *lrh = (__be32 *)hdr->lrh;
		__be32 *bth = (__be32 *)hdr->bth;
		__le32 *kdeth = (__le32 *)&hdr->kdeth;

		DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->req = req;
		__entry->pbc0 = le32_to_cpu(pbc[0]);
		__entry->pbc1 = le32_to_cpu(pbc[1]);
		__entry->lrh0 = be32_to_cpu(lrh[0]);
		__entry->lrh1 = be32_to_cpu(lrh[1]);
		__entry->bth0 = be32_to_cpu(bth[0]);
		__entry->bth1 = be32_to_cpu(bth[1]);
		__entry->bth2 = be32_to_cpu(bth[2]);
		__entry->kdeth0 = le32_to_cpu(kdeth[0]);
		__entry->kdeth1 = le32_to_cpu(kdeth[1]);
		__entry->kdeth2 = le32_to_cpu(kdeth[2]);
		__entry->kdeth3 = le32_to_cpu(kdeth[3]);
		__entry->kdeth4 = le32_to_cpu(kdeth[4]);
		__entry->kdeth5 = le32_to_cpu(kdeth[5]);
		__entry->kdeth6 = le32_to_cpu(kdeth[6]);
		__entry->kdeth7 = le32_to_cpu(kdeth[7]);
		__entry->kdeth8 = le32_to_cpu(kdeth[8]);
		__entry->tidval = tidval;
	),
	TP_printk(USDMA_HDR_FORMAT,
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->req,
		__entry->pbc1,
		__entry->pbc0,
		__entry->lrh0,
		__entry->lrh1,
		__entry->bth0,
		__entry->bth1,
		__entry->bth2,
		__entry->kdeth0,
		__entry->kdeth1,
		__entry->kdeth2,
		__entry->kdeth3,
		__entry->kdeth4,
		__entry->kdeth5,
		__entry->kdeth6,
		__entry->kdeth7,
		__entry->kdeth8,
		__entry->tidval
	)
);

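/*
 * hfi1_sdma_user_reqinfo - decode of the request info words passed from
 * user space: version/opcode, iovec count, packet count, fragment size,
 * and completion index.
 */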
#define SDMA_UREQ_FMT \
	"[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
TRACE_EVENT(hfi1_sdma_user_reqinfo,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
	TP_ARGS(dd, ctxt, subctxt, i),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd);
		__field(u16, ctxt)
		__field(u8, subctxt)
		__field(u8, ver_opcode)
		__field(u8, iovcnt)
		__field(u16, npkts)
		__field(u16, fragsize)
		__field(u16, comp_idx)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->ver_opcode = i[0] & 0xff;
		__entry->iovcnt = (i[0] >> 8) & 0xff;
		__entry->npkts = i[1];
		__entry->fragsize = i[2];
		__entry->comp_idx = i[3];
	),
	TP_printk(SDMA_UREQ_FMT,
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->ver_opcode,
		__entry->iovcnt,
		__entry->npkts,
		__entry->fragsize,
		__entry->comp_idx
	)
);

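/*
 * hfi1_sdma_user_completion - user SDMA request completion, printed
 * with the symbolic completion state (FREE/QUEUED/COMPLETE/ERROR) and
 * the completion code.
 */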
#define usdma_complete_name(st) { st, #st }
#define show_usdma_complete_state(st) \
	__print_symbolic(st, \
		usdma_complete_name(FREE), \
		usdma_complete_name(QUEUED), \
		usdma_complete_name(COMPLETE), \
		usdma_complete_name(ERROR))

TRACE_EVENT(hfi1_sdma_user_completion,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
		u8 state, int code),
	TP_ARGS(dd, ctxt, subctxt, idx, state, code),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u8, subctxt)
		__field(u16, idx)
		__field(u8, state)
		__field(int, code)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->idx = idx;
		__entry->state = state;
		__entry->code = code;
	),
	TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
		__get_str(dev), __entry->ctxt, __entry->subctxt,
		__entry->idx, show_usdma_complete_state(__entry->state),
		__entry->code)
);

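/*
 * hfi1_sdma_user_header_ahg - AHG update words used to build a packet
 * header; up to 10 words are captured and printed in hex via
 * print_u32_array().
 */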
const char *print_u32_array(struct trace_seq *, u32 *, int);
#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)

TRACE_EVENT(hfi1_sdma_user_header_ahg,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
	TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd)
		__field(u16, ctxt)
		__field(u8, subctxt)
		__field(u16, req)
		__field(u8, sde)
		__field(u8, idx)
		__field(int, len)
		__field(u32, tidval)
		__array(u32, ahg, 10)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd);
		__entry->ctxt = ctxt;
		__entry->subctxt = subctxt;
		__entry->req = req;
		__entry->sde = sde;
		__entry->idx = ahgidx;
		__entry->len = len;
		__entry->tidval = tidval;
		memcpy(__entry->ahg, ahg, len * sizeof(u32));
	),
	TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
		__get_str(dev),
		__entry->ctxt,
		__entry->subctxt,
		__entry->req,
		__entry->sde,
		__entry->idx,
		__entry->len - 1,
		__print_u32_hex(__entry->ahg, __entry->len),
		__entry->tidval
	)
);

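/*
 * hfi1_sdma_state - SDMA engine state machine transition, recording the
 * current and new state names.
 */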
TRACE_EVENT(hfi1_sdma_state,
	TP_PROTO(struct sdma_engine *sde,
		const char *cstate,
		const char *nstate
	),
	TP_ARGS(sde, cstate, nstate),
	TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__string(curstate, cstate)
		__string(newstate, nstate)
	),
	TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__assign_str(curstate, cstate);
		__assign_str(newstate, nstate);
	),
	TP_printk("[%s] current state %s new state %s",
		__get_str(dev),
		__get_str(curstate),
		__get_str(newstate)
	)
);

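/*
 * Buffer control table (BCT) events: per-VL dedicated/shared credit
 * limits plus the overall shared limit, captured on set and get.
 */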
#define BCT_FORMAT \
	"shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"

#define BCT(field) \
	be16_to_cpu( \
		((struct buffer_control *)__get_dynamic_array(bct))->field \
	)

DECLARE_EVENT_CLASS(hfi1_bct_template,
	TP_PROTO(struct hfi1_devdata *dd,
		struct buffer_control *bc),
	TP_ARGS(dd, bc),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		__dynamic_array(u8, bct, sizeof(*bc))
	),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		memcpy(__get_dynamic_array(bct), bc,
			sizeof(*bc));
	),
	TP_printk(BCT_FORMAT,
		BCT(overall_shared_limit),

		BCT(vl[0].dedicated),
		BCT(vl[0].shared),

		BCT(vl[1].dedicated),
		BCT(vl[1].shared),

		BCT(vl[2].dedicated),
		BCT(vl[2].shared),

		BCT(vl[3].dedicated),
		BCT(vl[3].shared),

		BCT(vl[4].dedicated),
		BCT(vl[4].shared),

		BCT(vl[5].dedicated),
		BCT(vl[5].shared),

		BCT(vl[6].dedicated),
		BCT(vl[6].shared),

		BCT(vl[7].dedicated),
		BCT(vl[7].shared),

		BCT(vl[15].dedicated),
		BCT(vl[15].shared)
	)
);

DEFINE_EVENT(hfi1_bct_template, bct_set,
	TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	TP_ARGS(dd, bc));

DEFINE_EVENT(hfi1_bct_template, bct_get,
	TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	TP_ARGS(dd, bc));

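/*
 * hfi1_qp_send_completion - completion of a send WQE: QP number and
 * type, WQE pointer and index, wr_id, length, SSN, opcode, and send
 * flags.
 */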
TRACE_EVENT(
	hfi1_qp_send_completion,
	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
	TP_ARGS(qp, wqe, idx),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(struct rvt_swqe *, wqe)
		__field(u64, wr_id)
		__field(u32, qpn)
		__field(u32, qpt)
		__field(u32, length)
		__field(u32, idx)
		__field(u32, ssn)
		__field(enum ib_wr_opcode, opcode)
		__field(int, send_flags)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->wqe = wqe;
		__entry->wr_id = wqe->wr.wr_id;
		__entry->qpn = qp->ibqp.qp_num;
		__entry->qpt = qp->ibqp.qp_type;
		__entry->length = wqe->length;
		__entry->idx = idx;
		__entry->ssn = wqe->ssn;
		__entry->opcode = wqe->wr.opcode;
		__entry->send_flags = wqe->wr.send_flags;
	),
	TP_printk(
		"[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
		__get_str(dev),
		__entry->qpn,
		__entry->qpt,
		__entry->wqe,
		__entry->idx,
		__entry->wr_id,
		__entry->length,
		__entry->ssn,
		__entry->opcode,
		__entry->send_flags
	)
);

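/*
 * Send engine scheduling events: record the QP number and a
 * caller-supplied flag (used by hfi1_rc_do_send and
 * hfi1_rc_expired_time_slice).
 */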
DECLARE_EVENT_CLASS(
	hfi1_do_send_template,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(bool, flag)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->qpn = qp->ibqp.qp_num;
		__entry->flag = flag;
	),
	TP_printk(
		"[%s] qpn %x flag %d",
		__get_str(dev),
		__entry->qpn,
		__entry->flag
	)
);

DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_do_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_expired_time_slice,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

#endif /* __HFI1_TRACE_TX_H */

#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_tx
#include <trace/define_trace.h>