drivers/infiniband/hw/hfi1/trace.c
/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
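/*
 * Defining CREATE_TRACE_POINTS before pulling in trace.h makes this the
 * translation unit that emits the actual tracepoint definitions.
 */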
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "exp_rcv.h"

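/*
 * Header-length helpers for the trace output.  Both variants subtract the
 * local route header and BTH sizes (8 + 12 bytes for 9B packets, 16 + 12
 * for 16B packets) from the per-opcode length in hdr_len_by_opcode[]; a
 * 16B L4 "FM" management packet has no BTH, so only the 16-byte header is
 * counted.
 */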
static u8 __get_ib_hdr_len(struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	u8 opcode;

	if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;
	opcode = ib_bth_get_opcode(ohdr);
	return hdr_len_by_opcode[opcode] == 0 ?
	       0 : hdr_len_by_opcode[opcode] - (12 + 8);
}

static u8 __get_16b_hdr_len(struct hfi1_16b_header *hdr)
{
	struct ib_other_headers *ohdr = NULL;
	u8 opcode;
	u8 l4 = hfi1_16B_get_l4(hdr);

	if (l4 == OPA_16B_L4_FM) {
		opcode = IB_OPCODE_UD_SEND_ONLY;
		return (8 + 8); /* No BTH */
	}

	if (l4 == OPA_16B_L4_IB_LOCAL)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = ib_bth_get_opcode(ohdr);
	return hdr_len_by_opcode[opcode] == 0 ?
	       0 : hdr_len_by_opcode[opcode] - (12 + 8 + 8);
}

u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet)
{
	if (packet->etype != RHF_RCV_TYPE_BYPASS)
		return __get_ib_hdr_len(packet->hdr);
	else
		return __get_16b_hdr_len(packet->hdr);
}

u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opa_hdr)
{
	if (!opa_hdr->hdr_type)
		return __get_ib_hdr_len(&opa_hdr->ibh);
	else
		return __get_16b_hdr_len(&opa_hdr->opah);
}

const char *hfi1_trace_get_packet_l4_str(u8 l4)
{
	if (l4)
		return "16B";
	else
		return "9B";
}

const char *hfi1_trace_get_packet_l2_str(u8 l2)
{
	switch (l2) {
	case 0:
		return "0";
	case 1:
		return "1";
	case 2:
		return "16B";
	case 3:
		return "9B";
	}
	return "";
}

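/*
 * printf-style fragments used below to render the various extended
 * headers into the trace buffer.
 */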
#define IMM_PRN "imm:%d"
#define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
#define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
#define DETH_PRN "deth qkey:0x%.8x sqpn:0x%.6x"
#define IETH_PRN "ieth rkey:0x%.8x"
#define ATOMICACKETH_PRN "origdata:%llx"
#define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx"
#define TID_RDMA_KDETH "kdeth0 0x%x kdeth1 0x%x"
#define TID_RDMA_KDETH_DATA "kdeth0 0x%x: kver %u sh %u intr %u tidctrl %u tid %x offset %x kdeth1 0x%x: jkey %x"
#define TID_READ_REQ_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
#define TID_READ_RSP_PRN "verbs_qp 0x%x"
#define TID_WRITE_REQ_PRN "original_qp 0x%x"
#define TID_WRITE_RSP_PRN "tid_flow_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
#define TID_WRITE_DATA_PRN "verbs_qp 0x%x"
#define TID_ACK_PRN "tid_flow_psn 0x%x verbs_psn 0x%x tid_flow_qp 0x%x verbs_qp 0x%x"
#define TID_RESYNC_PRN "verbs_qp 0x%x"

#define OP(transport, op) IB_OPCODE_## transport ## _ ## op

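/*
 * The AETH syndrome lives in the top byte of the AETH; its upper three
 * bits distinguish ACK, RNR NAK and NAK.
 */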
static const char *parse_syndrome(u8 syndrome)
{
	switch (syndrome >> 5) {
	case 0:
		return "ACK";
	case 1:
		return "RNRNAK";
	case 3:
		return "NAK";
	}
	return "";
}

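/*
 * Unpack the 9B and 16B BTH/LRH fields into plain scalars for the trace
 * events.
 */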
void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
			     u8 *ack, bool *becn, bool *fecn, u8 *mig,
			     u8 *se, u8 *pad, u8 *opcode, u8 *tver,
			     u16 *pkey, u32 *psn, u32 *qpn)
{
	*ack = ib_bth_get_ackreq(ohdr);
	*becn = ib_bth_get_becn(ohdr);
	*fecn = ib_bth_get_fecn(ohdr);
	*mig = ib_bth_get_migreq(ohdr);
	*se = ib_bth_get_se(ohdr);
	*pad = ib_bth_get_pad(ohdr);
	*opcode = ib_bth_get_opcode(ohdr);
	*tver = ib_bth_get_tver(ohdr);
	*pkey = ib_bth_get_pkey(ohdr);
	*psn = mask_psn(ib_bth_get_psn(ohdr));
	*qpn = ib_bth_get_qpn(ohdr);
}

void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
			      u8 *ack, u8 *mig, u8 *opcode,
			      u8 *pad, u8 *se, u8 *tver,
			      u32 *psn, u32 *qpn)
{
	*ack = ib_bth_get_ackreq(ohdr);
	*mig = ib_bth_get_migreq(ohdr);
	*opcode = ib_bth_get_opcode(ohdr);
	*pad = ib_bth_get_pad(ohdr);
	*se = ib_bth_get_se(ohdr);
	*tver = ib_bth_get_tver(ohdr);
	*psn = mask_psn(ib_bth_get_psn(ohdr));
	*qpn = ib_bth_get_qpn(ohdr);
}

void hfi1_trace_parse_9b_hdr(struct ib_header *hdr, bool sc5,
			     u8 *lnh, u8 *lver, u8 *sl, u8 *sc,
			     u16 *len, u32 *dlid, u32 *slid)
{
	*lnh = ib_get_lnh(hdr);
	*lver = ib_get_lver(hdr);
	*sl = ib_get_sl(hdr);
	*sc = ib_get_sc(hdr) | (sc5 << 4);
	*len = ib_get_len(hdr);
	*dlid = ib_get_dlid(hdr);
	*slid = ib_get_slid(hdr);
}

void hfi1_trace_parse_16b_hdr(struct hfi1_16b_header *hdr,
			      u8 *age, bool *becn, bool *fecn,
			      u8 *l4, u8 *rc, u8 *sc,
			      u16 *entropy, u16 *len, u16 *pkey,
			      u32 *dlid, u32 *slid)
{
	*age = hfi1_16B_get_age(hdr);
	*becn = hfi1_16B_get_becn(hdr);
	*fecn = hfi1_16B_get_fecn(hdr);
	*l4 = hfi1_16B_get_l4(hdr);
	*rc = hfi1_16B_get_rc(hdr);
	*sc = hfi1_16B_get_sc(hdr);
	*entropy = hfi1_16B_get_entropy(hdr);
	*len = hfi1_16B_get_len(hdr);
	*pkey = hfi1_16B_get_pkey(hdr);
	*dlid = hfi1_16B_get_dlid(hdr);
	*slid = hfi1_16B_get_slid(hdr);
}

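/*
 * Format the parsed LRH and BTH fields into the trace_seq.  Each helper
 * returns a pointer to the start of what it wrote and NUL-terminates it
 * with trace_seq_putc() so the result can be printed as a C string.
 */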
#define LRH_PRN "len:%d sc:%d dlid:0x%.4x slid:0x%.4x "
#define LRH_9B_PRN "lnh:%d,%s lver:%d sl:%d"
#define LRH_16B_PRN "age:%d becn:%d fecn:%d l4:%d " \
		    "rc:%d sc:%d pkey:0x%.4x entropy:0x%.4x"
const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
			       u8 age, bool becn, bool fecn, u8 l4,
			       u8 lnh, const char *lnh_name, u8 lver,
			       u8 rc, u8 sc, u8 sl, u16 entropy,
			       u16 len, u16 pkey, u32 dlid, u32 slid)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_printf(p, LRH_PRN, len, sc, dlid, slid);

	if (bypass)
		trace_seq_printf(p, LRH_16B_PRN,
				 age, becn, fecn, l4, rc, sc, pkey, entropy);

	else
		trace_seq_printf(p, LRH_9B_PRN,
				 lnh, lnh_name, lver, sl);
	trace_seq_putc(p, 0);

	return ret;
}

#define BTH_9B_PRN \
	"op:0x%.2x,%s se:%d m:%d pad:%d tver:%d pkey:0x%.4x " \
	"f:%d b:%d qpn:0x%.6x a:%d psn:0x%.8x"
#define BTH_16B_PRN \
	"op:0x%.2x,%s se:%d m:%d pad:%d tver:%d " \
	"qpn:0x%.6x a:%d psn:0x%.8x"
#define L4_FM_16B_PRN \
	"op:0x%.2x,%s dest_qpn:0x%.6x src_qpn:0x%.6x"
const char *hfi1_trace_fmt_rest(struct trace_seq *p, bool bypass, u8 l4,
				u8 ack, bool becn, bool fecn, u8 mig,
				u8 se, u8 pad, u8 opcode, const char *opname,
				u8 tver, u16 pkey, u32 psn, u32 qpn,
				u32 dest_qpn, u32 src_qpn)
{
	const char *ret = trace_seq_buffer_ptr(p);

	if (bypass)
		if (l4 == OPA_16B_L4_FM)
			trace_seq_printf(p, L4_FM_16B_PRN,
					 opcode, opname, dest_qpn, src_qpn);
		else
			trace_seq_printf(p, BTH_16B_PRN,
					 opcode, opname,
					 se, mig, pad, tver, qpn, ack, psn);

	else
		trace_seq_printf(p, BTH_9B_PRN,
				 opcode, opname,
				 se, mig, pad, tver, pkey, fecn, becn,
				 qpn, ack, psn);
	trace_seq_putc(p, 0);

	return ret;
}

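/*
 * Decode the extended verbs headers (immediate data, RETH, AETH, DETH,
 * IETH, atomic and TID RDMA headers) according to the opcode; 16B L4 "FM"
 * management packets carry none of these.
 */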
const char *parse_everbs_hdrs(
	struct trace_seq *p,
	u8 opcode, u8 l4, u32 dest_qpn, u32 src_qpn,
	void *ehdrs)
{
	union ib_ehdrs *eh = ehdrs;
	const char *ret = trace_seq_buffer_ptr(p);

	if (l4 == OPA_16B_L4_FM) {
		trace_seq_printf(p, "mgmt pkt");
		goto out;
	}

	switch (opcode) {
	/* imm */
	case OP(RC, SEND_LAST_WITH_IMMEDIATE):
	case OP(UC, SEND_LAST_WITH_IMMEDIATE):
	case OP(RC, SEND_ONLY_WITH_IMMEDIATE):
	case OP(UC, SEND_ONLY_WITH_IMMEDIATE):
	case OP(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
	case OP(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
		trace_seq_printf(p, IMM_PRN,
				 be32_to_cpu(eh->imm_data));
		break;
	/* reth + imm */
	case OP(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
	case OP(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		trace_seq_printf(p, RETH_PRN " " IMM_PRN,
				 get_ib_reth_vaddr(&eh->rc.reth),
				 be32_to_cpu(eh->rc.reth.rkey),
				 be32_to_cpu(eh->rc.reth.length),
				 be32_to_cpu(eh->rc.imm_data));
		break;
	/* reth */
	case OP(RC, RDMA_READ_REQUEST):
	case OP(RC, RDMA_WRITE_FIRST):
	case OP(UC, RDMA_WRITE_FIRST):
	case OP(RC, RDMA_WRITE_ONLY):
	case OP(UC, RDMA_WRITE_ONLY):
		trace_seq_printf(p, RETH_PRN,
				 get_ib_reth_vaddr(&eh->rc.reth),
				 be32_to_cpu(eh->rc.reth.rkey),
				 be32_to_cpu(eh->rc.reth.length));
		break;
	case OP(RC, RDMA_READ_RESPONSE_FIRST):
	case OP(RC, RDMA_READ_RESPONSE_LAST):
	case OP(RC, RDMA_READ_RESPONSE_ONLY):
	case OP(RC, ACKNOWLEDGE):
		trace_seq_printf(p, AETH_PRN, be32_to_cpu(eh->aeth) >> 24,
				 parse_syndrome(be32_to_cpu(eh->aeth) >> 24),
				 be32_to_cpu(eh->aeth) & IB_MSN_MASK);
		break;
	case OP(TID_RDMA, WRITE_REQ):
		trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
				 TID_WRITE_REQ_PRN,
				 le32_to_cpu(eh->tid_rdma.w_req.kdeth0),
				 le32_to_cpu(eh->tid_rdma.w_req.kdeth1),
				 ib_u64_get(&eh->tid_rdma.w_req.reth.vaddr),
				 be32_to_cpu(eh->tid_rdma.w_req.reth.rkey),
				 be32_to_cpu(eh->tid_rdma.w_req.reth.length),
				 be32_to_cpu(eh->tid_rdma.w_req.verbs_qp));
		break;
	case OP(TID_RDMA, WRITE_RESP):
		trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " "
				 TID_WRITE_RSP_PRN,
				 le32_to_cpu(eh->tid_rdma.w_rsp.kdeth0),
				 le32_to_cpu(eh->tid_rdma.w_rsp.kdeth1),
				 be32_to_cpu(eh->tid_rdma.w_rsp.aeth) >> 24,
				 parse_syndrome(/* aeth */
					be32_to_cpu(eh->tid_rdma.w_rsp.aeth)
					>> 24),
				 (be32_to_cpu(eh->tid_rdma.w_rsp.aeth) &
				  IB_MSN_MASK),
				 be32_to_cpu(eh->tid_rdma.w_rsp.tid_flow_psn),
				 be32_to_cpu(eh->tid_rdma.w_rsp.tid_flow_qp),
				 be32_to_cpu(eh->tid_rdma.w_rsp.verbs_qp));
		break;
	case OP(TID_RDMA, WRITE_DATA_LAST):
	case OP(TID_RDMA, WRITE_DATA):
		trace_seq_printf(p, TID_RDMA_KDETH_DATA " " TID_WRITE_DATA_PRN,
				 le32_to_cpu(eh->tid_rdma.w_data.kdeth0),
				 KDETH_GET(eh->tid_rdma.w_data.kdeth0, KVER),
				 KDETH_GET(eh->tid_rdma.w_data.kdeth0, SH),
				 KDETH_GET(eh->tid_rdma.w_data.kdeth0, INTR),
				 KDETH_GET(eh->tid_rdma.w_data.kdeth0, TIDCTRL),
				 KDETH_GET(eh->tid_rdma.w_data.kdeth0, TID),
				 KDETH_GET(eh->tid_rdma.w_data.kdeth0, OFFSET),
				 le32_to_cpu(eh->tid_rdma.w_data.kdeth1),
				 KDETH_GET(eh->tid_rdma.w_data.kdeth1, JKEY),
				 be32_to_cpu(eh->tid_rdma.w_data.verbs_qp));
		break;
	case OP(TID_RDMA, READ_REQ):
		trace_seq_printf(p, TID_RDMA_KDETH " " RETH_PRN " "
				 TID_READ_REQ_PRN,
				 le32_to_cpu(eh->tid_rdma.r_req.kdeth0),
				 le32_to_cpu(eh->tid_rdma.r_req.kdeth1),
				 ib_u64_get(&eh->tid_rdma.r_req.reth.vaddr),
				 be32_to_cpu(eh->tid_rdma.r_req.reth.rkey),
				 be32_to_cpu(eh->tid_rdma.r_req.reth.length),
				 be32_to_cpu(eh->tid_rdma.r_req.tid_flow_psn),
				 be32_to_cpu(eh->tid_rdma.r_req.tid_flow_qp),
				 be32_to_cpu(eh->tid_rdma.r_req.verbs_qp));
		break;
	case OP(TID_RDMA, READ_RESP):
		trace_seq_printf(p, TID_RDMA_KDETH_DATA " " AETH_PRN " "
				 TID_READ_RSP_PRN,
				 le32_to_cpu(eh->tid_rdma.r_rsp.kdeth0),
				 KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, KVER),
				 KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, SH),
				 KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, INTR),
				 KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TIDCTRL),
				 KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, TID),
				 KDETH_GET(eh->tid_rdma.r_rsp.kdeth0, OFFSET),
				 le32_to_cpu(eh->tid_rdma.r_rsp.kdeth1),
				 KDETH_GET(eh->tid_rdma.r_rsp.kdeth1, JKEY),
				 be32_to_cpu(eh->tid_rdma.r_rsp.aeth) >> 24,
				 parse_syndrome(/* aeth */
					be32_to_cpu(eh->tid_rdma.r_rsp.aeth)
					>> 24),
				 (be32_to_cpu(eh->tid_rdma.r_rsp.aeth) &
				  IB_MSN_MASK),
				 be32_to_cpu(eh->tid_rdma.r_rsp.verbs_qp));
		break;
	case OP(TID_RDMA, ACK):
		trace_seq_printf(p, TID_RDMA_KDETH " " AETH_PRN " "
				 TID_ACK_PRN,
				 le32_to_cpu(eh->tid_rdma.ack.kdeth0),
				 le32_to_cpu(eh->tid_rdma.ack.kdeth1),
				 be32_to_cpu(eh->tid_rdma.ack.aeth) >> 24,
				 parse_syndrome(/* aeth */
					be32_to_cpu(eh->tid_rdma.ack.aeth)
					>> 24),
				 (be32_to_cpu(eh->tid_rdma.ack.aeth) &
				  IB_MSN_MASK),
				 be32_to_cpu(eh->tid_rdma.ack.tid_flow_psn),
				 be32_to_cpu(eh->tid_rdma.ack.verbs_psn),
				 be32_to_cpu(eh->tid_rdma.ack.tid_flow_qp),
				 be32_to_cpu(eh->tid_rdma.ack.verbs_qp));
		break;
	case OP(TID_RDMA, RESYNC):
		trace_seq_printf(p, TID_RDMA_KDETH " " TID_RESYNC_PRN,
				 le32_to_cpu(eh->tid_rdma.resync.kdeth0),
				 le32_to_cpu(eh->tid_rdma.resync.kdeth1),
				 be32_to_cpu(eh->tid_rdma.resync.verbs_qp));
		break;
	/* aeth + atomicacketh */
	case OP(RC, ATOMIC_ACKNOWLEDGE):
		trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
				 be32_to_cpu(eh->at.aeth) >> 24,
				 parse_syndrome(be32_to_cpu(eh->at.aeth) >> 24),
				 be32_to_cpu(eh->at.aeth) & IB_MSN_MASK,
				 ib_u64_get(&eh->at.atomic_ack_eth));
		break;
	/* atomiceth */
	case OP(RC, COMPARE_SWAP):
	case OP(RC, FETCH_ADD):
		trace_seq_printf(p, ATOMICETH_PRN,
				 get_ib_ateth_vaddr(&eh->atomic_eth),
				 eh->atomic_eth.rkey,
				 get_ib_ateth_swap(&eh->atomic_eth),
				 get_ib_ateth_compare(&eh->atomic_eth));
		break;
	/* deth */
	case OP(UD, SEND_ONLY):
	case OP(UD, SEND_ONLY_WITH_IMMEDIATE):
		trace_seq_printf(p, DETH_PRN,
				 be32_to_cpu(eh->ud.deth[0]),
				 be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK);
		break;
	/* ieth */
	case OP(RC, SEND_LAST_WITH_INVALIDATE):
	case OP(RC, SEND_ONLY_WITH_INVALIDATE):
		trace_seq_printf(p, IETH_PRN,
				 be32_to_cpu(eh->ieth));
		break;
	}
out:
	trace_seq_putc(p, 0);
	return ret;
}

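/*
 * Render the SDMA descriptor flags as a four-character string:
 * I = interrupt request, H = head to host, F = first descriptor,
 * L = last descriptor.  First descriptors also get the header mode,
 * index and DWS fields decoded from desc1.
 */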
const char *parse_sdma_flags(
	struct trace_seq *p,
	u64 desc0, u64 desc1)
{
	const char *ret = trace_seq_buffer_ptr(p);
	char flags[5] = { 'x', 'x', 'x', 'x', 0 };

	flags[0] = (desc1 & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
	flags[1] = (desc1 & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 'H' : '-';
	flags[2] = (desc0 & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
	flags[3] = (desc0 & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
	trace_seq_printf(p, "%s", flags);
	if (desc0 & SDMA_DESC0_FIRST_DESC_FLAG)
		trace_seq_printf(p, " amode:%u aidx:%u alen:%u",
				 (u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT) &
				      SDMA_DESC1_HEADER_MODE_MASK),
				 (u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT) &
				      SDMA_DESC1_HEADER_INDEX_MASK),
				 (u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT) &
				      SDMA_DESC1_HEADER_DWS_MASK));
	return ret;
}

const char *print_u32_array(
	struct trace_seq *p,
	u32 *arr, int len)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; i < len; i++)
		trace_seq_printf(p, "%s%#x", i == 0 ? "" : " ", arr[i]);
	trace_seq_putc(p, 0);
	return ret;
}

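/*
 * Expose the expected-TID entry fields (control, length, index) to the
 * trace headers via EXP_TID_GET().
 */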
u8 hfi1_trace_get_tid_ctrl(u32 ent)
{
	return EXP_TID_GET(ent, CTRL);
}

u16 hfi1_trace_get_tid_len(u32 ent)
{
	return EXP_TID_GET(ent, LEN);
}

u16 hfi1_trace_get_tid_idx(u32 ent)
{
	return EXP_TID_GET(ent, IDX);
}

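/*
 * Instantiate one debug trace helper per hfi1 debug category; the
 * __hfi1_trace_fn() macro body is supplied by the trace headers.
 */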
__hfi1_trace_fn(AFFINITY);
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
__hfi1_trace_fn(SDMA);
__hfi1_trace_fn(LINKVERB);
__hfi1_trace_fn(DEBUG);
__hfi1_trace_fn(SNOOP);
__hfi1_trace_fn(CNTR);
__hfi1_trace_fn(PIO);
__hfi1_trace_fn(DC8051);
__hfi1_trace_fn(FIRMWARE);
__hfi1_trace_fn(RCVCTRL);
__hfi1_trace_fn(TID);
__hfi1_trace_fn(MMU);
__hfi1_trace_fn(IOCTL);