drivers/staging/rdma/hfi1/trace.h
1/*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50#undef TRACE_SYSTEM_VAR
51#define TRACE_SYSTEM_VAR hfi1
52
53#if !defined(__HFI1_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
54#define __HFI1_TRACE_H
55
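/*
 * Unlike a normal header, this file is deliberately readable more than once:
 * <trace/define_trace.h>, included at the bottom, defines
 * TRACE_HEADER_MULTI_READ and pulls this header in again to expand the
 * TRACE_EVENT() macros below into real tracepoint code.  TRACE_SYSTEM_VAR is
 * pinned to hfi1 above, apparently so the generated system-name symbol stays
 * one valid identifier even though TRACE_SYSTEM itself is redefined for each
 * group of events in this file.
 */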
56#include <linux/tracepoint.h>
57#include <linux/trace_seq.h>
58
59#include "hfi.h"
60#include "mad.h"
61#include "sdma.h"
62
63#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
64#define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
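/*
 * Most events below start their record with DD_DEV_ENTRY()/DD_DEV_ASSIGN(),
 * which snapshot the PCI device name (e.g. "0000:05:00.0") so the leading
 * "[%s]" in the output tells multiple HFI units apart.
 */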
65
66#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
67#define show_packettype(etype) \
68__print_symbolic(etype, \
69 packettype_name(EXPECTED), \
70 packettype_name(EAGER), \
71 packettype_name(IB), \
72 packettype_name(ERROR), \
73 packettype_name(BYPASS))
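/*
 * show_packettype() is evaluated at trace output time: __print_symbolic()
 * maps the numeric RHF receive type stored in the ring buffer back to its
 * name (EXPECTED, EAGER, IB, ERROR or BYPASS) when the event is printed.
 */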
74
75#undef TRACE_SYSTEM
76#define TRACE_SYSTEM hfi1_rx
77
78TRACE_EVENT(hfi1_rcvhdr,
79 TP_PROTO(struct hfi1_devdata *dd,
80 u64 eflags,
81 u32 ctxt,
82 u32 etype,
83 u32 hlen,
84 u32 tlen,
85 u32 updegr,
86 u32 etail),
87 TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
88 TP_STRUCT__entry(
89 DD_DEV_ENTRY(dd)
90 __field(u64, eflags)
91 __field(u32, ctxt)
92 __field(u32, etype)
93 __field(u32, hlen)
94 __field(u32, tlen)
95 __field(u32, updegr)
96 __field(u32, etail)
97 ),
98 TP_fast_assign(
99 DD_DEV_ASSIGN(dd);
100 __entry->eflags = eflags;
101 __entry->ctxt = ctxt;
102 __entry->etype = etype;
103 __entry->hlen = hlen;
104 __entry->tlen = tlen;
105 __entry->updegr = updegr;
106 __entry->etail = etail;
107 ),
108 TP_printk(
109"[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
110 __get_str(dev),
111 __entry->ctxt,
112 __entry->eflags,
113 __entry->etype, show_packettype(__entry->etype),
114 __entry->hlen,
115 __entry->tlen,
116 __entry->updegr,
117 __entry->etail
118 )
119);
120
121TRACE_EVENT(hfi1_receive_interrupt,
122 TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
123 TP_ARGS(dd, ctxt),
124 TP_STRUCT__entry(
125 DD_DEV_ENTRY(dd)
126 __field(u32, ctxt)
127 __field(u8, slow_path)
128 __field(u8, dma_rtail)
129 ),
130 TP_fast_assign(
131 DD_DEV_ASSIGN(dd);
132 __entry->ctxt = ctxt;
133 if (dd->rcd[ctxt]->do_interrupt ==
134 &handle_receive_interrupt) {
135 __entry->slow_path = 1;
136 __entry->dma_rtail = 0xFF;
137 } else if (dd->rcd[ctxt]->do_interrupt ==
138 &handle_receive_interrupt_dma_rtail) {
139 __entry->dma_rtail = 1;
140 __entry->slow_path = 0;
141 } else if (dd->rcd[ctxt]->do_interrupt ==
142 &handle_receive_interrupt_nodma_rtail) {
143 __entry->dma_rtail = 0;
144 __entry->slow_path = 0;
145 }
146 ),
147 TP_printk(
148 "[%s] ctxt %d SlowPath: %d DmaRtail: %d",
149 __get_str(dev),
150 __entry->ctxt,
151 __entry->slow_path,
152 __entry->dma_rtail
153 )
154);
155
156const char *print_u64_array(struct trace_seq *, u64 *, int);
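/*
 * print_u64_array() is implemented in the driver's trace.c; helpers called
 * from TP_printk() must format into the trace_seq "p" that the tracing core
 * has in scope there, which is why "p" is passed explicitly below.
 */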
157
158TRACE_EVENT(hfi1_exp_tid_map,
159 TP_PROTO(unsigned ctxt, u16 subctxt, int dir,
160 unsigned long *maps, u16 count),
161 TP_ARGS(ctxt, subctxt, dir, maps, count),
162 TP_STRUCT__entry(
163 __field(unsigned, ctxt)
164 __field(u16, subctxt)
165 __field(int, dir)
166 __field(u16, count)
167 __dynamic_array(unsigned long, maps, sizeof(*maps) * count)
168 ),
169 TP_fast_assign(
170 __entry->ctxt = ctxt;
171 __entry->subctxt = subctxt;
172 __entry->dir = dir;
173 __entry->count = count;
174 memcpy(__get_dynamic_array(maps), maps,
175 sizeof(*maps) * count);
176 ),
177 TP_printk("[%3u:%02u] %s tidmaps %s",
178 __entry->ctxt,
179 __entry->subctxt,
180 (__entry->dir ? ">" : "<"),
181 print_u64_array(p, __get_dynamic_array(maps),
182 __entry->count)
183 )
184 );
185
186TRACE_EVENT(hfi1_exp_rcv_set,
187 TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid,
188 unsigned long vaddr, u64 phys_addr, void *page),
189 TP_ARGS(ctxt, subctxt, tid, vaddr, phys_addr, page),
190 TP_STRUCT__entry(
191 __field(unsigned, ctxt)
192 __field(u16, subctxt)
193 __field(u32, tid)
194 __field(unsigned long, vaddr)
195 __field(u64, phys_addr)
196 __field(void *, page)
197 ),
198 TP_fast_assign(
199 __entry->ctxt = ctxt;
200 __entry->subctxt = subctxt;
201 __entry->tid = tid;
202 __entry->vaddr = vaddr;
203 __entry->phys_addr = phys_addr;
204 __entry->page = page;
205 ),
206 TP_printk("[%u:%u] TID %u, vaddr 0x%lx, physaddr 0x%llx, pgp %p",
207 __entry->ctxt,
208 __entry->subctxt,
209 __entry->tid,
210 __entry->vaddr,
211 __entry->phys_addr,
212 __entry->page
213 )
214 );
215
216TRACE_EVENT(hfi1_exp_rcv_free,
217 TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid,
218 unsigned long phys, void *page),
219 TP_ARGS(ctxt, subctxt, tid, phys, page),
220 TP_STRUCT__entry(
221 __field(unsigned, ctxt)
222 __field(u16, subctxt)
223 __field(u32, tid)
224 __field(unsigned long, phys)
225 __field(void *, page)
226 ),
227 TP_fast_assign(
228 __entry->ctxt = ctxt;
229 __entry->subctxt = subctxt;
230 __entry->tid = tid;
231 __entry->phys = phys;
232 __entry->page = page;
233 ),
234 TP_printk("[%u:%u] freeing TID %u, 0x%lx, pgp %p",
235 __entry->ctxt,
236 __entry->subctxt,
237 __entry->tid,
238 __entry->phys,
239 __entry->page
240 )
241 );
242#undef TRACE_SYSTEM
243#define TRACE_SYSTEM hfi1_tx
244
245TRACE_EVENT(hfi1_piofree,
246 TP_PROTO(struct send_context *sc, int extra),
247 TP_ARGS(sc, extra),
248 TP_STRUCT__entry(
249 DD_DEV_ENTRY(sc->dd)
250 __field(u32, sw_index)
251 __field(u32, hw_context)
252 __field(int, extra)
253 ),
254 TP_fast_assign(
255 DD_DEV_ASSIGN(sc->dd);
256 __entry->sw_index = sc->sw_index;
257 __entry->hw_context = sc->hw_context;
258 __entry->extra = extra;
259 ),
260 TP_printk(
261 "[%s] ctxt %u(%u) extra %d",
262 __get_str(dev),
263 __entry->sw_index,
264 __entry->hw_context,
265 __entry->extra
266 )
267);
268
269TRACE_EVENT(hfi1_wantpiointr,
270 TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
271 TP_ARGS(sc, needint, credit_ctrl),
272 TP_STRUCT__entry(
273 DD_DEV_ENTRY(sc->dd)
274 __field(u32, sw_index)
275 __field(u32, hw_context)
276 __field(u32, needint)
277 __field(u64, credit_ctrl)
278 ),
279 TP_fast_assign(
280 DD_DEV_ASSIGN(sc->dd);
281 __entry->sw_index = sc->sw_index;
282 __entry->hw_context = sc->hw_context;
283 __entry->needint = needint;
284 __entry->credit_ctrl = credit_ctrl;
285 ),
286 TP_printk(
287 "[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
288 __get_str(dev),
289 __entry->sw_index,
290 __entry->hw_context,
291 __entry->needint,
292 (unsigned long long)__entry->credit_ctrl
293 )
294);
295
296DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
297 TP_PROTO(struct hfi1_qp *qp, u32 flags),
298 TP_ARGS(qp, flags),
299 TP_STRUCT__entry(
300 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
301 __field(u32, qpn)
302 __field(u32, flags)
303 __field(u32, s_flags)
304 ),
305 TP_fast_assign(
306 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
307 __entry->flags = flags;
308 __entry->qpn = qp->ibqp.qp_num;
309 __entry->s_flags = qp->s_flags;
310 ),
311 TP_printk(
312 "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
313 __get_str(dev),
314 __entry->qpn,
315 __entry->flags,
316 __entry->s_flags
317 )
318);
319
320DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
321 TP_PROTO(struct hfi1_qp *qp, u32 flags),
322 TP_ARGS(qp, flags));
323
324DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
325 TP_PROTO(struct hfi1_qp *qp, u32 flags),
326 TP_ARGS(qp, flags));
327
328#undef TRACE_SYSTEM
329#define TRACE_SYSTEM hfi1_qphash
330DECLARE_EVENT_CLASS(hfi1_qphash_template,
331 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
332 TP_ARGS(qp, bucket),
333 TP_STRUCT__entry(
334 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
335 __field(u32, qpn)
336 __field(u32, bucket)
337 ),
338 TP_fast_assign(
339 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
340 __entry->qpn = qp->ibqp.qp_num;
341 __entry->bucket = bucket;
342 ),
343 TP_printk(
344 "[%s] qpn 0x%x bucket %u",
345 __get_str(dev),
346 __entry->qpn,
347 __entry->bucket
348 )
349);
350
351DEFINE_EVENT(hfi1_qphash_template, hfi1_qpinsert,
352 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
353 TP_ARGS(qp, bucket));
354
355DEFINE_EVENT(hfi1_qphash_template, hfi1_qpremove,
356 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
357 TP_ARGS(qp, bucket));
358
359#undef TRACE_SYSTEM
360#define TRACE_SYSTEM hfi1_ibhdrs
361
362u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
363const char *parse_everbs_hdrs(
364 struct trace_seq *p,
365 u8 opcode,
366 void *ehdrs);
367
368#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
369
370const char *parse_sdma_flags(
371 struct trace_seq *p,
372 u64 desc0, u64 desc1);
373
374#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
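/*
 * Like print_u64_array() above, parse_everbs_hdrs() and parse_sdma_flags()
 * format into the implicit trace_seq pointer "p" available inside
 * TP_printk(); the __parse_* wrappers just hide that extra argument.
 */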
375
376
377#define lrh_name(lrh) { HFI1_##lrh, #lrh }
378#define show_lnh(lrh) \
379__print_symbolic(lrh, \
380 lrh_name(LRH_BTH), \
381 lrh_name(LRH_GRH))
382
383#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
384#define show_ib_opcode(opcode) \
385__print_symbolic(opcode, \
386 ib_opcode_name(RC_SEND_FIRST), \
387 ib_opcode_name(RC_SEND_MIDDLE), \
388 ib_opcode_name(RC_SEND_LAST), \
389 ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
390 ib_opcode_name(RC_SEND_ONLY), \
391 ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
392 ib_opcode_name(RC_RDMA_WRITE_FIRST), \
393 ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
394 ib_opcode_name(RC_RDMA_WRITE_LAST), \
395 ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
396 ib_opcode_name(RC_RDMA_WRITE_ONLY), \
397 ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
398 ib_opcode_name(RC_RDMA_READ_REQUEST), \
399 ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
400 ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
401 ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
402 ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
403 ib_opcode_name(RC_ACKNOWLEDGE), \
404 ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
405 ib_opcode_name(RC_COMPARE_SWAP), \
406 ib_opcode_name(RC_FETCH_ADD), \
407 ib_opcode_name(UC_SEND_FIRST), \
408 ib_opcode_name(UC_SEND_MIDDLE), \
409 ib_opcode_name(UC_SEND_LAST), \
410 ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
411 ib_opcode_name(UC_SEND_ONLY), \
412 ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
413 ib_opcode_name(UC_RDMA_WRITE_FIRST), \
414 ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
415 ib_opcode_name(UC_RDMA_WRITE_LAST), \
416 ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
417 ib_opcode_name(UC_RDMA_WRITE_ONLY), \
418 ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
419 ib_opcode_name(UD_SEND_ONLY), \
420 ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE))
421
422
423#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
424#define BTH_PRN \
425 "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
426 "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
427#define EHDR_PRN "%s"
428
429DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
430 TP_PROTO(struct hfi1_devdata *dd,
431 struct hfi1_ib_header *hdr),
432 TP_ARGS(dd, hdr),
433 TP_STRUCT__entry(
434 DD_DEV_ENTRY(dd)
435 /* LRH */
436 __field(u8, vl)
437 __field(u8, lver)
438 __field(u8, sl)
439 __field(u8, lnh)
440 __field(u16, dlid)
441 __field(u16, len)
442 __field(u16, slid)
443 /* BTH */
444 __field(u8, opcode)
445 __field(u8, se)
446 __field(u8, m)
447 __field(u8, pad)
448 __field(u8, tver)
449 __field(u16, pkey)
450 __field(u8, f)
451 __field(u8, b)
452 __field(u32, qpn)
453 __field(u8, a)
454 __field(u32, psn)
455 /* extended headers */
456 __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
457 ),
458 TP_fast_assign(
459 struct hfi1_other_headers *ohdr;
460
461 DD_DEV_ASSIGN(dd);
462 /* LRH */
463 __entry->vl =
464 (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
465 __entry->lver =
466 (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
467 __entry->sl =
468 (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
469 __entry->lnh =
470 (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
471 __entry->dlid =
472 be16_to_cpu(hdr->lrh[1]);
473 /* allow for larger len */
474 __entry->len =
475 be16_to_cpu(hdr->lrh[2]);
476 __entry->slid =
477 be16_to_cpu(hdr->lrh[3]);
478 /* BTH */
479 if (__entry->lnh == HFI1_LRH_BTH)
480 ohdr = &hdr->u.oth;
481 else
482 ohdr = &hdr->u.l.oth;
483 __entry->opcode =
484 (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
485 __entry->se =
486 (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
487 __entry->m =
488 (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
489 __entry->pad =
490 (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
491 __entry->tver =
492 (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
493 __entry->pkey =
494 be32_to_cpu(ohdr->bth[0]) & 0xffff;
495 __entry->f =
496 (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT)
497 & HFI1_FECN_MASK;
498 __entry->b =
499 (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT)
500 & HFI1_BECN_MASK;
501 __entry->qpn =
502 be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
503 __entry->a =
504 (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
505 /* allow for larger PSN */
506 __entry->psn =
507 be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
508 /* extended headers */
509 memcpy(
510 __get_dynamic_array(ehdrs),
511 &ohdr->u,
512 ibhdr_exhdr_len(hdr));
513 ),
514 TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
515 __get_str(dev),
516 /* LRH */
517 __entry->vl,
518 __entry->lver,
519 __entry->sl,
520 __entry->lnh, show_lnh(__entry->lnh),
521 __entry->dlid,
522 __entry->len,
523 __entry->slid,
524 /* BTH */
525 __entry->opcode, show_ib_opcode(__entry->opcode),
526 __entry->se,
527 __entry->m,
528 __entry->pad,
529 __entry->tver,
530 __entry->pkey,
531 __entry->f,
532 __entry->b,
533 __entry->qpn,
534 __entry->a,
535 __entry->psn,
536 /* extended headers */
537 __parse_ib_ehdrs(
538 __entry->opcode,
539 (void *)__get_dynamic_array(ehdrs))
540 )
541);
542
543DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
544 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
545 TP_ARGS(dd, hdr));
546
547DEFINE_EVENT(hfi1_ibhdr_template, output_ibhdr,
548 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
549 TP_ARGS(dd, hdr));
550
551#define SNOOP_PRN \
552 "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
553 "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
554
555#undef TRACE_SYSTEM
556#define TRACE_SYSTEM hfi1_snoop
557
558
559TRACE_EVENT(snoop_capture,
560 TP_PROTO(struct hfi1_devdata *dd,
561 int hdr_len,
562 struct hfi1_ib_header *hdr,
563 int data_len,
564 void *data),
565 TP_ARGS(dd, hdr_len, hdr, data_len, data),
566 TP_STRUCT__entry(
567 DD_DEV_ENTRY(dd)
568 __field(u16, slid)
569 __field(u16, dlid)
570 __field(u32, qpn)
571 __field(u8, opcode)
572 __field(u8, sl)
573 __field(u16, pkey)
574 __field(u32, hdr_len)
575 __field(u32, data_len)
576 __field(u8, lnh)
577 __dynamic_array(u8, raw_hdr, hdr_len)
578 __dynamic_array(u8, raw_pkt, data_len)
579 ),
580 TP_fast_assign(
581 struct hfi1_other_headers *ohdr;
582
583 __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
584 if (__entry->lnh == HFI1_LRH_BTH)
585 ohdr = &hdr->u.oth;
586 else
587 ohdr = &hdr->u.l.oth;
588 DD_DEV_ASSIGN(dd);
589 __entry->slid = be16_to_cpu(hdr->lrh[3]);
590 __entry->dlid = be16_to_cpu(hdr->lrh[1]);
591 __entry->qpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
592 __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
593 __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
594 __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
595 __entry->hdr_len = hdr_len;
596 __entry->data_len = data_len;
597 memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
598 memcpy(__get_dynamic_array(raw_pkt), data, data_len);
599 ),
600 TP_printk("[%s] " SNOOP_PRN,
601 __get_str(dev),
602 __entry->slid,
603 __entry->dlid,
604 __entry->qpn,
605 __entry->opcode,
606 show_ib_opcode(__entry->opcode),
607 __entry->sl,
608 __entry->pkey,
609 __entry->hdr_len,
610 __entry->data_len
611 )
612);
613
614#undef TRACE_SYSTEM
615#define TRACE_SYSTEM hfi1_ctxts
616
617#define UCTXT_FMT \
618 "cred:%u, credaddr:0x%llx, piobase:0x%llx, rcvhdr_cnt:%u, " \
619 "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
620TRACE_EVENT(hfi1_uctxtdata,
621 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
622 TP_ARGS(dd, uctxt),
623 TP_STRUCT__entry(
624 DD_DEV_ENTRY(dd)
625 __field(unsigned, ctxt)
626 __field(u32, credits)
627 __field(u64, hw_free)
628 __field(u64, piobase)
629 __field(u16, rcvhdrq_cnt)
630 __field(u64, rcvhdrq_phys)
631 __field(u32, eager_cnt)
632 __field(u64, rcvegr_phys)
633 ),
634 TP_fast_assign(
635 DD_DEV_ASSIGN(dd);
636 __entry->ctxt = uctxt->ctxt;
637 __entry->credits = uctxt->sc->credits;
638 __entry->hw_free = (u64)uctxt->sc->hw_free;
639 __entry->piobase = (u64)uctxt->sc->base_addr;
640 __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
641 __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
642 __entry->eager_cnt = uctxt->egrbufs.alloced;
643 __entry->rcvegr_phys = uctxt->egrbufs.rcvtids[0].phys;
644 ),
645 TP_printk(
646 "[%s] ctxt %u " UCTXT_FMT,
647 __get_str(dev),
648 __entry->ctxt,
649 __entry->credits,
650 __entry->hw_free,
651 __entry->piobase,
652 __entry->rcvhdrq_cnt,
653 __entry->rcvhdrq_phys,
654 __entry->eager_cnt,
655 __entry->rcvegr_phys
656 )
657 );
658
659#define CINFO_FMT \
660 "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
661TRACE_EVENT(hfi1_ctxt_info,
662 TP_PROTO(struct hfi1_devdata *dd, unsigned ctxt, unsigned subctxt,
663 struct hfi1_ctxt_info cinfo),
664 TP_ARGS(dd, ctxt, subctxt, cinfo),
665 TP_STRUCT__entry(
666 DD_DEV_ENTRY(dd)
667 __field(unsigned, ctxt)
668 __field(unsigned, subctxt)
669 __field(u16, egrtids)
670 __field(u16, rcvhdrq_cnt)
671 __field(u16, rcvhdrq_size)
672 __field(u16, sdma_ring_size)
673 __field(u32, rcvegr_size)
674 ),
675 TP_fast_assign(
676 DD_DEV_ASSIGN(dd);
677 __entry->ctxt = ctxt;
678 __entry->subctxt = subctxt;
679 __entry->egrtids = cinfo.egrtids;
680 __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
681 __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
682 __entry->sdma_ring_size = cinfo.sdma_ring_size;
683 __entry->rcvegr_size = cinfo.rcvegr_size;
684 ),
685 TP_printk(
686 "[%s] ctxt %u:%u " CINFO_FMT,
687 __get_str(dev),
688 __entry->ctxt,
689 __entry->subctxt,
690 __entry->egrtids,
691 __entry->rcvegr_size,
692 __entry->rcvhdrq_cnt,
693 __entry->rcvhdrq_size,
694 __entry->sdma_ring_size
695 )
696 );
697
698#undef TRACE_SYSTEM
699#define TRACE_SYSTEM hfi1_sma
700
701#define BCT_FORMAT \
702 "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
703
704#define BCT(field) \
705 be16_to_cpu( \
706 ((struct buffer_control *)__get_dynamic_array(bct))->field \
707 )
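/*
 * The bct_set/bct_get events below copy the whole struct buffer_control into
 * the ring buffer; BCT() pulls individual big-endian fields back out and
 * byte-swaps them only when the event is actually printed.
 */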
708
709DECLARE_EVENT_CLASS(hfi1_bct_template,
710 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
711 TP_ARGS(dd, bc),
712 TP_STRUCT__entry(
713 DD_DEV_ENTRY(dd)
714 __dynamic_array(u8, bct, sizeof(*bc))
715 ),
716 TP_fast_assign(
717 DD_DEV_ASSIGN(dd);
718 memcpy(
719 __get_dynamic_array(bct),
720 bc,
721 sizeof(*bc));
722 ),
723 TP_printk(BCT_FORMAT,
724 BCT(overall_shared_limit),
725
726 BCT(vl[0].dedicated),
727 BCT(vl[0].shared),
728
729 BCT(vl[1].dedicated),
730 BCT(vl[1].shared),
731
732 BCT(vl[2].dedicated),
733 BCT(vl[2].shared),
734
735 BCT(vl[3].dedicated),
736 BCT(vl[3].shared),
737
738 BCT(vl[4].dedicated),
739 BCT(vl[4].shared),
740
741 BCT(vl[5].dedicated),
742 BCT(vl[5].shared),
743
744 BCT(vl[6].dedicated),
745 BCT(vl[6].shared),
746
747 BCT(vl[7].dedicated),
748 BCT(vl[7].shared),
749
750 BCT(vl[15].dedicated),
751 BCT(vl[15].shared)
752 )
753);
754
755
756DEFINE_EVENT(hfi1_bct_template, bct_set,
757 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
758 TP_ARGS(dd, bc));
759
760DEFINE_EVENT(hfi1_bct_template, bct_get,
761 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
762 TP_ARGS(dd, bc));
763
764#undef TRACE_SYSTEM
765#define TRACE_SYSTEM hfi1_sdma
766
767TRACE_EVENT(hfi1_sdma_descriptor,
768 TP_PROTO(
769 struct sdma_engine *sde,
770 u64 desc0,
771 u64 desc1,
772 u16 e,
773 void *descp),
774 TP_ARGS(sde, desc0, desc1, e, descp),
775 TP_STRUCT__entry(
776 DD_DEV_ENTRY(sde->dd)
777 __field(void *, descp)
778 __field(u64, desc0)
779 __field(u64, desc1)
780 __field(u16, e)
781 __field(u8, idx)
782 ),
783 TP_fast_assign(
784 DD_DEV_ASSIGN(sde->dd);
785 __entry->desc0 = desc0;
786 __entry->desc1 = desc1;
787 __entry->idx = sde->this_idx;
788 __entry->descp = descp;
789 __entry->e = e;
790 ),
791 TP_printk(
792 "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
793 __get_str(dev),
794 __entry->idx,
795 __parse_sdma_flags(__entry->desc0, __entry->desc1),
796 (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT)
797 & SDMA_DESC0_PHY_ADDR_MASK,
798 (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT)
799 & SDMA_DESC1_GENERATION_MASK),
800 (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT)
801 & SDMA_DESC0_BYTE_COUNT_MASK),
802 __entry->desc0,
803 __entry->desc1,
804 __entry->descp,
805 __entry->e
806 )
807);
808
809TRACE_EVENT(hfi1_sdma_engine_select,
810 TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
811 TP_ARGS(dd, sel, vl, idx),
812 TP_STRUCT__entry(
813 DD_DEV_ENTRY(dd)
814 __field(u32, sel)
815 __field(u8, vl)
816 __field(u8, idx)
817 ),
818 TP_fast_assign(
819 DD_DEV_ASSIGN(dd);
820 __entry->sel = sel;
821 __entry->vl = vl;
822 __entry->idx = idx;
823 ),
824 TP_printk(
825 "[%s] selecting SDE %u sel 0x%x vl %u",
826 __get_str(dev),
827 __entry->idx,
828 __entry->sel,
829 __entry->vl
830 )
831);
832
833DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
834 TP_PROTO(
835 struct sdma_engine *sde,
836 u64 status
837 ),
838 TP_ARGS(sde, status),
839 TP_STRUCT__entry(
840 DD_DEV_ENTRY(sde->dd)
841 __field(u64, status)
842 __field(u8, idx)
843 ),
844 TP_fast_assign(
845 DD_DEV_ASSIGN(sde->dd);
846 __entry->status = status;
847 __entry->idx = sde->this_idx;
848 ),
849 TP_printk(
850 "[%s] SDE(%u) status %llx",
851 __get_str(dev),
852 __entry->idx,
853 (unsigned long long)__entry->status
854 )
855);
856
857DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
858 TP_PROTO(
859 struct sdma_engine *sde,
860 u64 status
861 ),
862 TP_ARGS(sde, status)
863);
864
865DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
866 TP_PROTO(
867 struct sdma_engine *sde,
868 u64 status
869 ),
870 TP_ARGS(sde, status)
871);
872
873DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
874 TP_PROTO(
875 struct sdma_engine *sde,
876 int aidx
877 ),
878 TP_ARGS(sde, aidx),
879 TP_STRUCT__entry(
880 DD_DEV_ENTRY(sde->dd)
881 __field(int, aidx)
882 __field(u8, idx)
883 ),
884 TP_fast_assign(
885 DD_DEV_ASSIGN(sde->dd);
886 __entry->idx = sde->this_idx;
887 __entry->aidx = aidx;
888 ),
889 TP_printk(
890 "[%s] SDE(%u) aidx %d",
891 __get_str(dev),
892 __entry->idx,
893 __entry->aidx
894 )
895);
896
897DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
898 TP_PROTO(
899 struct sdma_engine *sde,
900 int aidx
901 ),
902 TP_ARGS(sde, aidx));
903
904DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
905 TP_PROTO(
906 struct sdma_engine *sde,
907 int aidx
908 ),
909 TP_ARGS(sde, aidx));
910
911#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
912TRACE_EVENT(hfi1_sdma_progress,
913 TP_PROTO(
914 struct sdma_engine *sde,
915 u16 hwhead,
916 u16 swhead,
917 struct sdma_txreq *txp
918 ),
919 TP_ARGS(sde, hwhead, swhead, txp),
920 TP_STRUCT__entry(
921 DD_DEV_ENTRY(sde->dd)
922 __field(u64, sn)
923 __field(u16, hwhead)
924 __field(u16, swhead)
925 __field(u16, txnext)
926 __field(u16, tx_tail)
927 __field(u16, tx_head)
928 __field(u8, idx)
929 ),
930 TP_fast_assign(
931 DD_DEV_ASSIGN(sde->dd);
932 __entry->hwhead = hwhead;
933 __entry->swhead = swhead;
934 __entry->tx_tail = sde->tx_tail;
935 __entry->tx_head = sde->tx_head;
936 __entry->txnext = txp ? txp->next_descq_idx : ~0;
937 __entry->idx = sde->this_idx;
938 __entry->sn = txp ? txp->sn : ~0;
939 ),
940 TP_printk(
941 "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
942 __get_str(dev),
943 __entry->idx,
944 __entry->sn,
945 __entry->hwhead,
946 __entry->swhead,
947 __entry->txnext,
948 __entry->tx_head,
949 __entry->tx_tail
950 )
951);
952#else
953TRACE_EVENT(hfi1_sdma_progress,
954 TP_PROTO(
955 struct sdma_engine *sde,
956 u16 hwhead,
957 u16 swhead,
958 struct sdma_txreq *txp
959 ),
960 TP_ARGS(sde, hwhead, swhead, txp),
961 TP_STRUCT__entry(
962 DD_DEV_ENTRY(sde->dd)
963 __field(u16, hwhead)
964 __field(u16, swhead)
965 __field(u16, txnext)
966 __field(u16, tx_tail)
967 __field(u16, tx_head)
968 __field(u8, idx)
969 ),
970 TP_fast_assign(
971 DD_DEV_ASSIGN(sde->dd);
972 __entry->hwhead = hwhead;
973 __entry->swhead = swhead;
974 __entry->tx_tail = sde->tx_tail;
975 __entry->tx_head = sde->tx_head;
976 __entry->txnext = txp ? txp->next_descq_idx : ~0;
977 __entry->idx = sde->this_idx;
978 ),
979 TP_printk(
980 "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
981 __get_str(dev),
982 __entry->idx,
983 __entry->hwhead,
984 __entry->swhead,
985 __entry->txnext,
986 __entry->tx_head,
987 __entry->tx_tail
988 )
989);
990#endif
991
992DECLARE_EVENT_CLASS(hfi1_sdma_sn,
993 TP_PROTO(
994 struct sdma_engine *sde,
995 u64 sn
996 ),
997 TP_ARGS(sde, sn),
998 TP_STRUCT__entry(
999 DD_DEV_ENTRY(sde->dd)
1000 __field(u64, sn)
1001 __field(u8, idx)
1002 ),
1003 TP_fast_assign(
1004 DD_DEV_ASSIGN(sde->dd);
1005 __entry->sn = sn;
1006 __entry->idx = sde->this_idx;
1007 ),
1008 TP_printk(
1009 "[%s] SDE(%u) sn %llu",
1010 __get_str(dev),
1011 __entry->idx,
1012 __entry->sn
1013 )
1014);
1015
1016DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
1017 TP_PROTO(
1018 struct sdma_engine *sde,
1019 u64 sn
1020 ),
1021 TP_ARGS(sde, sn)
1022);
1023
1024DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
1025 TP_PROTO(
1026 struct sdma_engine *sde,
1027 u64 sn
1028 ),
1029 TP_ARGS(sde, sn)
1030);
1031
1032#define USDMA_HDR_FORMAT \
1033 "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
1034
1035TRACE_EVENT(hfi1_sdma_user_header,
1036 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
1037 struct hfi1_pkt_header *hdr, u32 tidval),
1038 TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
1039 TP_STRUCT__entry(
1040 DD_DEV_ENTRY(dd)
1041 __field(u16, ctxt)
1042 __field(u8, subctxt)
1043 __field(u16, req)
1044 __field(__le32, pbc0)
1045 __field(__le32, pbc1)
1046 __field(__be32, lrh0)
1047 __field(__be32, lrh1)
1048 __field(__be32, bth0)
1049 __field(__be32, bth1)
1050 __field(__be32, bth2)
1051 __field(__le32, kdeth0)
1052 __field(__le32, kdeth1)
1053 __field(__le32, kdeth2)
1054 __field(__le32, kdeth3)
1055 __field(__le32, kdeth4)
1056 __field(__le32, kdeth5)
1057 __field(__le32, kdeth6)
1058 __field(__le32, kdeth7)
1059 __field(__le32, kdeth8)
1060 __field(u32, tidval)
1061 ),
1062 TP_fast_assign(
1063 __le32 *pbc = (__le32 *)hdr->pbc;
1064 __be32 *lrh = (__be32 *)hdr->lrh;
1065 __be32 *bth = (__be32 *)hdr->bth;
1066 __le32 *kdeth = (__le32 *)&hdr->kdeth;
1067
1068 DD_DEV_ASSIGN(dd);
1069 __entry->ctxt = ctxt;
1070 __entry->subctxt = subctxt;
1071 __entry->req = req;
1072 __entry->pbc0 = pbc[0];
1073 __entry->pbc1 = pbc[1];
1074 __entry->lrh0 = be32_to_cpu(lrh[0]);
1075 __entry->lrh1 = be32_to_cpu(lrh[1]);
1076 __entry->bth0 = be32_to_cpu(bth[0]);
1077 __entry->bth1 = be32_to_cpu(bth[1]);
1078 __entry->bth2 = be32_to_cpu(bth[2]);
1079 __entry->kdeth0 = kdeth[0];
1080 __entry->kdeth1 = kdeth[1];
1081 __entry->kdeth2 = kdeth[2];
1082 __entry->kdeth3 = kdeth[3];
1083 __entry->kdeth4 = kdeth[4];
1084 __entry->kdeth5 = kdeth[5];
1085 __entry->kdeth6 = kdeth[6];
1086 __entry->kdeth7 = kdeth[7];
1087 __entry->kdeth8 = kdeth[8];
1088 __entry->tidval = tidval;
1089 ),
1090 TP_printk(USDMA_HDR_FORMAT,
1091 __get_str(dev),
1092 __entry->ctxt,
1093 __entry->subctxt,
1094 __entry->req,
1095 __entry->pbc1,
1096 __entry->pbc0,
1097 __entry->lrh0,
1098 __entry->lrh1,
1099 __entry->bth0,
1100 __entry->bth1,
1101 __entry->bth2,
1102 __entry->kdeth0,
1103 __entry->kdeth1,
1104 __entry->kdeth2,
1105 __entry->kdeth3,
1106 __entry->kdeth4,
1107 __entry->kdeth5,
1108 __entry->kdeth6,
1109 __entry->kdeth7,
1110 __entry->kdeth8,
1111 __entry->tidval
1112 )
1113 );
1114
1115#define SDMA_UREQ_FMT \
1116 "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
1117TRACE_EVENT(hfi1_sdma_user_reqinfo,
1118 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
1119 TP_ARGS(dd, ctxt, subctxt, i),
1120 TP_STRUCT__entry(
1121 DD_DEV_ENTRY(dd)
1122 __field(u16, ctxt)
1123 __field(u8, subctxt)
1124 __field(u8, ver_opcode)
1125 __field(u8, iovcnt)
1126 __field(u16, npkts)
1127 __field(u16, fragsize)
1128 __field(u16, comp_idx)
1129 ),
1130 TP_fast_assign(
1131 DD_DEV_ASSIGN(dd);
1132 __entry->ctxt = ctxt;
1133 __entry->subctxt = subctxt;
1134 __entry->ver_opcode = i[0] & 0xff;
1135 __entry->iovcnt = (i[0] >> 8) & 0xff;
1136 __entry->npkts = i[1];
1137 __entry->fragsize = i[2];
1138 __entry->comp_idx = i[3];
1139 ),
1140 TP_printk(SDMA_UREQ_FMT,
1141 __get_str(dev),
1142 __entry->ctxt,
1143 __entry->subctxt,
1144 __entry->ver_opcode,
1145 __entry->iovcnt,
1146 __entry->npkts,
1147 __entry->fragsize,
1148 __entry->comp_idx
1149 )
1150 );
1151
1152#define usdma_complete_name(st) { st, #st }
1153#define show_usdma_complete_state(st) \
1154 __print_symbolic(st, \
1155 usdma_complete_name(FREE), \
1156 usdma_complete_name(QUEUED), \
1157 usdma_complete_name(COMPLETE), \
1158 usdma_complete_name(ERROR))
1159
1160TRACE_EVENT(hfi1_sdma_user_completion,
1161 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
1162 u8 state, int code),
1163 TP_ARGS(dd, ctxt, subctxt, idx, state, code),
1164 TP_STRUCT__entry(
1165 DD_DEV_ENTRY(dd)
1166 __field(u16, ctxt)
1167 __field(u8, subctxt)
1168 __field(u16, idx)
1169 __field(u8, state)
1170 __field(int, code)
1171 ),
1172 TP_fast_assign(
1173 DD_DEV_ASSIGN(dd);
1174 __entry->ctxt = ctxt;
1175 __entry->subctxt = subctxt;
1176 __entry->idx = idx;
1177 __entry->state = state;
1178 __entry->code = code;
1179 ),
1180 TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
1181 __get_str(dev), __entry->ctxt, __entry->subctxt,
1182 __entry->idx, show_usdma_complete_state(__entry->state),
1183 __entry->code)
1184 );
1185
1186const char *print_u32_array(struct trace_seq *, u32 *, int);
1187#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
1188
1189TRACE_EVENT(hfi1_sdma_user_header_ahg,
1190 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
1191 u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
1192 TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
1193 TP_STRUCT__entry(
1194 DD_DEV_ENTRY(dd)
1195 __field(u16, ctxt)
1196 __field(u8, subctxt)
1197 __field(u16, req)
1198 __field(u8, sde)
1199 __field(u8, idx)
1200 __field(int, len)
1201 __field(u32, tidval)
1202 __array(u32, ahg, 10)
1203 ),
1204 TP_fast_assign(
1205 DD_DEV_ASSIGN(dd);
1206 __entry->ctxt = ctxt;
1207 __entry->subctxt = subctxt;
1208 __entry->req = req;
1209 __entry->sde = sde;
1210 __entry->idx = ahgidx;
1211 __entry->len = len;
1212 __entry->tidval = tidval;
1213 memcpy(__entry->ahg, ahg, len * sizeof(u32));
1214 ),
1215 TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
1216 __get_str(dev),
1217 __entry->ctxt,
1218 __entry->subctxt,
1219 __entry->req,
1220 __entry->sde,
1221 __entry->idx,
1222 __entry->len - 1,
1223 __print_u32_hex(__entry->ahg, __entry->len),
1224 __entry->tidval
1225 )
1226 );
1227
1228TRACE_EVENT(hfi1_sdma_state,
1229 TP_PROTO(
1230 struct sdma_engine *sde,
1231 const char *cstate,
1232 const char *nstate
1233 ),
1234 TP_ARGS(sde, cstate, nstate),
1235 TP_STRUCT__entry(
1236 DD_DEV_ENTRY(sde->dd)
1237 __string(curstate, cstate)
1238 __string(newstate, nstate)
1239 ),
1240 TP_fast_assign(
1241 DD_DEV_ASSIGN(sde->dd);
1242 __assign_str(curstate, cstate);
1243 __assign_str(newstate, nstate);
1244 ),
1245 TP_printk("[%s] current state %s new state %s",
1246 __get_str(dev),
1247 __get_str(curstate),
1248 __get_str(newstate)
1249 )
1250);
1251
1252#undef TRACE_SYSTEM
1253#define TRACE_SYSTEM hfi1_rc
1254
1255DECLARE_EVENT_CLASS(hfi1_sdma_rc,
1256 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1257 TP_ARGS(qp, psn),
1258 TP_STRUCT__entry(
1259 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1260 __field(u32, qpn)
1261 __field(u32, flags)
1262 __field(u32, psn)
1263 __field(u32, sending_psn)
1264 __field(u32, sending_hpsn)
1265 ),
1266 TP_fast_assign(
1267 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
1268 __entry->qpn = qp->ibqp.qp_num;
1269 __entry->flags = qp->s_flags;
1270 __entry->psn = psn;
1271 __entry->sending_psn = qp->s_sending_psn;
1272 __entry->sending_hpsn = qp->s_sending_hpsn;
1273 ),
1274 TP_printk(
1275 "[%s] qpn 0x%x flags 0x%x psn 0x%x sending_psn 0x%x sending_hpsn 0x%x",
1276 __get_str(dev),
1277 __entry->qpn,
1278 __entry->flags,
1279 __entry->psn,
1280 __entry->sending_psn,
1281 __entry->sending_hpsn
1282 )
1283);
1284
1285DEFINE_EVENT(hfi1_sdma_rc, hfi1_rc_sendcomplete,
1286 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1287 TP_ARGS(qp, psn)
1288);
1289
1290#undef TRACE_SYSTEM
1291#define TRACE_SYSTEM hfi1_misc
1292
1293TRACE_EVENT(hfi1_interrupt,
1294 TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
1295 int src),
1296 TP_ARGS(dd, is_entry, src),
1297 TP_STRUCT__entry(
1298 DD_DEV_ENTRY(dd)
1299 __array(char, buf, 64)
1300 __field(int, src)
1301 ),
1302 TP_fast_assign(
1303 DD_DEV_ASSIGN(dd)
1304 is_entry->is_name(__entry->buf, 64, src - is_entry->start);
1305 __entry->src = src;
1306 ),
1307 TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
1308 __entry->src)
1309);
1310
1311/*
1312 * Note:
1313 * This produces a REALLY ugly trace in the console output when the string is
1314 * too long.
1315 */
1316
1317#undef TRACE_SYSTEM
1318#define TRACE_SYSTEM hfi1_trace
1319
1320#define MAX_MSG_LEN 512
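/*
 * MAX_MSG_LEN caps a single hfi1_cdbg()/hfi1_dbg() message; the template
 * below truncates anything longer, and the WARN_ON_ONCE() in TP_fast_assign()
 * fires because vsnprintf() reports the length the full string would have
 * needed.
 */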
1321
1322DECLARE_EVENT_CLASS(hfi1_trace_template,
1323 TP_PROTO(const char *function, struct va_format *vaf),
1324 TP_ARGS(function, vaf),
1325 TP_STRUCT__entry(
1326 __string(function, function)
1327 __dynamic_array(char, msg, MAX_MSG_LEN)
1328 ),
1329 TP_fast_assign(
1330 __assign_str(function, function);
1331 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
1332 MAX_MSG_LEN, vaf->fmt,
1333 *vaf->va) >= MAX_MSG_LEN);
1334 ),
1335 TP_printk("(%s) %s",
1336 __get_str(function),
1337 __get_str(msg))
1338);
1339
1340/*
1341 * It may be nice to macroize the __hfi1_trace functions, but the va_* handling
1342 * requires an actual function and cannot live in a macro.
1343 */
1344#define __hfi1_trace_def(lvl) \
1345void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
1346 \
1347DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
1348 TP_PROTO(const char *function, struct va_format *vaf), \
1349 TP_ARGS(function, vaf))
1350
1351#define __hfi1_trace_fn(lvl) \
1352void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
1353{ \
1354 struct va_format vaf = { \
1355 .fmt = fmt, \
1356 }; \
1357 va_list args; \
1358 \
1359 va_start(args, fmt); \
1360 vaf.va = &args; \
1361 trace_hfi1_ ##lvl(func, &vaf); \
1362 va_end(args); \
1363 return; \
1364}
1365
1366/*
1367 * To create a new trace level, define it below with __hfi1_trace_def() and add
1368 * the matching __hfi1_trace_fn() in trace.c. This creates all the hooks for
1369 * calling hfi1_cdbg(LVL, fmt, ...) as well as taking care of all the debugfs
1370 * stuff (see the hypothetical example after hfi1_dbg() below).
1371 */
1372__hfi1_trace_def(PKT);
1373__hfi1_trace_def(PROC);
1374__hfi1_trace_def(SDMA);
1375__hfi1_trace_def(LINKVERB);
1376__hfi1_trace_def(DEBUG);
1377__hfi1_trace_def(SNOOP);
1378__hfi1_trace_def(CNTR);
1379__hfi1_trace_def(PIO);
1380__hfi1_trace_def(DC8051);
1381__hfi1_trace_def(FIRMWARE);
1382__hfi1_trace_def(RCVCTRL);
1383__hfi1_trace_def(TID);
1384
1385#define hfi1_cdbg(which, fmt, ...) \
1386 __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
1387
1388#define hfi1_dbg(fmt, ...) \
1389 hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
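/*
 * Hypothetical example of wiring up a new trace level named FOO (the name
 * and message are made up for illustration only).  This header would get:
 *
 *	__hfi1_trace_def(FOO);
 *
 * trace.c would get the matching:
 *
 *	__hfi1_trace_fn(FOO)
 *
 * and callers could then emit messages with:
 *
 *	hfi1_cdbg(FOO, "widget %d misbehaving", widget_id);
 */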
1390
1391/*
1392 * Define HFI1_EARLY_DBG at compile time or here to enable early trace
1393 * messages. Do not check in an enablement for this.
1394 */
1395
1396#ifdef HFI1_EARLY_DBG
1397#define hfi1_dbg_early(fmt, ...) \
1398 trace_printk(fmt, ##__VA_ARGS__)
1399#else
1400#define hfi1_dbg_early(fmt, ...)
1401#endif
1402
1403#endif /* __HFI1_TRACE_H */
1404
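/*
 * TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE tell <trace/define_trace.h> that the
 * header to re-read is ./trace.h; that second pass, which generates the
 * tracepoint definitions, only happens in the one .c file that defines
 * CREATE_TRACE_POINTS before including this header.
 */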
1405#undef TRACE_INCLUDE_PATH
1406#undef TRACE_INCLUDE_FILE
1407#define TRACE_INCLUDE_PATH .
1408#define TRACE_INCLUDE_FILE trace
1409#include <trace/define_trace.h>