3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2015 Intel Corporation.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
21 * Copyright(c) 2015 Intel Corporation.
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
 * hfi1 tracepoint definitions.
 * NOTE(review): this excerpt elides many original lines; code is untouched.
 */
50 #undef TRACE_SYSTEM_VAR
51 #define TRACE_SYSTEM_VAR hfi1
/* Multi-read guard: trace headers are re-read by trace/define_trace.h. */
53 #if !defined(__HFI1_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
54 #define __HFI1_TRACE_H
56 #include <linux/tracepoint.h>
57 #include <linux/trace_seq.h>
/* Capture/assign the PCI device name string for a hfi1_devdata. */
63 #define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
64 #define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
/* Map RHF receive-type codes to symbolic names for trace output. */
66 #define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
67 #define show_packettype(etype) \
68 __print_symbolic(etype, \
69 packettype_name(EXPECTED), \
70 packettype_name(EAGER), \
71 packettype_name(IB), \
72 packettype_name(ERROR), \
73 packettype_name(BYPASS))
76 #define TRACE_SYSTEM hfi1_rx
/*
 * hfi1_rcvhdr: trace one received packet header — context, error flags,
 * packet type, header/payload lengths, eager-update flag and eager tail.
 */
78 TRACE_EVENT(hfi1_rcvhdr,
79 TP_PROTO(struct hfi1_devdata *dd,
87 TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
100 __entry->eflags = eflags;
101 __entry->ctxt = ctxt;
102 __entry->etype = etype;
103 __entry->hlen = hlen;
104 __entry->tlen = tlen;
105 __entry->updegr = updegr;
106 __entry->etail = etail;
/* etype is printed both numerically and symbolically. */
109 "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
113 __entry->etype, show_packettype(__entry->etype),
/*
 * hfi1_receive_interrupt: record which receive-interrupt handler a
 * context is using (generic slow path vs. DMA-rtail fast paths).
 */
121 TRACE_EVENT(hfi1_receive_interrupt,
122 TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
127 __field(u8, slow_path)
128 __field(u8, dma_rtail)
132 __entry->ctxt = ctxt;
133 if (dd->rcd[ctxt]->do_interrupt ==
134 &handle_receive_interrupt) {
135 __entry->slow_path = 1;
/* 0xFF presumably means "dma_rtail not applicable on slow path" — confirm. */
136 __entry->dma_rtail = 0xFF;
137 } else if (dd->rcd[ctxt]->do_interrupt ==
138 &handle_receive_interrupt_dma_rtail){
139 __entry->dma_rtail = 1;
140 __entry->slow_path = 0;
141 } else if (dd->rcd[ctxt]->do_interrupt ==
142 &handle_receive_interrupt_nodma_rtail) {
143 __entry->dma_rtail = 0;
144 __entry->slow_path = 0;
148 "[%s] ctxt %d SlowPath: %d DmaRtail: %d",
/* Formats a u64 array into the trace seq; implemented outside this header. */
156 const char *print_u64_array(struct trace_seq *, u64 *, int);
/*
 * hfi1_exp_tid_map: snapshot an expected-receive TID bitmap; the map
 * words are copied into a dynamic array and pretty-printed later.
 */
158 TRACE_EVENT(hfi1_exp_tid_map,
159 TP_PROTO(unsigned ctxt, u16 subctxt, int dir,
160 unsigned long *maps, u16 count),
161 TP_ARGS(ctxt, subctxt, dir, maps, count),
163 __field(unsigned, ctxt)
164 __field(u16, subctxt)
167 __dynamic_array(unsigned long, maps, sizeof(*maps) * count)
170 __entry->ctxt = ctxt;
171 __entry->subctxt = subctxt;
173 __entry->count = count;
174 memcpy(__get_dynamic_array(maps), maps,
175 sizeof(*maps) * count);
/* dir selects a ">"/"<" direction marker in the output. */
177 TP_printk("[%3u:%02u] %s tidmaps %s",
180 (__entry->dir ? ">" : "<"),
181 print_u64_array(p, __get_dynamic_array(maps),
/*
 * hfi1_exp_rcv_set: trace programming of one expected-receive TID entry
 * (virtual address, physical address and backing page pointer).
 */
186 TRACE_EVENT(hfi1_exp_rcv_set,
187 TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid,
188 unsigned long vaddr, u64 phys_addr, void *page),
189 TP_ARGS(ctxt, subctxt, tid, vaddr, phys_addr, page),
191 __field(unsigned, ctxt)
192 __field(u16, subctxt)
194 __field(unsigned long, vaddr)
195 __field(u64, phys_addr)
196 __field(void *, page)
199 __entry->ctxt = ctxt;
200 __entry->subctxt = subctxt;
202 __entry->vaddr = vaddr;
203 __entry->phys_addr = phys_addr;
204 __entry->page = page;
206 TP_printk("[%u:%u] TID %u, vaddrs 0x%lx, physaddr 0x%llx, pgp %p",
/*
 * hfi1_exp_rcv_free: counterpart event for freeing a TID entry.
 */
216 TRACE_EVENT(hfi1_exp_rcv_free,
217 TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid,
218 unsigned long phys, void *page),
219 TP_ARGS(ctxt, subctxt, tid, phys, page),
221 __field(unsigned, ctxt)
222 __field(u16, subctxt)
224 __field(unsigned long, phys)
225 __field(void *, page)
228 __entry->ctxt = ctxt;
229 __entry->subctxt = subctxt;
231 __entry->phys = phys;
232 __entry->page = page;
234 TP_printk("[%u:%u] freeing TID %u, 0x%lx, pgp %p",
243 #define TRACE_SYSTEM hfi1_tx
/*
 * hfi1_piofree: PIO send-context credit release event.
 */
245 TRACE_EVENT(hfi1_piofree,
246 TP_PROTO(struct send_context *sc, int extra),
250 __field(u32, sw_index)
251 __field(u32, hw_context)
255 DD_DEV_ASSIGN(sc->dd);
256 __entry->sw_index = sc->sw_index;
257 __entry->hw_context = sc->hw_context;
258 __entry->extra = extra;
261 "[%s] ctxt %u(%u) extra %d",
/*
 * hfi1_wantpiointr: trace request/clear of the PIO credit-return
 * interrupt along with the current credit_ctrl CSR value.
 */
269 TRACE_EVENT(hfi1_wantpiointr,
270 TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
271 TP_ARGS(sc, needint, credit_ctrl),
274 __field(u32, sw_index)
275 __field(u32, hw_context)
276 __field(u32, needint)
277 __field(u64, credit_ctrl)
280 DD_DEV_ASSIGN(sc->dd);
281 __entry->sw_index = sc->sw_index;
282 __entry->hw_context = sc->hw_context;
283 __entry->needint = needint;
284 __entry->credit_ctrl = credit_ctrl;
287 "[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
292 (unsigned long long)__entry->credit_ctrl
/*
 * QP sleep/wakeup event class: logs qpn, the flags argument and the
 * QP's current s_flags at the time of the event.
 */
296 DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
297 TP_PROTO(struct hfi1_qp *qp, u32 flags),
300 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
303 __field(u32, s_flags)
306 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
307 __entry->flags = flags;
308 __entry->qpn = qp->ibqp.qp_num;
309 __entry->s_flags = qp->s_flags;
312 "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
320 DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
321 TP_PROTO(struct hfi1_qp *qp, u32 flags),
324 DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
325 TP_PROTO(struct hfi1_qp *qp, u32 flags),
329 #define TRACE_SYSTEM hfi1_qphash
/*
 * QP hash-table event class: records qpn and the hash bucket used for
 * insert/remove operations.
 */
330 DECLARE_EVENT_CLASS(hfi1_qphash_template,
331 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
334 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
339 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
340 __entry->qpn = qp->ibqp.qp_num;
341 __entry->bucket = bucket;
344 "[%s] qpn 0x%x bucket %u",
351 DEFINE_EVENT(hfi1_qphash_template, hfi1_qpinsert,
352 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
353 TP_ARGS(qp, bucket));
355 DEFINE_EVENT(hfi1_qphash_template, hfi1_qpremove,
356 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
357 TP_ARGS(qp, bucket));
360 #define TRACE_SYSTEM hfi1_ibhdrs
/* Length (bytes) of the extended headers following the BTH. */
362 u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
/* Pretty-printer for extended verbs headers; implemented in trace.c. */
363 const char *parse_everbs_hdrs(
368 #define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
/* Pretty-printer for SDMA descriptor flag bits; implemented in trace.c. */
370 const char *parse_sdma_flags(
372 u64 desc0, u64 desc1);
374 #define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
/* Map LRH "lnh" values to symbolic names. */
377 #define lrh_name(lrh) { HFI1_##lrh, #lrh }
378 #define show_lnh(lrh) \
379 __print_symbolic(lrh, \
383 #define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
/* Symbolic table covering RC, UC and UD IB transport opcodes. */
384 #define show_ib_opcode(opcode) \
385 __print_symbolic(opcode, \
386 ib_opcode_name(RC_SEND_FIRST), \
387 ib_opcode_name(RC_SEND_MIDDLE), \
388 ib_opcode_name(RC_SEND_LAST), \
389 ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
390 ib_opcode_name(RC_SEND_ONLY), \
391 ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
392 ib_opcode_name(RC_RDMA_WRITE_FIRST), \
393 ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
394 ib_opcode_name(RC_RDMA_WRITE_LAST), \
395 ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
396 ib_opcode_name(RC_RDMA_WRITE_ONLY), \
397 ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
398 ib_opcode_name(RC_RDMA_READ_REQUEST), \
399 ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
400 ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
401 ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
402 ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
403 ib_opcode_name(RC_ACKNOWLEDGE), \
404 ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
405 ib_opcode_name(RC_COMPARE_SWAP), \
406 ib_opcode_name(RC_FETCH_ADD), \
407 ib_opcode_name(UC_SEND_FIRST), \
408 ib_opcode_name(UC_SEND_MIDDLE), \
409 ib_opcode_name(UC_SEND_LAST), \
410 ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
411 ib_opcode_name(UC_SEND_ONLY), \
412 ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
413 ib_opcode_name(UC_RDMA_WRITE_FIRST), \
414 ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
415 ib_opcode_name(UC_RDMA_WRITE_LAST), \
416 ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
417 ib_opcode_name(UC_RDMA_WRITE_ONLY), \
418 ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
419 ib_opcode_name(UD_SEND_ONLY), \
420 ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
424 #define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
426 "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
427 "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
/* Extended headers are rendered as a single pre-formatted string. */
428 #define EHDR_PRN "%s"
/*
 * IB header event class: decodes LRH/BTH fields bit-by-bit at capture
 * time and copies the extended headers for deferred pretty-printing.
 */
430 DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
431 TP_PROTO(struct hfi1_devdata *dd,
432 struct hfi1_ib_header *hdr),
456 /* extended headers */
457 __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
460 struct hfi1_other_headers *ohdr;
/* LRH word 0: vl/lver/sl/lnh packed fields. */
465 (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
467 (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
469 (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
471 (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
473 be16_to_cpu(hdr->lrh[1]);
474 /* allow for larger len */
476 be16_to_cpu(hdr->lrh[2]);
478 be16_to_cpu(hdr->lrh[3]);
/* lnh selects where the other-headers (BTH...) begin. */
480 if (__entry->lnh == HFI1_LRH_BTH)
483 ohdr = &hdr->u.l.oth;
/* BTH word 0: opcode/se/m/pad/tver/pkey packed fields. */
485 (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
487 (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
489 (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
491 (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
493 (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
495 be32_to_cpu(ohdr->bth[0]) & 0xffff;
497 (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT)
500 (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT)
503 be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
505 (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
506 /* allow for larger PSN */
508 be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
509 /* extended headers */
511 __get_dynamic_array(ehdrs),
513 ibhdr_exhdr_len(hdr));
515 TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
521 __entry->lnh, show_lnh(__entry->lnh),
526 __entry->opcode, show_ib_opcode(__entry->opcode),
537 /* extended headers */
540 (void *)__get_dynamic_array(ehdrs))
/* Same decode for the receive and transmit directions. */
544 DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
545 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
548 DEFINE_EVENT(hfi1_ibhdr_template, output_ibhdr,
549 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
553 "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
554 "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
557 #define TRACE_SYSTEM hfi1_snoop
/*
 * snoop_capture: capture a full packet (raw header + payload copied into
 * dynamic arrays) along with a summary of key LRH/BTH fields.
 */
560 TRACE_EVENT(snoop_capture,
561 TP_PROTO(struct hfi1_devdata *dd,
563 struct hfi1_ib_header *hdr,
566 TP_ARGS(dd, hdr_len, hdr, data_len, data),
575 __field(u32, hdr_len)
576 __field(u32, data_len)
578 __dynamic_array(u8, raw_hdr, hdr_len)
579 __dynamic_array(u8, raw_pkt, data_len)
582 struct hfi1_other_headers *ohdr;
584 __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
585 if (__entry->lnh == HFI1_LRH_BTH)
588 ohdr = &hdr->u.l.oth;
590 __entry->slid = be16_to_cpu(hdr->lrh[3]);
591 __entry->dlid = be16_to_cpu(hdr->lrh[1]);
592 __entry->qpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
593 __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
594 __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
595 __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
596 __entry->hdr_len = hdr_len;
597 __entry->data_len = data_len;
598 memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
599 memcpy(__get_dynamic_array(raw_pkt), data, data_len);
601 TP_printk("[%s] " SNOOP_PRN,
607 show_ib_opcode(__entry->opcode),
616 #define TRACE_SYSTEM hfi1_ctxts
619 "cred:%u, credaddr:0x%llx, piobase:0x%llx, rcvhdr_cnt:%u, " \
620 "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
/*
 * hfi1_uctxtdata: dump the key resources of a user context — send
 * credits, PIO base, receive-header queue and eager-buffer layout.
 */
621 TRACE_EVENT(hfi1_uctxtdata,
622 TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
626 __field(unsigned, ctxt)
627 __field(u32, credits)
628 __field(u64, hw_free)
629 __field(u64, piobase)
630 __field(u16, rcvhdrq_cnt)
631 __field(u64, rcvhdrq_phys)
632 __field(u32, eager_cnt)
633 __field(u64, rcvegr_phys)
637 __entry->ctxt = uctxt->ctxt;
638 __entry->credits = uctxt->sc->credits;
639 __entry->hw_free = (u64)uctxt->sc->hw_free;
640 __entry->piobase = (u64)uctxt->sc->base_addr;
641 __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
642 __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
643 __entry->eager_cnt = uctxt->egrbufs.alloced;
644 __entry->rcvegr_phys = uctxt->egrbufs.rcvtids[0].phys;
647 "[%s] ctxt %u " UCTXT_FMT,
653 __entry->rcvhdrq_cnt,
654 __entry->rcvhdrq_phys,
661 "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
/*
 * hfi1_ctxt_info: dump the hfi1_ctxt_info block handed to user space
 * (cinfo is passed by value).
 */
662 TRACE_EVENT(hfi1_ctxt_info,
663 TP_PROTO(struct hfi1_devdata *dd, unsigned ctxt, unsigned subctxt,
664 struct hfi1_ctxt_info cinfo),
665 TP_ARGS(dd, ctxt, subctxt, cinfo),
668 __field(unsigned, ctxt)
669 __field(unsigned, subctxt)
670 __field(u16, egrtids)
671 __field(u16, rcvhdrq_cnt)
672 __field(u16, rcvhdrq_size)
673 __field(u16, sdma_ring_size)
674 __field(u32, rcvegr_size)
678 __entry->ctxt = ctxt;
679 __entry->subctxt = subctxt;
680 __entry->egrtids = cinfo.egrtids;
681 __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
682 __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
683 __entry->sdma_ring_size = cinfo.sdma_ring_size;
684 __entry->rcvegr_size = cinfo.rcvegr_size;
687 "[%s] ctxt %u:%u " CINFO_FMT,
692 __entry->rcvegr_size,
693 __entry->rcvhdrq_cnt,
694 __entry->rcvhdrq_size,
695 __entry->sdma_ring_size
/*
 * Buffer-control (BCT) event class: copies the whole buffer_control
 * struct into a dynamic array and prints per-VL dedicated/shared limits.
 */
700 #define TRACE_SYSTEM hfi1_sma
703 "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
707 ((struct buffer_control *)__get_dynamic_array(bct))->field \
710 DECLARE_EVENT_CLASS(hfi1_bct_template,
711 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
715 __dynamic_array(u8, bct, sizeof(*bc))
720 __get_dynamic_array(bct),
724 TP_printk(BCT_FORMAT,
725 BCT(overall_shared_limit),
727 BCT(vl[0].dedicated),
730 BCT(vl[1].dedicated),
733 BCT(vl[2].dedicated),
736 BCT(vl[3].dedicated),
739 BCT(vl[4].dedicated),
742 BCT(vl[5].dedicated),
745 BCT(vl[6].dedicated),
748 BCT(vl[7].dedicated),
751 BCT(vl[15].dedicated),
/* Same class for both directions: programming and reading back the BCT. */
757 DEFINE_EVENT(hfi1_bct_template, bct_set,
758 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
761 DEFINE_EVENT(hfi1_bct_template, bct_get,
762 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
766 #define TRACE_SYSTEM hfi1_sdma
/*
 * hfi1_sdma_descriptor: trace one SDMA descriptor; desc0/desc1 fields
 * (flags, physical address, generation, byte count) are unpacked at
 * print time via the shift/mask constants.
 */
768 TRACE_EVENT(hfi1_sdma_descriptor,
770 struct sdma_engine *sde,
775 TP_ARGS(sde, desc0, desc1, e, descp),
777 DD_DEV_ENTRY(sde->dd)
778 __field(void *, descp)
785 DD_DEV_ASSIGN(sde->dd);
786 __entry->desc0 = desc0;
787 __entry->desc1 = desc1;
788 __entry->idx = sde->this_idx;
789 __entry->descp = descp;
793 "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
796 __parse_sdma_flags(__entry->desc0, __entry->desc1),
797 (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT)
798 & SDMA_DESC0_PHY_ADDR_MASK,
799 (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT)
800 & SDMA_DESC1_GENERATION_MASK),
801 (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT)
802 & SDMA_DESC0_BYTE_COUNT_MASK),
/* hfi1_sdma_engine_select: which SDMA engine was picked for (sel, vl). */
810 TRACE_EVENT(hfi1_sdma_engine_select,
811 TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
812 TP_ARGS(dd, sel, vl, idx),
826 "[%s] selecting SDE %u sel 0x%x vl %u",
/* Engine-status event class shared by the interrupt/progress events. */
834 DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
836 struct sdma_engine *sde,
839 TP_ARGS(sde, status),
841 DD_DEV_ENTRY(sde->dd)
846 DD_DEV_ASSIGN(sde->dd);
847 __entry->status = status;
848 __entry->idx = sde->this_idx;
851 "[%s] SDE(%u) status %llx",
854 (unsigned long long)__entry->status
858 DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
860 struct sdma_engine *sde,
866 DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
868 struct sdma_engine *sde,
/* AHG (additional header generation) index allocate/deallocate class. */
874 DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
876 struct sdma_engine *sde,
881 DD_DEV_ENTRY(sde->dd)
886 DD_DEV_ASSIGN(sde->dd);
887 __entry->idx = sde->this_idx;
888 __entry->aidx = aidx;
891 "[%s] SDE(%u) aidx %d",
898 DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
900 struct sdma_engine *sde,
905 DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
907 struct sdma_engine *sde,
/*
 * Two variants of hfi1_sdma_progress: the DEBUG_SDMA_ORDER build also
 * records the txreq sequence number (sn); otherwise sn is omitted.
 */
912 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
913 TRACE_EVENT(hfi1_sdma_progress,
915 struct sdma_engine *sde,
918 struct sdma_txreq *txp
920 TP_ARGS(sde, hwhead, swhead, txp),
922 DD_DEV_ENTRY(sde->dd)
927 __field(u16, tx_tail)
928 __field(u16, tx_head)
932 DD_DEV_ASSIGN(sde->dd);
933 __entry->hwhead = hwhead;
934 __entry->swhead = swhead;
935 __entry->tx_tail = sde->tx_tail;
936 __entry->tx_head = sde->tx_head;
/* ~0 marks "no pending txreq". */
937 __entry->txnext = txp ? txp->next_descq_idx : ~0;
938 __entry->idx = sde->this_idx;
939 __entry->sn = txp ? txp->sn : ~0;
942 "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
954 TRACE_EVENT(hfi1_sdma_progress,
956 struct sdma_engine *sde,
959 struct sdma_txreq *txp
961 TP_ARGS(sde, hwhead, swhead, txp),
963 DD_DEV_ENTRY(sde->dd)
967 __field(u16, tx_tail)
968 __field(u16, tx_head)
972 DD_DEV_ASSIGN(sde->dd);
973 __entry->hwhead = hwhead;
974 __entry->swhead = swhead;
975 __entry->tx_tail = sde->tx_tail;
976 __entry->tx_head = sde->tx_head;
977 __entry->txnext = txp ? txp->next_descq_idx : ~0;
978 __entry->idx = sde->this_idx;
981 "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
/* Sequence-number event class for SDMA ordering checks. */
993 DECLARE_EVENT_CLASS(hfi1_sdma_sn,
995 struct sdma_engine *sde,
1000 DD_DEV_ENTRY(sde->dd)
1005 DD_DEV_ASSIGN(sde->dd);
1007 __entry->idx = sde->this_idx;
1010 "[%s] SDE(%u) sn %llu",
1017 DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
1019 struct sdma_engine *sde,
1025 DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
1027 struct sdma_engine *sde,
/*
 * hfi1_sdma_user_header: dump a full user SDMA packet header — PBC,
 * LRH, BTH, the nine KDETH words and the TID value.  PBC/KDETH words
 * are stored in wire (little-endian) order; LRH/BTH are byte-swapped.
 */
1033 #define USDMA_HDR_FORMAT \
1034 "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
1036 TRACE_EVENT(hfi1_sdma_user_header,
1037 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
1038 struct hfi1_pkt_header *hdr, u32 tidval),
1039 TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
1043 __field(u8, subctxt)
1045 __field(__le32, pbc0)
1046 __field(__le32, pbc1)
1047 __field(__be32, lrh0)
1048 __field(__be32, lrh1)
1049 __field(__be32, bth0)
1050 __field(__be32, bth1)
1051 __field(__be32, bth2)
1052 __field(__le32, kdeth0)
1053 __field(__le32, kdeth1)
1054 __field(__le32, kdeth2)
1055 __field(__le32, kdeth3)
1056 __field(__le32, kdeth4)
1057 __field(__le32, kdeth5)
1058 __field(__le32, kdeth6)
1059 __field(__le32, kdeth7)
1060 __field(__le32, kdeth8)
1061 __field(u32, tidval)
1064 __le32 *pbc = (__le32 *)hdr->pbc;
1065 __be32 *lrh = (__be32 *)hdr->lrh;
1066 __be32 *bth = (__be32 *)hdr->bth;
1067 __le32 *kdeth = (__le32 *)&hdr->kdeth;
1070 __entry->ctxt = ctxt;
1071 __entry->subctxt = subctxt;
1073 __entry->pbc0 = pbc[0];
1074 __entry->pbc1 = pbc[1];
/* NOTE(review): fields are declared __be32 yet assigned the result of
 * be32_to_cpu() — endian annotations look inconsistent; confirm upstream. */
1075 __entry->lrh0 = be32_to_cpu(lrh[0]);
1076 __entry->lrh1 = be32_to_cpu(lrh[1]);
1077 __entry->bth0 = be32_to_cpu(bth[0]);
1078 __entry->bth1 = be32_to_cpu(bth[1]);
1079 __entry->bth2 = be32_to_cpu(bth[2]);
1080 __entry->kdeth0 = kdeth[0];
1081 __entry->kdeth1 = kdeth[1];
1082 __entry->kdeth2 = kdeth[2];
1083 __entry->kdeth3 = kdeth[3];
1084 __entry->kdeth4 = kdeth[4];
1085 __entry->kdeth5 = kdeth[5];
1086 __entry->kdeth6 = kdeth[6];
1087 __entry->kdeth7 = kdeth[7];
1088 __entry->kdeth8 = kdeth[8];
1089 __entry->tidval = tidval;
1091 TP_printk(USDMA_HDR_FORMAT,
/*
 * hfi1_sdma_user_reqinfo: decode the packed user request-info words
 * (version/opcode, iovec count, packet count, fragment size, comp idx).
 */
1116 #define SDMA_UREQ_FMT \
1117 "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
1118 TRACE_EVENT(hfi1_sdma_user_reqinfo,
1119 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
1120 TP_ARGS(dd, ctxt, subctxt, i),
1124 __field(u8, subctxt)
1125 __field(u8, ver_opcode)
1128 __field(u16, fragsize)
1129 __field(u16, comp_idx)
1133 __entry->ctxt = ctxt;
1134 __entry->subctxt = subctxt;
/* i[0] packs opcode (low byte) and iovec count (next byte). */
1135 __entry->ver_opcode = i[0] & 0xff;
1136 __entry->iovcnt = (i[0] >> 8) & 0xff;
1137 __entry->npkts = i[1];
1138 __entry->fragsize = i[2];
1139 __entry->comp_idx = i[3];
1141 TP_printk(SDMA_UREQ_FMT,
1145 __entry->ver_opcode,
/* Symbolic names for user SDMA completion states. */
1153 #define usdma_complete_name(st) { st, #st }
1154 #define show_usdma_complete_state(st) \
1155 __print_symbolic(st, \
1156 usdma_complete_name(FREE), \
1157 usdma_complete_name(QUEUED), \
1158 usdma_complete_name(COMPLETE), \
1159 usdma_complete_name(ERROR))
/* hfi1_sdma_user_completion: completion state + error code per request. */
1161 TRACE_EVENT(hfi1_sdma_user_completion,
1162 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
1163 u8 state, int code),
1164 TP_ARGS(dd, ctxt, subctxt, idx, state, code),
1168 __field(u8, subctxt)
1175 __entry->ctxt = ctxt;
1176 __entry->subctxt = subctxt;
1178 __entry->state = state;
1179 __entry->code = code;
1181 TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
1182 __get_str(dev), __entry->ctxt, __entry->subctxt,
1183 __entry->idx, show_usdma_complete_state(__entry->state),
/* Formats a u32 array as hex into the trace seq; defined in trace.c. */
1187 const char *print_u32_array(struct trace_seq *, u32 *, int);
1188 #define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
/*
 * hfi1_sdma_user_header_ahg: capture an AHG update (up to 10 u32 words).
 */
1190 TRACE_EVENT(hfi1_sdma_user_header_ahg,
1191 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
1192 u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
1193 TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
1197 __field(u8, subctxt)
1202 __field(u32, tidval)
1203 __array(u32, ahg, 10)
1207 __entry->ctxt = ctxt;
1208 __entry->subctxt = subctxt;
1211 __entry->idx = ahgidx;
1213 __entry->tidval = tidval;
/* Caller must ensure len <= 10; only len words are copied. */
1214 memcpy(__entry->ahg, ahg, len * sizeof(u32));
1216 TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
1224 __print_u32_hex(__entry->ahg, __entry->len),
/*
 * hfi1_sdma_state: SDMA engine state-machine transition; the state
 * names are passed in as strings and stored in the ring buffer.
 */
1229 TRACE_EVENT(hfi1_sdma_state,
1231 struct sdma_engine *sde,
1235 TP_ARGS(sde, cstate, nstate),
1237 DD_DEV_ENTRY(sde->dd)
1238 __string(curstate, cstate)
1239 __string(newstate, nstate)
1242 DD_DEV_ASSIGN(sde->dd);
1243 __assign_str(curstate, cstate);
1244 __assign_str(newstate, nstate);
1246 TP_printk("[%s] current state %s new state %s",
1248 __get_str(curstate),
1254 #define TRACE_SYSTEM hfi1_rc
/*
 * RC event class: snapshot of a QP's PSN bookkeeping (s_psn,
 * s_next_psn, sending window, r_psn) plus s_flags, used by the ACK /
 * timeout / send-complete / rcv-error events below.
 */
1256 DECLARE_EVENT_CLASS(hfi1_rc_template,
1257 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1260 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1262 __field(u32, s_flags)
1265 __field(u32, s_next_psn)
1266 __field(u32, s_sending_psn)
1267 __field(u32, s_sending_hpsn)
1271 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
1272 __entry->qpn = qp->ibqp.qp_num;
1273 __entry->s_flags = qp->s_flags;
1275 __entry->s_psn = qp->s_psn;
1276 __entry->s_next_psn = qp->s_next_psn;
1277 __entry->s_sending_psn = qp->s_sending_psn;
1278 __entry->s_sending_hpsn = qp->s_sending_hpsn;
1279 __entry->r_psn = qp->r_psn;
1282 "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
1288 __entry->s_next_psn,
1289 __entry->s_sending_psn,
1290 __entry->s_sending_hpsn,
1295 DEFINE_EVENT(hfi1_rc_template, hfi1_rc_sendcomplete,
1296 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1300 DEFINE_EVENT(hfi1_rc_template, hfi1_rc_ack,
1301 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1305 DEFINE_EVENT(hfi1_rc_template, hfi1_rc_timeout,
1306 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1310 DEFINE_EVENT(hfi1_rc_template, hfi1_rc_rcv_error,
1311 TP_PROTO(struct hfi1_qp *qp, u32 psn),
1316 #define TRACE_SYSTEM hfi1_misc
/*
 * hfi1_interrupt: resolve an interrupt source number to its name via
 * the is_table entry's is_name() callback into a fixed 64-byte buffer.
 */
1318 TRACE_EVENT(hfi1_interrupt,
1319 TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
1321 TP_ARGS(dd, is_entry, src),
1324 __array(char, buf, 64)
1329 is_entry->is_name(__entry->buf, 64, src - is_entry->start);
1332 TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
1338 * This produces a REALLY ugly trace in the console output when the string is
1343 #define TRACE_SYSTEM hfi1_trace
1345 #define MAX_MSG_LEN 512
/*
 * Debug-message event class: records the calling function name and the
 * vsnprintf-formatted message (truncated at MAX_MSG_LEN, with a
 * WARN_ON_ONCE if truncation occurs).
 */
1347 DECLARE_EVENT_CLASS(hfi1_trace_template,
1348 TP_PROTO(const char *function, struct va_format *vaf),
1349 TP_ARGS(function, vaf),
1351 __string(function, function)
1352 __dynamic_array(char, msg, MAX_MSG_LEN)
1355 __assign_str(function, function);
1356 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
1357 MAX_MSG_LEN, vaf->fmt,
1358 *vaf->va) >= MAX_MSG_LEN);
1360 TP_printk("(%s) %s",
1361 __get_str(function),
1366 * It may be nice to macroize the __hfi1_trace but the va_* stuff requires an
1367 * actual function to work and can not be in a macro.
1369 #define __hfi1_trace_def(lvl) \
1370 void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
1372 DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
1373 TP_PROTO(const char *function, struct va_format *vaf), \
1374 TP_ARGS(function, vaf))
/* Out-of-line wrapper body (in trace.c) that builds the va_format. */
1376 #define __hfi1_trace_fn(lvl) \
1377 void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
1379 struct va_format vaf = { \
1384 va_start(args, fmt); \
1386 trace_hfi1_ ##lvl(func, &vaf); \
1392 * To create a new trace level simply define it below and as a __hfi1_trace_fn
1393 * in trace.c. This will create all the hooks for calling
1394 * hfi1_cdbg(LVL, fmt, ...); as well as take care of all
1395 * the debugfs stuff.
1397 __hfi1_trace_def(PKT);
1398 __hfi1_trace_def(PROC);
1399 __hfi1_trace_def(SDMA);
1400 __hfi1_trace_def(LINKVERB);
1401 __hfi1_trace_def(DEBUG);
1402 __hfi1_trace_def(SNOOP);
1403 __hfi1_trace_def(CNTR);
1404 __hfi1_trace_def(PIO);
1405 __hfi1_trace_def(DC8051);
1406 __hfi1_trace_def(FIRMWARE);
1407 __hfi1_trace_def(RCVCTRL);
1408 __hfi1_trace_def(TID);
/* Per-level and default debug entry points used throughout the driver. */
1410 #define hfi1_cdbg(which, fmt, ...) \
1411 __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
1413 #define hfi1_dbg(fmt, ...) \
1414 hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
1417 * Define HFI1_EARLY_DBG at compile time or here to enable early trace
1418 * messages. Do not check in an enablement for this.
1421 #ifdef HFI1_EARLY_DBG
1422 #define hfi1_dbg_early(fmt, ...) \
1423 trace_printk(fmt, ##__VA_ARGS__)
1425 #define hfi1_dbg_early(fmt, ...)
1428 #endif /* __HFI1_TRACE_H */
/* Boilerplate consumed by trace/define_trace.h to emit the tracepoints. */
1430 #undef TRACE_INCLUDE_PATH
1431 #undef TRACE_INCLUDE_FILE
1432 #define TRACE_INCLUDE_PATH .
1433 #define TRACE_INCLUDE_FILE trace
1434 #include <trace/define_trace.h>