2 * CTF writing support via babeltrace.
4 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
5 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
7 * Released under the GPL v2. (and only v2, not any later version)
10 #include <linux/compiler.h>
11 #include <babeltrace/ctf-writer/writer.h>
12 #include <babeltrace/ctf-writer/clock.h>
13 #include <babeltrace/ctf-writer/stream.h>
14 #include <babeltrace/ctf-writer/event.h>
15 #include <babeltrace/ctf-writer/event-types.h>
16 #include <babeltrace/ctf-writer/event-fields.h>
17 #include <babeltrace/ctf-ir/utils.h>
18 #include <babeltrace/ctf/events.h>
19 #include <traceevent/event-parse.h>
21 #include "data-convert-bt.h"
/* Debug printout helpers; pr_fmt()/pr_time_N() come from debug.h. */
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
40 struct bt_ctf_event_class *event_class;
46 struct bt_ctf_stream *stream;
52 /* writer primitives */
53 struct bt_ctf_writer *writer;
54 struct ctf_stream **stream;
56 struct bt_ctf_stream_class *stream_class;
57 struct bt_ctf_clock *clock;
62 struct bt_ctf_field_type *s64;
63 struct bt_ctf_field_type *u64;
64 struct bt_ctf_field_type *s32;
65 struct bt_ctf_field_type *u32;
66 struct bt_ctf_field_type *string;
67 struct bt_ctf_field_type *u32_hex;
68 struct bt_ctf_field_type *u64_hex;
70 struct bt_ctf_field_type *array[6];
75 struct perf_tool tool;
76 struct ctf_writer writer;
81 /* Ordered events configured queue size. */
85 static int value_set(struct bt_ctf_field_type *type,
86 struct bt_ctf_event *event,
87 const char *name, u64 val)
89 struct bt_ctf_field *field;
90 bool sign = bt_ctf_field_type_integer_get_signed(type);
93 field = bt_ctf_field_create(type);
95 pr_err("failed to create a field %s\n", name);
100 ret = bt_ctf_field_signed_integer_set_value(field, val);
102 pr_err("failed to set field value %s\n", name);
106 ret = bt_ctf_field_unsigned_integer_set_value(field, val);
108 pr_err("failed to set field value %s\n", name);
113 ret = bt_ctf_event_set_payload(event, name, field);
115 pr_err("failed to set payload %s\n", name);
119 pr2(" SET [%s = %" PRIu64 "]\n", name, val);
122 bt_ctf_field_put(field);
126 #define __FUNC_VALUE_SET(_name, _val_type) \
127 static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \
128 struct bt_ctf_event *event, \
132 struct bt_ctf_field_type *type = cw->data._name; \
133 return value_set(type, event, name, (u64) val); \
136 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
142 __FUNC_VALUE_SET(u64_hex, u64)
144 static struct bt_ctf_field_type*
145 get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
147 unsigned long flags = field->flags;
149 if (flags & FIELD_IS_STRING)
150 return cw->data.string;
152 if (!(flags & FIELD_IS_SIGNED)) {
153 /* unsigned long are mostly pointers */
154 if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
155 return cw->data.u64_hex;
158 if (flags & FIELD_IS_SIGNED) {
159 if (field->size == 8)
165 if (field->size == 8)
/*
 * Sign-extend a @size-byte value stored in an unsigned long long so that
 * a negative narrow integer becomes a negative 64-bit value.  64-bit
 * values are returned unchanged.
 */
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long value_mask;

	/*
	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * Directly set value_mask for code readers.
	 */
	switch (size) {
	case 1:
		value_mask = 0x7fULL;
		break;
	case 2:
		value_mask = 0x7fffULL;
		break;
	case 4:
		value_mask = 0x7fffffffULL;
		break;
	case 8:
	default:
		/*
		 * For 64 bit value, return it self. There is no need
		 * to fill high bits.
		 */
		return value_int;
	}

	/* If it is a positive value, don't adjust. */
	if ((value_int & (~0ULL - value_mask)) == 0)
		return value_int;

	/* Fill upper part of value_int with 1 to make it a negative long long. */
	return (value_int & value_mask) | ~value_mask;
}
208 static int string_set_value(struct bt_ctf_field *field, const char *string)
211 size_t len = strlen(string), i, p;
214 for (i = p = 0; i < len; i++, p++) {
215 if (isprint(string[i])) {
218 buffer[p] = string[i];
222 snprintf(numstr, sizeof(numstr), "\\x%02x",
223 (unsigned int)(string[i]) & 0xff);
226 buffer = zalloc(i + (len - i) * 4 + 2);
228 pr_err("failed to set unprintable string '%s'\n", string);
229 return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
232 strncpy(buffer, string, i);
234 strncat(buffer + p, numstr, 4);
240 return bt_ctf_field_string_set_value(field, string);
241 err = bt_ctf_field_string_set_value(field, buffer);
246 static int add_tracepoint_field_value(struct ctf_writer *cw,
247 struct bt_ctf_event_class *event_class,
248 struct bt_ctf_event *event,
249 struct perf_sample *sample,
250 struct format_field *fmtf)
252 struct bt_ctf_field_type *type;
253 struct bt_ctf_field *array_field;
254 struct bt_ctf_field *field;
255 const char *name = fmtf->name;
256 void *data = sample->raw_data;
257 unsigned long flags = fmtf->flags;
258 unsigned int n_items;
265 offset = fmtf->offset;
267 if (flags & FIELD_IS_STRING)
268 flags &= ~FIELD_IS_ARRAY;
270 if (flags & FIELD_IS_DYNAMIC) {
271 unsigned long long tmp_val;
273 tmp_val = pevent_read_number(fmtf->event->pevent,
280 if (flags & FIELD_IS_ARRAY) {
282 type = bt_ctf_event_class_get_field_by_name(
284 array_field = bt_ctf_field_create(type);
285 bt_ctf_field_type_put(type);
287 pr_err("Failed to create array type %s\n", name);
291 len = fmtf->size / fmtf->arraylen;
292 n_items = fmtf->arraylen;
298 type = get_tracepoint_field_type(cw, fmtf);
300 for (i = 0; i < n_items; i++) {
301 if (flags & FIELD_IS_ARRAY)
302 field = bt_ctf_field_array_get_field(array_field, i);
304 field = bt_ctf_field_create(type);
307 pr_err("failed to create a field %s\n", name);
311 if (flags & FIELD_IS_STRING)
312 ret = string_set_value(field, data + offset + i * len);
314 unsigned long long value_int;
316 value_int = pevent_read_number(
318 data + offset + i * len, len);
320 if (!(flags & FIELD_IS_SIGNED))
321 ret = bt_ctf_field_unsigned_integer_set_value(
324 ret = bt_ctf_field_signed_integer_set_value(
325 field, adjust_signedness(value_int, len));
329 pr_err("failed to set file value %s\n", name);
332 if (!(flags & FIELD_IS_ARRAY)) {
333 ret = bt_ctf_event_set_payload(event, name, field);
335 pr_err("failed to set payload %s\n", name);
339 bt_ctf_field_put(field);
341 if (flags & FIELD_IS_ARRAY) {
342 ret = bt_ctf_event_set_payload(event, name, array_field);
344 pr_err("Failed add payload array %s\n", name);
347 bt_ctf_field_put(array_field);
352 bt_ctf_field_put(field);
356 static int add_tracepoint_fields_values(struct ctf_writer *cw,
357 struct bt_ctf_event_class *event_class,
358 struct bt_ctf_event *event,
359 struct format_field *fields,
360 struct perf_sample *sample)
362 struct format_field *field;
365 for (field = fields; field; field = field->next) {
366 ret = add_tracepoint_field_value(cw, event_class, event, sample,
374 static int add_tracepoint_values(struct ctf_writer *cw,
375 struct bt_ctf_event_class *event_class,
376 struct bt_ctf_event *event,
377 struct perf_evsel *evsel,
378 struct perf_sample *sample)
380 struct format_field *common_fields = evsel->tp_format->format.common_fields;
381 struct format_field *fields = evsel->tp_format->format.fields;
384 ret = add_tracepoint_fields_values(cw, event_class, event,
385 common_fields, sample);
387 ret = add_tracepoint_fields_values(cw, event_class, event,
394 add_bpf_output_values(struct bt_ctf_event_class *event_class,
395 struct bt_ctf_event *event,
396 struct perf_sample *sample)
398 struct bt_ctf_field_type *len_type, *seq_type;
399 struct bt_ctf_field *len_field, *seq_field;
400 unsigned int raw_size = sample->raw_size;
401 unsigned int nr_elements = raw_size / sizeof(u32);
405 if (nr_elements * sizeof(u32) != raw_size)
406 pr_warning("Incorrect raw_size (%u) in bpf output event, skip %lu bytes\n",
407 raw_size, nr_elements * sizeof(u32) - raw_size);
409 len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
410 len_field = bt_ctf_field_create(len_type);
412 pr_err("failed to create 'raw_len' for bpf output event\n");
417 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
419 pr_err("failed to set field value for raw_len\n");
422 ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
424 pr_err("failed to set payload to raw_len\n");
428 seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
429 seq_field = bt_ctf_field_create(seq_type);
431 pr_err("failed to create 'raw_data' for bpf output event\n");
436 ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
438 pr_err("failed to set length of 'raw_data'\n");
442 for (i = 0; i < nr_elements; i++) {
443 struct bt_ctf_field *elem_field =
444 bt_ctf_field_sequence_get_field(seq_field, i);
446 ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
447 ((u32 *)(sample->raw_data))[i]);
449 bt_ctf_field_put(elem_field);
451 pr_err("failed to set raw_data[%d]\n", i);
456 ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
458 pr_err("failed to set payload for raw_data\n");
461 bt_ctf_field_put(seq_field);
463 bt_ctf_field_type_put(seq_type);
465 bt_ctf_field_put(len_field);
467 bt_ctf_field_type_put(len_type);
471 static int add_generic_values(struct ctf_writer *cw,
472 struct bt_ctf_event *event,
473 struct perf_evsel *evsel,
474 struct perf_sample *sample)
476 u64 type = evsel->attr.sample_type;
481 * PERF_SAMPLE_TIME - not needed as we have it in
483 * PERF_SAMPLE_READ - TODO
484 * PERF_SAMPLE_CALLCHAIN - TODO
485 * PERF_SAMPLE_RAW - tracepoint fields are handled separately
486 * PERF_SAMPLE_BRANCH_STACK - TODO
487 * PERF_SAMPLE_REGS_USER - TODO
488 * PERF_SAMPLE_STACK_USER - TODO
491 if (type & PERF_SAMPLE_IP) {
492 ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
497 if (type & PERF_SAMPLE_TID) {
498 ret = value_set_s32(cw, event, "perf_tid", sample->tid);
502 ret = value_set_s32(cw, event, "perf_pid", sample->pid);
507 if ((type & PERF_SAMPLE_ID) ||
508 (type & PERF_SAMPLE_IDENTIFIER)) {
509 ret = value_set_u64(cw, event, "perf_id", sample->id);
514 if (type & PERF_SAMPLE_STREAM_ID) {
515 ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
520 if (type & PERF_SAMPLE_PERIOD) {
521 ret = value_set_u64(cw, event, "perf_period", sample->period);
526 if (type & PERF_SAMPLE_WEIGHT) {
527 ret = value_set_u64(cw, event, "perf_weight", sample->weight);
532 if (type & PERF_SAMPLE_DATA_SRC) {
533 ret = value_set_u64(cw, event, "perf_data_src",
539 if (type & PERF_SAMPLE_TRANSACTION) {
540 ret = value_set_u64(cw, event, "perf_transaction",
541 sample->transaction);
549 static int ctf_stream__flush(struct ctf_stream *cs)
554 err = bt_ctf_stream_flush(cs->stream);
556 pr_err("CTF stream %d flush failed\n", cs->cpu);
558 pr("Flush stream for cpu %d (%u samples)\n",
567 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
569 struct ctf_stream *cs;
570 struct bt_ctf_field *pkt_ctx = NULL;
571 struct bt_ctf_field *cpu_field = NULL;
572 struct bt_ctf_stream *stream = NULL;
575 cs = zalloc(sizeof(*cs));
577 pr_err("Failed to allocate ctf stream\n");
581 stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
583 pr_err("Failed to create CTF stream\n");
587 pkt_ctx = bt_ctf_stream_get_packet_context(stream);
589 pr_err("Failed to obtain packet context\n");
593 cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
594 bt_ctf_field_put(pkt_ctx);
596 pr_err("Failed to obtain cpu field\n");
600 ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
602 pr_err("Failed to update CPU number\n");
606 bt_ctf_field_put(cpu_field);
614 bt_ctf_field_put(cpu_field);
616 bt_ctf_stream_put(stream);
622 static void ctf_stream__delete(struct ctf_stream *cs)
625 bt_ctf_stream_put(cs->stream);
630 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
632 struct ctf_stream *cs = cw->stream[cpu];
635 cs = ctf_stream__create(cw, cpu);
636 cw->stream[cpu] = cs;
642 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
643 struct perf_evsel *evsel)
647 if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
650 if (cpu > cw->stream_cnt) {
651 pr_err("Event was recorded for CPU %d, limit is at %d.\n",
652 cpu, cw->stream_cnt);
659 #define STREAM_FLUSH_COUNT 100000
662 * Currently we have no other way to determine the
663 * time for the stream flush other than keep track
664 * of the number of events and check it against
667 static bool is_flush_needed(struct ctf_stream *cs)
669 return cs->count >= STREAM_FLUSH_COUNT;
672 static int process_sample_event(struct perf_tool *tool,
673 union perf_event *_event,
674 struct perf_sample *sample,
675 struct perf_evsel *evsel,
676 struct machine *machine __maybe_unused)
678 struct convert *c = container_of(tool, struct convert, tool);
679 struct evsel_priv *priv = evsel->priv;
680 struct ctf_writer *cw = &c->writer;
681 struct ctf_stream *cs;
682 struct bt_ctf_event_class *event_class;
683 struct bt_ctf_event *event;
686 if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
689 event_class = priv->event_class;
693 c->events_size += _event->header.size;
695 pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
697 event = bt_ctf_event_create(event_class);
699 pr_err("Failed to create an CTF event\n");
703 bt_ctf_clock_set_time(cw->clock, sample->time);
705 ret = add_generic_values(cw, event, evsel, sample);
709 if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
710 ret = add_tracepoint_values(cw, event_class, event,
716 if (perf_evsel__is_bpf_output(evsel)) {
717 ret = add_bpf_output_values(event_class, event, sample);
722 cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
724 if (is_flush_needed(cs))
725 ctf_stream__flush(cs);
728 bt_ctf_stream_append_event(cs->stream, event);
731 bt_ctf_event_put(event);
/*
 * If dup < 0, add a prefix. Else, add _dupl_X suffix.
 * Takes ownership of @name: if it is not @orig_name (i.e. it was a
 * previous allocation from this function), it is freed.  Returns a
 * newly-allocated string, or NULL on failure or when dup >= 10.
 */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *new_name = NULL;
	size_t len;

	if (!name)
		name = orig_name;

	if (dup >= 10)
		goto out;
	/*
	 * Add '_' prefix to potential keywork. According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * futher CTF spec updating may require us to use '$'.
	 */
	if (dup < 0)
		len = strlen(name) + sizeof("_");
	else
		len = strlen(orig_name) + sizeof("_dupl_X");

	new_name = malloc(len);
	if (!new_name)
		goto out;

	if (dup < 0)
		snprintf(new_name, len, "_%s", name);
	else
		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
	if (name != orig_name)
		free(name);
	return new_name;
}
771 static int event_class_add_field(struct bt_ctf_event_class *event_class,
772 struct bt_ctf_field_type *type,
773 struct format_field *field)
775 struct bt_ctf_field_type *t = NULL;
780 /* alias was already assigned */
781 if (field->alias != field->name)
782 return bt_ctf_event_class_add_field(event_class, type,
783 (char *)field->alias);
787 /* If 'name' is a keywork, add prefix. */
788 if (bt_ctf_validate_identifier(name))
789 name = change_name(name, field->name, -1);
792 pr_err("Failed to fix invalid identifier.");
795 while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
796 bt_ctf_field_type_put(t);
797 name = change_name(name, field->name, dup++);
799 pr_err("Failed to create dup name for '%s'\n", field->name);
804 ret = bt_ctf_event_class_add_field(event_class, type, name);
811 static int add_tracepoint_fields_types(struct ctf_writer *cw,
812 struct format_field *fields,
813 struct bt_ctf_event_class *event_class)
815 struct format_field *field;
818 for (field = fields; field; field = field->next) {
819 struct bt_ctf_field_type *type;
820 unsigned long flags = field->flags;
822 pr2(" field '%s'\n", field->name);
824 type = get_tracepoint_field_type(cw, field);
829 * A string is an array of chars. For this we use the string
830 * type and don't care that it is an array. What we don't
831 * support is an array of strings.
833 if (flags & FIELD_IS_STRING)
834 flags &= ~FIELD_IS_ARRAY;
836 if (flags & FIELD_IS_ARRAY)
837 type = bt_ctf_field_type_array_create(type, field->arraylen);
839 ret = event_class_add_field(event_class, type, field);
841 if (flags & FIELD_IS_ARRAY)
842 bt_ctf_field_type_put(type);
845 pr_err("Failed to add field '%s': %d\n",
854 static int add_tracepoint_types(struct ctf_writer *cw,
855 struct perf_evsel *evsel,
856 struct bt_ctf_event_class *class)
858 struct format_field *common_fields = evsel->tp_format->format.common_fields;
859 struct format_field *fields = evsel->tp_format->format.fields;
862 ret = add_tracepoint_fields_types(cw, common_fields, class);
864 ret = add_tracepoint_fields_types(cw, fields, class);
869 static int add_bpf_output_types(struct ctf_writer *cw,
870 struct bt_ctf_event_class *class)
872 struct bt_ctf_field_type *len_type = cw->data.u32;
873 struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
874 struct bt_ctf_field_type *seq_type;
877 ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
881 seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
885 return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
888 static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
889 struct bt_ctf_event_class *event_class)
891 u64 type = evsel->attr.sample_type;
895 * PERF_SAMPLE_TIME - not needed as we have it in
897 * PERF_SAMPLE_READ - TODO
898 * PERF_SAMPLE_CALLCHAIN - TODO
899 * PERF_SAMPLE_RAW - tracepoint fields and BPF output
900 * are handled separately
901 * PERF_SAMPLE_BRANCH_STACK - TODO
902 * PERF_SAMPLE_REGS_USER - TODO
903 * PERF_SAMPLE_STACK_USER - TODO
906 #define ADD_FIELD(cl, t, n) \
908 pr2(" field '%s'\n", n); \
909 if (bt_ctf_event_class_add_field(cl, t, n)) { \
910 pr_err("Failed to add field '%s';\n", n); \
915 if (type & PERF_SAMPLE_IP)
916 ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
918 if (type & PERF_SAMPLE_TID) {
919 ADD_FIELD(event_class, cw->data.s32, "perf_tid");
920 ADD_FIELD(event_class, cw->data.s32, "perf_pid");
923 if ((type & PERF_SAMPLE_ID) ||
924 (type & PERF_SAMPLE_IDENTIFIER))
925 ADD_FIELD(event_class, cw->data.u64, "perf_id");
927 if (type & PERF_SAMPLE_STREAM_ID)
928 ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
930 if (type & PERF_SAMPLE_PERIOD)
931 ADD_FIELD(event_class, cw->data.u64, "perf_period");
933 if (type & PERF_SAMPLE_WEIGHT)
934 ADD_FIELD(event_class, cw->data.u64, "perf_weight");
936 if (type & PERF_SAMPLE_DATA_SRC)
937 ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
939 if (type & PERF_SAMPLE_TRANSACTION)
940 ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
946 static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
948 struct bt_ctf_event_class *event_class;
949 struct evsel_priv *priv;
950 const char *name = perf_evsel__name(evsel);
953 pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);
955 event_class = bt_ctf_event_class_create(name);
959 ret = add_generic_types(cw, evsel, event_class);
963 if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
964 ret = add_tracepoint_types(cw, evsel, event_class);
969 if (perf_evsel__is_bpf_output(evsel)) {
970 ret = add_bpf_output_types(cw, event_class);
975 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
977 pr("Failed to add event class into stream.\n");
981 priv = malloc(sizeof(*priv));
985 priv->event_class = event_class;
990 bt_ctf_event_class_put(event_class);
991 pr_err("Failed to add event '%s'.\n", name);
995 static int setup_events(struct ctf_writer *cw, struct perf_session *session)
997 struct perf_evlist *evlist = session->evlist;
998 struct perf_evsel *evsel;
1001 evlist__for_each_entry(evlist, evsel) {
1002 ret = add_event(cw, evsel);
1009 static void cleanup_events(struct perf_session *session)
1011 struct perf_evlist *evlist = session->evlist;
1012 struct perf_evsel *evsel;
1014 evlist__for_each_entry(evlist, evsel) {
1015 struct evsel_priv *priv;
1018 bt_ctf_event_class_put(priv->event_class);
1019 zfree(&evsel->priv);
1022 perf_evlist__delete(evlist);
1023 session->evlist = NULL;
1026 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
1028 struct ctf_stream **stream;
1029 struct perf_header *ph = &session->header;
1033 * Try to get the number of cpus used in the data file,
1034 * if not present fallback to the MAX_CPUS.
1036 ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
1038 stream = zalloc(sizeof(*stream) * ncpus);
1040 pr_err("Failed to allocate streams.\n");
1044 cw->stream = stream;
1045 cw->stream_cnt = ncpus;
1049 static void free_streams(struct ctf_writer *cw)
1053 for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1054 ctf_stream__delete(cw->stream[cpu]);
1059 static int ctf_writer__setup_env(struct ctf_writer *cw,
1060 struct perf_session *session)
1062 struct perf_header *header = &session->header;
1063 struct bt_ctf_writer *writer = cw->writer;
1065 #define ADD(__n, __v) \
1067 if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \
1071 ADD("host", header->env.hostname);
1072 ADD("sysname", "Linux");
1073 ADD("release", header->env.os_release);
1074 ADD("version", header->env.version);
1075 ADD("machine", header->env.arch);
1076 ADD("domain", "kernel");
1077 ADD("tracer_name", "perf");
1083 static int ctf_writer__setup_clock(struct ctf_writer *cw)
1085 struct bt_ctf_clock *clock = cw->clock;
1087 bt_ctf_clock_set_description(clock, "perf clock");
1089 #define SET(__n, __v) \
1091 if (bt_ctf_clock_set_##__n(clock, __v)) \
1095 SET(frequency, 1000000000);
1099 SET(is_absolute, 0);
1105 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1107 struct bt_ctf_field_type *type;
1109 type = bt_ctf_field_type_integer_create(size);
1114 bt_ctf_field_type_integer_set_signed(type, 1))
1118 bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1121 #if __BYTE_ORDER == __BIG_ENDIAN
1122 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1124 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1127 pr2("Created type: INTEGER %d-bit %ssigned %s\n",
1128 size, sign ? "un" : "", hex ? "hex" : "");
1132 bt_ctf_field_type_put(type);
1136 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1140 for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1141 bt_ctf_field_type_put(cw->data.array[i]);
1144 static int ctf_writer__init_data(struct ctf_writer *cw)
1146 #define CREATE_INT_TYPE(type, size, sign, hex) \
1148 (type) = create_int_type(size, sign, hex); \
1153 CREATE_INT_TYPE(cw->data.s64, 64, true, false);
1154 CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1155 CREATE_INT_TYPE(cw->data.s32, 32, true, false);
1156 CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1157 CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1158 CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1160 cw->data.string = bt_ctf_field_type_string_create();
1161 if (cw->data.string)
1165 ctf_writer__cleanup_data(cw);
1166 pr_err("Failed to create data types.\n");
1170 static void ctf_writer__cleanup(struct ctf_writer *cw)
1172 ctf_writer__cleanup_data(cw);
1174 bt_ctf_clock_put(cw->clock);
1176 bt_ctf_stream_class_put(cw->stream_class);
1177 bt_ctf_writer_put(cw->writer);
1179 /* and NULL all the pointers */
1180 memset(cw, 0, sizeof(*cw));
1183 static int ctf_writer__init(struct ctf_writer *cw, const char *path)
1185 struct bt_ctf_writer *writer;
1186 struct bt_ctf_stream_class *stream_class;
1187 struct bt_ctf_clock *clock;
1188 struct bt_ctf_field_type *pkt_ctx_type;
1192 writer = bt_ctf_writer_create(path);
1196 cw->writer = writer;
1199 clock = bt_ctf_clock_create("perf_clock");
1201 pr("Failed to create CTF clock.\n");
1207 if (ctf_writer__setup_clock(cw)) {
1208 pr("Failed to setup CTF clock.\n");
1212 /* CTF stream class */
1213 stream_class = bt_ctf_stream_class_create("perf_stream");
1214 if (!stream_class) {
1215 pr("Failed to create CTF stream class.\n");
1219 cw->stream_class = stream_class;
1221 /* CTF clock stream setup */
1222 if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1223 pr("Failed to assign CTF clock to stream class.\n");
1227 if (ctf_writer__init_data(cw))
1230 /* Add cpu_id for packet context */
1231 pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1235 ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1236 bt_ctf_field_type_put(pkt_ctx_type);
1240 /* CTF clock writer setup */
1241 if (bt_ctf_writer_add_clock(writer, clock)) {
1242 pr("Failed to assign CTF clock to writer.\n");
1249 ctf_writer__cleanup(cw);
1251 pr_err("Failed to setup CTF writer.\n");
1255 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1259 for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1260 ret = ctf_stream__flush(cw->stream[cpu]);
1265 static int convert__config(const char *var, const char *value, void *cb)
1267 struct convert *c = cb;
1269 if (!strcmp(var, "convert.queue-size")) {
1270 c->queue_size = perf_config_u64(var, value);
1277 int bt_convert__perf2ctf(const char *input, const char *path, bool force)
1279 struct perf_session *session;
1280 struct perf_data_file file = {
1282 .mode = PERF_DATA_MODE_READ,
1285 struct convert c = {
1287 .sample = process_sample_event,
1288 .mmap = perf_event__process_mmap,
1289 .mmap2 = perf_event__process_mmap2,
1290 .comm = perf_event__process_comm,
1291 .exit = perf_event__process_exit,
1292 .fork = perf_event__process_fork,
1293 .lost = perf_event__process_lost,
1294 .tracing_data = perf_event__process_tracing_data,
1295 .build_id = perf_event__process_build_id,
1296 .ordered_events = true,
1297 .ordering_requires_timestamps = true,
1300 struct ctf_writer *cw = &c.writer;
1303 perf_config(convert__config, &c);
1306 if (ctf_writer__init(cw, path))
1309 /* perf.data session */
1310 session = perf_session__new(&file, 0, &c.tool);
1315 ordered_events__set_alloc_size(&session->ordered_events,
1319 /* CTF writer env/clock setup */
1320 if (ctf_writer__setup_env(cw, session))
1323 /* CTF events setup */
1324 if (setup_events(cw, session))
1327 if (setup_streams(cw, session))
1330 err = perf_session__process_events(session);
1332 err = ctf_writer__flush_streams(cw);
1334 pr_err("Error during conversion.\n");
1337 "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1341 "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
1342 (double) c.events_size / 1024.0 / 1024.0,
1345 cleanup_events(session);
1346 perf_session__delete(session);
1347 ctf_writer__cleanup(cw);
1352 perf_session__delete(session);
1354 ctf_writer__cleanup(cw);
1355 pr_err("Error during conversion setup.\n");