Diffstat (limited to 'tools')
-rw-r--r-- tools/cma/cma-test.c | 385
-rw-r--r-- tools/firewire/Makefile | 19
-rw-r--r-- tools/firewire/decode-fcp.c | 213
-rw-r--r-- tools/firewire/list.h | 62
-rw-r--r-- tools/firewire/nosy-dump.c | 1031
-rw-r--r-- tools/firewire/nosy-dump.h | 173
-rw-r--r-- tools/perf/CREDITS | 30
-rw-r--r-- tools/perf/Documentation/Makefile | 302
-rw-r--r-- tools/perf/Documentation/asciidoc.conf | 91
-rw-r--r-- tools/perf/Documentation/examples.txt | 225
-rw-r--r-- tools/perf/Documentation/manpage-1.72.xsl | 14
-rw-r--r-- tools/perf/Documentation/manpage-base.xsl | 35
-rw-r--r-- tools/perf/Documentation/manpage-bold-literal.xsl | 17
-rw-r--r-- tools/perf/Documentation/manpage-normal.xsl | 13
-rw-r--r-- tools/perf/Documentation/manpage-suppress-sp.xsl | 21
-rw-r--r-- tools/perf/Documentation/perf-annotate.txt | 29
-rw-r--r-- tools/perf/Documentation/perf-archive.txt | 22
-rw-r--r-- tools/perf/Documentation/perf-bench.txt | 120
-rw-r--r-- tools/perf/Documentation/perf-buildid-cache.txt | 33
-rw-r--r-- tools/perf/Documentation/perf-buildid-list.txt | 34
-rw-r--r-- tools/perf/Documentation/perf-diff.txt | 55
-rw-r--r-- tools/perf/Documentation/perf-help.txt | 38
-rw-r--r-- tools/perf/Documentation/perf-inject.txt | 35
-rw-r--r-- tools/perf/Documentation/perf-kmem.txt | 47
-rw-r--r-- tools/perf/Documentation/perf-kvm.txt | 68
-rw-r--r-- tools/perf/Documentation/perf-list.txt | 56
-rw-r--r-- tools/perf/Documentation/perf-lock.txt | 29
-rw-r--r-- tools/perf/Documentation/perf-probe.txt | 150
-rw-r--r-- tools/perf/Documentation/perf-record.txt | 121
-rw-r--r-- tools/perf/Documentation/perf-report.txt | 70
-rw-r--r-- tools/perf/Documentation/perf-sched.txt | 41
-rw-r--r-- tools/perf/Documentation/perf-stat.txt | 76
-rw-r--r-- tools/perf/Documentation/perf-test.txt | 22
-rw-r--r-- tools/perf/Documentation/perf-timechart.txt | 44
-rw-r--r-- tools/perf/Documentation/perf-top.txt | 127
-rw-r--r-- tools/perf/Documentation/perf-trace-perl.txt | 217
-rw-r--r-- tools/perf/Documentation/perf-trace-python.txt | 623
-rw-r--r-- tools/perf/Documentation/perf-trace.txt | 70
-rw-r--r-- tools/perf/Documentation/perf.txt | 24
-rw-r--r-- tools/perf/MANIFEST | 12
-rw-r--r-- tools/perf/Makefile | 1269
-rw-r--r-- tools/perf/arch/arm/Makefile | 4
-rw-r--r-- tools/perf/arch/arm/util/dwarf-regs.c | 64
-rw-r--r-- tools/perf/arch/powerpc/Makefile | 4
-rw-r--r-- tools/perf/arch/powerpc/util/dwarf-regs.c | 88
-rw-r--r-- tools/perf/arch/sh/Makefile | 4
-rw-r--r-- tools/perf/arch/sh/util/dwarf-regs.c | 55
-rw-r--r-- tools/perf/arch/sparc/Makefile | 4
-rw-r--r-- tools/perf/arch/sparc/util/dwarf-regs.c | 43
-rw-r--r-- tools/perf/arch/x86/Makefile | 4
-rw-r--r-- tools/perf/arch/x86/util/dwarf-regs.c | 75
-rw-r--r-- tools/perf/bench/bench.h | 17
-rw-r--r-- tools/perf/bench/mem-memcpy.c | 192
-rw-r--r-- tools/perf/bench/sched-messaging.c | 336
-rw-r--r-- tools/perf/bench/sched-pipe.c | 127
-rwxr-xr-x tools/perf/build.sh | 10
-rw-r--r-- tools/perf/builtin-annotate.c | 473
-rw-r--r-- tools/perf/builtin-bench.c | 245
-rw-r--r-- tools/perf/builtin-buildid-cache.c | 132
-rw-r--r-- tools/perf/builtin-buildid-list.c | 60
-rw-r--r-- tools/perf/builtin-diff.c | 227
-rw-r--r-- tools/perf/builtin-help.c | 459
-rw-r--r-- tools/perf/builtin-inject.c | 228
-rw-r--r-- tools/perf/builtin-kmem.c | 784
-rw-r--r-- tools/perf/builtin-kvm.c | 144
-rw-r--r-- tools/perf/builtin-list.c | 21
-rw-r--r-- tools/perf/builtin-lock.c | 1005
-rw-r--r-- tools/perf/builtin-probe.c | 269
-rw-r--r-- tools/perf/builtin-record.c | 925
-rw-r--r-- tools/perf/builtin-report.c | 534
-rw-r--r-- tools/perf/builtin-sched.c | 1925
-rw-r--r-- tools/perf/builtin-stat.c | 626
-rw-r--r-- tools/perf/builtin-test.c | 281
-rw-r--r-- tools/perf/builtin-timechart.c | 1040
-rw-r--r-- tools/perf/builtin-top.c | 1470
-rw-r--r-- tools/perf/builtin-trace.c | 734
-rw-r--r-- tools/perf/builtin.h | 39
-rw-r--r-- tools/perf/command-list.txt | 24
-rw-r--r-- tools/perf/design.txt | 462
-rw-r--r-- tools/perf/external/usr/include/ansidecl.h | 423
-rw-r--r-- tools/perf/external/usr/include/bfd.h | 5902
-rw-r--r-- tools/perf/external/usr/include/bfdlink.h | 786
-rw-r--r-- tools/perf/external/usr/include/demangle.h | 638
-rw-r--r-- tools/perf/external/usr/include/dis-asm.h | 368
-rw-r--r-- tools/perf/external/usr/include/dwarf.h | 772
-rw-r--r-- tools/perf/external/usr/include/elfutils/elf-knowledge.h | 127
-rw-r--r-- tools/perf/external/usr/include/elfutils/libdw.h | 865
-rw-r--r-- tools/perf/external/usr/include/elfutils/libdwfl.h | 582
-rw-r--r-- tools/perf/external/usr/include/elfutils/version.h | 58
-rw-r--r-- tools/perf/external/usr/include/gelf.h | 353
-rw-r--r-- tools/perf/external/usr/include/libelf.h | 417
-rw-r--r-- tools/perf/external/usr/include/libiberty.h | 679
-rw-r--r-- tools/perf/external/usr/include/newt.h | 387
-rw-r--r-- tools/perf/external/usr/include/nlist.h | 77
-rw-r--r-- tools/perf/external/usr/include/plugin-api.h | 308
-rw-r--r-- tools/perf/external/usr/include/symcat.h | 55
-rw-r--r-- tools/perf/feature-tests.mak | 119
-rw-r--r-- tools/perf/perf-archive.sh | 46
-rw-r--r-- tools/perf/perf.c | 508
-rw-r--r-- tools/perf/perf.h | 149
-rw-r--r-- tools/perf/scripts/perl/Perf-Trace-Util/Context.c | 135
-rw-r--r-- tools/perf/scripts/perl/Perf-Trace-Util/Context.xs | 42
-rw-r--r-- tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL | 17
-rw-r--r-- tools/perf/scripts/perl/Perf-Trace-Util/README | 59
-rw-r--r-- tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm | 55
-rw-r--r-- tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm | 192
-rw-r--r-- tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm | 94
-rw-r--r-- tools/perf/scripts/perl/Perf-Trace-Util/typemap | 1
-rw-r--r-- tools/perf/scripts/perl/bin/check-perf-trace-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/failed-syscalls-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/failed-syscalls-report | 10
-rw-r--r-- tools/perf/scripts/perl/bin/rw-by-file-record | 3
-rw-r--r-- tools/perf/scripts/perl/bin/rw-by-file-report | 13
-rw-r--r-- tools/perf/scripts/perl/bin/rw-by-pid-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/rw-by-pid-report | 6
-rw-r--r-- tools/perf/scripts/perl/bin/rwtop-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/rwtop-report | 23
-rw-r--r-- tools/perf/scripts/perl/bin/wakeup-latency-record | 6
-rw-r--r-- tools/perf/scripts/perl/bin/wakeup-latency-report | 6
-rw-r--r-- tools/perf/scripts/perl/bin/workqueue-stats-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/workqueue-stats-report | 7
-rw-r--r-- tools/perf/scripts/perl/check-perf-trace.pl | 106
-rw-r--r-- tools/perf/scripts/perl/failed-syscalls.pl | 42
-rw-r--r-- tools/perf/scripts/perl/rw-by-file.pl | 106
-rw-r--r-- tools/perf/scripts/perl/rw-by-pid.pl | 184
-rw-r--r-- tools/perf/scripts/perl/rwtop.pl | 199
-rw-r--r-- tools/perf/scripts/perl/wakeup-latency.pl | 107
-rw-r--r-- tools/perf/scripts/perl/workqueue-stats.pl | 129
-rw-r--r-- tools/perf/scripts/python/Perf-Trace-Util/Context.c | 88
-rw-r--r-- tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 121
-rw-r--r-- tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 184
-rw-r--r-- tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 28
-rw-r--r-- tools/perf/scripts/python/bin/failed-syscalls-by-pid-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/failed-syscalls-by-pid-report | 10
-rw-r--r-- tools/perf/scripts/python/bin/sched-migration-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/sched-migration-report | 3
-rw-r--r-- tools/perf/scripts/python/bin/sctop-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/sctop-report | 24
-rw-r--r-- tools/perf/scripts/python/bin/syscall-counts-by-pid-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/syscall-counts-by-pid-report | 10
-rw-r--r-- tools/perf/scripts/python/bin/syscall-counts-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/syscall-counts-report | 10
-rw-r--r-- tools/perf/scripts/python/check-perf-trace.py | 82
-rw-r--r-- tools/perf/scripts/python/failed-syscalls-by-pid.py | 68
-rw-r--r-- tools/perf/scripts/python/sched-migration.py | 461
-rw-r--r-- tools/perf/scripts/python/sctop.py | 78
-rw-r--r-- tools/perf/scripts/python/syscall-counts-by-pid.py | 64
-rw-r--r-- tools/perf/scripts/python/syscall-counts.py | 58
-rwxr-xr-x tools/perf/util/PERF-VERSION-GEN | 47
-rw-r--r-- tools/perf/util/abspath.c | 37
-rw-r--r-- tools/perf/util/alias.c | 77
-rw-r--r-- tools/perf/util/bitmap.c | 21
-rw-r--r-- tools/perf/util/build-id.c | 77
-rw-r--r-- tools/perf/util/build-id.h | 10
-rw-r--r-- tools/perf/util/cache.h | 87
-rw-r--r-- tools/perf/util/callchain.c | 406
-rw-r--r-- tools/perf/util/callchain.h | 68
-rw-r--r-- tools/perf/util/color.c | 324
-rw-r--r-- tools/perf/util/color.h | 46
-rw-r--r-- tools/perf/util/config.c | 492
-rw-r--r-- tools/perf/util/cpumap.c | 114
-rw-r--r-- tools/perf/util/cpumap.h | 7
-rw-r--r-- tools/perf/util/ctype.c | 39
-rw-r--r-- tools/perf/util/debug.c | 98
-rw-r--r-- tools/perf/util/debug.h | 38
-rw-r--r-- tools/perf/util/debugfs.c | 240
-rw-r--r-- tools/perf/util/debugfs.h | 25
-rw-r--r-- tools/perf/util/environment.c | 9
-rw-r--r-- tools/perf/util/event.c | 836
-rw-r--r-- tools/perf/util/event.h | 167
-rw-r--r-- tools/perf/util/exec_cmd.c | 167
-rw-r--r-- tools/perf/util/exec_cmd.h | 12
-rwxr-xr-x tools/perf/util/generate-cmdlist.sh | 24
-rw-r--r-- tools/perf/util/header.c | 1199
-rw-r--r-- tools/perf/util/header.h | 127
-rw-r--r-- tools/perf/util/help.c | 338
-rw-r--r-- tools/perf/util/help.h | 29
-rw-r--r-- tools/perf/util/hist.c | 1176
-rw-r--r-- tools/perf/util/hist.h | 148
-rw-r--r-- tools/perf/util/hweight.c | 31
-rw-r--r-- tools/perf/util/include/asm/asm-offsets.h | 1
-rw-r--r-- tools/perf/util/include/asm/bug.h | 22
-rw-r--r-- tools/perf/util/include/asm/byteorder.h | 2
-rw-r--r-- tools/perf/util/include/asm/hweight.h | 8
-rw-r--r-- tools/perf/util/include/asm/swab.h | 1
-rw-r--r-- tools/perf/util/include/asm/system.h | 1
-rw-r--r-- tools/perf/util/include/asm/uaccess.h | 14
-rw-r--r-- tools/perf/util/include/dwarf-regs.h | 8
-rw-r--r-- tools/perf/util/include/linux/bitmap.h | 35
-rw-r--r-- tools/perf/util/include/linux/bitops.h | 27
-rw-r--r-- tools/perf/util/include/linux/compiler.h | 12
-rw-r--r-- tools/perf/util/include/linux/ctype.h | 1
-rw-r--r-- tools/perf/util/include/linux/hash.h | 5
-rw-r--r-- tools/perf/util/include/linux/kernel.h | 111
-rw-r--r-- tools/perf/util/include/linux/list.h | 26
-rw-r--r-- tools/perf/util/include/linux/module.h | 6
-rw-r--r-- tools/perf/util/include/linux/poison.h | 1
-rw-r--r-- tools/perf/util/include/linux/prefetch.h | 6
-rw-r--r-- tools/perf/util/include/linux/rbtree.h | 1
-rw-r--r-- tools/perf/util/include/linux/string.h | 1
-rw-r--r-- tools/perf/util/include/linux/types.h | 21
-rw-r--r-- tools/perf/util/levenshtein.c | 84
-rw-r--r-- tools/perf/util/levenshtein.h | 8
-rw-r--r-- tools/perf/util/map.c | 682
-rw-r--r-- tools/perf/util/map.h | 227
-rw-r--r-- tools/perf/util/pager.c | 96
-rw-r--r-- tools/perf/util/parse-events.c | 965
-rw-r--r-- tools/perf/util/parse-events.h | 37
-rw-r--r-- tools/perf/util/parse-options.c | 577
-rw-r--r-- tools/perf/util/parse-options.h | 188
-rw-r--r-- tools/perf/util/path.c | 156
-rw-r--r-- tools/perf/util/probe-event.c | 1755
-rw-r--r-- tools/perf/util/probe-event.h | 123
-rw-r--r-- tools/perf/util/probe-finder.c | 1457
-rw-r--r-- tools/perf/util/probe-finder.h | 68
-rw-r--r-- tools/perf/util/pstack.c | 75
-rw-r--r-- tools/perf/util/pstack.h | 14
-rw-r--r-- tools/perf/util/quote.c | 54
-rw-r--r-- tools/perf/util/quote.h | 29
-rw-r--r-- tools/perf/util/run-command.c | 214
-rw-r--r-- tools/perf/util/run-command.h | 58
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-perl.c | 565
-rw-r--r-- tools/perf/util/scripting-engines/trace-event-python.c | 594
-rw-r--r-- tools/perf/util/session.c | 931
-rw-r--r-- tools/perf/util/session.h | 145
-rw-r--r-- tools/perf/util/sigchain.c | 52
-rw-r--r-- tools/perf/util/sigchain.h | 10
-rw-r--r-- tools/perf/util/sort.c | 345
-rw-r--r-- tools/perf/util/sort.h | 124
-rw-r--r-- tools/perf/util/strbuf.c | 133
-rw-r--r-- tools/perf/util/strbuf.h | 92
-rw-r--r-- tools/perf/util/string.c | 296
-rw-r--r-- tools/perf/util/strlist.c | 200
-rw-r--r-- tools/perf/util/strlist.h | 78
-rw-r--r-- tools/perf/util/svghelper.c | 500
-rw-r--r-- tools/perf/util/svghelper.h | 28
-rw-r--r-- tools/perf/util/symbol.c | 2459
-rw-r--r-- tools/perf/util/symbol.h | 231
-rw-r--r-- tools/perf/util/thread.c | 180
-rw-r--r-- tools/perf/util/thread.h | 49
-rw-r--r-- tools/perf/util/trace-event-info.c | 563
-rw-r--r-- tools/perf/util/trace-event-parse.c | 3233
-rw-r--r-- tools/perf/util/trace-event-read.c | 539
-rw-r--r-- tools/perf/util/trace-event-scripting.c | 167
-rw-r--r-- tools/perf/util/trace-event.h | 300
-rw-r--r-- tools/perf/util/types.h | 17
-rw-r--r-- tools/perf/util/ui/browser.c | 329
-rw-r--r-- tools/perf/util/ui/browser.h | 46
-rw-r--r-- tools/perf/util/ui/browsers/annotate.c | 241
-rw-r--r-- tools/perf/util/ui/browsers/hists.c | 948
-rw-r--r-- tools/perf/util/ui/browsers/map.c | 161
-rw-r--r-- tools/perf/util/ui/browsers/map.h | 6
-rw-r--r-- tools/perf/util/ui/helpline.c | 69
-rw-r--r-- tools/perf/util/ui/helpline.h | 11
-rw-r--r-- tools/perf/util/ui/libslang.h | 27
-rw-r--r-- tools/perf/util/ui/progress.c | 60
-rw-r--r-- tools/perf/util/ui/progress.h | 11
-rw-r--r-- tools/perf/util/ui/setup.c | 42
-rw-r--r-- tools/perf/util/ui/util.c | 114
-rw-r--r-- tools/perf/util/ui/util.h | 10
-rw-r--r-- tools/perf/util/usage.c | 80
-rw-r--r-- tools/perf/util/util.c | 116
-rw-r--r-- tools/perf/util/util.h | 285
-rw-r--r-- tools/perf/util/values.c | 231
-rw-r--r-- tools/perf/util/values.h | 27
-rw-r--r-- tools/perf/util/wrapper.c | 40
-rw-r--r-- tools/usb/ffs-test.c | 554
-rw-r--r-- tools/usb/testusb.c | 547
268 files changed, 65146 insertions, 0 deletions
diff --git a/tools/cma/cma-test.c b/tools/cma/cma-test.c
new file mode 100644
index 00000000..b6076a62
--- /dev/null
+++ b/tools/cma/cma-test.c
@@ -0,0 +1,385 @@
+/*
+ * cma-test.c -- CMA testing application
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* $(CROSS_COMPILE)gcc -Wall -Wextra -g -o cma-test cma-test.c */
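+
+/*
+ * Example session (illustrative only; the device/kind names are made
+ * up and depend on how CMA regions are configured on the target):
+ *
+ *   a video/common 8M/1M    allocate 8 MiB aligned to 1 MiB
+ *   l                       list allocated chunks
+ *   f 3                     free the chunk whose fd is 3
+ */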
+
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <linux/cma.h>
+
+
+static void handle_command(char *line);
+
+int main(void)
+{
+ unsigned no = 1;
+ char line[1024];
+ int skip = 0;
+
+ fputs("commands:\n"
+ " l or list list allocated chunks\n"
+ " a or alloc <dev>/<kind> <size>[/<alignment>] allocate chunk\n"
+ " A or afrom <regions> <size>[/<alignment>] allocate from region(s)\n"
+ " f or free [<num>] free an chunk\n"
+ " # ... comment\n"
+ " <empty line> repeat previous\n"
+ "\n", stderr);
+
+ while (fgets(line, sizeof line, stdin)) {
+ char *nl = strchr(line, '\n');
+ if (nl) {
+ if (skip) {
+ fprintf(stderr, "cma: %d: line too long\n", no);
+ skip = 0;
+ } else {
+ *nl = '\0';
+ handle_command(line);
+ }
+ ++no;
+ } else {
+ skip = 1;
+ }
+ }
+
+ if (skip)
+ fprintf(stderr, "cma: %d: no new line at EOF\n", no);
+ return 0;
+}
+
+
+
+static void cmd_list(char *name, char *line);
+static void cmd_alloc(char *name, char *line);
+static void cmd_alloc_from(char *name, char *line);
+static void cmd_free(char *name, char *line);
+
+static const struct command {
+ const char name[8];
+ void (*handle)(char *name, char *line);
+} commands[] = {
+ { "list", cmd_list },
+ { "l", cmd_list },
+ { "alloc", cmd_alloc },
+ { "a", cmd_alloc },
+ { "afrom", cmd_alloc_from },
+ { "A", cmd_alloc_from },
+ { "free", cmd_free },
+ { "f", cmd_free },
+ { "", NULL }
+};
+
+
+#define SKIP_SPACE(ch) do while (isspace(*(ch))) ++(ch); while (0)
+
+
+static void handle_command(char *line)
+{
+ static char last_line[1024];
+
+ const struct command *cmd;
+ char *name;
+
+ SKIP_SPACE(line);
+ if (*line == '#')
+ return;
+
+ if (!*line)
+ strcpy(line, last_line);
+ else
+ strcpy(last_line, line);
+
+ name = line;
+ while (*line && !isspace(*line))
+ ++line;
+
+ if (*line) {
+ *line = '\0';
+ ++line;
+ }
+
+ for (cmd = commands; *(cmd->name); ++cmd)
+ if (!strcmp(name, cmd->name)) {
+ cmd->handle(name, line);
+ return;
+ }
+
+ fprintf(stderr, "%s: unknown command\n", name);
+}
+
+
+
+struct chunk {
+ struct chunk *next, *prev;
+ int fd;
+ unsigned long size;
+ unsigned long start;
+};
+
+static struct chunk root = {
+ .next = &root,
+ .prev = &root,
+};
+
+#define for_each(a) for (a = root.next; a != &root; a = a->next)
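+
+/*
+ * Allocated chunks live on a circular doubly-linked list with "root"
+ * as the sentinel; chunk_add() appends at the tail, so root.prev is
+ * always the most recently allocated chunk, which is what "free"
+ * without an argument releases.
+ */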
+
+static struct chunk *chunk_create(const char *prefix);
+static void chunk_destroy(struct chunk *chunk);
+static void chunk_add(struct chunk *chunk);
+
+static int memparse(char *ptr, char **retptr, unsigned long *ret);
+
+
+static void cmd_list(char *name, char *line)
+{
+ struct chunk *chunk;
+
+ (void)name; (void)line;
+
+ for_each(chunk)
+ printf("%3d: %p@%p\n", chunk->fd,
+ (void *)chunk->size, (void *)chunk->start);
+}
+
+
+static void __cma_alloc(char *name, char *line, int from);
+
+static void cmd_alloc(char *name, char *line)
+{
+ __cma_alloc(name, line, 0);
+}
+
+static void cmd_alloc_from(char *name, char *line)
+{
+ __cma_alloc(name, line, 1);
+}
+
+static void __cma_alloc(char *name, char *line, int from)
+{
+ static const char *what[2] = { "dev/kind", "regions" };
+
+ unsigned long size, alignment = 0;
+ struct cma_alloc_request req;
+ struct chunk *chunk;
+ char *spec;
+ size_t n;
+ int ret;
+
+ SKIP_SPACE(line);
+ if (!*line) {
+ fprintf(stderr, "%s: expecting %s\n", name, what[from]);
+ return;
+ }
+
+ for (spec = line; *line && !isspace(*line); ++line)
+ /* nothing */;
+
+ if (!*line) {
+ fprintf(stderr, "%s: expecting size after %s\n",
+ name, what[from]);
+ return;
+ }
+
+ *line++ = '\0';
+ n = line - spec;
+ if (n > sizeof req.spec) {
+ fprintf(stderr, "%s: %s too long\n", name, what[from]);
+ return;
+ }
+
+ if (memparse(line, &line, &size) < 0 || !size) {
+ fprintf(stderr, "%s: invalid size\n", name);
+ return;
+ }
+
+ if (*line == '/')
+ if (memparse(line, &line, &alignment) < 0) {
+ fprintf(stderr, "%s: invalid alignment\n", name);
+ return;
+ }
+
+ SKIP_SPACE(line);
+ if (*line) {
+ fprintf(stderr, "%s: unknown argument(s) at the end: %s\n",
+ name, line);
+ return;
+ }
+
+
+ chunk = chunk_create(name);
+ if (!chunk)
+ return;
+
+ fprintf(stderr, "%s: allocating %p/%p\n", name,
+ (void *)size, (void *)alignment);
+
+ req.magic = CMA_MAGIC;
+ req.type = from ? CMA_REQ_FROM_REG : CMA_REQ_DEV_KIND;
+ req.size = size;
+ req.alignment = alignment;
+ req.start = 0;
+
+ memcpy(req.spec, spec, n);
+ memset(req.spec + n, '\0', sizeof req.spec - n);
+
+ ret = ioctl(chunk->fd, IOCTL_CMA_ALLOC, &req);
+ if (ret < 0) {
+ fprintf(stderr, "%s: cma_alloc: %s\n", name, strerror(errno));
+ chunk_destroy(chunk);
+ } else {
+ chunk_add(chunk);
+ chunk->size = req.size;
+ chunk->start = req.start;
+
+ printf("%3d: %p@%p\n", chunk->fd,
+ (void *)chunk->size, (void *)chunk->start);
+ }
+}
+
+
+static void cmd_free(char *name, char *line)
+{
+ struct chunk *chunk;
+
+ SKIP_SPACE(line);
+
+ if (*line) {
+ unsigned long num;
+
+ errno = 0;
+ num = strtoul(line, &line, 10);
+
+ if (errno || num > INT_MAX) {
+ fprintf(stderr, "%s: invalid number\n", name);
+ return;
+ }
+
+ SKIP_SPACE(line);
+ if (*line) {
+ fprintf(stderr, "%s: unknown arguments at the end: %s\n",
+ name, line);
+ return;
+ }
+
+ for_each(chunk)
+ if (chunk->fd == (int)num)
+ goto ok;
+ fprintf(stderr, "%s: no chunk %3lu\n", name, num);
+ return;
+
+ } else {
+ chunk = root.prev;
+ if (chunk == &root) {
+ fprintf(stderr, "%s: no chunks\n", name);
+ return;
+ }
+ }
+
+ok:
+ fprintf(stderr, "%s: freeing %p@%p\n", name,
+ (void *)chunk->size, (void *)chunk->start);
+ chunk_destroy(chunk);
+}
+
+
+static struct chunk *chunk_create(const char *prefix)
+{
+ struct chunk *chunk;
+ int fd;
+
+ chunk = malloc(sizeof *chunk);
+ if (!chunk) {
+ fprintf(stderr, "%s: %s\n", prefix, strerror(errno));
+ return NULL;
+ }
+
+ fd = open("/dev/cma", O_RDWR);
+ if (fd < 0) {
+ fprintf(stderr, "%s: /dev/cma: %s\n", prefix, strerror(errno));
+ free(chunk);
+ return NULL;
+ }
+
+ chunk->prev = chunk;
+ chunk->next = chunk;
+ chunk->fd = fd;
+ return chunk;
+}
+
+static void chunk_destroy(struct chunk *chunk)
+{
+ chunk->prev->next = chunk->next;
+ chunk->next->prev = chunk->prev;
+ close(chunk->fd);
+}
+
+static void chunk_add(struct chunk *chunk)
+{
+ chunk->next = &root;
+ chunk->prev = root.prev;
+ root.prev->next = chunk;
+ root.prev = chunk;
+}
+
+
+
+static int memparse(char *ptr, char **retptr, unsigned long *ret)
+{
+ unsigned long val;
+
+ SKIP_SPACE(ptr);
+
+ errno = 0;
+ val = strtoul(ptr, &ptr, 0);
+ if (errno)
+ return -1;
+
+ switch (*ptr) {
+ case 'G':
+ case 'g':
+ val <<= 10; /* fall through */
+ case 'M':
+ case 'm':
+ val <<= 10; /* fall through */
+ case 'K':
+ case 'k':
+ val <<= 10;
+ ++ptr;
+ }
+
+ if (retptr) {
+ SKIP_SPACE(ptr);
+ *retptr = ptr;
+ }
+
+ *ret = val;
+ return 0;
+}
diff --git a/tools/firewire/Makefile b/tools/firewire/Makefile
new file mode 100644
index 00000000..81767ada
--- /dev/null
+++ b/tools/firewire/Makefile
@@ -0,0 +1,19 @@
+prefix = /usr
+nosy-dump-version = 0.4
+
+CC = gcc
+
+all : nosy-dump
+
+nosy-dump : CFLAGS = -Wall -O2 -g
+nosy-dump : CPPFLAGS = -DVERSION=\"$(nosy-dump-version)\" -I../../drivers/firewire
+nosy-dump : LDFLAGS = -g
+nosy-dump : LDLIBS = -lpopt
+
+nosy-dump : nosy-dump.o decode-fcp.o
+
+clean :
+ rm -rf *.o nosy-dump
+
+install :
+ install nosy-dump $(prefix)/bin/nosy-dump
diff --git a/tools/firewire/decode-fcp.c b/tools/firewire/decode-fcp.c
new file mode 100644
index 00000000..e41223b6
--- /dev/null
+++ b/tools/firewire/decode-fcp.c
@@ -0,0 +1,213 @@
+#include <linux/firewire-constants.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "list.h"
+#include "nosy-dump.h"
+
+#define CSR_FCP_COMMAND 0xfffff0000b00ull
+#define CSR_FCP_RESPONSE 0xfffff0000d00ull
+
+static const char * const ctype_names[] = {
+ [0x0] = "control", [0x8] = "not implemented",
+ [0x1] = "status", [0x9] = "accepted",
+ [0x2] = "specific inquiry", [0xa] = "rejected",
+ [0x3] = "notify", [0xb] = "in transition",
+ [0x4] = "general inquiry", [0xc] = "stable",
+ [0x5] = "(reserved 0x05)", [0xd] = "changed",
+ [0x6] = "(reserved 0x06)", [0xe] = "(reserved 0x0e)",
+ [0x7] = "(reserved 0x07)", [0xf] = "interim",
+};
+
+static const char * const subunit_type_names[] = {
+ [0x00] = "monitor", [0x10] = "(reserved 0x10)",
+ [0x01] = "audio", [0x11] = "(reserved 0x11)",
+ [0x02] = "printer", [0x12] = "(reserved 0x12)",
+ [0x03] = "disc", [0x13] = "(reserved 0x13)",
+ [0x04] = "tape recorder/player",[0x14] = "(reserved 0x14)",
+ [0x05] = "tuner", [0x15] = "(reserved 0x15)",
+ [0x06] = "ca", [0x16] = "(reserved 0x16)",
+ [0x07] = "camera", [0x17] = "(reserved 0x17)",
+ [0x08] = "(reserved 0x08)", [0x18] = "(reserved 0x18)",
+ [0x09] = "panel", [0x19] = "(reserved 0x19)",
+ [0x0a] = "bulletin board", [0x1a] = "(reserved 0x1a)",
+ [0x0b] = "camera storage", [0x1b] = "(reserved 0x1b)",
+ [0x0c] = "(reserved 0x0c)", [0x1c] = "vendor unique",
+ [0x0d] = "(reserved 0x0d)", [0x1d] = "all subunit types",
+ [0x0e] = "(reserved 0x0e)", [0x1e] = "subunit_type extended to next byte",
+ [0x0f] = "(reserved 0x0f)", [0x1f] = "unit",
+};
+
+struct avc_enum {
+ int value;
+ const char *name;
+};
+
+struct avc_field {
+ const char *name; /* Short name for field. */
+ int offset; /* Location of field, specified in bits; */
+ /* negative means from end of packet. */
+ int width; /* Width of field, 0 means use data_length. */
+ struct avc_enum *names;
+};
+
+struct avc_opcode_info {
+ const char *name;
+ struct avc_field fields[8];
+};
+
+struct avc_enum power_field_names[] = {
+ { 0x70, "on" },
+ { 0x60, "off" },
+ { }
+};
+
+static const struct avc_opcode_info opcode_info[256] = {
+
+ /* TA Document 1999026 */
+ /* AV/C Digital Interface Command Set General Specification 4.0 */
+ [0xb2] = { "power", {
+ { "state", 0, 8, power_field_names }
+ }
+ },
+ [0x30] = { "unit info", {
+ { "foo", 0, 8 },
+ { "unit_type", 8, 5 },
+ { "unit", 13, 3 },
+ { "company id", 16, 24 },
+ }
+ },
+ [0x31] = { "subunit info" },
+ [0x01] = { "reserve" },
+ [0xb0] = { "version" },
+ [0x00] = { "vendor dependent" },
+ [0x02] = { "plug info" },
+ [0x12] = { "channel usage" },
+ [0x24] = { "connect" },
+ [0x20] = { "connect av" },
+ [0x22] = { "connections" },
+ [0x11] = { "digital input" },
+ [0x10] = { "digital output" },
+ [0x25] = { "disconnect" },
+ [0x21] = { "disconnect av" },
+ [0x19] = { "input plug signal format" },
+ [0x18] = { "output plug signal format" },
+ [0x1f] = { "general bus setup" },
+
+ /* TA Document 1999025 */
+ /* AV/C Descriptor Mechanism Specification Version 1.0 */
+ [0x0c] = { "create descriptor" },
+ [0x08] = { "open descriptor" },
+ [0x09] = { "read descriptor" },
+ [0x0a] = { "write descriptor" },
+ [0x05] = { "open info block" },
+ [0x06] = { "read info block" },
+ [0x07] = { "write info block" },
+ [0x0b] = { "search descriptor" },
+ [0x0d] = { "object number select" },
+
+ /* TA Document 1999015 */
+ /* AV/C Command Set for Rate Control of Isochronous Data Flow 1.0 */
+ [0xb3] = { "rate", {
+ { "subfunction", 0, 8 },
+ { "result", 8, 8 },
+ { "plug_type", 16, 8 },
+ { "plug_id", 16, 8 },
+ }
+ },
+
+ /* TA Document 1999008 */
+ /* AV/C Audio Subunit Specification 1.0 */
+ [0xb8] = { "function block" },
+
+ /* TA Document 2001001 */
+ /* AV/C Panel Subunit Specification 1.1 */
+ [0x7d] = { "gui update" },
+ [0x7e] = { "push gui data" },
+ [0x7f] = { "user action" },
+ [0x7c] = { "pass through" },
+
+ /* */
+ [0x26] = { "asynchronous connection" },
+};
+
+struct avc_frame {
+ uint32_t operand0:8;
+ uint32_t opcode:8;
+ uint32_t subunit_id:3;
+ uint32_t subunit_type:5;
+ uint32_t ctype:4;
+ uint32_t cts:4;
+};
+
+static void
+decode_avc(struct link_transaction *t)
+{
+ struct avc_frame *frame =
+ (struct avc_frame *) t->request->packet.write_block.data;
+ const struct avc_opcode_info *info;
+ const char *name;
+ char buffer[32];
+ int i;
+
+ info = &opcode_info[frame->opcode];
+ if (info->name == NULL) {
+ snprintf(buffer, sizeof(buffer),
+ "(unknown opcode 0x%02x)", frame->opcode);
+ name = buffer;
+ } else {
+ name = info->name;
+ }
+
+ printf("av/c %s, subunit_type=%s, subunit_id=%d, opcode=%s",
+ ctype_names[frame->ctype], subunit_type_names[frame->subunit_type],
+ frame->subunit_id, name);
+
+ for (i = 0; info->fields[i].name != NULL; i++)
+ printf(", %s", info->fields[i].name);
+
+ printf("\n");
+}
+
+int
+decode_fcp(struct link_transaction *t)
+{
+ struct avc_frame *frame =
+ (struct avc_frame *) t->request->packet.write_block.data;
+ unsigned long long offset =
+ ((unsigned long long) t->request->packet.common.offset_high << 32) |
+ t->request->packet.common.offset_low;
+
+ if (t->request->packet.common.tcode != TCODE_WRITE_BLOCK_REQUEST)
+ return 0;
+
+ if (offset == CSR_FCP_COMMAND || offset == CSR_FCP_RESPONSE) {
+ switch (frame->cts) {
+ case 0x00:
+ decode_avc(t);
+ break;
+ case 0x01:
+ printf("cal fcp frame (cts=0x01)\n");
+ break;
+ case 0x02:
+ printf("ehs fcp frame (cts=0x02)\n");
+ break;
+ case 0x03:
+ printf("havi fcp frame (cts=0x03)\n");
+ break;
+ case 0x0e:
+ printf("vendor specific fcp frame (cts=0x0e)\n");
+ break;
+ case 0x0f:
+ printf("extended cts\n");
+ break;
+ default:
+ printf("reserved fcp frame (ctx=0x%02x)\n", frame->cts);
+ break;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
diff --git a/tools/firewire/list.h b/tools/firewire/list.h
new file mode 100644
index 00000000..41f4bdad
--- /dev/null
+++ b/tools/firewire/list.h
@@ -0,0 +1,62 @@
+struct list {
+ struct list *next, *prev;
+};
+
+static inline void
+list_init(struct list *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+static inline int
+list_empty(struct list *list)
+{
+ return list->next == list;
+}
+
+static inline void
+list_insert(struct list *link, struct list *new_link)
+{
+ new_link->prev = link->prev;
+ new_link->next = link;
+ new_link->prev->next = new_link;
+ new_link->next->prev = new_link;
+}
+
+static inline void
+list_append(struct list *list, struct list *new_link)
+{
+ list_insert((struct list *)list, new_link);
+}
+
+static inline void
+list_prepend(struct list *list, struct list *new_link)
+{
+ list_insert(list->next, new_link);
+}
+
+static inline void
+list_remove(struct list *link)
+{
+ link->prev->next = link->next;
+ link->next->prev = link->prev;
+}
+
+#define list_entry(link, type, member) \
+ ((type *)((char *)(link)-(unsigned long)(&((type *)0)->member)))
+
+#define list_head(list, type, member) \
+ list_entry((list)->next, type, member)
+
+#define list_tail(list, type, member) \
+ list_entry((list)->prev, type, member)
+
+#define list_next(elm, member) \
+ list_entry((elm)->member.next, typeof(*elm), member)
+
+#define list_for_each_entry(pos, list, member) \
+ for (pos = list_head(list, typeof(*pos), member); \
+ &pos->member != (list); \
+ pos = list_next(pos, member))
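+
+/*
+ * Usage sketch (the same iteration pattern nosy-dump.c uses on its
+ * pending transaction list; "link" is the embedded struct list
+ * member of the entry type):
+ *
+ *	struct link_transaction *t;
+ *
+ *	list_for_each_entry(t, &pending_transaction_list, link)
+ *		handle_transaction(t);
+ */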
+
diff --git a/tools/firewire/nosy-dump.c b/tools/firewire/nosy-dump.c
new file mode 100644
index 00000000..f93b7763
--- /dev/null
+++ b/tools/firewire/nosy-dump.c
@@ -0,0 +1,1031 @@
+/*
+ * nosy-dump - Interface to snoop mode driver for TI PCILynx 1394 controllers
+ * Copyright (C) 2002-2006 Kristian Høgsberg
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <byteswap.h>
+#include <endian.h>
+#include <fcntl.h>
+#include <linux/firewire-constants.h>
+#include <poll.h>
+#include <popt.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <termios.h>
+#include <unistd.h>
+
+#include "list.h"
+#include "nosy-dump.h"
+#include "nosy-user.h"
+
+enum {
+ PACKET_FIELD_DETAIL = 0x01,
+ PACKET_FIELD_DATA_LENGTH = 0x02,
+ /* Marks the fields we print in transaction view. */
+ PACKET_FIELD_TRANSACTION = 0x04,
+};
+
+static void print_packet(uint32_t *data, size_t length);
+static void decode_link_packet(struct link_packet *packet, size_t length,
+ int include_flags, int exclude_flags);
+static int run = 1;
+sig_t sys_sigint_handler;
+
+static char *option_nosy_device = "/dev/nosy";
+static char *option_view = "packet";
+static char *option_output;
+static char *option_input;
+static int option_hex;
+static int option_iso;
+static int option_cycle_start;
+static int option_version;
+static int option_verbose;
+
+enum {
+ VIEW_TRANSACTION,
+ VIEW_PACKET,
+ VIEW_STATS,
+};
+
+static const struct poptOption options[] = {
+ {
+ .longName = "device",
+ .shortName = 'd',
+ .argInfo = POPT_ARG_STRING,
+ .arg = &option_nosy_device,
+ .descrip = "Path to nosy device.",
+ .argDescrip = "DEVICE"
+ },
+ {
+ .longName = "view",
+ .argInfo = POPT_ARG_STRING,
+ .arg = &option_view,
+ .descrip = "Specify view of bus traffic: packet, transaction or stats.",
+ .argDescrip = "VIEW"
+ },
+ {
+ .longName = "hex",
+ .shortName = 'x',
+ .argInfo = POPT_ARG_NONE,
+ .arg = &option_hex,
+ .descrip = "Print each packet in hex.",
+ },
+ {
+ .longName = "iso",
+ .argInfo = POPT_ARG_NONE,
+ .arg = &option_iso,
+ .descrip = "Print iso packets.",
+ },
+ {
+ .longName = "cycle-start",
+ .argInfo = POPT_ARG_NONE,
+ .arg = &option_cycle_start,
+ .descrip = "Print cycle start packets.",
+ },
+ {
+ .longName = "verbose",
+ .shortName = 'v',
+ .argInfo = POPT_ARG_NONE,
+ .arg = &option_verbose,
+ .descrip = "Verbose packet view.",
+ },
+ {
+ .longName = "output",
+ .shortName = 'o',
+ .argInfo = POPT_ARG_STRING,
+ .arg = &option_output,
+ .descrip = "Log to output file.",
+ .argDescrip = "FILENAME"
+ },
+ {
+ .longName = "input",
+ .shortName = 'i',
+ .argInfo = POPT_ARG_STRING,
+ .arg = &option_input,
+ .descrip = "Decode log from file.",
+ .argDescrip = "FILENAME"
+ },
+ {
+ .longName = "version",
+ .argInfo = POPT_ARG_NONE,
+ .arg = &option_version,
+ .descrip = "Specify print version info.",
+ },
+ POPT_AUTOHELP
+ POPT_TABLEEND
+};
+
+/* Allow all ^C except the first to interrupt the program in the usual way. */
+static void
+sigint_handler(int signal_num)
+{
+ if (run == 1) {
+ run = 0;
+ signal(SIGINT, SIG_DFL);
+ }
+}
+
+static struct subaction *
+subaction_create(uint32_t *data, size_t length)
+{
+ struct subaction *sa;
+
+ /* we put the ack in the subaction struct for easy access. */
+ sa = malloc(sizeof *sa - sizeof sa->packet + length);
+ sa->ack = data[length / 4 - 1];
+ sa->length = length;
+ memcpy(&sa->packet, data, length);
+
+ return sa;
+}
+
+static void
+subaction_destroy(struct subaction *sa)
+{
+ free(sa);
+}
+
+static struct list pending_transaction_list = {
+ &pending_transaction_list, &pending_transaction_list
+};
+
+static struct link_transaction *
+link_transaction_lookup(int request_node, int response_node, int tlabel)
+{
+ struct link_transaction *t;
+
+ list_for_each_entry(t, &pending_transaction_list, link) {
+ if (t->request_node == request_node &&
+ t->response_node == response_node &&
+ t->tlabel == tlabel)
+ return t;
+ }
+
+ t = malloc(sizeof *t);
+ t->request_node = request_node;
+ t->response_node = response_node;
+ t->tlabel = tlabel;
+ list_init(&t->request_list);
+ list_init(&t->response_list);
+
+ list_append(&pending_transaction_list, &t->link);
+
+ return t;
+}
+
+static void
+link_transaction_destroy(struct link_transaction *t)
+{
+ struct subaction *sa;
+
+ while (!list_empty(&t->request_list)) {
+ sa = list_head(&t->request_list, struct subaction, link);
+ list_remove(&sa->link);
+ subaction_destroy(sa);
+ }
+ while (!list_empty(&t->response_list)) {
+ sa = list_head(&t->response_list, struct subaction, link);
+ list_remove(&sa->link);
+ subaction_destroy(sa);
+ }
+ free(t);
+}
+
+struct protocol_decoder {
+ const char *name;
+ int (*decode)(struct link_transaction *t);
+};
+
+static const struct protocol_decoder protocol_decoders[] = {
+ { "FCP", decode_fcp }
+};
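+
+/*
+ * handle_transaction() below tries each decoder in table order and
+ * stops at the first one that returns non-zero, so additional
+ * higher-level protocol decoders only need an entry in this table.
+ */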
+
+static void
+handle_transaction(struct link_transaction *t)
+{
+ struct subaction *sa;
+ int i;
+
+ if (!t->request) {
+ printf("BUG in handle_transaction\n");
+ return;
+ }
+
+ for (i = 0; i < array_length(protocol_decoders); i++)
+ if (protocol_decoders[i].decode(t))
+ break;
+
+ /* HACK: decode only fcp right now. */
+ return;
+
+ decode_link_packet(&t->request->packet, t->request->length,
+ PACKET_FIELD_TRANSACTION, 0);
+ if (t->response)
+ decode_link_packet(&t->response->packet, t->response->length,
+ PACKET_FIELD_TRANSACTION, 0);
+ else
+ printf("[no response]");
+
+ if (option_verbose) {
+ list_for_each_entry(sa, &t->request_list, link)
+ print_packet((uint32_t *) &sa->packet, sa->length);
+ list_for_each_entry(sa, &t->response_list, link)
+ print_packet((uint32_t *) &sa->packet, sa->length);
+ }
+ printf("\r\n");
+
+ link_transaction_destroy(t);
+}
+
+static void
+clear_pending_transaction_list(void)
+{
+ struct link_transaction *t;
+
+ while (!list_empty(&pending_transaction_list)) {
+ t = list_head(&pending_transaction_list,
+ struct link_transaction, link);
+ list_remove(&t->link);
+ link_transaction_destroy(t);
+ /* print unfinished transactions */
+ }
+}
+
+static const char * const tcode_names[] = {
+ [0x0] = "write_quadlet_request", [0x6] = "read_quadlet_response",
+ [0x1] = "write_block_request", [0x7] = "read_block_response",
+ [0x2] = "write_response", [0x8] = "cycle_start",
+ [0x3] = "reserved", [0x9] = "lock_request",
+ [0x4] = "read_quadlet_request", [0xa] = "iso_data",
+ [0x5] = "read_block_request", [0xb] = "lock_response",
+};
+
+static const char * const ack_names[] = {
+ [0x0] = "no ack", [0x8] = "reserved (0x08)",
+ [0x1] = "ack_complete", [0x9] = "reserved (0x09)",
+ [0x2] = "ack_pending", [0xa] = "reserved (0x0a)",
+ [0x3] = "reserved (0x03)", [0xb] = "reserved (0x0b)",
+ [0x4] = "ack_busy_x", [0xc] = "reserved (0x0c)",
+ [0x5] = "ack_busy_a", [0xd] = "ack_data_error",
+ [0x6] = "ack_busy_b", [0xe] = "ack_type_error",
+ [0x7] = "reserved (0x07)", [0xf] = "reserved (0x0f)",
+};
+
+static const char * const rcode_names[] = {
+ [0x0] = "complete", [0x4] = "conflict_error",
+ [0x1] = "reserved (0x01)", [0x5] = "data_error",
+ [0x2] = "reserved (0x02)", [0x6] = "type_error",
+ [0x3] = "reserved (0x03)", [0x7] = "address_error",
+};
+
+static const char * const retry_names[] = {
+ [0x0] = "retry_1",
+ [0x1] = "retry_x",
+ [0x2] = "retry_a",
+ [0x3] = "retry_b",
+};
+
+enum {
+ PACKET_RESERVED,
+ PACKET_REQUEST,
+ PACKET_RESPONSE,
+ PACKET_OTHER,
+};
+
+struct packet_info {
+ const char *name;
+ int type;
+ int response_tcode;
+ const struct packet_field *fields;
+ int field_count;
+};
+
+struct packet_field {
+ const char *name; /* Short name for field. */
+ int offset; /* Location of field, specified in bits; */
+ /* negative means from end of packet. */
+ int width; /* Width of field, 0 means use data_length. */
+ int flags; /* Show options. */
+ const char * const *value_names;
+};
+
+#define COMMON_REQUEST_FIELDS \
+ { "dest", 0, 16, PACKET_FIELD_TRANSACTION }, \
+ { "tl", 16, 6 }, \
+ { "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \
+ { "tcode", 24, 4, PACKET_FIELD_TRANSACTION, tcode_names }, \
+ { "pri", 28, 4, PACKET_FIELD_DETAIL }, \
+ { "src", 32, 16, PACKET_FIELD_TRANSACTION }, \
+ { "offs", 48, 48, PACKET_FIELD_TRANSACTION }
+
+#define COMMON_RESPONSE_FIELDS \
+ { "dest", 0, 16 }, \
+ { "tl", 16, 6 }, \
+ { "rt", 22, 2, PACKET_FIELD_DETAIL, retry_names }, \
+ { "tcode", 24, 4, 0, tcode_names }, \
+ { "pri", 28, 4, PACKET_FIELD_DETAIL }, \
+ { "src", 32, 16 }, \
+ { "rcode", 48, 4, PACKET_FIELD_TRANSACTION, rcode_names }
+
+static const struct packet_field read_quadlet_request_fields[] = {
+ COMMON_REQUEST_FIELDS,
+ { "crc", 96, 32, PACKET_FIELD_DETAIL },
+ { "ack", 156, 4, 0, ack_names },
+};
+
+static const struct packet_field read_quadlet_response_fields[] = {
+ COMMON_RESPONSE_FIELDS,
+ { "data", 96, 32, PACKET_FIELD_TRANSACTION },
+ { "crc", 128, 32, PACKET_FIELD_DETAIL },
+ { "ack", 188, 4, 0, ack_names },
+};
+
+static const struct packet_field read_block_request_fields[] = {
+ COMMON_REQUEST_FIELDS,
+ { "data_length", 96, 16, PACKET_FIELD_TRANSACTION },
+ { "extended_tcode", 112, 16 },
+ { "crc", 128, 32, PACKET_FIELD_DETAIL },
+ { "ack", 188, 4, 0, ack_names },
+};
+
+static const struct packet_field block_response_fields[] = {
+ COMMON_RESPONSE_FIELDS,
+ { "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH },
+ { "extended_tcode", 112, 16 },
+ { "crc", 128, 32, PACKET_FIELD_DETAIL },
+ { "data", 160, 0, PACKET_FIELD_TRANSACTION },
+ { "crc", -64, 32, PACKET_FIELD_DETAIL },
+ { "ack", -4, 4, 0, ack_names },
+};
+
+static const struct packet_field write_quadlet_request_fields[] = {
+ COMMON_REQUEST_FIELDS,
+ { "data", 96, 32, PACKET_FIELD_TRANSACTION },
+ { "ack", -4, 4, 0, ack_names },
+};
+
+static const struct packet_field block_request_fields[] = {
+ COMMON_REQUEST_FIELDS,
+ { "data_length", 96, 16, PACKET_FIELD_DATA_LENGTH | PACKET_FIELD_TRANSACTION },
+ { "extended_tcode", 112, 16, PACKET_FIELD_TRANSACTION },
+ { "crc", 128, 32, PACKET_FIELD_DETAIL },
+ { "data", 160, 0, PACKET_FIELD_TRANSACTION },
+ { "crc", -64, 32, PACKET_FIELD_DETAIL },
+ { "ack", -4, 4, 0, ack_names },
+};
+
+static const struct packet_field write_response_fields[] = {
+ COMMON_RESPONSE_FIELDS,
+ { "reserved", 64, 32, PACKET_FIELD_DETAIL },
+ { "ack", -4, 4, 0, ack_names },
+};
+
+static const struct packet_field iso_data_fields[] = {
+ { "data_length", 0, 16, PACKET_FIELD_DATA_LENGTH },
+ { "tag", 16, 2 },
+ { "channel", 18, 6 },
+ { "tcode", 24, 4, 0, tcode_names },
+ { "sy", 28, 4 },
+ { "crc", 32, 32, PACKET_FIELD_DETAIL },
+ { "data", 64, 0 },
+ { "crc", -64, 32, PACKET_FIELD_DETAIL },
+ { "ack", -4, 4, 0, ack_names },
+};
+
+static const struct packet_info packet_info[] = {
+ {
+ .name = "write_quadlet_request",
+ .type = PACKET_REQUEST,
+ .response_tcode = TCODE_WRITE_RESPONSE,
+ .fields = write_quadlet_request_fields,
+ .field_count = array_length(write_quadlet_request_fields)
+ },
+ {
+ .name = "write_block_request",
+ .type = PACKET_REQUEST,
+ .response_tcode = TCODE_WRITE_RESPONSE,
+ .fields = block_request_fields,
+ .field_count = array_length(block_request_fields)
+ },
+ {
+ .name = "write_response",
+ .type = PACKET_RESPONSE,
+ .fields = write_response_fields,
+ .field_count = array_length(write_response_fields)
+ },
+ {
+ .name = "reserved",
+ .type = PACKET_RESERVED,
+ },
+ {
+ .name = "read_quadlet_request",
+ .type = PACKET_REQUEST,
+ .response_tcode = TCODE_READ_QUADLET_RESPONSE,
+ .fields = read_quadlet_request_fields,
+ .field_count = array_length(read_quadlet_request_fields)
+ },
+ {
+ .name = "read_block_request",
+ .type = PACKET_REQUEST,
+ .response_tcode = TCODE_READ_BLOCK_RESPONSE,
+ .fields = read_block_request_fields,
+ .field_count = array_length(read_block_request_fields)
+ },
+ {
+ .name = "read_quadlet_response",
+ .type = PACKET_RESPONSE,
+ .fields = read_quadlet_response_fields,
+ .field_count = array_length(read_quadlet_response_fields)
+ },
+ {
+ .name = "read_block_response",
+ .type = PACKET_RESPONSE,
+ .fields = block_response_fields,
+ .field_count = array_length(block_response_fields)
+ },
+ {
+ .name = "cycle_start",
+ .type = PACKET_OTHER,
+ .fields = write_quadlet_request_fields,
+ .field_count = array_length(write_quadlet_request_fields)
+ },
+ {
+ .name = "lock_request",
+ .type = PACKET_REQUEST,
+ .fields = block_request_fields,
+ .field_count = array_length(block_request_fields)
+ },
+ {
+ .name = "iso_data",
+ .type = PACKET_OTHER,
+ .fields = iso_data_fields,
+ .field_count = array_length(iso_data_fields)
+ },
+ {
+ .name = "lock_response",
+ .type = PACKET_RESPONSE,
+ .fields = block_response_fields,
+ .field_count = array_length(block_response_fields)
+ },
+};
+
+static int
+handle_request_packet(uint32_t *data, size_t length)
+{
+ struct link_packet *p = (struct link_packet *) data;
+ struct subaction *sa, *prev;
+ struct link_transaction *t;
+
+ t = link_transaction_lookup(p->common.source, p->common.destination,
+ p->common.tlabel);
+ sa = subaction_create(data, length);
+ t->request = sa;
+
+ if (!list_empty(&t->request_list)) {
+ prev = list_tail(&t->request_list,
+ struct subaction, link);
+
+ if (!ACK_BUSY(prev->ack)) {
+ /*
+ * error, we should only see ack_busy_* before the
+ * ack_pending/ack_complete -- this is an ack_pending
+ * instead (ack_complete would have finished the
+ * transaction).
+ */
+ }
+
+ if (prev->packet.common.tcode != sa->packet.common.tcode ||
+ prev->packet.common.tlabel != sa->packet.common.tlabel) {
+ /* memcmp() ? */
+ /* error, these should match for retries. */
+ }
+ }
+
+ list_append(&t->request_list, &sa->link);
+
+ switch (sa->ack) {
+ case ACK_COMPLETE:
+ if (p->common.tcode != TCODE_WRITE_QUADLET_REQUEST &&
+ p->common.tcode != TCODE_WRITE_BLOCK_REQUEST)
+ /* error, unified transactions only allowed for write */;
+ list_remove(&t->link);
+ handle_transaction(t);
+ break;
+
+ case ACK_NO_ACK:
+ case ACK_DATA_ERROR:
+ case ACK_TYPE_ERROR:
+ list_remove(&t->link);
+ handle_transaction(t);
+ break;
+
+ case ACK_PENDING:
+ /* request subaction phase over, wait for response. */
+ break;
+
+ case ACK_BUSY_X:
+ case ACK_BUSY_A:
+ case ACK_BUSY_B:
+ /* ok, wait for retry. */
+ /* check that retry protocol is respected. */
+ break;
+ }
+
+ return 1;
+}
+
+static int
+handle_response_packet(uint32_t *data, size_t length)
+{
+ struct link_packet *p = (struct link_packet *) data;
+ struct subaction *sa, *prev;
+ struct link_transaction *t;
+
+ t = link_transaction_lookup(p->common.destination, p->common.source,
+ p->common.tlabel);
+ if (list_empty(&t->request_list)) {
+ /* unsolicited response */
+ }
+
+ sa = subaction_create(data, length);
+ t->response = sa;
+
+ if (!list_empty(&t->response_list)) {
+ prev = list_tail(&t->response_list, struct subaction, link);
+
+ if (!ACK_BUSY(prev->ack)) {
+ /*
+ * error, we should only see ack_busy_* before the
+ * ack_pending/ack_complete
+ */
+ }
+
+ if (prev->packet.common.tcode != sa->packet.common.tcode ||
+ prev->packet.common.tlabel != sa->packet.common.tlabel) {
+ /* use memcmp() instead? */
+ /* error, these should match for retries. */
+ }
+ } else {
+ prev = list_tail(&t->request_list, struct subaction, link);
+ if (prev->ack != ACK_PENDING) {
+ /*
+ * error, should not get response unless last request got
+ * ack_pending.
+ */
+ }
+
+ if (packet_info[prev->packet.common.tcode].response_tcode !=
+ sa->packet.common.tcode) {
+ /* error, tcode mismatch */
+ }
+ }
+
+ list_append(&t->response_list, &sa->link);
+
+ switch (sa->ack) {
+ case ACK_COMPLETE:
+ case ACK_NO_ACK:
+ case ACK_DATA_ERROR:
+ case ACK_TYPE_ERROR:
+ list_remove(&t->link);
+ handle_transaction(t);
+ /* transaction complete, remove t from pending list. */
+ break;
+
+ case ACK_PENDING:
+ /* error for responses. */
+ break;
+
+ case ACK_BUSY_X:
+ case ACK_BUSY_A:
+ case ACK_BUSY_B:
+ /* no problem, wait for next retry */
+ break;
+ }
+
+ return 1;
+}
+
+static int
+handle_packet(uint32_t *data, size_t length)
+{
+ if (length == 0) {
+ printf("bus reset\r\n");
+ clear_pending_transaction_list();
+ } else if (length > sizeof(struct phy_packet)) {
+ struct link_packet *p = (struct link_packet *) data;
+
+ switch (packet_info[p->common.tcode].type) {
+ case PACKET_REQUEST:
+ return handle_request_packet(data, length);
+
+ case PACKET_RESPONSE:
+ return handle_response_packet(data, length);
+
+ case PACKET_OTHER:
+ case PACKET_RESERVED:
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
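+/*
+ * Field offsets are given in bits from the start of the packet
+ * header; data[0] holds the capture timestamp, which is why the
+ * quadlet index below starts at offset / 32 + 1.
+ */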
+static unsigned int
+get_bits(struct link_packet *packet, int offset, int width)
+{
+ uint32_t *data = (uint32_t *) packet;
+ uint32_t index, shift, mask;
+
+ index = offset / 32 + 1;
+ shift = 32 - (offset & 31) - width;
+ mask = width == 32 ? ~0 : (1 << width) - 1;
+
+ return (data[index] >> shift) & mask;
+}
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define byte_index(i) ((i) ^ 3)
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define byte_index(i) (i)
+#else
+#error unsupported byte order.
+#endif
+
+static void
+dump_data(unsigned char *data, int length)
+{
+ int i, print_length;
+
+ if (length > 128)
+ print_length = 128;
+ else
+ print_length = length;
+
+ for (i = 0; i < print_length; i++)
+ printf("%s%02hhx",
+ (i % 4 == 0 && i != 0) ? " " : "",
+ data[byte_index(i)]);
+
+ if (print_length < length)
+ printf(" (%d more bytes)", length - print_length);
+}
+
+static void
+decode_link_packet(struct link_packet *packet, size_t length,
+ int include_flags, int exclude_flags)
+{
+ const struct packet_info *pi;
+ int data_length = 0;
+ int i;
+
+ pi = &packet_info[packet->common.tcode];
+
+ for (i = 0; i < pi->field_count; i++) {
+ const struct packet_field *f = &pi->fields[i];
+ int offset;
+
+ if (f->flags & exclude_flags)
+ continue;
+ if (include_flags && !(f->flags & include_flags))
+ continue;
+
+ if (f->offset < 0)
+ offset = length * 8 + f->offset - 32;
+ else
+ offset = f->offset;
+
+ if (f->value_names != NULL) {
+ uint32_t bits;
+
+ bits = get_bits(packet, offset, f->width);
+ printf("%s", f->value_names[bits]);
+ } else if (f->width == 0) {
+ printf("%s=[", f->name);
+ dump_data((unsigned char *) packet + (offset / 8 + 4), data_length);
+ printf("]");
+ } else {
+ unsigned long long bits;
+ int high_width, low_width;
+
+ if ((offset & ~31) != ((offset + f->width - 1) & ~31)) {
+ /* Bit field spans quadlet boundary. */
+ high_width = ((offset + 31) & ~31) - offset;
+ low_width = f->width - high_width;
+
+ bits = get_bits(packet, offset, high_width);
+ bits = (bits << low_width) |
+ get_bits(packet, offset + high_width, low_width);
+ } else {
+ bits = get_bits(packet, offset, f->width);
+ }
+
+ printf("%s=0x%0*llx", f->name, (f->width + 3) / 4, bits);
+
+ if (f->flags & PACKET_FIELD_DATA_LENGTH)
+ data_length = bits;
+ }
+
+ if (i < pi->field_count - 1)
+ printf(", ");
+ }
+}
+
+static void
+print_packet(uint32_t *data, size_t length)
+{
+ int i;
+
+ printf("%6u ", data[0]);
+
+ if (length == 4) {
+ printf("bus reset");
+ } else if (length < sizeof(struct phy_packet)) {
+ printf("short packet: ");
+ for (i = 1; i < length / 4; i++)
+ printf("%s%08x", i == 0 ? "[" : " ", data[i]);
+ printf("]");
+
+ } else if (length == sizeof(struct phy_packet) && data[1] == ~data[2]) {
+ struct phy_packet *pp = (struct phy_packet *) data;
+
+ /* phy packets are 3 quadlets: the 1 quadlet payload,
+ * the bitwise inverse of the payload and the snoop
+ * mode ack */
+
+ switch (pp->common.identifier) {
+ case PHY_PACKET_CONFIGURATION:
+ if (!pp->phy_config.set_root && !pp->phy_config.set_gap_count) {
+ printf("ext phy config: phy_id=%02x", pp->phy_config.root_id);
+ } else {
+ printf("phy config:");
+ if (pp->phy_config.set_root)
+ printf(" set_root_id=%02x", pp->phy_config.root_id);
+ if (pp->phy_config.set_gap_count)
+ printf(" set_gap_count=%d", pp->phy_config.gap_count);
+ }
+ break;
+
+ case PHY_PACKET_LINK_ON:
+ printf("link-on packet, phy_id=%02x", pp->link_on.phy_id);
+ break;
+
+ case PHY_PACKET_SELF_ID:
+ if (pp->self_id.extended) {
+ printf("extended self id: phy_id=%02x, seq=%d",
+ pp->ext_self_id.phy_id, pp->ext_self_id.sequence);
+ } else {
+ static const char * const speed_names[] = {
+ "S100", "S200", "S400", "BETA"
+ };
+ printf("self id: phy_id=%02x, link %s, gap_count=%d, speed=%s%s%s",
+ pp->self_id.phy_id,
+ (pp->self_id.link_active ? "active" : "not active"),
+ pp->self_id.gap_count,
+ speed_names[pp->self_id.phy_speed],
+ (pp->self_id.contender ? ", irm contender" : ""),
+ (pp->self_id.initiated_reset ? ", initiator" : ""));
+ }
+ break;
+ default:
+ printf("unknown phy packet: ");
+ for (i = 1; i < length / 4; i++)
+ printf("%s%08x", i == 0 ? "[" : " ", data[i]);
+ printf("]");
+ break;
+ }
+ } else {
+ struct link_packet *packet = (struct link_packet *) data;
+
+ decode_link_packet(packet, length, 0,
+ option_verbose ? 0 : PACKET_FIELD_DETAIL);
+ }
+
+ if (option_hex) {
+ printf(" [");
+ dump_data((unsigned char *) data + 4, length - 4);
+ printf("]");
+ }
+
+ printf("\r\n");
+}
+
+#define HIDE_CURSOR "\033[?25l"
+#define SHOW_CURSOR "\033[?25h"
+#define CLEAR "\033[H\033[2J"
+
+static void
+print_stats(uint32_t *data, size_t length)
+{
+ static int bus_reset_count, short_packet_count, phy_packet_count;
+ static int tcode_count[16];
+ static struct timeval last_update;
+ struct timeval now;
+ int i;
+
+ if (length == 0)
+ bus_reset_count++;
+ else if (length < sizeof(struct phy_packet))
+ short_packet_count++;
+ else if (length == sizeof(struct phy_packet) && data[1] == ~data[2])
+ phy_packet_count++;
+ else {
+ struct link_packet *packet = (struct link_packet *) data;
+ tcode_count[packet->common.tcode]++;
+ }
+
+ gettimeofday(&now, NULL);
+ if (now.tv_sec <= last_update.tv_sec &&
+ now.tv_usec < last_update.tv_usec + 500000)
+ return;
+
+ last_update = now;
+ printf(CLEAR HIDE_CURSOR
+ " bus resets : %8d\n"
+ " short packets : %8d\n"
+ " phy packets : %8d\n",
+ bus_reset_count, short_packet_count, phy_packet_count);
+
+ for (i = 0; i < array_length(packet_info); i++)
+ if (packet_info[i].type != PACKET_RESERVED)
+ printf(" %-24s: %8d\n", packet_info[i].name, tcode_count[i]);
+ printf(SHOW_CURSOR "\n");
+}
+
+static struct termios saved_attributes;
+
+static void
+reset_input_mode(void)
+{
+ tcsetattr(STDIN_FILENO, TCSANOW, &saved_attributes);
+}
+
+static void
+set_input_mode(void)
+{
+ struct termios tattr;
+
+ /* Make sure stdin is a terminal. */
+ if (!isatty(STDIN_FILENO)) {
+ fprintf(stderr, "Not a terminal.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Save the terminal attributes so we can restore them later. */
+ tcgetattr(STDIN_FILENO, &saved_attributes);
+ atexit(reset_input_mode);
+
+ /* Set the funny terminal modes. */
+ tcgetattr(STDIN_FILENO, &tattr);
+ tattr.c_lflag &= ~(ICANON|ECHO); /* Clear ICANON and ECHO. */
+ tattr.c_cc[VMIN] = 1;
+ tattr.c_cc[VTIME] = 0;
+ tcsetattr(STDIN_FILENO, TCSAFLUSH, &tattr);
+}
+
+int main(int argc, const char *argv[])
+{
+ uint32_t buf[128 * 1024];
+ uint32_t filter;
+ int length, retval, view;
+ int fd = -1;
+ FILE *output = NULL, *input = NULL;
+ poptContext con;
+ char c;
+ struct pollfd pollfds[2];
+
+ sys_sigint_handler = signal(SIGINT, sigint_handler);
+
+ con = poptGetContext(NULL, argc, argv, options, 0);
+ retval = poptGetNextOpt(con);
+ if (retval < -1) {
+ poptPrintUsage(con, stdout, 0);
+ return -1;
+ }
+
+ if (option_version) {
+ printf("dump tool for nosy sniffer, version %s\n", VERSION);
+ return 0;
+ }
+
+ if (__BYTE_ORDER != __LITTLE_ENDIAN)
+ fprintf(stderr, "warning: nosy has only been tested on little "
+ "endian machines\n");
+
+ if (option_input != NULL) {
+ input = fopen(option_input, "r");
+ if (input == NULL) {
+ fprintf(stderr, "Could not open %s, %m\n", option_input);
+ return -1;
+ }
+ } else {
+ fd = open(option_nosy_device, O_RDWR);
+ if (fd < 0) {
+ fprintf(stderr, "Could not open %s, %m\n", option_nosy_device);
+ return -1;
+ }
+ set_input_mode();
+ }
+
+ if (strcmp(option_view, "transaction") == 0)
+ view = VIEW_TRANSACTION;
+ else if (strcmp(option_view, "stats") == 0)
+ view = VIEW_STATS;
+ else
+ view = VIEW_PACKET;
+
+ if (option_output) {
+ output = fopen(option_output, "w");
+ if (output == NULL) {
+ fprintf(stderr, "Could not open %s, %m\n", option_output);
+ return -1;
+ }
+ }
+
+ setvbuf(stdout, NULL, _IOLBF, BUFSIZ);
+
+ filter = ~0;
+ if (!option_iso)
+ filter &= ~(1 << TCODE_STREAM_DATA);
+ if (!option_cycle_start)
+ filter &= ~(1 << TCODE_CYCLE_START);
+ if (view == VIEW_STATS)
+ filter = ~(1 << TCODE_CYCLE_START);
+
+ ioctl(fd, NOSY_IOC_FILTER, filter);
+
+ ioctl(fd, NOSY_IOC_START);
+
+ pollfds[0].fd = fd;
+ pollfds[0].events = POLLIN;
+ pollfds[1].fd = STDIN_FILENO;
+ pollfds[1].events = POLLIN;
+
+ while (run) {
+ if (input != NULL) {
+ if (fread(&length, sizeof length, 1, input) != 1)
+ return 0;
+ fread(buf, 1, length, input);
+ } else {
+ poll(pollfds, 2, -1);
+ if (pollfds[1].revents) {
+ read(STDIN_FILENO, &c, sizeof c);
+ switch (c) {
+ case 'q':
+ if (output != NULL)
+ fclose(output);
+ return 0;
+ }
+ }
+
+ if (pollfds[0].revents)
+ length = read(fd, buf, sizeof buf);
+ else
+ continue;
+ }
+
+ if (output != NULL) {
+ fwrite(&length, sizeof length, 1, output);
+ fwrite(buf, 1, length, output);
+ }
+
+ switch (view) {
+ case VIEW_TRANSACTION:
+ handle_packet(buf, length);
+ break;
+ case VIEW_PACKET:
+ print_packet(buf, length);
+ break;
+ case VIEW_STATS:
+ print_stats(buf, length);
+ break;
+ }
+ }
+
+ if (output != NULL)
+ fclose(output);
+
+ close(fd);
+
+ poptFreeContext(con);
+
+ return 0;
+}
diff --git a/tools/firewire/nosy-dump.h b/tools/firewire/nosy-dump.h
new file mode 100644
index 00000000..3a4b5b33
--- /dev/null
+++ b/tools/firewire/nosy-dump.h
@@ -0,0 +1,173 @@
+#ifndef __nosy_dump_h__
+#define __nosy_dump_h__
+
+#define array_length(array) (sizeof(array) / sizeof(array[0]))
+
+#define ACK_NO_ACK 0x0
+#define ACK_DONE(a) ((a >> 2) == 0)
+#define ACK_BUSY(a) ((a >> 2) == 1)
+#define ACK_ERROR(a) ((a >> 2) == 3)
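+
+/*
+ * The upper two bits of the 4-bit ack code classify it: 0 means done
+ * (ack_complete, ack_pending), 1 means busy (ack_busy_x/a/b) and
+ * 3 means error (ack_data_error, ack_type_error).
+ */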
+
+#include <stdint.h>
+
+struct phy_packet {
+ uint32_t timestamp;
+ union {
+ struct {
+ uint32_t zero:24;
+ uint32_t phy_id:6;
+ uint32_t identifier:2;
+ } common, link_on;
+
+ struct {
+ uint32_t zero:16;
+ uint32_t gap_count:6;
+ uint32_t set_gap_count:1;
+ uint32_t set_root:1;
+ uint32_t root_id:6;
+ uint32_t identifier:2;
+ } phy_config;
+
+ struct {
+ uint32_t more_packets:1;
+ uint32_t initiated_reset:1;
+ uint32_t port2:2;
+ uint32_t port1:2;
+ uint32_t port0:2;
+ uint32_t power_class:3;
+ uint32_t contender:1;
+ uint32_t phy_delay:2;
+ uint32_t phy_speed:2;
+ uint32_t gap_count:6;
+ uint32_t link_active:1;
+ uint32_t extended:1;
+ uint32_t phy_id:6;
+ uint32_t identifier:2;
+ } self_id;
+
+ struct {
+ uint32_t more_packets:1;
+ uint32_t reserved1:1;
+ uint32_t porth:2;
+ uint32_t portg:2;
+ uint32_t portf:2;
+ uint32_t porte:2;
+ uint32_t portd:2;
+ uint32_t portc:2;
+ uint32_t portb:2;
+ uint32_t porta:2;
+ uint32_t reserved0:2;
+ uint32_t sequence:3;
+ uint32_t extended:1;
+ uint32_t phy_id:6;
+ uint32_t identifier:2;
+ } ext_self_id;
+ };
+ uint32_t inverted;
+ uint32_t ack;
+};
+
+#define TCODE_PHY_PACKET 0x10
+
+#define PHY_PACKET_CONFIGURATION 0x00
+#define PHY_PACKET_LINK_ON 0x01
+#define PHY_PACKET_SELF_ID 0x02
+
+struct link_packet {
+ uint32_t timestamp;
+ union {
+ struct {
+ uint32_t priority:4;
+ uint32_t tcode:4;
+ uint32_t rt:2;
+ uint32_t tlabel:6;
+ uint32_t destination:16;
+
+ uint32_t offset_high:16;
+ uint32_t source:16;
+
+ uint32_t offset_low;
+ } common;
+
+ struct {
+ uint32_t common[3];
+ uint32_t crc;
+ } read_quadlet;
+
+ struct {
+ uint32_t common[3];
+ uint32_t data;
+ uint32_t crc;
+ } read_quadlet_response;
+
+ struct {
+ uint32_t common[3];
+ uint32_t extended_tcode:16;
+ uint32_t data_length:16;
+ uint32_t crc;
+ } read_block;
+
+ struct {
+ uint32_t common[3];
+ uint32_t extended_tcode:16;
+ uint32_t data_length:16;
+ uint32_t crc;
+ uint32_t data[0];
+ /* crc and ack follow. */
+ } read_block_response;
+
+ struct {
+ uint32_t common[3];
+ uint32_t data;
+ uint32_t crc;
+ } write_quadlet;
+
+ struct {
+ uint32_t common[3];
+ uint32_t extended_tcode:16;
+ uint32_t data_length:16;
+ uint32_t crc;
+ uint32_t data[0];
+ /* crc and ack follow. */
+ } write_block;
+
+ struct {
+ uint32_t common[3];
+ uint32_t crc;
+ } write_response;
+
+ struct {
+ uint32_t common[3];
+ uint32_t data;
+ uint32_t crc;
+ } cycle_start;
+
+ struct {
+ uint32_t sy:4;
+ uint32_t tcode:4;
+ uint32_t channel:6;
+ uint32_t tag:2;
+ uint32_t data_length:16;
+
+ uint32_t crc;
+ } iso_data;
+ };
+};
+
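+/*
+ * One request or response packet together with the ack it received;
+ * subactions are queued on a struct link_transaction (below) until the
+ * matching request/response pair is complete.
+ */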
+struct subaction {
+ uint32_t ack;
+ size_t length;
+ struct list link;
+ struct link_packet packet;
+};
+
+struct link_transaction {
+ int request_node, response_node, tlabel;
+ struct subaction *request, *response;
+ struct list request_list, response_list;
+ struct list link;
+};
+
+int decode_fcp(struct link_transaction *t);
+
+#endif /* __nosy_dump_h__ */
diff --git a/tools/perf/CREDITS b/tools/perf/CREDITS
new file mode 100644
index 00000000..c2ddcb3a
--- /dev/null
+++ b/tools/perf/CREDITS
@@ -0,0 +1,30 @@
+Most of the infrastructure that 'perf' uses here has been reused
+from the Git project, as of version:
+
+ 66996ec: Sync with 1.6.2.4
+
+Here is an (incomplete!) list of main contributors to those files
+in util/* and elsewhere:
+
+ Alex Riesen
+ Christian Couder
+ Dmitry Potapov
+ Jeff King
+ Johannes Schindelin
+ Johannes Sixt
+ Junio C Hamano
+ Linus Torvalds
+ Matthias Kestenholz
+ Michal Ostrowski
+ Miklos Vajna
+ Petr Baudis
+ Pierre Habouzit
+ René Scharfe
+ Samuel Tardieu
+ Shawn O. Pearce
+ Steffen Prohaska
+ Steve Haslam
+
+Thanks guys!
+
+The full history of the files can be found in the upstream Git commits.
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
new file mode 100644
index 00000000..bd498d49
--- /dev/null
+++ b/tools/perf/Documentation/Makefile
@@ -0,0 +1,302 @@
+MAN1_TXT= \
+ $(filter-out $(addsuffix .txt, $(ARTICLES) $(SP_ARTICLES)), \
+ $(wildcard perf-*.txt)) \
+ perf.txt
+MAN5_TXT=
+MAN7_TXT=
+
+MAN_TXT = $(MAN1_TXT) $(MAN5_TXT) $(MAN7_TXT)
+MAN_XML=$(patsubst %.txt,%.xml,$(MAN_TXT))
+MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT))
+
+DOC_HTML=$(MAN_HTML)
+
+ARTICLES =
+# Special articles (SP_ARTICLES) come with their own formatting rules.
+SP_ARTICLES =
+API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technical/api-index.txt, $(wildcard technical/api-*.txt)))
+SP_ARTICLES += $(API_DOCS)
+SP_ARTICLES += technical/api-index
+
+DOC_HTML += $(patsubst %,%.html,$(ARTICLES) $(SP_ARTICLES))
+
+DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT))
+DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT))
+DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT))
+
+# Make the path relative to DESTDIR, not prefix
+ifndef DESTDIR
+prefix?=$(HOME)
+endif
+bindir?=$(prefix)/bin
+htmldir?=$(prefix)/share/doc/perf-doc
+pdfdir?=$(prefix)/share/doc/perf-doc
+mandir?=$(prefix)/share/man
+man1dir=$(mandir)/man1
+man5dir=$(mandir)/man5
+man7dir=$(mandir)/man7
+
+ASCIIDOC=asciidoc
+ASCIIDOC_EXTRA = --unsafe
+MANPAGE_XSL = manpage-normal.xsl
+XMLTO_EXTRA =
+INSTALL?=install
+RM ?= rm -f
+DOC_REF = origin/man
+HTML_REF = origin/html
+
+infodir?=$(prefix)/share/info
+MAKEINFO=makeinfo
+INSTALL_INFO=install-info
+DOCBOOK2X_TEXI=docbook2x-texi
+DBLATEX=dblatex
+ifndef PERL_PATH
+ PERL_PATH = /usr/bin/perl
+endif
+
+-include ../config.mak.autogen
+-include ../config.mak
+
+#
+# For asciidoc ...
+# -7.1.2, no extra settings are needed.
+# 8.0-, set ASCIIDOC8.
+#
+
+#
+# For docbook-xsl ...
+# -1.68.1, set ASCIIDOC_NO_ROFF? (based on changelog from 1.73.0)
+# 1.69.0, no extra settings are needed?
+# 1.69.1-1.71.0, set DOCBOOK_SUPPRESS_SP?
+# 1.71.1, no extra settings are needed?
+# 1.72.0, set DOCBOOK_XSL_172.
+# 1.73.0-, set ASCIIDOC_NO_ROFF
+#
+
+#
+# If you had been using DOCBOOK_XSL_172 in an attempt to get rid
+# of 'the ".ft C" problem' in your generated manpages, and you
+# instead ended up with weird characters around callouts, try
+# using ASCIIDOC_NO_ROFF instead (it works fine with ASCIIDOC8).
+#
+
+ifdef ASCIIDOC8
+ASCIIDOC_EXTRA += -a asciidoc7compatible
+endif
+ifdef DOCBOOK_XSL_172
+ASCIIDOC_EXTRA += -a perf-asciidoc-no-roff
+MANPAGE_XSL = manpage-1.72.xsl
+else
+ ifdef ASCIIDOC_NO_ROFF
+ # docbook-xsl after 1.72 needs the regular XSL, but will not
+ # pass-thru raw roff codes from asciidoc.conf, so turn them off.
+ ASCIIDOC_EXTRA += -a perf-asciidoc-no-roff
+ endif
+endif
+ifdef MAN_BOLD_LITERAL
+XMLTO_EXTRA += -m manpage-bold-literal.xsl
+endif
+ifdef DOCBOOK_SUPPRESS_SP
+XMLTO_EXTRA += -m manpage-suppress-sp.xsl
+endif
+
+SHELL_PATH ?= $(SHELL)
+# Shell quote;
+SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
+
+#
+# Please note that there is a minor bug in asciidoc.
+# The version after 6.0.3 _will_ include the patch found here:
+# http://marc.theaimsgroup.com/?l=perf&m=111558757202243&w=2
+#
+# Until that version is released you may have to apply the patch
+# yourself - yes, all 6 characters of it!
+#
+
+QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
+QUIET_SUBDIR1 =
+
+ifneq ($(findstring $(MAKEFLAGS),w),w)
+PRINT_DIR = --no-print-directory
+else # "make -w"
+NO_SUBDIR = :
+endif
+
+ifneq ($(findstring $(MAKEFLAGS),s),s)
+ifndef V
+ QUIET_ASCIIDOC = @echo ' ' ASCIIDOC $@;
+ QUIET_XMLTO = @echo ' ' XMLTO $@;
+ QUIET_DB2TEXI = @echo ' ' DB2TEXI $@;
+ QUIET_MAKEINFO = @echo ' ' MAKEINFO $@;
+ QUIET_DBLATEX = @echo ' ' DBLATEX $@;
+ QUIET_XSLTPROC = @echo ' ' XSLTPROC $@;
+ QUIET_GEN = @echo ' ' GEN $@;
+ QUIET_STDERR = 2> /dev/null
+ QUIET_SUBDIR0 = +@subdir=
+ QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
+ $(MAKE) $(PRINT_DIR) -C $$subdir
+ export V
+endif
+endif
+
+all: html man
+
+html: $(DOC_HTML)
+
+$(DOC_HTML) $(DOC_MAN1) $(DOC_MAN5) $(DOC_MAN7): asciidoc.conf
+
+man: man1 man5 man7
+man1: $(DOC_MAN1)
+man5: $(DOC_MAN5)
+man7: $(DOC_MAN7)
+
+info: perf.info perfman.info
+
+pdf: user-manual.pdf
+
+install: install-man
+
+install-man: man
+ $(INSTALL) -d -m 755 $(DESTDIR)$(man1dir)
+# $(INSTALL) -d -m 755 $(DESTDIR)$(man5dir)
+# $(INSTALL) -d -m 755 $(DESTDIR)$(man7dir)
+ $(INSTALL) -m 644 $(DOC_MAN1) $(DESTDIR)$(man1dir)
+# $(INSTALL) -m 644 $(DOC_MAN5) $(DESTDIR)$(man5dir)
+# $(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir)
+
+install-info: info
+ $(INSTALL) -d -m 755 $(DESTDIR)$(infodir)
+ $(INSTALL) -m 644 perf.info perfman.info $(DESTDIR)$(infodir)
+ if test -r $(DESTDIR)$(infodir)/dir; then \
+ $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perf.info ;\
+ $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perfman.info ;\
+ else \
+ echo "No directory found in $(DESTDIR)$(infodir)" >&2 ; \
+ fi
+
+install-pdf: pdf
+ $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir)
+ $(INSTALL) -m 644 user-manual.pdf $(DESTDIR)$(pdfdir)
+
+install-html: html
+ '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
+
+../PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
+ $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) PERF-VERSION-FILE
+
+-include ../PERF-VERSION-FILE
+
+#
+# Determine "include::" file references in asciidoc files.
+#
+doc.dep : $(wildcard *.txt) build-docdep.perl
+ $(QUIET_GEN)$(RM) $@+ $@ && \
+ $(PERL_PATH) ./build-docdep.perl >$@+ $(QUIET_STDERR) && \
+ mv $@+ $@
+
+-include doc.dep
+
+cmds_txt = cmds-ancillaryinterrogators.txt \
+ cmds-ancillarymanipulators.txt \
+ cmds-mainporcelain.txt \
+ cmds-plumbinginterrogators.txt \
+ cmds-plumbingmanipulators.txt \
+ cmds-synchingrepositories.txt \
+ cmds-synchelpers.txt \
+ cmds-purehelpers.txt \
+ cmds-foreignscminterface.txt
+
+$(cmds_txt): cmd-list.made
+
+cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT)
+ $(QUIET_GEN)$(RM) $@ && \
+ $(PERL_PATH) ./cmd-list.perl ../command-list.txt $(QUIET_STDERR) && \
+ date >$@
+
+clean:
+ $(RM) *.xml *.xml+ *.html *.html+ *.1 *.5 *.7
+ $(RM) *.texi *.texi+ *.texi++ perf.info perfman.info
+ $(RM) howto-index.txt howto/*.html doc.dep
+ $(RM) technical/api-*.html technical/api-index.txt
+ $(RM) $(cmds_txt) *.made
+
+$(MAN_HTML): %.html : %.txt
+ $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
+ $(ASCIIDOC) -b xhtml11 -d manpage -f asciidoc.conf \
+ $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
+ mv $@+ $@
+
+%.1 %.5 %.7 : %.xml
+ $(QUIET_XMLTO)$(RM) $@ && \
+ xmlto -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
+
+%.xml : %.txt
+ $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
+ $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \
+ $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
+ mv $@+ $@
+
+XSLT = docbook.xsl
+XSLTOPTS = --xinclude --stringparam html.stylesheet docbook-xsl.css
+
+user-manual.html: user-manual.xml
+ $(QUIET_XSLTPROC)xsltproc $(XSLTOPTS) -o $@ $(XSLT) $<
+
+perf.info: user-manual.texi
+ $(QUIET_MAKEINFO)$(MAKEINFO) --no-split -o $@ user-manual.texi
+
+user-manual.texi: user-manual.xml
+ $(QUIET_DB2TEXI)$(RM) $@+ $@ && \
+ $(DOCBOOK2X_TEXI) user-manual.xml --encoding=UTF-8 --to-stdout >$@++ && \
+ $(PERL_PATH) fix-texi.perl <$@++ >$@+ && \
+ rm $@++ && \
+ mv $@+ $@
+
+user-manual.pdf: user-manual.xml
+ $(QUIET_DBLATEX)$(RM) $@+ $@ && \
+ $(DBLATEX) -o $@+ -p /etc/asciidoc/dblatex/asciidoc-dblatex.xsl -s /etc/asciidoc/dblatex/asciidoc-dblatex.sty $< && \
+ mv $@+ $@
+
+perfman.texi: $(MAN_XML) cat-texi.perl
+ $(QUIET_DB2TEXI)$(RM) $@+ $@ && \
+ ($(foreach xml,$(MAN_XML),$(DOCBOOK2X_TEXI) --encoding=UTF-8 \
+ --to-stdout $(xml) &&) true) > $@++ && \
+ $(PERL_PATH) cat-texi.perl $@ <$@++ >$@+ && \
+ rm $@++ && \
+ mv $@+ $@
+
+perfman.info: perfman.texi
+ $(QUIET_MAKEINFO)$(MAKEINFO) --no-split --no-validate $*.texi
+
+$(patsubst %.txt,%.texi,$(MAN_TXT)): %.texi : %.xml
+ $(QUIET_DB2TEXI)$(RM) $@+ $@ && \
+ $(DOCBOOK2X_TEXI) --to-stdout $*.xml >$@+ && \
+ mv $@+ $@
+
+howto-index.txt: howto-index.sh $(wildcard howto/*.txt)
+ $(QUIET_GEN)$(RM) $@+ $@ && \
+ '$(SHELL_PATH_SQ)' ./howto-index.sh $(wildcard howto/*.txt) >$@+ && \
+ mv $@+ $@
+
+$(patsubst %,%.html,$(ARTICLES)) : %.html : %.txt
+ $(QUIET_ASCIIDOC)$(ASCIIDOC) -b xhtml11 $*.txt
+
+WEBDOC_DEST = /pub/software/tools/perf/docs
+
+$(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt
+ $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
+ sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b xhtml11 - >$@+ && \
+ mv $@+ $@
+
+install-webdoc : html
+ '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(WEBDOC_DEST)
+
+quick-install: quick-install-man
+
+quick-install-man:
+ '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(DOC_REF) $(DESTDIR)$(mandir)
+
+quick-install-html:
+ '$(SHELL_PATH_SQ)' ./install-doc-quick.sh $(HTML_REF) $(DESTDIR)$(htmldir)
+
+.PHONY: .FORCE-PERF-VERSION-FILE
diff --git a/tools/perf/Documentation/asciidoc.conf b/tools/perf/Documentation/asciidoc.conf
new file mode 100644
index 00000000..356b23a4
--- /dev/null
+++ b/tools/perf/Documentation/asciidoc.conf
@@ -0,0 +1,91 @@
+## linkperf: macro
+#
+# Usage: linkperf:command[manpage-section]
+#
+# Note, {0} is the manpage section, while {target} is the command.
+#
+# Show PERF link as: <command>(<section>) if section is defined, else just
+# show the command.
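+#
+# For example, linkperf:perf-record[1] expands to a reference to perf-record(1).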
+
+[macros]
+(?su)[\\]?(?P<name>linkperf):(?P<target>\S*?)\[(?P<attrlist>.*?)\]=
+
+[attributes]
+asterisk=&#42;
+plus=&#43;
+caret=&#94;
+startsb=&#91;
+endsb=&#93;
+tilde=&#126;
+
+ifdef::backend-docbook[]
+[linkperf-inlinemacro]
+{0%{target}}
+{0#<citerefentry>}
+{0#<refentrytitle>{target}</refentrytitle><manvolnum>{0}</manvolnum>}
+{0#</citerefentry>}
+endif::backend-docbook[]
+
+ifdef::backend-docbook[]
+ifndef::perf-asciidoc-no-roff[]
+# "unbreak" docbook-xsl v1.68 for manpages. v1.69 works with or without this.
+# v1.72 breaks with this because it replaces dots not in roff requests.
+[listingblock]
+<example><title>{title}</title>
+<literallayout>
+ifdef::doctype-manpage[]
+&#10;.ft C&#10;
+endif::doctype-manpage[]
+|
+ifdef::doctype-manpage[]
+&#10;.ft&#10;
+endif::doctype-manpage[]
+</literallayout>
+{title#}</example>
+endif::perf-asciidoc-no-roff[]
+
+ifdef::perf-asciidoc-no-roff[]
+ifdef::doctype-manpage[]
+# The following two small workarounds insert a simple paragraph after screen
+[listingblock]
+<example><title>{title}</title>
+<literallayout>
+|
+</literallayout><simpara></simpara>
+{title#}</example>
+
+[verseblock]
+<formalpara{id? id="{id}"}><title>{title}</title><para>
+{title%}<literallayout{id? id="{id}"}>
+{title#}<literallayout>
+|
+</literallayout>
+{title#}</para></formalpara>
+{title%}<simpara></simpara>
+endif::doctype-manpage[]
+endif::perf-asciidoc-no-roff[]
+endif::backend-docbook[]
+
+ifdef::doctype-manpage[]
+ifdef::backend-docbook[]
+[header]
+template::[header-declarations]
+<refentry>
+<refmeta>
+<refentrytitle>{mantitle}</refentrytitle>
+<manvolnum>{manvolnum}</manvolnum>
+<refmiscinfo class="source">perf</refmiscinfo>
+<refmiscinfo class="version">{perf_version}</refmiscinfo>
+<refmiscinfo class="manual">perf Manual</refmiscinfo>
+</refmeta>
+<refnamediv>
+ <refname>{manname}</refname>
+ <refpurpose>{manpurpose}</refpurpose>
+</refnamediv>
+endif::backend-docbook[]
+endif::doctype-manpage[]
+
+ifdef::backend-xhtml11[]
+[linkperf-inlinemacro]
+<a href="{target}.html">{target}{0?({0})}</a>
+endif::backend-xhtml11[]
diff --git a/tools/perf/Documentation/examples.txt b/tools/perf/Documentation/examples.txt
new file mode 100644
index 00000000..8eb6c489
--- /dev/null
+++ b/tools/perf/Documentation/examples.txt
@@ -0,0 +1,225 @@
+
+ ------------------------------
+ ****** perf by examples ******
+ ------------------------------
+
+[ From an e-mail by Ingo Molnar, http://lkml.org/lkml/2009/8/4/346 ]
+
+
+First, discovery/enumeration of available counters can be done via
+'perf list':
+
+titan:~> perf list
+ [...]
+ kmem:kmalloc [Tracepoint event]
+ kmem:kmem_cache_alloc [Tracepoint event]
+ kmem:kmalloc_node [Tracepoint event]
+ kmem:kmem_cache_alloc_node [Tracepoint event]
+ kmem:kfree [Tracepoint event]
+ kmem:kmem_cache_free [Tracepoint event]
+ kmem:mm_page_free_direct [Tracepoint event]
+ kmem:mm_pagevec_free [Tracepoint event]
+ kmem:mm_page_alloc [Tracepoint event]
+ kmem:mm_page_alloc_zone_locked [Tracepoint event]
+ kmem:mm_page_pcpu_drain [Tracepoint event]
+ kmem:mm_page_alloc_extfrag [Tracepoint event]
+
+Then any (or all) of the above event sources can be activated and
+measured. For example the page alloc/free properties of a 'hackbench
+run' are:
+
+ titan:~> perf stat -e kmem:mm_page_pcpu_drain -e kmem:mm_page_alloc
+ -e kmem:mm_pagevec_free -e kmem:mm_page_free_direct ./hackbench 10
+ Time: 0.575
+
+ Performance counter stats for './hackbench 10':
+
+ 13857 kmem:mm_page_pcpu_drain
+ 27576 kmem:mm_page_alloc
+ 6025 kmem:mm_pagevec_free
+ 20934 kmem:mm_page_free_direct
+
+ 0.613972165 seconds time elapsed
+
+You can observe the statistical properties as well, by using the
+'repeat the workload N times' feature of perf stat:
+
+ titan:~> perf stat --repeat 5 -e kmem:mm_page_pcpu_drain -e
+ kmem:mm_page_alloc -e kmem:mm_pagevec_free -e
+ kmem:mm_page_free_direct ./hackbench 10
+ Time: 0.627
+ Time: 0.644
+ Time: 0.564
+ Time: 0.559
+ Time: 0.626
+
+ Performance counter stats for './hackbench 10' (5 runs):
+
+ 12920 kmem:mm_page_pcpu_drain ( +- 3.359% )
+ 25035 kmem:mm_page_alloc ( +- 3.783% )
+ 6104 kmem:mm_pagevec_free ( +- 0.934% )
+ 18376 kmem:mm_page_free_direct ( +- 4.941% )
+
+ 0.643954516 seconds time elapsed ( +- 2.363% )
+
+Furthermore, these tracepoints can be used to sample the workload as
+well. For example the page allocations done by a 'git gc' can be
+captured the following way:
+
+ titan:~/git> perf record -f -e kmem:mm_page_alloc -c 1 ./git gc
+ Counting objects: 1148, done.
+ Delta compression using up to 2 threads.
+ Compressing objects: 100% (450/450), done.
+ Writing objects: 100% (1148/1148), done.
+ Total 1148 (delta 690), reused 1148 (delta 690)
+ [ perf record: Captured and wrote 0.267 MB perf.data (~11679 samples) ]
+
+To check which functions generated page allocations:
+
+ titan:~/git> perf report
+ # Samples: 10646
+ #
+ # Overhead Command Shared Object
+ # ........ ............... ..........................
+ #
+ 23.57% git-repack /lib64/libc-2.5.so
+ 21.81% git /lib64/libc-2.5.so
+ 14.59% git ./git
+ 11.79% git-repack ./git
+ 7.12% git /lib64/ld-2.5.so
+ 3.16% git-repack /lib64/libpthread-2.5.so
+ 2.09% git-repack /bin/bash
+ 1.97% rm /lib64/libc-2.5.so
+ 1.39% mv /lib64/ld-2.5.so
+ 1.37% mv /lib64/libc-2.5.so
+ 1.12% git-repack /lib64/ld-2.5.so
+ 0.95% rm /lib64/ld-2.5.so
+ 0.90% git-update-serv /lib64/libc-2.5.so
+ 0.73% git-update-serv /lib64/ld-2.5.so
+ 0.68% perf /lib64/libpthread-2.5.so
+ 0.64% git-repack /usr/lib64/libz.so.1.2.3
+
+Or to see it at a more fine-grained level:
+
+titan:~/git> perf report --sort comm,dso,symbol
+# Samples: 10646
+#
+# Overhead Command Shared Object Symbol
+# ........ ............... .......................... ......
+#
+ 9.35% git-repack ./git [.] insert_obj_hash
+ 9.12% git ./git [.] insert_obj_hash
+ 7.31% git /lib64/libc-2.5.so [.] memcpy
+ 6.34% git-repack /lib64/libc-2.5.so [.] _int_malloc
+ 6.24% git-repack /lib64/libc-2.5.so [.] memcpy
+ 5.82% git-repack /lib64/libc-2.5.so [.] __GI___fork
+ 5.47% git /lib64/libc-2.5.so [.] _int_malloc
+ 2.99% git /lib64/libc-2.5.so [.] memset
+
+Furthermore, call-graph sampling of page allocations can be done as
+well - to see precisely what kind of page allocations there are:
+
+ titan:~/git> perf record -f -g -e kmem:mm_page_alloc -c 1 ./git gc
+ Counting objects: 1148, done.
+ Delta compression using up to 2 threads.
+ Compressing objects: 100% (450/450), done.
+ Writing objects: 100% (1148/1148), done.
+ Total 1148 (delta 690), reused 1148 (delta 690)
+ [ perf record: Captured and wrote 0.963 MB perf.data (~42069 samples) ]
+
+ titan:~/git> perf report -g
+ # Samples: 10686
+ #
+ # Overhead Command Shared Object
+ # ........ ............... ..........................
+ #
+ 23.25% git-repack /lib64/libc-2.5.so
+ |
+ |--50.00%-- _int_free
+ |
+ |--37.50%-- __GI___fork
+ | make_child
+ |
+ |--12.50%-- ptmalloc_unlock_all2
+ | make_child
+ |
+ --6.25%-- __GI_strcpy
+ 21.61% git /lib64/libc-2.5.so
+ |
+ |--30.00%-- __GI_read
+ | |
+ | --83.33%-- git_config_from_file
+ | git_config
+ | |
+ [...]
+
+Or you can observe the whole system's page allocations for 10
+seconds:
+
+titan:~/git> perf stat -a -e kmem:mm_page_pcpu_drain -e
+kmem:mm_page_alloc -e kmem:mm_pagevec_free -e
+kmem:mm_page_free_direct sleep 10
+
+ Performance counter stats for 'sleep 10':
+
+ 171585 kmem:mm_page_pcpu_drain
+ 322114 kmem:mm_page_alloc
+ 73623 kmem:mm_pagevec_free
+ 254115 kmem:mm_page_free_direct
+
+ 10.000591410 seconds time elapsed
+
+Or observe how much the page allocations fluctuate, via statistical
+analysis done over ten 1-second intervals:
+
+ titan:~/git> perf stat --repeat 10 -a -e kmem:mm_page_pcpu_drain -e
+ kmem:mm_page_alloc -e kmem:mm_pagevec_free -e
+ kmem:mm_page_free_direct sleep 1
+
+ Performance counter stats for 'sleep 1' (10 runs):
+
+ 17254 kmem:mm_page_pcpu_drain ( +- 3.709% )
+ 34394 kmem:mm_page_alloc ( +- 4.617% )
+ 7509 kmem:mm_pagevec_free ( +- 4.820% )
+ 25653 kmem:mm_page_free_direct ( +- 3.672% )
+
+ 1.058135029 seconds time elapsed ( +- 3.089% )
+
+Or you can annotate the recorded 'git gc' run on a per symbol basis
+and check which instructions/source-code generated page allocations:
+
+ titan:~/git> perf annotate __GI___fork
+ ------------------------------------------------
+ Percent | Source code & Disassembly of libc-2.5.so
+ ------------------------------------------------
+ :
+ :
+ : Disassembly of section .plt:
+ : Disassembly of section .text:
+ :
+ : 00000031a2e95560 <__fork>:
+ [...]
+ 0.00 : 31a2e95602: b8 38 00 00 00 mov $0x38,%eax
+ 0.00 : 31a2e95607: 0f 05 syscall
+ 83.42 : 31a2e95609: 48 3d 00 f0 ff ff cmp $0xfffffffffffff000,%rax
+ 0.00 : 31a2e9560f: 0f 87 4d 01 00 00 ja 31a2e95762 <__fork+0x202>
+ 0.00 : 31a2e95615: 85 c0 test %eax,%eax
+
+( this shows that 83.42% of __GI___fork's page allocations come from
+ the 0x38 system call it performs. )
+
+etc. etc. - a lot more is possible. I could list a dozen other
+use-cases straight away - none of which is possible via
+/proc/vmstat.
+
+/proc/vmstat is not in the same league really, in terms of
+expressive power of system analysis and performance
+analysis.
+
+All that the above results needed were those new tracepoints
+in include/trace/events/kmem.h.
+
+ Ingo
+
+
diff --git a/tools/perf/Documentation/manpage-1.72.xsl b/tools/perf/Documentation/manpage-1.72.xsl
new file mode 100644
index 00000000..b4d315cb
--- /dev/null
+++ b/tools/perf/Documentation/manpage-1.72.xsl
@@ -0,0 +1,14 @@
+<!-- manpage-1.72.xsl:
+ special settings for manpages rendered from asciidoc+docbook
+ handles peculiarities in docbook-xsl 1.72.0 -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<xsl:import href="manpage-base.xsl"/>
+
+<!-- these are the special values for the roff control characters
+ needed for docbook-xsl 1.72.0 -->
+<xsl:param name="git.docbook.backslash">&#x2593;</xsl:param>
+<xsl:param name="git.docbook.dot" >&#x2302;</xsl:param>
+
+</xsl:stylesheet>
diff --git a/tools/perf/Documentation/manpage-base.xsl b/tools/perf/Documentation/manpage-base.xsl
new file mode 100644
index 00000000..a264fa61
--- /dev/null
+++ b/tools/perf/Documentation/manpage-base.xsl
@@ -0,0 +1,35 @@
+<!-- manpage-base.xsl:
+ special formatting for manpages rendered from asciidoc+docbook -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<!-- these params silence some output from xmlto -->
+<xsl:param name="man.output.quietly" select="1"/>
+<xsl:param name="refentry.meta.get.quietly" select="1"/>
+
+<!-- convert asciidoc callouts to man page format;
+ git.docbook.backslash and git.docbook.dot params
+ must be supplied by another XSL file or other means -->
+<xsl:template match="co">
+ <xsl:value-of select="concat(
+ $git.docbook.backslash,'fB(',
+ substring-after(@id,'-'),')',
+ $git.docbook.backslash,'fR')"/>
+</xsl:template>
+<xsl:template match="calloutlist">
+ <xsl:value-of select="$git.docbook.dot"/>
+ <xsl:text>sp&#10;</xsl:text>
+ <xsl:apply-templates/>
+ <xsl:text>&#10;</xsl:text>
+</xsl:template>
+<xsl:template match="callout">
+ <xsl:value-of select="concat(
+ $git.docbook.backslash,'fB',
+ substring-after(@arearefs,'-'),
+ '. ',$git.docbook.backslash,'fR')"/>
+ <xsl:apply-templates/>
+ <xsl:value-of select="$git.docbook.dot"/>
+ <xsl:text>br&#10;</xsl:text>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/perf/Documentation/manpage-bold-literal.xsl b/tools/perf/Documentation/manpage-bold-literal.xsl
new file mode 100644
index 00000000..608eb5df
--- /dev/null
+++ b/tools/perf/Documentation/manpage-bold-literal.xsl
@@ -0,0 +1,17 @@
+<!-- manpage-bold-literal.xsl:
+ special formatting for manpages rendered from asciidoc+docbook -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<!-- render literal text as bold (instead of plain or monospace);
+ this makes literal text easier to distinguish in manpages
+ viewed on a tty -->
+<xsl:template match="literal">
+ <xsl:value-of select="$git.docbook.backslash"/>
+ <xsl:text>fB</xsl:text>
+ <xsl:apply-templates/>
+ <xsl:value-of select="$git.docbook.backslash"/>
+ <xsl:text>fR</xsl:text>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/perf/Documentation/manpage-normal.xsl b/tools/perf/Documentation/manpage-normal.xsl
new file mode 100644
index 00000000..a48f5b11
--- /dev/null
+++ b/tools/perf/Documentation/manpage-normal.xsl
@@ -0,0 +1,13 @@
+<!-- manpage-normal.xsl:
+ special settings for manpages rendered from asciidoc+docbook
+ handles anything we want to keep away from docbook-xsl 1.72.0 -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<xsl:import href="manpage-base.xsl"/>
+
+<!-- these are the normal values for the roff control characters -->
+<xsl:param name="git.docbook.backslash">\</xsl:param>
+<xsl:param name="git.docbook.dot" >.</xsl:param>
+
+</xsl:stylesheet>
diff --git a/tools/perf/Documentation/manpage-suppress-sp.xsl b/tools/perf/Documentation/manpage-suppress-sp.xsl
new file mode 100644
index 00000000..a63c7632
--- /dev/null
+++ b/tools/perf/Documentation/manpage-suppress-sp.xsl
@@ -0,0 +1,21 @@
+<!-- manpage-suppress-sp.xsl:
+ special settings for manpages rendered from asciidoc+docbook
+ handles erroneous, inline .sp in manpage output of some
+ versions of docbook-xsl -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<!-- attempt to work around spurious .sp at the tail of the line
+ that some versions of docbook stylesheets seem to add -->
+<xsl:template match="simpara">
+ <xsl:variable name="content">
+ <xsl:apply-templates/>
+ </xsl:variable>
+ <xsl:value-of select="normalize-space($content)"/>
+ <xsl:if test="not(ancestor::authorblurb) and
+ not(ancestor::personblurb)">
+ <xsl:text>&#10;&#10;</xsl:text>
+ </xsl:if>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
new file mode 100644
index 00000000..5164a655
--- /dev/null
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -0,0 +1,29 @@
+perf-annotate(1)
+================
+
+NAME
+----
+perf-annotate - Read perf.data (created by perf record) and display annotated code
+
+SYNOPSIS
+--------
+[verse]
+'perf annotate' [-i <file> | --input=file] symbol_name
+
+DESCRIPTION
+-----------
+This command reads the input file and displays an annotated version of the
+code. If the object file has debug symbols then the source code will be
+displayed alongside assembly code.
+
+If there is no debug info in the object, then annotated assembly is displayed.
+
+OPTIONS
+-------
+-i::
+--input=::
+ Input file name. (default: perf.data)
+
+SEE ALSO
+--------
+linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-archive.txt b/tools/perf/Documentation/perf-archive.txt
new file mode 100644
index 00000000..fae174dc
--- /dev/null
+++ b/tools/perf/Documentation/perf-archive.txt
@@ -0,0 +1,22 @@
+perf-archive(1)
+===============
+
+NAME
+----
+perf-archive - Create archive with object files with build-ids found in perf.data file
+
+SYNOPSIS
+--------
+[verse]
+'perf archive' [file]
+
+DESCRIPTION
+-----------
+This command runs perf-buildid-list --with-hits, and collects the files
+with the build-ids found, so that analysis of the perf.data contents is
+possible on another machine.
+
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-buildid-list[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
new file mode 100644
index 00000000..a3dbadb2
--- /dev/null
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -0,0 +1,120 @@
+perf-bench(1)
+=============
+
+NAME
+----
+perf-bench - General framework for benchmark suites
+
+SYNOPSIS
+--------
+[verse]
+'perf bench' [<common options>] <subsystem> <suite> [<options>]
+
+DESCRIPTION
+-----------
+This 'perf bench' command is a general framework for benchmark suites.
+
+COMMON OPTIONS
+--------------
+-f::
+--format=::
+Specify format style.
+Current available format styles are:
+
+'default'::
+Default style. This is mainly for human reading.
+---------------------
+% perf bench sched pipe # with no style specified
+(executing 1000000 pipe operations between two tasks)
+ Total time:5.855 sec
+ 5.855061 usecs/op
+ 170792 ops/sec
+---------------------
+
+'simple'::
+This simple style is friendly for automated
+processing by scripts.
+---------------------
+% perf bench --format=simple sched pipe # specified simple
+5.988
+---------------------
+
+SUBSYSTEM
+---------
+
+'sched'::
+ Scheduler and IPC mechanisms.
+
+SUITES FOR 'sched'
+~~~~~~~~~~~~~~~~~~
+*messaging*::
+Suite for evaluating performance of scheduler and IPC mechanisms.
+Based on hackbench by Rusty Russell.
+
+Options of *messaging*
+^^^^^^^^^^^^^^^^^^^^^^
+-p::
+--pipe::
+Use pipe() instead of socketpair()
+
+-t::
+--thread::
+Be multi-threaded instead of multi-process
+
+-g::
+--group=::
+Specify number of groups
+
+-l::
+--loop=::
+Specify number of loops
+
+Example of *messaging*
+^^^^^^^^^^^^^^^^^^^^^^
+
+---------------------
+% perf bench sched messaging # run with default
+options (20 sender and receiver processes per group)
+(10 groups == 400 processes run)
+
+ Total time:0.308 sec
+
+% perf bench sched messaging -t -g 20 # be multi-thread, with 20 groups
+(20 sender and receiver threads per group)
+(20 groups == 800 threads run)
+
+ Total time:0.582 sec
+---------------------
+
+*pipe*::
+Suite for pipe() system call.
+Based on pipe-test-1m.c by Ingo Molnar.
+
+Options of *pipe*
+^^^^^^^^^^^^^^^^^
+-l::
+--loop=::
+Specify number of loops.
+
+Example of *pipe*
+^^^^^^^^^^^^^^^^^
+
+---------------------
+% perf bench sched pipe
+(executing 1000000 pipe operations between two tasks)
+
+ Total time:8.091 sec
+ 8.091833 usecs/op
+ 123581 ops/sec
+
+% perf bench sched pipe -l 1000 # loop 1000
+(executing 1000 pipe operations between two tasks)
+
+ Total time:0.016 sec
+ 16.948000 usecs/op
+ 59004 ops/sec
+---------------------
+
+SEE ALSO
+--------
+linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
new file mode 100644
index 00000000..c1057701
--- /dev/null
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -0,0 +1,33 @@
+perf-buildid-cache(1)
+=====================
+
+NAME
+----
+perf-buildid-cache - Manage build-id cache.
+
+SYNOPSIS
+--------
+[verse]
+'perf buildid-cache <options>'
+
+DESCRIPTION
+-----------
+This command manages the build-id cache. It can add and remove files to/from
+the cache. In the future it should as well purge older entries, set upper
+limits for the space used by the cache, etc.
+
+OPTIONS
+-------
+-a::
+--add=::
+ Add specified file to the cache.
+-r::
+--remove=::
+ Remove specified file from the cache.
+-v::
+--verbose::
+ Be more verbose.
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-buildid-list[1]
diff --git a/tools/perf/Documentation/perf-buildid-list.txt b/tools/perf/Documentation/perf-buildid-list.txt
new file mode 100644
index 00000000..01b642c0
--- /dev/null
+++ b/tools/perf/Documentation/perf-buildid-list.txt
@@ -0,0 +1,34 @@
+perf-buildid-list(1)
+====================
+
+NAME
+----
+perf-buildid-list - List the buildids in a perf.data file
+
+SYNOPSIS
+--------
+[verse]
+'perf buildid-list <options>'
+
+DESCRIPTION
+-----------
+This command displays the buildids found in a perf.data file, so that other
+tools can be used to fetch packages with matching symbol tables for use by
+perf report.
+
+OPTIONS
+-------
+-i::
+--input=::
+ Input file name. (default: perf.data)
+-f::
+--force::
+ Don't do ownership validation.
+-v::
+--verbose::
+ Be more verbose.
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-top[1],
+linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
new file mode 100644
index 00000000..20d97d84
--- /dev/null
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -0,0 +1,55 @@
+perf-diff(1)
+============
+
+NAME
+----
+perf-diff - Read two perf.data files and display the differential profile
+
+SYNOPSIS
+--------
+[verse]
+'perf diff' [oldfile] [newfile]
+
+DESCRIPTION
+-----------
+This command displays the performance difference between two perf.data files
+captured via perf record.
+
+If no parameters are passed it will assume perf.data.old and perf.data.
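+
+For example (the workload name is illustrative):
+
+  perf record -o perf.data.old ./myworkload    # baseline run
+  perf record ./myworkload                     # new run, writes perf.data
+  perf diff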
+
+OPTIONS
+-------
+-d::
+--dsos=::
+ Only consider symbols in these dsos. CSV that understands
+ file://filename entries.
+
+-C::
+--comms=::
+ Only consider symbols in these comms. CSV that understands
+ file://filename entries.
+
+-S::
+--symbols=::
+ Only consider these symbols. CSV that understands
+ file://filename entries.
+
+-s::
+--sort=::
+ Sort by key(s): pid, comm, dso, symbol.
+
+-t::
+--field-separator=::
+
+ Use a special separator character and don't pad with spaces, replacing
+ all occurrences of this separator in symbol names (and other output)
+ with a '.' character, so that the separator never appears inside a field.
+
+-v::
+--verbose::
+ Be verbose, for instance, show the raw counts in addition to the
+ diff.
+
+SEE ALSO
+--------
+linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-help.txt b/tools/perf/Documentation/perf-help.txt
new file mode 100644
index 00000000..51439181
--- /dev/null
+++ b/tools/perf/Documentation/perf-help.txt
@@ -0,0 +1,38 @@
+perf-help(1)
+============
+
+NAME
+----
+perf-help - display help information about perf
+
+SYNOPSIS
+--------
+'perf help' [-a|--all] [COMMAND]
+
+DESCRIPTION
+-----------
+
+With no options and no COMMAND given, the synopsis of the 'perf'
+command and a list of the most commonly used perf commands are printed
+on the standard output.
+
+If the option '--all' or '-a' is given, then all available commands are
+printed on the standard output.
+
+If a perf command is named, a manual page for that command is brought
+up. The 'man' program is used by default for this purpose, but this
+can be overridden by other options or configuration variables.
+
+Note that `perf --help ...` is identical to `perf help ...` because the
+former is internally converted into the latter.
+
+OPTIONS
+-------
+-a::
+--all::
+ Prints all the available commands on the standard output. This
+ option supersedes any other option.
+
+PERF
+----
+Part of the linkperf:perf[1] suite
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
new file mode 100644
index 00000000..025630d4
--- /dev/null
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -0,0 +1,35 @@
+perf-inject(1)
+==============
+
+NAME
+----
+perf-inject - Filter to augment the events stream with additional information
+
+SYNOPSIS
+--------
+[verse]
+'perf inject <options>'
+
+DESCRIPTION
+-----------
+perf-inject reads a perf-record event stream and repipes it to stdout. At any
+point the processing code can inject other events into the event stream - in
+this case build-ids (-b option) are read and injected as needed into the event
+stream.
+
+Build-ids are just the first user of perf-inject - potentially anything that
+needs userspace processing to augment the events stream with additional
+information could make use of this facility.
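+
+For example, one plausible pipeline (illustrative; it assumes 'perf record'
+can write to stdout via '-o -' and 'perf report' can read stdin via '-i -'):
+
+  perf record -o - ./mybench | perf inject -b | perf report -i -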
+
+OPTIONS
+-------
+-b::
+--build-ids=::
+ Inject build-ids into the output stream
+-v::
+--verbose::
+ Be more verbose.
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
new file mode 100644
index 00000000..a52fcde8
--- /dev/null
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -0,0 +1,47 @@
+perf-kmem(1)
+============
+
+NAME
+----
+perf-kmem - Tool to trace/measure kernel memory(slab) properties
+
+SYNOPSIS
+--------
+[verse]
+'perf kmem' {record|stat} [<options>]
+
+DESCRIPTION
+-----------
+There are two variants of perf kmem:
+
+ 'perf kmem record <command>' to record the kmem events
+ of an arbitrary workload.
+
+ 'perf kmem stat' to report kernel memory statistics.
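+
+ For example (an illustrative session), record an arbitrary workload and
+ then show the top 20 call sites:
+
+   perf kmem record make
+   perf kmem stat --caller -l 20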
+
+OPTIONS
+-------
+-i <file>::
+--input=<file>::
+ Select the input file (default: perf.data)
+
+--caller::
+ Show per-callsite statistics
+
+--alloc::
+ Show per-allocation statistics
+
+-s <key[,key2...]>::
+--sort=<key[,key2...]>::
+ Sort the output (default: frag,hit,bytes)
+
+-l <num>::
+--line=<num>::
+ Print n lines only
+
+--raw-ip::
+ Print raw ip instead of symbol
+
+SEE ALSO
+--------
+linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
new file mode 100644
index 00000000..d004e19f
--- /dev/null
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -0,0 +1,68 @@
+perf-kvm(1)
+===========
+
+NAME
+----
+perf-kvm - Tool to trace/measure kvm guest os
+
+SYNOPSIS
+--------
+[verse]
+'perf kvm' [--host] [--guest] [--guestmount=<path>
+ [--guestkallsyms=<path> --guestmodules=<path> | --guestvmlinux=<path>]]
+ {top|record|report|diff|buildid-list}
+'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path>
+ | --guestvmlinux=<path>] {top|record|report|diff|buildid-list}
+
+DESCRIPTION
+-----------
+There are a couple of variants of perf kvm:
+
+ 'perf kvm [options] top <command>' to generate and display
+ a performance counter profile of the guest OS in realtime,
+ for an arbitrary workload.
+
+ 'perf kvm record <command>' to record the performance counter profile
+ of an arbitrary workload and save it into a perf data file. If both
+ --host and --guest are given, the perf data file name is perf.data.kvm.
+ If only --guest is given, the file name is perf.data.guest.
+ If only --host is given, the file name is perf.data.host.
+
+ 'perf kvm report' to display the performance counter profile information
+ recorded via perf kvm record.
+
+ 'perf kvm diff' to display the performance difference between two perf.data
+ files captured via perf record.
+
+ 'perf kvm buildid-list' to display the buildids found in a perf data file,
+ so that other tools can be used to fetch packages with matching symbol tables
+ for use by perf report.
+
+OPTIONS
+-------
+--host=::
+ Collect host side performance profile.
+--guest=::
+ Collect guest side performance profile.
+--guestmount=<path>::
+ Guest OS root file system mount directory. The user mounts the guest OS
+ root directories under <path> using a filesystem access method,
+ typically sshfs. For example, with two guests running, one with pid 8888
+ and the other with pid 9999:
+ #mkdir ~/guestmount; cd ~/guestmount
+ #sshfs -o allow_other,direct_io -p 5551 localhost:/ 8888/
+ #sshfs -o allow_other,direct_io -p 5552 localhost:/ 9999/
+ #perf kvm --host --guest --guestmount=~/guestmount top
+--guestkallsyms=<path>::
+ Guest OS /proc/kallsyms file copy. 'perf kvm' reads it to get guest
+ kernel symbols. Copy it out from the guest OS.
+--guestmodules=<path>::
+ Guest OS /proc/modules file copy. 'perf kvm' reads it to get guest
+ kernel module information. Copy it out from the guest OS.
+--guestvmlinux=<path>::
+ Guest OS kernel vmlinux.
+
+SEE ALSO
+--------
+linkperf:perf-top[1], linkperf:perf-record[1], linkperf:perf-report[1],
+linkperf:perf-diff[1], linkperf:perf-buildid-list[1]
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
new file mode 100644
index 00000000..43e3dd28
--- /dev/null
+++ b/tools/perf/Documentation/perf-list.txt
@@ -0,0 +1,56 @@
+perf-list(1)
+============
+
+NAME
+----
+perf-list - List all symbolic event types
+
+SYNOPSIS
+--------
+[verse]
+'perf list'
+
+DESCRIPTION
+-----------
+This command displays the symbolic event types which can be selected in the
+various perf commands with the -e option.
+
+RAW HARDWARE EVENT DESCRIPTOR
+-----------------------------
+Even when an event is not available in a symbolic form within perf right now,
+it can be encoded in a processor-specific way.
+
+For instance, on x86 CPUs, NNN represents the raw register encoding with the
+layout of IA32_PERFEVTSELx MSRs (see [Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide] Figure 30-1 Layout
+of IA32_PERFEVTSELx MSRs) or AMD's PerfEvtSeln (see [AMD64 Architecture Programmer’s Manual Volume 2: System Programming], Page 344,
+Figure 13-7 Performance Event-Select Register (PerfEvtSeln)).
+
+Example:
+
+If the Intel docs for a QM720 Core i7 describe an event as:
+
+ Event Umask Event Mask
+ Num. Value Mnemonic Description Comment
+
+ A8H 01H LSD.UOPS Counts the number of micro-ops Use cmask=1 and
+ delivered by loop stream detector invert to count
+ cycles
+
+raw encoding of 0x1A8 can be used:
+
+ perf stat -e r1a8 -a sleep 1
+ perf record -e r1a8 ...
+
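+(Assuming the IA32_PERFEVTSELx layout referenced above, 0x1a8 is simply the
+umask value 0x01 placed in bits 8-15, combined with the event number 0xa8 in
+bits 0-7: (0x01 << 8) | 0xa8 = 0x1a8.)
+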
+You should refer to the processor-specific documentation for these
+details. Some of it is referenced in the SEE ALSO section below.
+
+OPTIONS
+-------
+None
+
+SEE ALSO
+--------
+linkperf:perf-stat[1], linkperf:perf-top[1],
+linkperf:perf-record[1],
+http://www.intel.com/Assets/PDF/manual/253669.pdf[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide],
+http://support.amd.com/us/Processor_TechDocs/24593.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming]
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
new file mode 100644
index 00000000..b3171021
--- /dev/null
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -0,0 +1,29 @@
+perf-lock(1)
+============
+
+NAME
+----
+perf-lock - Analyze lock events
+
+SYNOPSIS
+--------
+[verse]
+'perf lock' {record|report|trace}
+
+DESCRIPTION
+-----------
+You can analyze various lock behaviours
+and statistics with this 'perf lock' command.
+
+ 'perf lock record <command>' records lock events
+ while <command> runs, and produces a file "perf.data"
+ which contains the traced lock events.
+
+ 'perf lock trace' shows raw lock events.
+
+ 'perf lock report' reports statistical data.
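+
+ For instance (illustrative):
+
+   perf lock record find /
+   perf lock report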
+
+SEE ALSO
+--------
+linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
new file mode 100644
index 00000000..27d52dae
--- /dev/null
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -0,0 +1,150 @@
+perf-probe(1)
+=============
+
+NAME
+----
+perf-probe - Define new dynamic tracepoints
+
+SYNOPSIS
+--------
+[verse]
+'perf probe' [options] --add='PROBE' [...]
+or
+'perf probe' [options] PROBE
+or
+'perf probe' [options] --del='[GROUP:]EVENT' [...]
+or
+'perf probe' --list
+or
+'perf probe' --line='FUNC[:RLN[+NUM|:RLN2]]|SRC:ALN[+NUM|:ALN2]'
+
+DESCRIPTION
+-----------
+This command defines dynamic tracepoint events, by symbol and registers
+without debuginfo, or by C expressions (C line numbers, C function names,
+and C local variables) with debuginfo.
+
+
+OPTIONS
+-------
+-k::
+--vmlinux=PATH::
+ Specify vmlinux path which has debuginfo (Dwarf binary).
+
+-s::
+--source=PATH::
+ Specify path to kernel source.
+
+-v::
+--verbose::
+ Be more verbose (show parsed arguments, etc).
+
+-a::
+--add=::
+ Define a probe event (see PROBE SYNTAX for detail).
+
+-d::
+--del=::
+ Delete probe events. This accepts glob wildcards('*', '?') and character
+ classes(e.g. [a-z], [!A-Z]).
+
+-l::
+--list::
+ List current probe events.
+
+-L::
+--line=::
+ Show source code lines which can be probed. This needs an argument
+ which specifies a range of the source code. (see LINE SYNTAX for detail)
+
+-f::
+--force::
+ Forcibly add events with existing name.
+
+-n::
+--dry-run::
+ Dry run. With this option, --add and --del don't execute the actual
+ add and delete operations.
+
+--max-probes::
+ Set the maximum number of probe points for an event. Default is 128.
+
+PROBE SYNTAX
+------------
+Probe points are defined by the following syntax.
+
+ 1) Define event based on function name
+ [EVENT=]FUNC[@SRC][:RLN|+OFFS|%return|;PTN] [ARG ...]
+
+ 2) Define event based on source file with line number
+ [EVENT=]SRC:ALN [ARG ...]
+
+ 3) Define event based on source file with lazy pattern
+ [EVENT=]SRC;PTN [ARG ...]
+
+
+'EVENT' specifies the name of the new event; if omitted, it will be set to the name of the probed function. Currently, the event group name is set to 'probe'.
+'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, ':RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. And ';PTN' means lazy matching pattern (see LAZY MATCHING). Note that ';PTN' must be the end of the probe point definition. In addition, '@SRC' specifies a source file which has that function.
+It is also possible to specify a probe point by the source line number or lazy matching by using 'SRC:ALN' or 'SRC;PTN' syntax, where 'SRC' is the source file path, ':ALN' is the line number and ';PTN' is the lazy matching pattern.
+'ARG' specifies the arguments of this probe point, (see PROBE ARGUMENT).
+
+PROBE ARGUMENT
+--------------
+Each probe argument follows below syntax.
+
+ [NAME=]LOCALVAR|$retval|%REG|@SYMBOL[:TYPE]
+
+'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
+'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically sets the type based on debuginfo. You can specify the 'string' type only for a local variable or structure member which is an array of, or a pointer to, 'char' or 'unsigned char'.
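+
+For example, to record a 'char *' local variable as a string rather than as a
+raw pointer value (the function, line number and variable name here are purely
+illustrative):
+
+ ./perf probe 'my_func:5 name:string'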
+
+LINE SYNTAX
+-----------
+A line range is described by the following syntax.
+
+ "FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]"
+
+FUNC specifies the function whose lines are shown. 'RLN' is the start line
+number from the function entry line, and 'RLN2' is the end line number. As with
+the probe syntax, 'SRC' means the source file path, 'ALN' is the start line
+number, and 'ALN2' is the end line number in the file. It is also possible to
+specify how many lines to show by using 'NUM'.
+So, "source.c:100-120" shows lines 100 to 120 of source.c, and "func:10+20" shows 20 lines starting from the 10th line of func().
+
+LAZY MATCHING
+-------------
+ The lazy line matching is similar to glob matching, but it ignores spaces in both the pattern and the target. So this accepts wildcards ('*', '?') and character classes (e.g. [a-z], [!A-Z]).
+
+e.g.
+ 'a=*' can match 'a=b', 'a = b', 'a == b' and so on.
+
+This provides some flexibility and robustness to probe point definitions against minor code changes. For example, the actual 10th line of schedule() can easily move when schedule() is modified, but a line matching 'rq=cpu_rq*' may still exist in the function.
+
+
+EXAMPLES
+--------
+Display which lines in schedule() can be probed:
+
+ ./perf probe --line schedule
+
+Add a probe at the 12th line of the schedule() function, recording the 'cpu' local variable:
+
+ ./perf probe schedule:12 cpu
+ or
+ ./perf probe --add='schedule:12 cpu'
+
+ This will add one or more probes whose names start with "schedule".
+
+ Add probes on lines in the schedule() function which call update_rq_clock():
+
+ ./perf probe 'schedule;update_rq_clock*'
+ or
+ ./perf probe --add='schedule;update_rq_clock*'
+
+Delete all probes on schedule().
+
+ ./perf probe --del='schedule*'
+
+
+SEE ALSO
+--------
+linkperf:perf-trace[1], linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
new file mode 100644
index 00000000..3ee27dcc
--- /dev/null
+++ b/tools/perf/Documentation/perf-record.txt
@@ -0,0 +1,121 @@
+perf-record(1)
+==============
+
+NAME
+----
+perf-record - Run a command and record its profile into perf.data
+
+SYNOPSIS
+--------
+[verse]
+'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] <command>
+'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>]
+
+DESCRIPTION
+-----------
+This command runs a command and gathers a performance counter profile
+from it, into perf.data - without displaying anything.
+
+This file can then be inspected later on, using 'perf report'.
+
+
+OPTIONS
+-------
+<command>...::
+ Any command you can specify in a shell.
+
+-e::
+--event=::
+ Select the PMU event. Selection can be:
+
+ - a symbolic event name (use 'perf list' to list all events)
+
+ - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
+ hexadecimal event descriptor.
+
+ - a hardware breakpoint event in the form of '\mem:addr[:access]'
+ where addr is the address in memory you want to break in.
+ Access is the memory access type (read, write, execute) it can
+ be passed as follows: '\mem:addr[:[r][w][x]]'.
+ If you want to profile read-write accesses in 0x1000, just set
+ 'mem:0x1000:rw'.
+-a::
+ System-wide collection.
+
+-l::
+ Scale counter values.
+
+-p::
+--pid=::
+ Record events on existing pid.
+
+-r::
+--realtime=::
+ Collect data with this RT SCHED_FIFO priority.
+-A::
+--append::
+ Append to the output file to do incremental profiling.
+
+-f::
+--force::
+ Overwrite existing data file. (deprecated)
+
+-c::
+--count=::
+ Event period to sample.
+
+-o::
+--output=::
+ Output file name.
+
+-i::
+--no-inherit::
+ Child tasks do not inherit counters.
+-F::
+--freq=::
+ Profile at this frequency.
+
+-m::
+--mmap-pages=::
+ Number of mmap data pages.
+
+-g::
+--call-graph::
+ Do call-graph (stack chain/backtrace) recording.
+
+-v::
+--verbose::
+ Be more verbose (show counter open errors, etc).
+
+-s::
+--stat::
+ Per thread counts.
+
+-d::
+--data::
+ Sample addresses.
+
+-n::
+--no-samples::
+ Don't sample.
+
+-R::
+--raw-samples::
+Collect raw sample records from all opened counters (default for tracepoint counters).
+
+-C::
+--cpu::
+Collect samples only on the list of cpus provided. Multiple CPUs can be provided as a
+comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2.
+In per-thread mode with inheritance mode on (default), samples are captured only when
+the thread executes on the designated CPUs. Default is to monitor all CPUs.
+
+-N::
+--no-buildid-cache::
+Do not update the build-id cache. This saves some overhead in situations
+where the information in the perf.data file (which includes buildids)
+is sufficient.
+
+SEE ALSO
+--------
+linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
new file mode 100644
index 00000000..abfabe91
--- /dev/null
+++ b/tools/perf/Documentation/perf-report.txt
@@ -0,0 +1,70 @@
+perf-report(1)
+==============
+
+NAME
+----
+perf-report - Read perf.data (created by perf record) and display the profile
+
+SYNOPSIS
+--------
+[verse]
+'perf report' [-i <file> | --input=file]
+
+DESCRIPTION
+-----------
+This command displays the performance counter profile information recorded
+via perf record.
+
+OPTIONS
+-------
+-i::
+--input=::
+ Input file name. (default: perf.data)
+-d::
+--dsos=::
+ Only consider symbols in these dsos. CSV that understands
+ file://filename entries.
+-n::
+--show-nr-samples::
+ Show the number of samples for each symbol
+-T::
+--threads::
+ Show per-thread event counters
+-C::
+--comms=::
+ Only consider symbols in these comms. CSV that understands
+ file://filename entries.
+-S::
+--symbols=::
+ Only consider these symbols. CSV that understands
+ file://filename entries.
+
+-s::
+--sort=::
+ Sort by key(s): pid, comm, dso, symbol, parent.
+
+-w::
+--field-width=::
+ Force each column width to the provided list, for large terminal
+ readability.
+
+-t::
+--field-separator=::
+
+ Use a special separator character and don't pad with spaces, replacing
+ all occurrences of this separator in symbol names (and other output)
+ with a '.' character, so that the separator never appears inside a field.
+
+-g [type,min]::
+--call-graph::
+ Display callchains using type and min percent threshold.
+ type can be either:
+ - flat: single column, linear exposure of callchains.
+ - graph: use a graph tree, displaying absolute overhead rates.
+ - fractal: like graph, but displays relative rates. Each branch of
+ the tree is considered as a new profiled object. +
+ Default: fractal,0.5.
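+
+	For example, 'perf report -g graph,2' displays call chains with
+	absolute overhead rates, omitting branches that contribute less
+	than 2%.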
+
+SEE ALSO
+--------
+linkperf:perf-stat[1]
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
new file mode 100644
index 00000000..8417644a
--- /dev/null
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -0,0 +1,41 @@
+perf-sched(1)
+==============
+
+NAME
+----
+perf-sched - Tool to trace/measure scheduler properties (latencies)
+
+SYNOPSIS
+--------
+[verse]
+'perf sched' {record|latency|replay|trace}
+
+DESCRIPTION
+-----------
+There are four variants of perf sched:
+
+ 'perf sched record <command>' to record the scheduling events
+ of an arbitrary workload.
+
+ 'perf sched latency' to report the per task scheduling latencies
+ and other scheduling properties of the workload.
+
+ 'perf sched trace' to see a detailed trace of the workload that
+ was recorded.
+
+ 'perf sched replay' to simulate the workload that was recorded
+ via perf sched record. (this is done by starting up mockup threads
+ that mimic the workload based on the events in the trace. These
+ threads can then replay the timings (CPU runtime and sleep patterns)
+ of the workload as it occurred when it was recorded - and can repeat
+ it a number of times, measuring its performance.)
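+
+ For example (illustrative):
+
+   perf sched record sleep 5
+   perf sched latency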
+
+OPTIONS
+-------
+-D::
+--dump-raw-trace=::
+ Display verbose dump of the sched data.
+
+SEE ALSO
+--------
+linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
new file mode 100644
index 00000000..4b3a2d46
--- /dev/null
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -0,0 +1,76 @@
+perf-stat(1)
+============
+
+NAME
+----
+perf-stat - Run a command and gather performance counter statistics
+
+SYNOPSIS
+--------
+[verse]
+'perf stat' [-e <EVENT> | --event=EVENT] [-S] [-a] <command>
+'perf stat' [-e <EVENT> | --event=EVENT] [-S] [-a] -- <command> [<options>]
+
+DESCRIPTION
+-----------
+This command runs a command and gathers performance counter statistics
+from it.
+
+
+OPTIONS
+-------
+<command>...::
+ Any command you can specify in a shell.
+
+
+-e::
+--event=::
+ Select the PMU event. Selection can be a symbolic event name
+ (use 'perf list' to list all events) or a raw PMU
+ event (eventsel+umask) in the form of rNNN where NNN is a
+ hexadecimal event descriptor.
+
+-i::
+--no-inherit::
+ child tasks do not inherit counters
+-p::
+--pid=<pid>::
+ stat events on existing pid
+
+-a::
+ system-wide collection
+
+-c::
+ scale counter values
+
+-B::
+ print large numbers with thousands' separators according to locale
+
+-C::
+--cpu=::
+Count only on the list of cpus provided. Multiple CPUs can be provided as a
+comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2.
+In per-thread mode, this option is ignored. The -a option is still necessary
+to activate system-wide monitoring. Default is to count on all CPUs.
+
+EXAMPLES
+--------
+
+$ perf stat -- make -j
+
+ Performance counter stats for 'make -j':
+
+ 8117.370256 task clock ticks # 11.281 CPU utilization factor
+ 678 context switches # 0.000 M/sec
+ 133 CPU migrations # 0.000 M/sec
+ 235724 pagefaults # 0.029 M/sec
+ 24821162526 CPU cycles # 3057.784 M/sec
+ 18687303457 instructions # 2302.138 M/sec
+ 172158895 cache references # 21.209 M/sec
+ 27075259 cache misses # 3.335 M/sec
+
+ Wall-clock time elapsed: 719.554352 msecs
+
+SEE ALSO
+--------
+linkperf:perf-top[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt
new file mode 100644
index 00000000..1c4b5f5b
--- /dev/null
+++ b/tools/perf/Documentation/perf-test.txt
@@ -0,0 +1,22 @@
+perf-test(1)
+============
+
+NAME
+----
+perf-test - Runs sanity tests.
+
+SYNOPSIS
+--------
+[verse]
+'perf test <options>'
+
+DESCRIPTION
+-----------
+This command does assorted sanity tests, initially through routines linked
+into perf itself, but it will also look for a directory with more tests in
+the form of scripts.
+
+OPTIONS
+-------
+-v::
+--verbose::
+ Be more verbose.
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
new file mode 100644
index 00000000..4b178835
--- /dev/null
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -0,0 +1,44 @@
+perf-timechart(1)
+=================
+
+NAME
+----
+perf-timechart - Tool to visualize total system behavior during a workload
+
+SYNOPSIS
+--------
+[verse]
+'perf timechart' {record}
+
+DESCRIPTION
+-----------
+There are two variants of perf timechart:
+
+ 'perf timechart record <command>' to record the system level events
+ of an arbitrary workload.
+
+ 'perf timechart' to turn a trace into a Scalable Vector Graphics file,
+ that can be viewed with popular SVG viewers such as 'Inkscape'.
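+
+ A typical (illustrative) session:
+
+   perf timechart record sleep 10
+   perf timechart -o sleep.svg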
+
+OPTIONS
+-------
+-o::
+--output=::
+ Select the output file (default: output.svg)
+-i::
+--input=::
+ Select the input file (default: perf.data)
+-w::
+--width=::
+ Select the width of the SVG file (default: 1000)
+-P::
+--power-only::
+ Only output the CPU power section of the diagram
+-p::
+--process::
+ Select the processes to display, by name or PID
+
+
+SEE ALSO
+--------
+linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
new file mode 100644
index 00000000..1f968766
--- /dev/null
+++ b/tools/perf/Documentation/perf-top.txt
@@ -0,0 +1,127 @@
+perf-top(1)
+===========
+
+NAME
+----
+perf-top - System profiling tool.
+
+SYNOPSIS
+--------
+[verse]
+'perf top' [-e <EVENT> | --event=EVENT] [<options>]
+
+DESCRIPTION
+-----------
+This command generates and displays a performance counter profile in realtime.
+
+
+OPTIONS
+-------
+-a::
+--all-cpus::
+ System-wide collection. (default)
+
+-c <count>::
+--count=<count>::
+ Event period to sample.
+
+-C <cpu-list>::
+--cpu=<cpu>::
+Monitor only on the list of CPUs provided. Multiple CPUs can be provided as a
+comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2.
+Default is to monitor all CPUs.
+
+-d <seconds>::
+--delay=<seconds>::
+ Number of seconds to delay between refreshes.
+
+-e <event>::
+--event=<event>::
+ Select the PMU event. Selection can be a symbolic event name
+ (use 'perf list' to list all events) or a raw PMU
+ event (eventsel+umask) in the form of rNNN where NNN is a
+ hexadecimal event descriptor.
+
+-E <entries>::
+--entries=<entries>::
+ Display this many functions.
+
+-f <count>::
+--count-filter=<count>::
+ Only display functions with more events than this.
+
+-F <freq>::
+--freq=<freq>::
+ Profile at this frequency.
+
+-i::
+--inherit::
+	Child tasks inherit counters; this only makes sense with the -p option.
+
+-k <path>::
+--vmlinux=<path>::
+ Path to vmlinux. Required for annotation functionality.
+
+-m <pages>::
+--mmap-pages=<pages>::
+ Number of mmapped data pages.
+
+-p <pid>::
+--pid=<pid>::
+ Profile events on existing pid.
+
+-r <priority>::
+--realtime=<priority>::
+ Collect data with this RT SCHED_FIFO priority.
+
+-s <symbol>::
+--sym-annotate=<symbol>::
+ Annotate this symbol.
+
+-v::
+--verbose::
+ Be more verbose (show counter open errors, etc).
+
+-z::
+--zero::
+ Zero history across display updates.
+
+INTERACTIVE PROMPTING KEYS
+--------------------------
+
+[d]::
+ Display refresh delay.
+
+[e]::
+ Number of entries to display.
+
+[E]::
+ Event to display when multiple counters are active.
+
+[f]::
+ Profile display filter (>= hit count).
+
+[F]::
+ Annotation display filter (>= % of total).
+
+[s]::
+ Annotate symbol.
+
+[S]::
+ Stop annotation, return to full profile display.
+
+[w]::
+ Toggle between weighted sum and individual count[E]r profile.
+
+[z]::
+ Toggle event count zeroing across display updates.
+
+[qQ]::
+ Quit.
+
+Pressing any unmapped key displays a menu and prompts for input.
+
+
+SEE ALSO
+--------
+linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-trace-perl.txt b/tools/perf/Documentation/perf-trace-perl.txt
new file mode 100644
index 00000000..ee6525ee
--- /dev/null
+++ b/tools/perf/Documentation/perf-trace-perl.txt
@@ -0,0 +1,217 @@
+perf-trace-perl(1)
+==================
+
+NAME
+----
+perf-trace-perl - Process trace data with a Perl script
+
+SYNOPSIS
+--------
+[verse]
+'perf trace' [-s [Perl]:script[.pl] ]
+
+DESCRIPTION
+-----------
+
+This perf trace option is used to process perf trace data using perf's
+built-in Perl interpreter. It reads and processes the input file and
+displays the results of the trace analysis implemented in the given
+Perl script, if any.
+
+STARTER SCRIPTS
+---------------
+
+You can avoid reading the rest of this document by running 'perf trace
+-g perl' in the same directory as an existing perf.data trace file.
+That will generate a starter script containing a handler for each of
+the event types in the trace file; it simply prints every available
+field for each event in the trace file.
+
+You can also look at the existing scripts in
+~/libexec/perf-core/scripts/perl for typical examples showing how to
+do basic things like aggregate event data, print results, etc. Also,
+the check-perf-trace.pl script, while not interesting for its results,
+attempts to exercise all of the main scripting features.
+
+EVENT HANDLERS
+--------------
+
+When perf trace is invoked using a trace script, a user-defined
+'handler function' is called for each event in the trace. If there's
+no handler function defined for a given event type, the event is
+ignored (or passed to a 'trace_unhandled' function, see below) and the
+next event is processed.
+
+Most of the event's field values are passed as arguments to the
+handler function; some of the less common ones aren't - those are
+available as calls back into the perf executable (see below).
+
+As an example, the following perf record command can be used to record
+all sched_wakeup events in the system:
+
+ # perf record -a -e sched:sched_wakeup
+
+Traces meant to be processed using a script should be recorded with
+the above option: -a to enable system-wide collection.
+
+The format file for the sched_wakeup event defines the following fields
+(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
+
+----
+ format:
+ field:unsigned short common_type;
+ field:unsigned char common_flags;
+ field:unsigned char common_preempt_count;
+ field:int common_pid;
+ field:int common_lock_depth;
+
+ field:char comm[TASK_COMM_LEN];
+ field:pid_t pid;
+ field:int prio;
+ field:int success;
+ field:int target_cpu;
+----
+
+The handler function for this event would be defined as:
+
+----
+sub sched::sched_wakeup
+{
+ my ($event_name, $context, $common_cpu, $common_secs,
+ $common_nsecs, $common_pid, $common_comm,
+ $comm, $pid, $prio, $success, $target_cpu) = @_;
+}
+----
+
+The handler function takes the form subsystem::event_name.
+
+The $common_* arguments in the handler's argument list are the set of
+arguments passed to all event handlers; some of the fields correspond
+to the common_* fields in the format file, but some are synthesized,
+and some of the common_* fields aren't common enough to be passed
+to every event as arguments but are available as library functions.
+
+Here's a brief description of each of the invariant event args:
+
+ $event_name the name of the event as text
+ $context an opaque 'cookie' used in calls back into perf
+ $common_cpu the cpu the event occurred on
+ $common_secs the secs portion of the event timestamp
+ $common_nsecs the nsecs portion of the event timestamp
+ $common_pid the pid of the current task
+ $common_comm the name of the current process
+
+All of the remaining fields in the event's format file have
+counterparts as handler function arguments of the same name, as can be
+seen in the example above.
+
+The above provides the basics needed to directly access every field of
+every event in a trace, which covers 90% of what you need to know to
+write a useful trace script. The sections below cover the rest.
+
+SCRIPT LAYOUT
+-------------
+
+Every perf trace Perl script should start by setting up a Perl module
+search path and 'use'ing a few support modules (see module
+descriptions below):
+
+----
+ use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+ use lib "./Perf-Trace-Util/lib";
+ use Perf::Trace::Core;
+ use Perf::Trace::Context;
+ use Perf::Trace::Util;
+----
+
+The rest of the script can contain handler functions and support
+functions in any order.
+
+Aside from the event handler functions discussed above, every script
+can implement a set of optional functions:
+
+*trace_begin*, if defined, is called before any event is processed and
+gives scripts a chance to do setup tasks:
+
+----
+ sub trace_begin
+ {
+ }
+----
+
+*trace_end*, if defined, is called after all events have been
+ processed and gives scripts a chance to do end-of-script tasks, such
+ as display results:
+
+----
+sub trace_end
+{
+}
+----
+
+*trace_unhandled*, if defined, is called for any event that
+ doesn't have a handler explicitly defined for it.  The standard set
+ of common arguments is passed into it:
+
+----
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs,
+ $common_nsecs, $common_pid, $common_comm) = @_;
+}
+----
+
+The remaining sections provide descriptions of each of the available
+built-in perf trace Perl modules and their associated functions.
+
+AVAILABLE MODULES AND FUNCTIONS
+-------------------------------
+
+The following sections describe the functions and variables available
+via the various Perf::Trace::* Perl modules. To use the functions and
+variables from the given module, add the corresponding 'use
+Perf::Trace::XXX' line to your perf trace script.
+
+Perf::Trace::Core Module
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+These functions provide some essential functions to user scripts.
+
+The *flag_str* and *symbol_str* functions provide human-readable
+strings for flag and symbolic fields. These correspond to the strings
+and values parsed from the 'print fmt' fields of the event format
+files:
+
+  flag_str($event_name, $field_name, $field_value) - returns the string representation corresponding to $field_value for the flag field $field_name of event $event_name
+  symbol_str($event_name, $field_name, $field_value) - returns the string representation corresponding to $field_value for the symbolic field $field_name of event $event_name
+
+Perf::Trace::Context Module
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some of the 'common' fields in the event format file aren't all that
+common, but need to be made accessible to user scripts nonetheless.
+
+Perf::Trace::Context defines a set of functions that can be used to
+access this data in the context of the current event. Each of these
+functions expects a $context variable, which is the same as the
+$context variable passed into every event handler as the second
+argument.
+
+ common_pc($context) - returns common_preempt count for the current event
+ common_flags($context) - returns common_flags for the current event
+ common_lock_depth($context) - returns common_lock_depth for the current event
+
+Perf::Trace::Util Module
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Various utility functions for use with perf trace:
+
+ nsecs($secs, $nsecs) - returns total nsecs given secs/nsecs pair
+ nsecs_secs($nsecs) - returns whole secs portion given nsecs
+ nsecs_nsecs($nsecs) - returns nsecs remainder given nsecs
+ nsecs_str($nsecs) - returns printable string in the form secs.nsecs
+ avg($total, $n) - returns average given a sum and a total number of values
+
+SEE ALSO
+--------
+linkperf:perf-trace[1]
diff --git a/tools/perf/Documentation/perf-trace-python.txt b/tools/perf/Documentation/perf-trace-python.txt
new file mode 100644
index 00000000..693be804
--- /dev/null
+++ b/tools/perf/Documentation/perf-trace-python.txt
@@ -0,0 +1,623 @@
+perf-trace-python(1)
+====================
+
+NAME
+----
+perf-trace-python - Process trace data with a Python script
+
+SYNOPSIS
+--------
+[verse]
+'perf trace' [-s [Python]:script[.py] ]
+
+DESCRIPTION
+-----------
+
+This perf trace option is used to process perf trace data using perf's
+built-in Python interpreter. It reads and processes the input file and
+displays the results of the trace analysis implemented in the given
+Python script, if any.
+
+A QUICK EXAMPLE
+---------------
+
+This section shows the process, start to finish, of creating a working
+Python script that aggregates and extracts useful information from a
+raw perf trace stream. You can avoid reading the rest of this
+document if an example is enough for you; the rest of the document
+provides more details on each step and lists the library functions
+available to script writers.
+
+This example actually details the steps that were used to create the
+'syscall-counts' script you see when you list the available perf trace
+scripts via 'perf trace -l'. As such, this script also shows how to
+integrate your script into the list of general-purpose 'perf trace'
+scripts listed by that command.
+
+The syscall-counts script is a simple script, but demonstrates all the
+basic ideas necessary to create a useful script. Here's an example
+of its output (syscall names are not yet supported, they will appear
+as numbers):
+
+----
+syscall events:
+
+event count
+---------------------------------------- -----------
+sys_write 455067
+sys_getdents 4072
+sys_close 3037
+sys_swapoff 1769
+sys_read 923
+sys_sched_setparam 826
+sys_open 331
+sys_newfstat 326
+sys_mmap 217
+sys_munmap 216
+sys_futex 141
+sys_select 102
+sys_poll 84
+sys_setitimer 12
+sys_writev 8
+15 8
+sys_lseek 7
+sys_rt_sigprocmask 6
+sys_wait4 3
+sys_ioctl 3
+sys_set_robust_list 1
+sys_exit 1
+56 1
+sys_access 1
+----
+
+Basically our task is to keep a per-syscall tally that gets updated
+every time a system call occurs in the system. Our script will do
+that, but first we need to record the data that will be processed by
+that script. Theoretically, there are a couple of ways we could do
+that:
+
+- we could enable every event under the tracing/events/syscalls
+ directory, but this is over 600 syscalls, well beyond the number
+ allowable by perf. These individual syscall events will however be
+ useful if we want to later use the guidance we get from the
+ general-purpose scripts to drill down and get more detail about
+ individual syscalls of interest.
+
+- we can enable the sys_enter and/or sys_exit syscalls found under
+ tracing/events/raw_syscalls. These are called for all syscalls; the
+ 'id' field can be used to distinguish between individual syscall
+ numbers.
+
+For this script, we only need to know that a syscall was entered; we
+don't care how it exited, so we'll use 'perf record' to record only
+the sys_enter events:
+
+----
+# perf record -a -e raw_syscalls:sys_enter
+
+^C[ perf record: Woken up 1 times to write data ]
+[ perf record: Captured and wrote 56.545 MB perf.data (~2470503 samples) ]
+----
+
+The options basically say to collect data for every syscall event
+system-wide and multiplex the per-cpu output into a single stream.
+That single stream will be recorded in a file in the current directory
+called perf.data.
+
+Once we have a perf.data file containing our data, we can use the -g
+'perf trace' option to generate a Python script that will contain a
+callback handler for each event type found in the perf.data trace
+stream (for more details, see the STARTER SCRIPTS section).
+
+----
+# perf trace -g python
+generated Python script: perf-trace.py
+----
+
+The output file, also created in the current directory, is named
+perf-trace.py.  Here's the file in its entirety:
+
+----
+# perf trace event handlers, generated by perf trace -g python
+# Licensed under the terms of the GNU GPL License version 2
+
+# The common_* event handler fields are the most useful fields common to
+# all events. They don't necessarily correspond to the 'common_*' fields
+# in the format files. Those fields not available as handler params can
+# be retrieved using Python functions of the form common_*(context).
+# See the perf-trace-python Documentation for the list of available functions.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+def trace_begin():
+ print "in trace_begin"
+
+def trace_end():
+ print "in trace_end"
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+
+ print "id=%d, args=%s\n" % \
+ (id, args),
+
+def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+
+def print_header(event_name, cpu, secs, nsecs, pid, comm):
+ print "%-20s %5u %05u.%09u %8u %-20s " % \
+ (event_name, cpu, secs, nsecs, pid, comm),
+----
+
+At the top is a comment block followed by some import statements and a
+path append which every perf trace script should include.
+
+Following that are a couple of generated functions, trace_begin() and
+trace_end(), which are called at the beginning and the end of the
+script respectively (for more details, see the SCRIPT LAYOUT section
+below).
+
+Following those are the 'event handler' functions, generated one for
+every event in the 'perf record' output. The handler functions take
+the form subsystem__event_name, and contain named parameters, one for
+each field in the event; in this case, there's only one event,
+raw_syscalls__sys_enter(). (see the EVENT HANDLERS section below for
+more info on event handlers).
+
+The final couple of functions are, like the begin and end functions,
+generated for every script. The first, trace_unhandled(), is called
+every time the script finds an event in the perf.data file that
+doesn't correspond to any event handler in the script. This could
+mean either that the record step recorded event types that it wasn't
+really interested in, or the script was run against a trace file that
+doesn't correspond to the script.
+
+The script generated by the -g option simply prints a line for each
+event found in the trace stream, i.e. it basically just dumps the event
+and its parameter values to stdout. The print_header() function is
+simply a utility function used for that purpose. Let's rename the
+script and run it to see the default output:
+
+----
+# mv perf-trace.py syscall-counts.py
+# perf trace -s syscall-counts.py
+
+raw_syscalls__sys_enter 1 00840.847582083 7506 perf id=1, args=
+raw_syscalls__sys_enter 1 00840.847595764 7506 perf id=1, args=
+raw_syscalls__sys_enter 1 00840.847620860 7506 perf id=1, args=
+raw_syscalls__sys_enter 1 00840.847710478 6533 npviewer.bin id=78, args=
+raw_syscalls__sys_enter 1 00840.847719204 6533 npviewer.bin id=142, args=
+raw_syscalls__sys_enter 1 00840.847755445 6533 npviewer.bin id=3, args=
+raw_syscalls__sys_enter 1 00840.847775601 6533 npviewer.bin id=3, args=
+raw_syscalls__sys_enter 1 00840.847781820 6533 npviewer.bin id=3, args=
+.
+.
+.
+----
+
+Of course, for this script, we're not interested in printing every
+trace event, but rather aggregating it in a useful way. So we'll get
+rid of everything to do with printing as well as the trace_begin() and
+trace_unhandled() functions, which we won't be using. That leaves us
+with this minimalistic skeleton:
+
+----
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+def trace_end():
+ print "in trace_end"
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+----
+
+In trace_end(), we'll simply print the results, but first we need to
+generate some results to print. To do that we need to have our
+sys_enter() handler do the necessary tallying until all events have
+been counted. A hash table indexed by syscall id is a good way to
+store that information; every time the sys_enter() handler is called,
+we simply increment a count associated with that hash entry indexed by
+that syscall id:
+
+----
+ syscalls = autodict()
+
+ try:
+ syscalls[id] += 1
+ except TypeError:
+ syscalls[id] = 1
+----
+
+The syscalls 'autodict' object is a special kind of Python dictionary
+(implemented in Core.py) that implements Perl's 'autovivifying' hashes
+in Python, i.e. with autovivifying hashes you can assign nested hash
+values without having to go to the trouble of creating intermediate
+levels if they don't exist, e.g. syscalls[comm][pid][id] = 1 will create
+the intermediate hash levels and finally assign the value 1 to the
+hash entry for 'id'.  (Because the value at a missing leaf isn't a
+plain number, the first increment raises a TypeError, and the initial
+value is assigned in that exception handler.  There may be a better
+way to do this in Python, but that's what works for now.)
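+
+If you want to see the mechanism outside of perf, here is a rough
+standalone sketch (it is not the Core.py implementation, just a
+stand-in built on the standard library) that behaves the same way for
+this purpose:
+
+----
+from collections import defaultdict
+
+def autodict_sketch():
+    # every missing key materializes another nested dictionary
+    return defaultdict(autodict_sketch)
+
+counts = autodict_sketch()
+for id in (1, 1, 3, 78):
+    try:
+        counts[id] += 1     # missing leaf is a dict, so this raises TypeError
+    except TypeError:
+        counts[id] = 1      # first occurrence: assign the initial value
+
+# nested assignment also works without creating the levels by hand:
+counts["firefox"][1234][5] = 1
+----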
+
+Putting that code into the raw_syscalls__sys_enter() handler, we
+effectively end up with a single-level dictionary keyed on syscall id
+and having the counts we've tallied as values.
+
+The print_syscall_totals() function iterates over the entries in the
+dictionary and displays a line for each entry containing the syscall
+name (the dictionary keys contain the syscall ids, which are passed to
+the Util function syscall_name(), which translates the raw syscall
+numbers to the corresponding syscall name strings). The output is
+displayed after all the events in the trace have been processed, by
+calling the print_syscall_totals() function from the trace_end()
+handler called at the end of script processing.
+
+The final script producing the output shown above is shown in its
+entirety below (the syscall_name() helper is not yet available, you can
+only deal with ids for now):
+
+----
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+# optionally filter by command name; None means count everything
+for_comm = None
+
+syscalls = autodict()
+
+def trace_end():
+ print_syscall_totals()
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ try:
+ syscalls[id] += 1
+ except TypeError:
+ syscalls[id] = 1
+
+def print_syscall_totals():
+ if for_comm is not None:
+ print "\nsyscall events for %s:\n\n" % (for_comm),
+ else:
+ print "\nsyscall events:\n\n",
+
+ print "%-40s %10s\n" % ("event", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "-----------"),
+
+ for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
+ reverse = True):
+ print "%-40s %10d\n" % (syscall_name(id), val),
+----
+
+The script can be run just as before:
+
+ # perf trace -s syscall-counts.py
+
+So those are the essential steps in writing and running a script. The
+process can be generalized to any tracepoint or set of tracepoints
+you're interested in - basically find the tracepoint(s) you're
+interested in by looking at the list of available events shown by
+'perf list' and/or by looking in /sys/kernel/debug/tracing/events for
+detailed event and field info, record the corresponding trace data
+using 'perf record', passing it the list of interesting events,
+generate a skeleton script using 'perf trace -g python', and modify the
+code to aggregate and display it for your particular needs.
+
+After you've done that you may end up with a general-purpose script
+that you want to keep around and have available for future use. By
+writing a couple of very simple shell scripts and putting them in the
+right place, you can have your script listed alongside the other
+scripts listed by the 'perf trace -l' command e.g.:
+
+----
+root@tropicana:~# perf trace -l
+List of available trace scripts:
+ workqueue-stats workqueue stats (ins/exe/create/destroy)
+ wakeup-latency system-wide min/max/avg wakeup latency
+ rw-by-file <comm> r/w activity for a program, by file
+ rw-by-pid system-wide r/w activity
+----
+
+A nice side effect of doing this is that you also then capture the
+probably lengthy 'perf record' command needed to record the events for
+the script.
+
+To have the script appear as a 'built-in' script, you write two simple
+scripts, one for recording and one for 'reporting'.
+
+The 'record' script is a shell script with the same base name as your
+script, but with -record appended. The shell script should be put
+into the perf/scripts/python/bin directory in the kernel source tree.
+In that script, you write the 'perf record' command-line needed for
+your script:
+
+----
+# cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-record
+
+#!/bin/bash
+perf record -a -e raw_syscalls:sys_enter
+----
+
+The 'report' script is also a shell script with the same base name as
+your script, but with -report appended. It should also be located in
+the perf/scripts/python/bin directory. In that script, you write the
+'perf trace -s' command-line needed for running your script:
+
+----
+# cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-report
+
+#!/bin/bash
+# description: system-wide syscall counts
+perf trace -s ~/libexec/perf-core/scripts/python/syscall-counts.py
+----
+
+Note that the location of the Python script given in the shell script
+is in the libexec/perf-core/scripts/python directory - this is where
+the script will be copied by 'make install' when you install perf.
+For the installation to install your script there, your script needs
+to be located in the perf/scripts/python directory in the kernel
+source tree:
+
+----
+# ls -al kernel-source/tools/perf/scripts/python
+
+root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python
+total 32
+drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
+drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
+drwxr-xr-x 2 trz trz 4096 2010-01-26 22:29 bin
+-rw-r--r-- 1 trz trz 2548 2010-01-26 22:29 check-perf-trace.py
+drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 Perf-Trace-Util
+-rw-r--r-- 1 trz trz 1462 2010-01-26 22:30 syscall-counts.py
+----
+
+Once you've done that (don't forget to do a new 'make install',
+otherwise your script won't show up at run-time), 'perf trace -l'
+should show a new entry for your script:
+
+----
+root@tropicana:~# perf trace -l
+List of available trace scripts:
+ workqueue-stats workqueue stats (ins/exe/create/destroy)
+ wakeup-latency system-wide min/max/avg wakeup latency
+ rw-by-file <comm> r/w activity for a program, by file
+ rw-by-pid system-wide r/w activity
+ syscall-counts system-wide syscall counts
+----
+
+You can now perform the record step via 'perf trace record':
+
+ # perf trace record syscall-counts
+
+and display the output using 'perf trace report':
+
+ # perf trace report syscall-counts
+
+STARTER SCRIPTS
+---------------
+
+You can quickly get started writing a script for a particular set of
+trace data by generating a skeleton script using 'perf trace -g
+python' in the same directory as an existing perf.data trace file.
+That will generate a starter script containing a handler for each of
+the event types in the trace file; it simply prints every available
+field for each event in the trace file.
+
+You can also look at the existing scripts in
+~/libexec/perf-core/scripts/python for typical examples showing how to
+do basic things like aggregate event data, print results, etc. Also,
+the check-perf-trace.py script, while not interesting for its results,
+attempts to exercise all of the main scripting features.
+
+EVENT HANDLERS
+--------------
+
+When perf trace is invoked using a trace script, a user-defined
+'handler function' is called for each event in the trace. If there's
+no handler function defined for a given event type, the event is
+ignored (or passed to a 'trace_unhandled' function, see below) and the
+next event is processed.
+
+Most of the event's field values are passed as arguments to the
+handler function; some of the less common ones aren't - those are
+available as calls back into the perf executable (see below).
+
+As an example, the following perf record command can be used to record
+all sched_wakeup events in the system:
+
+ # perf record -a -e sched:sched_wakeup
+
+Traces meant to be processed using a script should be recorded with
+the above option: -a to enable system-wide collection.
+
+The format file for the sched_wakeup event defines the following fields
+(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
+
+----
+ format:
+ field:unsigned short common_type;
+ field:unsigned char common_flags;
+ field:unsigned char common_preempt_count;
+ field:int common_pid;
+ field:int common_lock_depth;
+
+ field:char comm[TASK_COMM_LEN];
+ field:pid_t pid;
+ field:int prio;
+ field:int success;
+ field:int target_cpu;
+----
+
+The handler function for this event would be defined as:
+
+----
+def sched__sched_wakeup(event_name, context, common_cpu, common_secs,
+ common_nsecs, common_pid, common_comm,
+ comm, pid, prio, success, target_cpu):
+ pass
+----
+
+The handler function takes the form subsystem__event_name.
+
+The common_* arguments in the handler's argument list are the set of
+arguments passed to all event handlers; some of the fields correspond
+to the common_* fields in the format file, but some are synthesized,
+and some of the common_* fields aren't common enough to be passed
+to every event as arguments but are available as library functions.
+
+Here's a brief description of each of the invariant event args:
+
+ event_name the name of the event as text
+ context an opaque 'cookie' used in calls back into perf
+ common_cpu the cpu the event occurred on
+ common_secs the secs portion of the event timestamp
+ common_nsecs the nsecs portion of the event timestamp
+ common_pid the pid of the current task
+ common_comm the name of the current process
+
+All of the remaining fields in the event's format file have
+counterparts as handler function arguments of the same name, as can be
+seen in the example above.
+
+The above provides the basics needed to directly access every field of
+every event in a trace, which covers 90% of what you need to know to
+write a useful trace script. The sections below cover the rest.
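+
+To make that concrete, here is a small sketch (not one of the shipped
+scripts) that fills in the sched_wakeup handler above to tally wakeups
+per target CPU; the field names come straight from the format file
+shown earlier:
+
+----
+wakeups_per_cpu = {}
+
+def sched__sched_wakeup(event_name, context, common_cpu, common_secs,
+        common_nsecs, common_pid, common_comm,
+        comm, pid, prio, success, target_cpu):
+    # tally wakeups by the cpu the task was woken onto
+    wakeups_per_cpu[target_cpu] = wakeups_per_cpu.get(target_cpu, 0) + 1
+
+def trace_end():
+    for cpu in sorted(wakeups_per_cpu.keys()):
+        print "cpu %d: %d wakeups\n" % (cpu, wakeups_per_cpu[cpu]),
+----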
+
+SCRIPT LAYOUT
+-------------
+
+Every perf trace Python script should start by setting up a Python
+module search path and 'import'ing a few support modules (see module
+descriptions below):
+
+----
+ import os
+ import sys
+
+ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+ from perf_trace_context import *
+ from Core import *
+----
+
+The rest of the script can contain handler functions and support
+functions in any order.
+
+Aside from the event handler functions discussed above, every script
+can implement a set of optional functions:
+
+*trace_begin*, if defined, is called before any event is processed and
+gives scripts a chance to do setup tasks:
+
+----
+def trace_begin():
+ pass
+----
+
+*trace_end*, if defined, is called after all events have been
+ processed and gives scripts a chance to do end-of-script tasks, such
+ as display results:
+
+----
+def trace_end():
+ pass
+----
+
+*trace_unhandled*, if defined, is called for any event that
+ doesn't have a handler explicitly defined for it.  The standard set
+ of common arguments is passed into it:
+
+----
+def trace_unhandled(event_name, context, common_cpu, common_secs,
+ common_nsecs, common_pid, common_comm):
+ pass
+----
+
+The remaining sections provide descriptions of each of the available
+built-in perf trace Python modules and their associated functions.
+
+AVAILABLE MODULES AND FUNCTIONS
+-------------------------------
+
+The following sections describe the functions and variables available
+via the various perf trace Python modules. To use the functions and
+variables from the given module, add the corresponding 'from XXXX
+import' line to your perf trace script.
+
+Core.py Module
+~~~~~~~~~~~~~~
+
+These functions provide some essential functions to user scripts.
+
+The *flag_str* and *symbol_str* functions provide human-readable
+strings for flag and symbolic fields. These correspond to the strings
+and values parsed from the 'print fmt' fields of the event format
+files:
+
+  flag_str(event_name, field_name, field_value) - returns the string representation corresponding to field_value for the flag field field_name of event event_name
+  symbol_str(event_name, field_name, field_value) - returns the string representation corresponding to field_value for the symbolic field field_name of event event_name
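+
+As a rough sketch, a handler can use these to turn a raw field value
+into its symbolic name; the event and field names here (the
+softirq_entry tracepoint and its 'vec' field) are only an illustration,
+so substitute the ones from your own trace's format files:
+
+----
+def irq__softirq_entry(event_name, context, common_cpu, common_secs,
+        common_nsecs, common_pid, common_comm, vec):
+    # translate the raw vector number using the event's 'print fmt' mapping
+    print "%-16s vec=%s\n" % (common_comm,
+        symbol_str("irq__softirq_entry", "vec", vec)),
+----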
+
+The *autodict* function returns a special kind of Python
+dictionary that implements Perl's 'autovivifying' hashes in Python
+i.e. with autovivifying hashes, you can assign nested hash values
+without having to go to the trouble of creating intermediate levels if
+they don't exist.
+
+ autodict() - returns an autovivifying dictionary instance
+
+
+perf_trace_context Module
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some of the 'common' fields in the event format file aren't all that
+common, but need to be made accessible to user scripts nonetheless.
+
+perf_trace_context defines a set of functions that can be used to
+access this data in the context of the current event. Each of these
+functions expects a context variable, which is the same as the
+context variable passed into every event handler as the second
+argument.
+
+ common_pc(context) - returns common_preempt count for the current event
+ common_flags(context) - returns common_flags for the current event
+ common_lock_depth(context) - returns common_lock_depth for the current event
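+
+As a rough sketch (reusing the sys_enter handler from the quick example
+above, and assuming the standard imports shown under SCRIPT LAYOUT), a
+handler can pull these extra fields out of the context it was handed:
+
+----
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+        common_secs, common_nsecs, common_pid, common_comm,
+        id, args):
+    # look up the less-common fields via the context cookie
+    print "id=%d flags=%d preempt=%d lock_depth=%d\n" % \
+        (id, common_flags(context), common_pc(context),
+         common_lock_depth(context)),
+----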
+
+Util.py Module
+~~~~~~~~~~~~~~
+
+Various utility functions for use with perf trace:
+
+ nsecs(secs, nsecs) - returns total nsecs given secs/nsecs pair
+ nsecs_secs(nsecs) - returns whole secs portion given nsecs
+ nsecs_nsecs(nsecs) - returns nsecs remainder given nsecs
+ nsecs_str(nsecs) - returns printable string in the form secs.nsecs
+ avg(total, n) - returns average given a sum and a total number of values
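+
+For example (a sketch, not a complete script, and assuming the script
+also does 'from Util import *'), a handler can fold the secs/nsecs pair
+it receives into a single nanosecond value for arithmetic, and
+nsecs_str() renders it back as secs.nsecs for printing:
+
+----
+first_ts = 0
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+        common_secs, common_nsecs, common_pid, common_comm,
+        id, args):
+    global first_ts
+    ts = nsecs(common_secs, common_nsecs)
+    if not first_ts:
+        first_ts = ts
+    # offset from the first event seen, printed as secs.nsecs
+    print "+%s id=%d\n" % (nsecs_str(ts - first_ts), id),
+----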
+
+SEE ALSO
+--------
+linkperf:perf-trace[1]
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
new file mode 100644
index 00000000..122ec9dc
--- /dev/null
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -0,0 +1,70 @@
+perf-trace(1)
+=============
+
+NAME
+----
+perf-trace - Read perf.data (created by perf record) and display trace output
+
+SYNOPSIS
+--------
+[verse]
+'perf trace' {record <script> | report <script> [args] }
+
+DESCRIPTION
+-----------
+This command reads the input file and displays the trace recorded.
+
+There are several variants of perf trace:
+
+ 'perf trace' to see a detailed trace of the workload that was
+ recorded.
+
+ You can also run a set of pre-canned scripts that aggregate and
+ summarize the raw trace data in various ways (the list of scripts is
+ available via 'perf trace -l'). The following variants allow you to
+ record and run those scripts:
+
+ 'perf trace record <script>' to record the events required for 'perf
+ trace report'. <script> is the name displayed in the output of
+ 'perf trace --list' i.e. the actual script name minus any language
+ extension.
+
+ 'perf trace report <script>' to run and display the results of
+ <script>. <script> is the name displayed in the output of 'perf
+ trace --list' i.e. the actual script name minus any language
+ extension. The perf.data output from a previous run of 'perf trace
+ record <script>' is used and should be present for this command to
+ succeed.
+
+ See the 'SEE ALSO' section for links to language-specific
+ information on how to write and run your own trace scripts.
+
+OPTIONS
+-------
+-D::
+--dump-raw-trace=::
+ Display verbose dump of the trace data.
+
+-L::
+--Latency=::
+ Show latency attributes (irqs/preemption disabled, etc).
+
+-l::
+--list=::
+ Display a list of available trace scripts.
+
+-s ['lang']::
+--script=::
+ Process trace data with the given script ([lang]:script[.ext]).
+ If the string 'lang' is specified in place of a script name, a
+ list of supported languages will be displayed instead.
+
+-g::
+--gen-script=::
+ Generate perf-trace.[ext] starter script for given language,
+ using current perf.data.
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-trace-perl[1],
+linkperf:perf-trace-python[1]
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
new file mode 100644
index 00000000..0eeb247d
--- /dev/null
+++ b/tools/perf/Documentation/perf.txt
@@ -0,0 +1,24 @@
+perf(1)
+=======
+
+NAME
+----
+perf - Performance analysis tools for Linux
+
+SYNOPSIS
+--------
+[verse]
+'perf' [--version] [--help] COMMAND [ARGS]
+
+DESCRIPTION
+-----------
+Performance counters for Linux are a new kernel-based subsystem
+that provides a framework for all things performance analysis. It
+covers hardware-level features (CPU/PMU, Performance Monitoring Unit)
+as well as software features (software counters, tracepoints).
+
+SEE ALSO
+--------
+linkperf:perf-stat[1], linkperf:perf-top[1],
+linkperf:perf-record[1], linkperf:perf-report[1],
+linkperf:perf-list[1]
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
new file mode 100644
index 00000000..8c7fc0c8
--- /dev/null
+++ b/tools/perf/MANIFEST
@@ -0,0 +1,12 @@
+tools/perf
+include/linux/perf_event.h
+include/linux/rbtree.h
+include/linux/list.h
+include/linux/hash.h
+include/linux/stringify.h
+lib/rbtree.c
+include/linux/swab.h
+arch/*/include/asm/unistd*.h
+include/linux/poison.h
+include/linux/magic.h
+include/linux/hw_breakpoint.h
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
new file mode 100644
index 00000000..7f4aff0a
--- /dev/null
+++ b/tools/perf/Makefile
@@ -0,0 +1,1269 @@
+ifeq ("$(origin O)", "command line")
+ OUTPUT := $(O)/
+endif
+
+# The default target of this Makefile is...
+all::
+
+ifneq ($(OUTPUT),)
+# check that the output directory actually exists
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
+endif
+
+# Define V=1 to have a more verbose compile.
+# Define V=2 to have an even more verbose compile.
+#
+# Define SNPRINTF_RETURNS_BOGUS if you are on a system on which snprintf()
+# or vsnprintf() return -1 instead of number of characters which would
+# have been written to the final string if enough space had been available.
+#
+# Define FREAD_READS_DIRECTORIES if you are on a system which succeeds
+# when attempting to read from an fopen'ed directory.
+#
+# Define NO_OPENSSL environment variable if you do not have OpenSSL.
+# This also implies MOZILLA_SHA1.
+#
+# Define CURLDIR=/foo/bar if your curl header and library files are in
+# /foo/bar/include and /foo/bar/lib directories.
+#
+# Define EXPATDIR=/foo/bar if your expat header and library files are in
+# /foo/bar/include and /foo/bar/lib directories.
+#
+# Define NO_D_INO_IN_DIRENT if you don't have d_ino in your struct dirent.
+#
+# Define NO_D_TYPE_IN_DIRENT if your platform defines DT_UNKNOWN but lacks
+# d_type in struct dirent (latest Cygwin -- will be fixed soonish).
+#
+# Define NO_C99_FORMAT if your formatted IO functions (printf/scanf et.al.)
+# do not support the 'size specifiers' introduced by C99, namely ll, hh,
+# j, z, t (representing long long int, char, intmax_t, size_t, ptrdiff_t).
+# Some C compilers supported these specifiers prior to C99 as an extension.
+#
+# Define NO_STRCASESTR if you don't have strcasestr.
+#
+# Define NO_MEMMEM if you don't have memmem.
+#
+# Define NO_STRTOUMAX if you don't have strtoumax in the C library.
+# If your compiler also does not support long long or does not have
+# strtoull, define NO_STRTOULL.
+#
+# Define NO_SETENV if you don't have setenv in the C library.
+#
+# Define NO_UNSETENV if you don't have unsetenv in the C library.
+#
+# Define NO_MKDTEMP if you don't have mkdtemp in the C library.
+#
+# Define NO_SYS_SELECT_H if you don't have sys/select.h.
+#
+# Define NO_SYMLINK_HEAD if you never want .perf/HEAD to be a symbolic link.
+# Enable it on Windows. By default, symrefs are still used.
+#
+# Define NO_SVN_TESTS if you want to skip time-consuming SVN interoperability
+# tests. These tests take up a significant amount of the total test time
+# but are not needed unless you plan to talk to SVN repos.
+#
+# Define NO_FINK if you are building on Darwin/Mac OS X, have Fink
+# installed in /sw, but don't want PERF to link against any libraries
+# installed there. If defined you may specify your own (or Fink's)
+# include directories and library directories by defining CFLAGS
+# and LDFLAGS appropriately.
+#
+# Define NO_DARWIN_PORTS if you are building on Darwin/Mac OS X,
+# have DarwinPorts installed in /opt/local, but don't want PERF to
+# link against any libraries installed there. If defined you may
+# specify your own (or DarwinPort's) include directories and
+# library directories by defining CFLAGS and LDFLAGS appropriately.
+#
+# Define PPC_SHA1 environment variable when running make to make use of
+# a bundled SHA1 routine optimized for PowerPC.
+#
+# Define ARM_SHA1 environment variable when running make to make use of
+# a bundled SHA1 routine optimized for ARM.
+#
+# Define MOZILLA_SHA1 environment variable when running make to make use of
+# a bundled SHA1 routine coming from Mozilla. It is GPL'd and should be fast
+# on non-x86 architectures (e.g. PowerPC), while the OpenSSL version (default
+# choice) has a very fast version optimized for i586.
+#
+# Define NEEDS_SSL_WITH_CRYPTO if you need -lcrypto with -lssl (Darwin).
+#
+# Define NEEDS_LIBICONV if linking with libc is not enough (Darwin).
+#
+# Define NEEDS_SOCKET if linking with libc is not enough (SunOS,
+# Patrick Mauritz).
+#
+# Define NO_MMAP if you want to avoid mmap.
+#
+# Define NO_PTHREADS if you do not have or do not want to use Pthreads.
+#
+# Define NO_PREAD if you have a problem with pread() system call (e.g.
+# cygwin.dll before v1.5.22).
+#
+# Define NO_FAST_WORKING_DIRECTORY if accessing objects in pack files is
+# generally faster on your platform than accessing the working directory.
+#
+# Define NO_TRUSTABLE_FILEMODE if your filesystem may claim to support
+# the executable mode bit, but doesn't really do so.
+#
+# Define NO_IPV6 if you lack IPv6 support and getaddrinfo().
+#
+# Define NO_SOCKADDR_STORAGE if your platform does not have struct
+# sockaddr_storage.
+#
+# Define NO_ICONV if your libc does not properly support iconv.
+#
+# Define OLD_ICONV if your library has an old iconv(), where the second
+# (input buffer pointer) parameter is declared with type (const char **).
+#
+# Define NO_DEFLATE_BOUND if your zlib does not have deflateBound.
+#
+# Define NO_R_TO_GCC_LINKER if your gcc does not like "-R/path/lib"
+# that tells runtime paths to dynamic libraries;
+# "-Wl,-rpath=/path/lib" is used instead.
+#
+# Define USE_NSEC below if you want perf to care about sub-second file mtimes
+# and ctimes. Note that you need recent glibc (at least 2.2.4) for this, and
+# it will BREAK YOUR LOCAL DIFFS! show-diff and anything using it will likely
+# randomly break unless your underlying filesystem supports those sub-second
+# times (my ext3 doesn't).
+#
+# Define USE_ST_TIMESPEC if your "struct stat" uses "st_ctimespec" instead of
+# "st_ctim"
+#
+# Define NO_NSEC if your "struct stat" does not have "st_ctim.tv_nsec"
+# available. This automatically turns USE_NSEC off.
+#
+# Define USE_STDEV below if you want perf to care about the underlying device
+# change being considered an inode change from the update-index perspective.
+#
+# Define NO_ST_BLOCKS_IN_STRUCT_STAT if your platform does not have st_blocks
+# field that counts the on-disk footprint in 512-byte blocks.
+#
+# Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8
+#
+# Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72.
+#
+# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's
+# MakeMaker (e.g. using ActiveState under Cygwin).
+#
+# Define NO_PERL if you do not want Perl scripts or libraries at all.
+#
+# Define INTERNAL_QSORT to use Git's implementation of qsort(), which
+# is a simplified version of the merge sort used in glibc. This is
+# recommended if Git triggers O(n^2) behavior in your platform's qsort().
+#
+# Define NO_EXTERNAL_GREP if you don't want "perf grep" to ever call
+# your external grep (e.g., if your system lacks grep, if its grep is
+# broken, or spawning an external process is slower than the built-in grep perf has).
+#
+# Define LDFLAGS=-static to build a static binary.
+#
+# Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds.
+#
+# Define NO_DWARF if you do not want debug-info analysis feature at all.
+
+$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
+ @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
+-include $(OUTPUT)PERF-VERSION-FILE
+
+uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not')
+uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not')
+uname_O := $(shell sh -c 'uname -o 2>/dev/null || echo not')
+uname_R := $(shell sh -c 'uname -r 2>/dev/null || echo not')
+uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not')
+uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not')
+
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
+ -e s/arm.*/arm/ -e s/sa110/arm/ \
+ -e s/s390x/s390/ -e s/parisc64/parisc/ \
+ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
+ -e s/sh[234].*/sh/ )
+
+# Additional ARCH settings for x86
+ifeq ($(ARCH),i386)
+ ARCH := x86
+endif
+ifeq ($(ARCH),x86_64)
+ ARCH := x86
+endif
+
+# CFLAGS and LDFLAGS are for the users to override from the command line.
+
+#
+# Include saner warnings here, which can catch bugs:
+#
+
+EXTRA_WARNINGS := -Wformat
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-security
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-y2k
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstack-protector
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wvolatile-register-var
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-prototypes
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wnested-externs
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes
+EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement
+
+ifeq ("$(origin DEBUG)", "command line")
+ PERF_DEBUG = $(DEBUG)
+endif
+ifndef PERF_DEBUG
+ CFLAGS_OPTIMIZE = -O6
+endif
+
+CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+EXTLIBS = -lpthread -lrt -lelf -lm
+ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
+ALL_LDFLAGS = $(LDFLAGS)
+STRIP ?= strip
+
+# Among the variables below, these:
+# perfexecdir
+# template_dir
+# mandir
+# infodir
+# htmldir
+# ETC_PERFCONFIG (but not sysconfdir)
+# can be specified as a relative path some/where/else;
+# this is interpreted as relative to $(prefix) and "perf" at
+# runtime figures out where they are based on the path to the executable.
+# This can help installing the suite in a relocatable way.
+
+# Make the path relative to DESTDIR, not to prefix
+ifndef DESTDIR
+prefix = $(HOME)
+endif
+bindir_relative = bin
+bindir = $(prefix)/$(bindir_relative)
+mandir = share/man
+infodir = share/info
+perfexecdir = libexec/perf-core
+sharedir = $(prefix)/share
+template_dir = share/perf-core/templates
+htmldir = share/doc/perf-doc
+ifeq ($(prefix),/usr)
+sysconfdir = /etc
+ETC_PERFCONFIG = $(sysconfdir)/perfconfig
+else
+sysconfdir = $(prefix)/etc
+ETC_PERFCONFIG = etc/perfconfig
+endif
+lib = lib
+
+export prefix bindir sharedir sysconfdir
+
+CC = $(CROSS_COMPILE)gcc
+AR = $(CROSS_COMPILE)ar
+RM = rm -f
+MKDIR = mkdir
+TAR = tar
+FIND = find
+INSTALL = install
+RPMBUILD = rpmbuild
+PTHREAD_LIBS = -lpthread
+
+# sparse is architecture-neutral, which means that we need to tell it
+# explicitly what architecture to check for. Fix this up for yours..
+SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
+
+ifeq ($(V), 2)
+ QUIET_STDERR = ">/dev/null"
+else
+ QUIET_STDERR = ">/dev/null 2>&1"
+endif
+
+-include feature-tests.mak
+
+ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y)
+ CFLAGS := $(CFLAGS) -fstack-protector-all
+endif
+
+
+### --- END CONFIGURATION SECTION ---
+
+# Those must not be GNU-specific; they are shared with perl/ which may
+# be built by a different compiler. (Note that this is an artifact now
+# but it still might be nice to keep that distinction.)
+BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include
+BASIC_LDFLAGS =
+
+# Guard against environment variables
+BUILTIN_OBJS =
+BUILT_INS =
+COMPAT_CFLAGS =
+COMPAT_OBJS =
+LIB_H =
+LIB_OBJS =
+SCRIPT_PERL =
+SCRIPT_SH =
+TEST_PROGRAMS =
+
+SCRIPT_SH += perf-archive.sh
+
+#
+# No Perl scripts right now:
+#
+
+# SCRIPT_PERL += perf-add--interactive.perl
+
+SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \
+ $(patsubst %.perl,%,$(SCRIPT_PERL))
+
+# Empty...
+EXTRA_PROGRAMS =
+
+# ... and all the rest that could be moved out of bindir to perfexecdir
+PROGRAMS += $(EXTRA_PROGRAMS)
+
+#
+# Single 'perf' binary right now:
+#
+PROGRAMS += $(OUTPUT)perf
+
+# List built-in command $C whose implementation cmd_$C() is not in
+# builtin-$C.o but is linked in as part of some other command.
+#
+
+# what 'all' will build and 'install' will install, in perfexecdir
+ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
+
+# what 'all' will build but not install in perfexecdir
+OTHER_PROGRAMS = $(OUTPUT)perf$X
+
+# Set paths to tools early so that they can be used for version tests.
+ifndef SHELL_PATH
+ SHELL_PATH = /bin/sh
+endif
+ifndef PERL_PATH
+ PERL_PATH = /usr/bin/perl
+endif
+
+export PERL_PATH
+
+LIB_FILE=$(OUTPUT)libperf.a
+
+LIB_H += ../../include/linux/perf_event.h
+LIB_H += ../../include/linux/rbtree.h
+LIB_H += ../../include/linux/list.h
+LIB_H += ../../include/linux/hash.h
+LIB_H += ../../include/linux/stringify.h
+LIB_H += util/include/linux/bitmap.h
+LIB_H += util/include/linux/bitops.h
+LIB_H += util/include/linux/compiler.h
+LIB_H += util/include/linux/ctype.h
+LIB_H += util/include/linux/kernel.h
+LIB_H += util/include/linux/list.h
+LIB_H += util/include/linux/module.h
+LIB_H += util/include/linux/poison.h
+LIB_H += util/include/linux/prefetch.h
+LIB_H += util/include/linux/rbtree.h
+LIB_H += util/include/linux/string.h
+LIB_H += util/include/linux/types.h
+LIB_H += util/include/asm/asm-offsets.h
+LIB_H += util/include/asm/bug.h
+LIB_H += util/include/asm/byteorder.h
+LIB_H += util/include/asm/hweight.h
+LIB_H += util/include/asm/swab.h
+LIB_H += util/include/asm/system.h
+LIB_H += util/include/asm/uaccess.h
+LIB_H += util/include/dwarf-regs.h
+LIB_H += perf.h
+LIB_H += util/cache.h
+LIB_H += util/callchain.h
+LIB_H += util/build-id.h
+LIB_H += util/debug.h
+LIB_H += util/debugfs.h
+LIB_H += util/event.h
+LIB_H += util/exec_cmd.h
+LIB_H += util/types.h
+LIB_H += util/levenshtein.h
+LIB_H += util/map.h
+LIB_H += util/parse-options.h
+LIB_H += util/parse-events.h
+LIB_H += util/quote.h
+LIB_H += util/util.h
+LIB_H += util/header.h
+LIB_H += util/help.h
+LIB_H += util/session.h
+LIB_H += util/strbuf.h
+LIB_H += util/strlist.h
+LIB_H += util/svghelper.h
+LIB_H += util/run-command.h
+LIB_H += util/sigchain.h
+LIB_H += util/symbol.h
+LIB_H += util/color.h
+LIB_H += util/values.h
+LIB_H += util/sort.h
+LIB_H += util/hist.h
+LIB_H += util/thread.h
+LIB_H += util/trace-event.h
+LIB_H += util/probe-finder.h
+LIB_H += util/probe-event.h
+LIB_H += util/pstack.h
+LIB_H += util/cpumap.h
+
+LIB_OBJS += $(OUTPUT)util/abspath.o
+LIB_OBJS += $(OUTPUT)util/alias.o
+LIB_OBJS += $(OUTPUT)util/build-id.o
+LIB_OBJS += $(OUTPUT)util/config.o
+LIB_OBJS += $(OUTPUT)util/ctype.o
+LIB_OBJS += $(OUTPUT)util/debugfs.o
+LIB_OBJS += $(OUTPUT)util/environment.o
+LIB_OBJS += $(OUTPUT)util/event.o
+LIB_OBJS += $(OUTPUT)util/exec_cmd.o
+LIB_OBJS += $(OUTPUT)util/help.o
+LIB_OBJS += $(OUTPUT)util/levenshtein.o
+LIB_OBJS += $(OUTPUT)util/parse-options.o
+LIB_OBJS += $(OUTPUT)util/parse-events.o
+LIB_OBJS += $(OUTPUT)util/path.o
+LIB_OBJS += $(OUTPUT)util/rbtree.o
+LIB_OBJS += $(OUTPUT)util/bitmap.o
+LIB_OBJS += $(OUTPUT)util/hweight.o
+LIB_OBJS += $(OUTPUT)util/run-command.o
+LIB_OBJS += $(OUTPUT)util/quote.o
+LIB_OBJS += $(OUTPUT)util/strbuf.o
+LIB_OBJS += $(OUTPUT)util/string.o
+LIB_OBJS += $(OUTPUT)util/strlist.o
+LIB_OBJS += $(OUTPUT)util/usage.o
+LIB_OBJS += $(OUTPUT)util/wrapper.o
+LIB_OBJS += $(OUTPUT)util/sigchain.o
+LIB_OBJS += $(OUTPUT)util/symbol.o
+LIB_OBJS += $(OUTPUT)util/color.o
+LIB_OBJS += $(OUTPUT)util/pager.o
+LIB_OBJS += $(OUTPUT)util/header.o
+LIB_OBJS += $(OUTPUT)util/callchain.o
+LIB_OBJS += $(OUTPUT)util/values.o
+LIB_OBJS += $(OUTPUT)util/debug.o
+LIB_OBJS += $(OUTPUT)util/map.o
+LIB_OBJS += $(OUTPUT)util/pstack.o
+LIB_OBJS += $(OUTPUT)util/session.o
+LIB_OBJS += $(OUTPUT)util/thread.o
+LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
+LIB_OBJS += $(OUTPUT)util/trace-event-read.o
+LIB_OBJS += $(OUTPUT)util/trace-event-info.o
+LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
+LIB_OBJS += $(OUTPUT)util/svghelper.o
+LIB_OBJS += $(OUTPUT)util/sort.o
+LIB_OBJS += $(OUTPUT)util/hist.o
+LIB_OBJS += $(OUTPUT)util/probe-event.o
+LIB_OBJS += $(OUTPUT)util/util.o
+LIB_OBJS += $(OUTPUT)util/cpumap.o
+
+BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
+
+BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
+
+# Benchmark modules
+BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
+BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
+BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
+
+BUILTIN_OBJS += $(OUTPUT)builtin-diff.o
+BUILTIN_OBJS += $(OUTPUT)builtin-help.o
+BUILTIN_OBJS += $(OUTPUT)builtin-sched.o
+BUILTIN_OBJS += $(OUTPUT)builtin-buildid-list.o
+BUILTIN_OBJS += $(OUTPUT)builtin-buildid-cache.o
+BUILTIN_OBJS += $(OUTPUT)builtin-list.o
+BUILTIN_OBJS += $(OUTPUT)builtin-record.o
+BUILTIN_OBJS += $(OUTPUT)builtin-report.o
+BUILTIN_OBJS += $(OUTPUT)builtin-stat.o
+BUILTIN_OBJS += $(OUTPUT)builtin-timechart.o
+BUILTIN_OBJS += $(OUTPUT)builtin-top.o
+BUILTIN_OBJS += $(OUTPUT)builtin-trace.o
+BUILTIN_OBJS += $(OUTPUT)builtin-probe.o
+BUILTIN_OBJS += $(OUTPUT)builtin-kmem.o
+BUILTIN_OBJS += $(OUTPUT)builtin-lock.o
+BUILTIN_OBJS += $(OUTPUT)builtin-kvm.o
+BUILTIN_OBJS += $(OUTPUT)builtin-test.o
+BUILTIN_OBJS += $(OUTPUT)builtin-inject.o
+
+PERFLIBS = $(LIB_FILE)
+
+#
+# Platform specific tweaks
+#
+
+# We choose to avoid "if .. else if .. else .. endif endif"
+# because maintaining the nesting to match is a pain. If
+# we had "elif" things would have been much nicer...
+
+-include config.mak.autogen
+-include config.mak
+
+ifndef NO_DWARF
+FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS)
+ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y)
+ msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
+ NO_DWARF := 1
+endif # Dwarf support
+endif # NO_DWARF
+
+-include arch/$(ARCH)/Makefile
+
+ifeq ($(uname_S),Darwin)
+ ifndef NO_FINK
+ ifeq ($(shell test -d /sw/lib && echo y),y)
+ BASIC_CFLAGS += -I/sw/include
+ BASIC_LDFLAGS += -L/sw/lib
+ endif
+ endif
+ ifndef NO_DARWIN_PORTS
+ ifeq ($(shell test -d /opt/local/lib && echo y),y)
+ BASIC_CFLAGS += -I/opt/local/include
+ BASIC_LDFLAGS += -L/opt/local/lib
+ endif
+ endif
+ PTHREAD_LIBS =
+endif
+
+ifneq ($(OUTPUT),)
+ BASIC_CFLAGS += -I$(OUTPUT)
+endif
+
+FLAGS_LIBELF=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS)
+ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF)),y)
+ FLAGS_GLIBC=$(ALL_CFLAGS) $(ALL_LDFLAGS)
+ ifneq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC)),y)
+ msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
+ else
+ msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel);
+ endif
+endif
+
+ifneq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_COMMON)),y)
+ BASIC_CFLAGS += -DLIBELF_NO_MMAP
+endif
+
+ifndef NO_DWARF
+ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
+ msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
+else
+ BASIC_CFLAGS += -DDWARF_SUPPORT
+ EXTLIBS += -lelf -ldw
+ LIB_OBJS += $(OUTPUT)util/probe-finder.o
+endif # PERF_HAVE_DWARF_REGS
+endif # NO_DWARF
+
+ifdef NO_NEWT
+ BASIC_CFLAGS += -DNO_NEWT_SUPPORT
+else
+ FLAGS_NEWT=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lnewt
+ ifneq ($(call try-cc,$(SOURCE_NEWT),$(FLAGS_NEWT)),y)
+ msg := $(warning newt not found, disables TUI support. Please install newt-devel or libnewt-dev);
+ BASIC_CFLAGS += -DNO_NEWT_SUPPORT
+ else
+ # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
+ BASIC_CFLAGS += -I/usr/include/slang
+ EXTLIBS += -lnewt -lslang
+ LIB_OBJS += $(OUTPUT)util/ui/setup.o
+ LIB_OBJS += $(OUTPUT)util/ui/browser.o
+ LIB_OBJS += $(OUTPUT)util/ui/browsers/annotate.o
+ LIB_OBJS += $(OUTPUT)util/ui/browsers/hists.o
+ LIB_OBJS += $(OUTPUT)util/ui/browsers/map.o
+ LIB_OBJS += $(OUTPUT)util/ui/helpline.o
+ LIB_OBJS += $(OUTPUT)util/ui/progress.o
+ LIB_OBJS += $(OUTPUT)util/ui/util.o
+ LIB_H += util/ui/browser.h
+ LIB_H += util/ui/browsers/map.h
+ LIB_H += util/ui/helpline.h
+ LIB_H += util/ui/libslang.h
+ LIB_H += util/ui/progress.h
+ LIB_H += util/ui/util.h
+ endif
+endif
+
+ifdef NO_LIBPERL
+ BASIC_CFLAGS += -DNO_LIBPERL
+else
+ PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null`
+ PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+ FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
+
+ ifneq ($(call try-cc,$(SOURCE_PERL_EMBED),$(FLAGS_PERL_EMBED)),y)
+ BASIC_CFLAGS += -DNO_LIBPERL
+ else
+ ALL_LDFLAGS += $(PERL_EMBED_LDOPTS)
+ LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-perl.o
+ LIB_OBJS += $(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o
+ endif
+endif
+
+ifdef NO_LIBPYTHON
+ BASIC_CFLAGS += -DNO_LIBPYTHON
+else
+ PYTHON_EMBED_LDOPTS = `python-config --ldflags 2>/dev/null`
+ PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null`
+ FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
+ ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y)
+ BASIC_CFLAGS += -DNO_LIBPYTHON
+ else
+ ALL_LDFLAGS += $(PYTHON_EMBED_LDOPTS)
+ LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o
+ LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o
+ endif
+endif
+
+ifdef NO_DEMANGLE
+ BASIC_CFLAGS += -DNO_DEMANGLE
+else
+ ifdef HAVE_CPLUS_DEMANGLE
+ EXTLIBS += -liberty
+ BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
+ else
+ FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd
+ has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD))
+ ifeq ($(has_bfd),y)
+ EXTLIBS += -lbfd
+ else
+ FLAGS_BFD_IBERTY=$(FLAGS_BFD) -liberty
+ has_bfd_iberty := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY))
+ ifeq ($(has_bfd_iberty),y)
+ EXTLIBS += -lbfd -liberty
+ else
+ FLAGS_BFD_IBERTY_Z=$(FLAGS_BFD_IBERTY) -lz
+ has_bfd_iberty_z := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY_Z))
+ ifeq ($(has_bfd_iberty_z),y)
+ EXTLIBS += -lbfd -liberty -lz
+ else
+ FLAGS_CPLUS_DEMANGLE=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -liberty
+ has_cplus_demangle := $(call try-cc,$(SOURCE_CPLUS_DEMANGLE),$(FLAGS_CPLUS_DEMANGLE))
+ ifeq ($(has_cplus_demangle),y)
+ EXTLIBS += -liberty
+ BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE
+ else
+ msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling)
+ BASIC_CFLAGS += -DNO_DEMANGLE
+ endif
+ endif
+ endif
+ endif
+ endif
+endif
+
+ifndef CC_LD_DYNPATH
+ ifdef NO_R_TO_GCC_LINKER
+	# Some versions of gcc do not accept -R and pass it on to the
+	# linker to specify the runtime dynamic library path.
+ CC_LD_DYNPATH = -Wl,-rpath,
+ else
+ CC_LD_DYNPATH = -R
+ endif
+endif
+
+ifdef NEEDS_SOCKET
+ EXTLIBS += -lsocket
+endif
+ifdef NEEDS_NSL
+ EXTLIBS += -lnsl
+endif
+ifdef NO_D_TYPE_IN_DIRENT
+ BASIC_CFLAGS += -DNO_D_TYPE_IN_DIRENT
+endif
+ifdef NO_D_INO_IN_DIRENT
+ BASIC_CFLAGS += -DNO_D_INO_IN_DIRENT
+endif
+ifdef NO_ST_BLOCKS_IN_STRUCT_STAT
+ BASIC_CFLAGS += -DNO_ST_BLOCKS_IN_STRUCT_STAT
+endif
+ifdef USE_NSEC
+ BASIC_CFLAGS += -DUSE_NSEC
+endif
+ifdef USE_ST_TIMESPEC
+ BASIC_CFLAGS += -DUSE_ST_TIMESPEC
+endif
+ifdef NO_NSEC
+ BASIC_CFLAGS += -DNO_NSEC
+endif
+ifdef NO_C99_FORMAT
+ BASIC_CFLAGS += -DNO_C99_FORMAT
+endif
+ifdef SNPRINTF_RETURNS_BOGUS
+ COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS
+ COMPAT_OBJS += $(OUTPUT)compat/snprintf.o
+endif
+ifdef FREAD_READS_DIRECTORIES
+ COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES
+ COMPAT_OBJS += $(OUTPUT)compat/fopen.o
+endif
+ifdef NO_SYMLINK_HEAD
+ BASIC_CFLAGS += -DNO_SYMLINK_HEAD
+endif
+ifdef NO_STRCASESTR
+ COMPAT_CFLAGS += -DNO_STRCASESTR
+ COMPAT_OBJS += $(OUTPUT)compat/strcasestr.o
+endif
+ifdef NO_STRTOUMAX
+ COMPAT_CFLAGS += -DNO_STRTOUMAX
+ COMPAT_OBJS += $(OUTPUT)compat/strtoumax.o
+endif
+ifdef NO_STRTOULL
+ COMPAT_CFLAGS += -DNO_STRTOULL
+endif
+ifdef NO_SETENV
+ COMPAT_CFLAGS += -DNO_SETENV
+ COMPAT_OBJS += $(OUTPUT)compat/setenv.o
+endif
+ifdef NO_MKDTEMP
+ COMPAT_CFLAGS += -DNO_MKDTEMP
+ COMPAT_OBJS += $(OUTPUT)compat/mkdtemp.o
+endif
+ifdef NO_UNSETENV
+ COMPAT_CFLAGS += -DNO_UNSETENV
+ COMPAT_OBJS += $(OUTPUT)compat/unsetenv.o
+endif
+ifdef NO_SYS_SELECT_H
+ BASIC_CFLAGS += -DNO_SYS_SELECT_H
+endif
+ifdef NO_MMAP
+ COMPAT_CFLAGS += -DNO_MMAP
+ COMPAT_OBJS += $(OUTPUT)compat/mmap.o
+else
+ ifdef USE_WIN32_MMAP
+ COMPAT_CFLAGS += -DUSE_WIN32_MMAP
+ COMPAT_OBJS += $(OUTPUT)compat/win32mmap.o
+ endif
+endif
+ifdef NO_PREAD
+ COMPAT_CFLAGS += -DNO_PREAD
+ COMPAT_OBJS += $(OUTPUT)compat/pread.o
+endif
+ifdef NO_FAST_WORKING_DIRECTORY
+ BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY
+endif
+ifdef NO_TRUSTABLE_FILEMODE
+ BASIC_CFLAGS += -DNO_TRUSTABLE_FILEMODE
+endif
+ifdef NO_IPV6
+ BASIC_CFLAGS += -DNO_IPV6
+endif
+ifdef NO_UINTMAX_T
+ BASIC_CFLAGS += -Duintmax_t=uint32_t
+endif
+ifdef NO_SOCKADDR_STORAGE
+ifdef NO_IPV6
+ BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in
+else
+ BASIC_CFLAGS += -Dsockaddr_storage=sockaddr_in6
+endif
+endif
+ifdef NO_INET_NTOP
+ LIB_OBJS += $(OUTPUT)compat/inet_ntop.o
+endif
+ifdef NO_INET_PTON
+ LIB_OBJS += $(OUTPUT)compat/inet_pton.o
+endif
+
+ifdef NO_ICONV
+ BASIC_CFLAGS += -DNO_ICONV
+endif
+
+ifdef OLD_ICONV
+ BASIC_CFLAGS += -DOLD_ICONV
+endif
+
+ifdef NO_DEFLATE_BOUND
+ BASIC_CFLAGS += -DNO_DEFLATE_BOUND
+endif
+
+ifdef PPC_SHA1
+ SHA1_HEADER = "ppc/sha1.h"
+ LIB_OBJS += $(OUTPUT)ppc/sha1.o ppc/sha1ppc.o
+else
+ifdef ARM_SHA1
+ SHA1_HEADER = "arm/sha1.h"
+ LIB_OBJS += $(OUTPUT)arm/sha1.o $(OUTPUT)arm/sha1_arm.o
+else
+ifdef MOZILLA_SHA1
+ SHA1_HEADER = "mozilla-sha1/sha1.h"
+ LIB_OBJS += $(OUTPUT)mozilla-sha1/sha1.o
+else
+ SHA1_HEADER = <openssl/sha.h>
+ EXTLIBS += $(LIB_4_CRYPTO)
+endif
+endif
+endif
+ifdef NO_PERL_MAKEMAKER
+ export NO_PERL_MAKEMAKER
+endif
+ifdef NO_HSTRERROR
+ COMPAT_CFLAGS += -DNO_HSTRERROR
+ COMPAT_OBJS += $(OUTPUT)compat/hstrerror.o
+endif
+ifdef NO_MEMMEM
+ COMPAT_CFLAGS += -DNO_MEMMEM
+ COMPAT_OBJS += $(OUTPUT)compat/memmem.o
+endif
+ifdef INTERNAL_QSORT
+ COMPAT_CFLAGS += -DINTERNAL_QSORT
+ COMPAT_OBJS += $(OUTPUT)compat/qsort.o
+endif
+ifdef RUNTIME_PREFIX
+ COMPAT_CFLAGS += -DRUNTIME_PREFIX
+endif
+
+ifdef DIR_HAS_BSD_GROUP_SEMANTICS
+ COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS
+endif
+ifdef NO_EXTERNAL_GREP
+ BASIC_CFLAGS += -DNO_EXTERNAL_GREP
+endif
+
+ifeq ($(PERL_PATH),)
+NO_PERL=NoThanks
+endif
+
+QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
+QUIET_SUBDIR1 =
+
+ifneq ($(findstring $(MAKEFLAGS),w),w)
+PRINT_DIR = --no-print-directory
+else # "make -w"
+NO_SUBDIR = :
+endif
+
+ifneq ($(findstring $(MAKEFLAGS),s),s)
+ifndef V
+ QUIET_CC = @echo ' ' CC $@;
+ QUIET_AR = @echo ' ' AR $@;
+ QUIET_LINK = @echo ' ' LINK $@;
+ QUIET_MKDIR = @echo ' ' MKDIR $@;
+ QUIET_BUILT_IN = @echo ' ' BUILTIN $@;
+ QUIET_GEN = @echo ' ' GEN $@;
+ QUIET_SUBDIR0 = +@subdir=
+ QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
+ $(MAKE) $(PRINT_DIR) -C $$subdir
+ export V
+ export QUIET_GEN
+ export QUIET_BUILT_IN
+endif
+endif
+
+ifdef ASCIIDOC8
+ export ASCIIDOC8
+endif
+
+# Shell quote (do not use $(call) to accommodate ancient setups);
+
+SHA1_HEADER_SQ = $(subst ','\'',$(SHA1_HEADER))
+ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
+
+DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
+bindir_SQ = $(subst ','\'',$(bindir))
+bindir_relative_SQ = $(subst ','\'',$(bindir_relative))
+mandir_SQ = $(subst ','\'',$(mandir))
+infodir_SQ = $(subst ','\'',$(infodir))
+perfexecdir_SQ = $(subst ','\'',$(perfexecdir))
+template_dir_SQ = $(subst ','\'',$(template_dir))
+htmldir_SQ = $(subst ','\'',$(htmldir))
+prefix_SQ = $(subst ','\'',$(prefix))
+
+SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
+PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH))
+
+LIBS = $(PERFLIBS) $(EXTLIBS)
+
+BASIC_CFLAGS += -DSHA1_HEADER='$(SHA1_HEADER_SQ)' \
+ $(COMPAT_CFLAGS)
+LIB_OBJS += $(COMPAT_OBJS)
+
+ALL_CFLAGS += $(BASIC_CFLAGS)
+ALL_LDFLAGS += $(BASIC_LDFLAGS)
+
+export TAR INSTALL DESTDIR SHELL_PATH
+
+
+### Build rules
+
+SHELL = $(SHELL_PATH)
+
+all:: shell_compatibility_test $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) $(OUTPUT)PERF-BUILD-OPTIONS
+ifneq (,$X)
+ $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) perf$X)), test '$p' -ef '$p$X' || $(RM) '$p';)
+endif
+
+all::
+
+please_set_SHELL_PATH_to_a_more_modern_shell:
+ @$$(:)
+
+shell_compatibility_test: please_set_SHELL_PATH_to_a_more_modern_shell
+
+strip: $(PROGRAMS) $(OUTPUT)perf$X
+ $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf$X
+
+$(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \
+ '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
+ $(ALL_CFLAGS) -c $(filter %.c,$^) -o $@
+
+$(OUTPUT)perf$X: $(OUTPUT)perf.o $(BUILTIN_OBJS) $(PERFLIBS)
+ $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(OUTPUT)perf.o \
+ $(BUILTIN_OBJS) $(ALL_LDFLAGS) $(LIBS)
+
+$(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
+ '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
+ '-DPERF_MAN_PATH="$(mandir_SQ)"' \
+ '-DPERF_INFO_PATH="$(infodir_SQ)"' $<
+
+$(OUTPUT)builtin-timechart.o: builtin-timechart.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
+ '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
+ '-DPERF_MAN_PATH="$(mandir_SQ)"' \
+ '-DPERF_INFO_PATH="$(infodir_SQ)"' $<
+
+$(BUILT_INS): $(OUTPUT)perf$X
+ $(QUIET_BUILT_IN)$(RM) $@ && \
+ ln perf$X $@ 2>/dev/null || \
+ ln -s perf$X $@ 2>/dev/null || \
+ cp perf$X $@
+
+$(OUTPUT)common-cmds.h: util/generate-cmdlist.sh command-list.txt
+
+$(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
+ $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
+
+$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
+ $(QUIET_GEN)$(RM) $(OUTPUT)$@ $(OUTPUT)$@+ && \
+ sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
+ -e 's|@SHELL_PATH@|$(SHELL_PATH_SQ)|' \
+ -e 's|@@PERL@@|$(PERL_PATH_SQ)|g' \
+ -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \
+ -e 's/@@NO_CURL@@/$(NO_CURL)/g' \
+ $@.sh > $(OUTPUT)$@+ && \
+ chmod +x $(OUTPUT)$@+ && \
+ mv $(OUTPUT)$@+ $(OUTPUT)$@
+
+configure: configure.ac
+ $(QUIET_GEN)$(RM) $@ $<+ && \
+ sed -e 's/@@PERF_VERSION@@/$(PERF_VERSION)/g' \
+ $< > $<+ && \
+ autoconf -o $@ $<+ && \
+ $(RM) $<+
+
+# These can record PERF_VERSION
+$(OUTPUT)perf.o perf.spec \
+ $(patsubst %.sh,%,$(SCRIPT_SH)) \
+ $(patsubst %.perl,%,$(SCRIPT_PERL)) \
+ : $(OUTPUT)PERF-VERSION-FILE
+
+$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+$(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $<
+$(OUTPUT)%.o: %.S
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+
+$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
+ '-DPERF_EXEC_PATH="$(perfexecdir_SQ)"' \
+ '-DBINDIR="$(bindir_relative_SQ)"' \
+ '-DPREFIX="$(prefix_SQ)"' \
+ $<
+
+$(OUTPUT)builtin-init-db.o: builtin-init-db.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DDEFAULT_PERF_TEMPLATE_DIR='"$(template_dir_SQ)"' $<
+
+$(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
+$(OUTPUT)util/ui/browser.o: util/ui/browser.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
+
+$(OUTPUT)util/ui/browsers/annotate.o: util/ui/browsers/annotate.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
+
+$(OUTPUT)util/ui/browsers/hists.o: util/ui/browsers/hists.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
+
+$(OUTPUT)util/ui/browsers/map.o: util/ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DENABLE_SLFUTURE_CONST $<
+
+$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
+$(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
+
+$(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+
+$(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
+
+$(OUTPUT)scripts/python/Perf-Trace-Util/Context.o: scripts/python/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+
+$(OUTPUT)perf-%$X: %.o $(PERFLIBS)
+ $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
+
+$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
+$(patsubst perf-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
+builtin-revert.o wt-status.o: wt-status.h
+
+# We compile into subdirectories. If the target directory is not the source
+# directory, those subdirectories might not exist, so we make the various
+# files depend on their directories.
+DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
+$(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS)))
+# In the second step, we make a rule to actually create these directories
+$(sort $(dir $(DIRECTORY_DEPS))):
+ $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
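+# The "|" in the dependency above makes the directories order-only
+# prerequisites: they must exist before the objects are built, but their
+# timestamps never force a rebuild. A minimal sketch of the idiom, with
+# hypothetical names:
+#
+#	$(OUTPUT)foo.o: foo.c | $(OUTPUT)
+#	$(OUTPUT):
+#		mkdir -p $@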
+
+$(LIB_FILE): $(LIB_OBJS)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIB_OBJS)
+
+doc:
+ $(MAKE) -C Documentation all
+
+man:
+ $(MAKE) -C Documentation man
+
+html:
+ $(MAKE) -C Documentation html
+
+info:
+ $(MAKE) -C Documentation info
+
+pdf:
+ $(MAKE) -C Documentation pdf
+
+TAGS:
+ $(RM) TAGS
+ $(FIND) . -name '*.[hcS]' -print | xargs etags -a
+
+tags:
+ $(RM) tags
+ $(FIND) . -name '*.[hcS]' -print | xargs ctags -a
+
+cscope:
+ $(RM) cscope*
+ $(FIND) . -name '*.[hcS]' -print | xargs cscope -b
+
+### Detect prefix changes
+TRACK_CFLAGS = $(subst ','\'',$(ALL_CFLAGS)):\
+ $(bindir_SQ):$(perfexecdir_SQ):$(template_dir_SQ):$(prefix_SQ)
+
+$(OUTPUT)PERF-CFLAGS: .FORCE-PERF-CFLAGS
+ @FLAGS='$(TRACK_CFLAGS)'; \
+ if test x"$$FLAGS" != x"`cat $(OUTPUT)PERF-CFLAGS 2>/dev/null`" ; then \
+ echo 1>&2 " * new build flags or prefix"; \
+ echo "$$FLAGS" >$(OUTPUT)PERF-CFLAGS; \
+ fi
+
+# We need to apply sq twice, once to protect from the shell
+# that runs $(OUTPUT)PERF-BUILD-OPTIONS, and then again to protect it
+# and the first level quoting from the shell that runs "echo".
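+# For illustration (hypothetical value): one round of sq-quoting turns
+#	don't panic
+# into
+#	don'\''t panic
+# so that, wrapped in single quotes as 'don'\''t panic', it survives one
+# shell evaluation intact; the rule below applies that transformation once
+# per shell that will see the string.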
+$(OUTPUT)PERF-BUILD-OPTIONS: .FORCE-PERF-BUILD-OPTIONS
+ @echo SHELL_PATH=\''$(subst ','\'',$(SHELL_PATH_SQ))'\' >$@
+ @echo TAR=\''$(subst ','\'',$(subst ','\'',$(TAR)))'\' >>$@
+ @echo NO_CURL=\''$(subst ','\'',$(subst ','\'',$(NO_CURL)))'\' >>$@
+ @echo NO_PERL=\''$(subst ','\'',$(subst ','\'',$(NO_PERL)))'\' >>$@
+
+### Testing rules
+
+#
+# None right now:
+#
+# TEST_PROGRAMS += test-something$X
+
+all:: $(TEST_PROGRAMS)
+
+# GNU make supports exporting all variables by "export" without parameters.
+# However, the environment gets quite big, and some programs have problems
+# with that.
+
+export NO_SVN_TESTS
+
+check: $(OUTPUT)common-cmds.h
+ if sparse; \
+ then \
+ for i in *.c */*.c; \
+ do \
+ sparse $(ALL_CFLAGS) $(SPARSE_FLAGS) $$i || exit; \
+ done; \
+ else \
+		echo >&2 "Did you mean 'make test'?"; \
+ exit 1; \
+ fi
+
+remove-dashes:
+ ./fixup-builtins $(BUILT_INS) $(PROGRAMS) $(SCRIPTS)
+
+### Installation rules
+
+ifneq ($(filter /%,$(firstword $(template_dir))),)
+template_instdir = $(template_dir)
+else
+template_instdir = $(prefix)/$(template_dir)
+endif
+export template_instdir
+
+ifneq ($(filter /%,$(firstword $(perfexecdir))),)
+perfexec_instdir = $(perfexecdir)
+else
+perfexec_instdir = $(prefix)/$(perfexecdir)
+endif
+perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
+export perfexec_instdir
+
+install: all
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
+ $(INSTALL) $(OUTPUT)perf$X '$(DESTDIR_SQ)$(bindir_SQ)'
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
+ $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
+ $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'
+ $(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
+ $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
+ $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'
+ $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
+
+ifdef BUILT_INS
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ $(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ifneq (,$X)
+ $(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) $(OUTPUT)perf$X)), $(RM) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$p';)
+endif
+endif
+
+install-doc:
+ $(MAKE) -C Documentation install
+
+install-man:
+ $(MAKE) -C Documentation install-man
+
+install-html:
+ $(MAKE) -C Documentation install-html
+
+install-info:
+ $(MAKE) -C Documentation install-info
+
+install-pdf:
+ $(MAKE) -C Documentation install-pdf
+
+quick-install-doc:
+ $(MAKE) -C Documentation quick-install
+
+quick-install-man:
+ $(MAKE) -C Documentation quick-install-man
+
+quick-install-html:
+ $(MAKE) -C Documentation quick-install-html
+
+
+### Maintainer's dist rules
+#
+# None right now
+#
+#
+# perf.spec: perf.spec.in
+# sed -e 's/@@VERSION@@/$(PERF_VERSION)/g' < $< > $@+
+# mv $@+ $@
+#
+# PERF_TARNAME=perf-$(PERF_VERSION)
+# dist: perf.spec perf-archive$(X) configure
+# ./perf-archive --format=tar \
+# --prefix=$(PERF_TARNAME)/ HEAD^{tree} > $(PERF_TARNAME).tar
+# @mkdir -p $(PERF_TARNAME)
+# @cp perf.spec configure $(PERF_TARNAME)
+# @echo $(PERF_VERSION) > $(PERF_TARNAME)/version
+# $(TAR) rf $(PERF_TARNAME).tar \
+# $(PERF_TARNAME)/perf.spec \
+# $(PERF_TARNAME)/configure \
+# $(PERF_TARNAME)/version
+# @$(RM) -r $(PERF_TARNAME)
+# gzip -f -9 $(PERF_TARNAME).tar
+#
+# htmldocs = perf-htmldocs-$(PERF_VERSION)
+# manpages = perf-manpages-$(PERF_VERSION)
+# dist-doc:
+# $(RM) -r .doc-tmp-dir
+# mkdir .doc-tmp-dir
+# $(MAKE) -C Documentation WEBDOC_DEST=../.doc-tmp-dir install-webdoc
+# cd .doc-tmp-dir && $(TAR) cf ../$(htmldocs).tar .
+# gzip -n -9 -f $(htmldocs).tar
+# :
+# $(RM) -r .doc-tmp-dir
+# mkdir -p .doc-tmp-dir/man1 .doc-tmp-dir/man5 .doc-tmp-dir/man7
+# $(MAKE) -C Documentation DESTDIR=./ \
+# man1dir=../.doc-tmp-dir/man1 \
+# man5dir=../.doc-tmp-dir/man5 \
+# man7dir=../.doc-tmp-dir/man7 \
+# install
+# cd .doc-tmp-dir && $(TAR) cf ../$(manpages).tar .
+# gzip -n -9 -f $(manpages).tar
+# $(RM) -r .doc-tmp-dir
+#
+# rpm: dist
+# $(RPMBUILD) -ta $(PERF_TARNAME).tar.gz
+
+### Cleaning rules
+
+distclean: clean
+# $(RM) configure
+
+clean:
+ $(RM) *.o */*.o */*/*.o */*/*/*.o $(LIB_FILE)
+ $(RM) $(ALL_PROGRAMS) $(BUILT_INS) perf$X
+ $(RM) $(TEST_PROGRAMS)
+ $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
+ $(RM) -r autom4te.cache
+ $(RM) config.log config.mak.autogen config.mak.append config.status config.cache
+ $(RM) -r $(PERF_TARNAME) .doc-tmp-dir
+ $(RM) $(PERF_TARNAME).tar.gz perf-core_$(PERF_VERSION)-*.tar.gz
+ $(RM) $(htmldocs).tar.gz $(manpages).tar.gz
+ $(MAKE) -C Documentation/ clean
+ $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-BUILD-OPTIONS
+
+.PHONY: all install clean strip
+.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
+.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope .FORCE-PERF-CFLAGS
+.PHONY: .FORCE-PERF-BUILD-OPTIONS
+
+### Make sure built-ins do not have dups and are listed in perf.c
+#
+check-builtins::
+ ./check-builtins.sh
+
+### Test suite coverage testing
+#
+# None right now
+#
+# .PHONY: coverage coverage-clean coverage-build coverage-report
+#
+# coverage:
+# $(MAKE) coverage-build
+# $(MAKE) coverage-report
+#
+# coverage-clean:
+# rm -f *.gcda *.gcno
+#
+# COVERAGE_CFLAGS = $(CFLAGS) -O0 -ftest-coverage -fprofile-arcs
+# COVERAGE_LDFLAGS = $(CFLAGS) -O0 -lgcov
+#
+# coverage-build: coverage-clean
+# $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" all
+# $(MAKE) CFLAGS="$(COVERAGE_CFLAGS)" LDFLAGS="$(COVERAGE_LDFLAGS)" \
+# -j1 test
+#
+# coverage-report:
+# gcov -b *.c */*.c
+# grep '^function.*called 0 ' *.c.gcov */*.c.gcov \
+# | sed -e 's/\([^:]*\)\.gcov: *function \([^ ]*\) called.*/\1: \2/' \
+# | tee coverage-untested-functions
diff --git a/tools/perf/arch/arm/Makefile b/tools/perf/arch/arm/Makefile
new file mode 100644
index 00000000..15130b50
--- /dev/null
+++ b/tools/perf/arch/arm/Makefile
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+endif
diff --git a/tools/perf/arch/arm/util/dwarf-regs.c b/tools/perf/arch/arm/util/dwarf-regs.c
new file mode 100644
index 00000000..fff6450c
--- /dev/null
+++ b/tools/perf/arch/arm/util/dwarf-regs.c
@@ -0,0 +1,64 @@
+/*
+ * Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright (C) 2010 Will Deacon, ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <libio.h>
+#include <dwarf-regs.h>
+
+struct pt_regs_dwarfnum {
+ const char *name;
+ unsigned int dwarfnum;
+};
+
+#define STR(s) #s
+#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num}
+#define GPR_DWARFNUM_NAME(num) \
+ {.name = STR(%r##num), .dwarfnum = num}
+#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0}
+
+/*
+ * Reference:
+ * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0040a/IHI0040A_aadwarf.pdf
+ */
+static const struct pt_regs_dwarfnum regdwarfnum_table[] = {
+ GPR_DWARFNUM_NAME(0),
+ GPR_DWARFNUM_NAME(1),
+ GPR_DWARFNUM_NAME(2),
+ GPR_DWARFNUM_NAME(3),
+ GPR_DWARFNUM_NAME(4),
+ GPR_DWARFNUM_NAME(5),
+ GPR_DWARFNUM_NAME(6),
+ GPR_DWARFNUM_NAME(7),
+ GPR_DWARFNUM_NAME(8),
+ GPR_DWARFNUM_NAME(9),
+ GPR_DWARFNUM_NAME(10),
+ REG_DWARFNUM_NAME("%fp", 11),
+ REG_DWARFNUM_NAME("%ip", 12),
+ REG_DWARFNUM_NAME("%sp", 13),
+ REG_DWARFNUM_NAME("%lr", 14),
+ REG_DWARFNUM_NAME("%pc", 15),
+ REG_DWARFNUM_END,
+};
+
+/**
+ * get_arch_regstr() - look up a register name by its DWARF register number
+ * @n: the DWARF register number
+ *
+ * get_arch_regstr() returns the name of the register in
+ * regdwarfnum_table corresponding to the given DWARF register number. If
+ * the register is not found in the table, this returns NULL.
+ */
+const char *get_arch_regstr(unsigned int n)
+{
+ const struct pt_regs_dwarfnum *roff;
+ for (roff = regdwarfnum_table; roff->name != NULL; roff++)
+ if (roff->dwarfnum == n)
+ return roff->name;
+ return NULL;
+}
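+
+/*
+ * A minimal usage sketch (hypothetical caller, for illustration only; in
+ * this tree the lookup is used by the DWARF probe-finder code when
+ * DWARF_SUPPORT is enabled). On ARM, DWARF register number 13 maps to
+ * "%sp" in the table above:
+ *
+ *	const char *reg = get_arch_regstr(13);
+ *	if (reg)
+ *		printf("DWARF reg 13 is %s\n", reg);
+ */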
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
new file mode 100644
index 00000000..15130b50
--- /dev/null
+++ b/tools/perf/arch/powerpc/Makefile
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+endif
diff --git a/tools/perf/arch/powerpc/util/dwarf-regs.c b/tools/perf/arch/powerpc/util/dwarf-regs.c
new file mode 100644
index 00000000..48ae0c5e
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/dwarf-regs.c
@@ -0,0 +1,88 @@
+/*
+ * Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright (C) 2010 Ian Munsie, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <libio.h>
+#include <dwarf-regs.h>
+
+struct pt_regs_dwarfnum {
+ const char *name;
+ unsigned int dwarfnum;
+};
+
+#define STR(s) #s
+#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num}
+#define GPR_DWARFNUM_NAME(num) \
+ {.name = STR(%gpr##num), .dwarfnum = num}
+#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0}
+
+/*
+ * Reference:
+ * http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.html
+ */
+static const struct pt_regs_dwarfnum regdwarfnum_table[] = {
+ GPR_DWARFNUM_NAME(0),
+ GPR_DWARFNUM_NAME(1),
+ GPR_DWARFNUM_NAME(2),
+ GPR_DWARFNUM_NAME(3),
+ GPR_DWARFNUM_NAME(4),
+ GPR_DWARFNUM_NAME(5),
+ GPR_DWARFNUM_NAME(6),
+ GPR_DWARFNUM_NAME(7),
+ GPR_DWARFNUM_NAME(8),
+ GPR_DWARFNUM_NAME(9),
+ GPR_DWARFNUM_NAME(10),
+ GPR_DWARFNUM_NAME(11),
+ GPR_DWARFNUM_NAME(12),
+ GPR_DWARFNUM_NAME(13),
+ GPR_DWARFNUM_NAME(14),
+ GPR_DWARFNUM_NAME(15),
+ GPR_DWARFNUM_NAME(16),
+ GPR_DWARFNUM_NAME(17),
+ GPR_DWARFNUM_NAME(18),
+ GPR_DWARFNUM_NAME(19),
+ GPR_DWARFNUM_NAME(20),
+ GPR_DWARFNUM_NAME(21),
+ GPR_DWARFNUM_NAME(22),
+ GPR_DWARFNUM_NAME(23),
+ GPR_DWARFNUM_NAME(24),
+ GPR_DWARFNUM_NAME(25),
+ GPR_DWARFNUM_NAME(26),
+ GPR_DWARFNUM_NAME(27),
+ GPR_DWARFNUM_NAME(28),
+ GPR_DWARFNUM_NAME(29),
+ GPR_DWARFNUM_NAME(30),
+ GPR_DWARFNUM_NAME(31),
+ REG_DWARFNUM_NAME("%msr", 66),
+ REG_DWARFNUM_NAME("%ctr", 109),
+ REG_DWARFNUM_NAME("%link", 108),
+ REG_DWARFNUM_NAME("%xer", 101),
+ REG_DWARFNUM_NAME("%dar", 119),
+ REG_DWARFNUM_NAME("%dsisr", 118),
+ REG_DWARFNUM_END,
+};
+
+/**
+ * get_arch_regstr() - look up a register name by its DWARF register number
+ * @n: the DWARF register number
+ *
+ * get_arch_regstr() returns the name of the register in
+ * regdwarfnum_table corresponding to the given DWARF register number. If
+ * the register is not found in the table, this returns NULL.
+ */
+const char *get_arch_regstr(unsigned int n)
+{
+ const struct pt_regs_dwarfnum *roff;
+ for (roff = regdwarfnum_table; roff->name != NULL; roff++)
+ if (roff->dwarfnum == n)
+ return roff->name;
+ return NULL;
+}
diff --git a/tools/perf/arch/sh/Makefile b/tools/perf/arch/sh/Makefile
new file mode 100644
index 00000000..15130b50
--- /dev/null
+++ b/tools/perf/arch/sh/Makefile
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+endif
diff --git a/tools/perf/arch/sh/util/dwarf-regs.c b/tools/perf/arch/sh/util/dwarf-regs.c
new file mode 100644
index 00000000..a11edb00
--- /dev/null
+++ b/tools/perf/arch/sh/util/dwarf-regs.c
@@ -0,0 +1,55 @@
+/*
+ * Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright (C) 2010 Matt Fleming <matt@console-pimps.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <libio.h>
+#include <dwarf-regs.h>
+
+/*
+ * Generic dwarf analysis helpers
+ */
+
+#define SH_MAX_REGS 18
+const char *sh_regs_table[SH_MAX_REGS] = {
+ "r0",
+ "r1",
+ "r2",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "r9",
+ "r10",
+ "r11",
+ "r12",
+ "r13",
+ "r14",
+ "r15",
+ "pc",
+ "pr",
+};
+
+/* Return architecture dependent register string (for kprobe-tracer) */
+const char *get_arch_regstr(unsigned int n)
+{
+	return (n < SH_MAX_REGS) ? sh_regs_table[n] : NULL;
+}
diff --git a/tools/perf/arch/sparc/Makefile b/tools/perf/arch/sparc/Makefile
new file mode 100644
index 00000000..15130b50
--- /dev/null
+++ b/tools/perf/arch/sparc/Makefile
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+endif
diff --git a/tools/perf/arch/sparc/util/dwarf-regs.c b/tools/perf/arch/sparc/util/dwarf-regs.c
new file mode 100644
index 00000000..0ab88483
--- /dev/null
+++ b/tools/perf/arch/sparc/util/dwarf-regs.c
@@ -0,0 +1,43 @@
+/*
+ * Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <libio.h>
+#include <dwarf-regs.h>
+
+#define SPARC_MAX_REGS 96
+
+const char *sparc_regs_table[SPARC_MAX_REGS] = {
+ "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
+ "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%sp", "%o7",
+ "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
+ "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%fp", "%i7",
+ "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
+ "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
+ "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
+ "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
+ "%f32", "%f33", "%f34", "%f35", "%f36", "%f37", "%f38", "%f39",
+ "%f40", "%f41", "%f42", "%f43", "%f44", "%f45", "%f46", "%f47",
+ "%f48", "%f49", "%f50", "%f51", "%f52", "%f53", "%f54", "%f55",
+ "%f56", "%f57", "%f58", "%f59", "%f60", "%f61", "%f62", "%f63",
+};
+
+/**
+ * get_arch_regstr() - look up a register name by its DWARF register number
+ * @n: the DWARF register number
+ *
+ * get_arch_regstr() returns the name of the register in sparc_regs_table
+ * indexed by the given DWARF register number. If the number is out of
+ * range, this returns NULL.
+ */
+const char *get_arch_regstr(unsigned int n)
+{
+	return (n < SPARC_MAX_REGS) ? sparc_regs_table[n] : NULL;
+}
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
new file mode 100644
index 00000000..15130b50
--- /dev/null
+++ b/tools/perf/arch/x86/Makefile
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+endif
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
new file mode 100644
index 00000000..a794d308
--- /dev/null
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -0,0 +1,75 @@
+/*
+ * dwarf-regs.c : Mapping of DWARF debug register numbers into register names.
+ * Extracted from probe-finder.c
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <libio.h>
+#include <dwarf-regs.h>
+
+/*
+ * Generic dwarf analysis helpers
+ */
+
+#define X86_32_MAX_REGS 8
+const char *x86_32_regs_table[X86_32_MAX_REGS] = {
+ "%ax",
+ "%cx",
+ "%dx",
+ "%bx",
+ "$stack", /* Stack address instead of %sp */
+ "%bp",
+ "%si",
+ "%di",
+};
+
+#define X86_64_MAX_REGS 16
+const char *x86_64_regs_table[X86_64_MAX_REGS] = {
+ "%ax",
+ "%dx",
+ "%cx",
+ "%bx",
+ "%si",
+ "%di",
+ "%bp",
+ "%sp",
+ "%r8",
+ "%r9",
+ "%r10",
+ "%r11",
+ "%r12",
+ "%r13",
+ "%r14",
+ "%r15",
+};
+
+/* TODO: switch tables by DWARF address size */
+#ifdef __x86_64__
+#define ARCH_MAX_REGS X86_64_MAX_REGS
+#define arch_regs_table x86_64_regs_table
+#else
+#define ARCH_MAX_REGS X86_32_MAX_REGS
+#define arch_regs_table x86_32_regs_table
+#endif
+
+/* Return architecture dependent register string (for kprobe-tracer) */
+const char *get_arch_regstr(unsigned int n)
+{
+	return (n < ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
+}
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
new file mode 100644
index 00000000..f7781c62
--- /dev/null
+++ b/tools/perf/bench/bench.h
@@ -0,0 +1,17 @@
+#ifndef BENCH_H
+#define BENCH_H
+
+extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
+extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
+extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __used);
+
+#define BENCH_FORMAT_DEFAULT_STR "default"
+#define BENCH_FORMAT_DEFAULT 0
+#define BENCH_FORMAT_SIMPLE_STR "simple"
+#define BENCH_FORMAT_SIMPLE 1
+
+#define BENCH_FORMAT_UNKNOWN -1
+
+extern int bench_format;
+
+#endif
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
new file mode 100644
index 00000000..38dae746
--- /dev/null
+++ b/tools/perf/bench/mem-memcpy.c
@@ -0,0 +1,192 @@
+/*
+ * mem-memcpy.c
+ *
+ * memcpy: Simple memory copy in various ways
+ *
+ * Written by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
+ */
+#include <ctype.h>
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/parse-options.h"
+#include "../util/header.h"
+#include "bench.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+#include <errno.h>
+
+#define K 1024
+
+static const char *length_str = "1MB";
+static const char *routine = "default";
+static bool use_clock = false;
+static int clock_fd;
+
+static const struct option options[] = {
+ OPT_STRING('l', "length", &length_str, "1MB",
+ "Specify length of memory to copy. "
+		    "Available units: B, MB, GB (upper or lower case)"),
+ OPT_STRING('r', "routine", &routine, "default",
+ "Specify routine to copy"),
+ OPT_BOOLEAN('c', "clock", &use_clock,
+ "Use CPU clock for measuring"),
+ OPT_END()
+};
+
+struct routine {
+ const char *name;
+ const char *desc;
+ void * (*fn)(void *dst, const void *src, size_t len);
+};
+
+struct routine routines[] = {
+ { "default",
+ "Default memcpy() provided by glibc",
+ memcpy },
+ { NULL,
+ NULL,
+ NULL }
+};
+
+static const char * const bench_mem_memcpy_usage[] = {
+ "perf bench mem memcpy <options>",
+ NULL
+};
+
+static struct perf_event_attr clock_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES
+};
+
+static void init_clock(void)
+{
+ clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0);
+
+ if (clock_fd < 0 && errno == ENOSYS)
+ die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+ else
+ BUG_ON(clock_fd < 0);
+}
+
+static u64 get_clock(void)
+{
+ int ret;
+ u64 clk;
+
+ ret = read(clock_fd, &clk, sizeof(u64));
+ BUG_ON(ret != sizeof(u64));
+
+ return clk;
+}
+
+static double timeval2double(struct timeval *ts)
+{
+ return (double)ts->tv_sec +
+ (double)ts->tv_usec / (double)1000000;
+}
+
+int bench_mem_memcpy(int argc, const char **argv,
+ const char *prefix __used)
+{
+ int i;
+ void *dst, *src;
+ size_t length;
+ double bps = 0.0;
+ struct timeval tv_start, tv_end, tv_diff;
+ u64 clock_start, clock_end, clock_diff;
+
+ clock_start = clock_end = clock_diff = 0ULL;
+ argc = parse_options(argc, argv, options,
+ bench_mem_memcpy_usage, 0);
+
+ tv_diff.tv_sec = 0;
+ tv_diff.tv_usec = 0;
+ length = (size_t)perf_atoll((char *)length_str);
+
+ if ((s64)length <= 0) {
+ fprintf(stderr, "Invalid length:%s\n", length_str);
+ return 1;
+ }
+
+ for (i = 0; routines[i].name; i++) {
+ if (!strcmp(routines[i].name, routine))
+ break;
+ }
+ if (!routines[i].name) {
+ printf("Unknown routine:%s\n", routine);
+ printf("Available routines...\n");
+ for (i = 0; routines[i].name; i++) {
+ printf("\t%s ... %s\n",
+ routines[i].name, routines[i].desc);
+ }
+ return 1;
+ }
+
+ dst = zalloc(length);
+ if (!dst)
+ die("memory allocation failed - maybe length is too large?\n");
+
+ src = zalloc(length);
+ if (!src)
+ die("memory allocation failed - maybe length is too large?\n");
+
+ if (bench_format == BENCH_FORMAT_DEFAULT) {
+ printf("# Copying %s Bytes from %p to %p ...\n\n",
+ length_str, src, dst);
+ }
+
+ if (use_clock) {
+ init_clock();
+ clock_start = get_clock();
+ } else {
+ BUG_ON(gettimeofday(&tv_start, NULL));
+ }
+
+ routines[i].fn(dst, src, length);
+
+ if (use_clock) {
+ clock_end = get_clock();
+ clock_diff = clock_end - clock_start;
+ } else {
+ BUG_ON(gettimeofday(&tv_end, NULL));
+ timersub(&tv_end, &tv_start, &tv_diff);
+ bps = (double)((double)length / timeval2double(&tv_diff));
+ }
+
+ switch (bench_format) {
+ case BENCH_FORMAT_DEFAULT:
+ if (use_clock) {
+ printf(" %14lf Clock/Byte\n",
+ (double)clock_diff / (double)length);
+ } else {
+ if (bps < K)
+ printf(" %14lf B/Sec\n", bps);
+ else if (bps < K * K)
+			printf(" %14lf KB/Sec\n", bps / 1024);
+ else if (bps < K * K * K)
+ printf(" %14lf MB/Sec\n", bps / 1024 / 1024);
+ else {
+ printf(" %14lf GB/Sec\n",
+ bps / 1024 / 1024 / 1024);
+ }
+ }
+ break;
+ case BENCH_FORMAT_SIMPLE:
+ if (use_clock) {
+ printf("%14lf\n",
+ (double)clock_diff / (double)length);
+ } else
+ printf("%lf\n", bps);
+ break;
+ default:
+		/* reaching this means something has gone badly wrong: */
+ die("unknown format: %d\n", bench_format);
+ break;
+ }
+
+ return 0;
+}
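+
+/*
+ * Example invocations (illustrative), built from the options above:
+ *
+ *	perf bench mem memcpy -l 1MB
+ *	perf bench mem memcpy -l 500MB -c
+ *
+ * The second form measures in CPU cycles via a hardware perf counter
+ * instead of gettimeofday().
+ */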
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
new file mode 100644
index 00000000..d1d1b30f
--- /dev/null
+++ b/tools/perf/bench/sched-messaging.c
@@ -0,0 +1,336 @@
+/*
+ *
+ * sched-messaging.c
+ *
+ * messaging: Benchmark for scheduler and IPC mechanisms
+ *
+ * Based on hackbench by Rusty Russell <rusty@rustcorp.com.au>
+ * Ported to perf by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
+ *
+ */
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/parse-options.h"
+#include "../builtin.h"
+#include "bench.h"
+
+/* Test groups of 20 processes spraying to 20 receivers */
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <sys/time.h>
+#include <sys/poll.h>
+#include <limits.h>
+
+#define DATASIZE 100
+
+static bool use_pipes = false;
+static unsigned int loops = 100;
+static bool thread_mode = false;
+static unsigned int num_groups = 10;
+
+struct sender_context {
+ unsigned int num_fds;
+ int ready_out;
+ int wakefd;
+ int out_fds[0];
+};
+
+struct receiver_context {
+ unsigned int num_packets;
+ int in_fds[2];
+ int ready_out;
+ int wakefd;
+};
+
+static void barf(const char *msg)
+{
+ fprintf(stderr, "%s (error: %s)\n", msg, strerror(errno));
+ exit(1);
+}
+
+static void fdpair(int fds[2])
+{
+ if (use_pipes) {
+ if (pipe(fds) == 0)
+ return;
+ } else {
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0)
+ return;
+ }
+
+ barf(use_pipes ? "pipe()" : "socketpair()");
+}
+
+/* Block until we're ready to go */
+static void ready(int ready_out, int wakefd)
+{
+ char dummy;
+ struct pollfd pollfd = { .fd = wakefd, .events = POLLIN };
+
+ /* Tell them we're ready. */
+ if (write(ready_out, &dummy, 1) != 1)
+ barf("CLIENT: ready write");
+
+ /* Wait for "GO" signal */
+ if (poll(&pollfd, 1, -1) != 1)
+ barf("poll");
+}
+
+/* Sender sprays loops messages down each file descriptor */
+static void *sender(struct sender_context *ctx)
+{
+ char data[DATASIZE];
+ unsigned int i, j;
+
+ ready(ctx->ready_out, ctx->wakefd);
+
+ /* Now pump to every receiver. */
+ for (i = 0; i < loops; i++) {
+ for (j = 0; j < ctx->num_fds; j++) {
+ int ret, done = 0;
+
+again:
+ ret = write(ctx->out_fds[j], data + done,
+ sizeof(data)-done);
+ if (ret < 0)
+ barf("SENDER: write");
+ done += ret;
+ if (done < DATASIZE)
+ goto again;
+ }
+ }
+
+ return NULL;
+}
+
+
+/* One receiver per fd */
+static void *receiver(struct receiver_context* ctx)
+{
+ unsigned int i;
+
+ if (!thread_mode)
+ close(ctx->in_fds[1]);
+
+ /* Wait for start... */
+ ready(ctx->ready_out, ctx->wakefd);
+
+ /* Receive them all */
+ for (i = 0; i < ctx->num_packets; i++) {
+ char data[DATASIZE];
+ int ret, done = 0;
+
+again:
+ ret = read(ctx->in_fds[0], data + done, DATASIZE - done);
+ if (ret < 0)
+ barf("SERVER: read");
+ done += ret;
+ if (done < DATASIZE)
+ goto again;
+ }
+
+ return NULL;
+}
+
+static pthread_t create_worker(void *ctx, void *(*func)(void *))
+{
+ pthread_attr_t attr;
+ pthread_t childid;
+ int err;
+
+ if (!thread_mode) {
+ /* process mode */
+ /* Fork the receiver. */
+ switch (fork()) {
+ case -1:
+ barf("fork()");
+ break;
+ case 0:
+ (*func) (ctx);
+ exit(0);
+ break;
+ default:
+ break;
+ }
+
+ return (pthread_t)0;
+ }
+
+ if (pthread_attr_init(&attr) != 0)
+ barf("pthread_attr_init:");
+
+#ifndef __ia64__
+ if (pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN) != 0)
+ barf("pthread_attr_setstacksize");
+#endif
+
+ err = pthread_create(&childid, &attr, func, ctx);
+ if (err != 0) {
+ fprintf(stderr, "pthread_create failed: %s (%d)\n",
+ strerror(err), err);
+ exit(-1);
+ }
+ return childid;
+}
+
+static void reap_worker(pthread_t id)
+{
+ int proc_status;
+ void *thread_status;
+
+ if (!thread_mode) {
+ /* process mode */
+ wait(&proc_status);
+ if (!WIFEXITED(proc_status))
+ exit(1);
+ } else {
+ pthread_join(id, &thread_status);
+ }
+}
+
+/* One group of senders and receivers */
+static unsigned int group(pthread_t *pth,
+ unsigned int num_fds,
+ int ready_out,
+ int wakefd)
+{
+ unsigned int i;
+ struct sender_context *snd_ctx = malloc(sizeof(struct sender_context)
+ + num_fds * sizeof(int));
+
+ if (!snd_ctx)
+ barf("malloc()");
+
+ for (i = 0; i < num_fds; i++) {
+ int fds[2];
+ struct receiver_context *ctx = malloc(sizeof(*ctx));
+
+ if (!ctx)
+ barf("malloc()");
+
+ /* Create the pipe between client and server */
+ fdpair(fds);
+
+ ctx->num_packets = num_fds * loops;
+ ctx->in_fds[0] = fds[0];
+ ctx->in_fds[1] = fds[1];
+ ctx->ready_out = ready_out;
+ ctx->wakefd = wakefd;
+
+ pth[i] = create_worker(ctx, (void *)receiver);
+
+ snd_ctx->out_fds[i] = fds[1];
+ if (!thread_mode)
+ close(fds[0]);
+ }
+
+ /* Now we have all the fds, fork the senders */
+ for (i = 0; i < num_fds; i++) {
+ snd_ctx->ready_out = ready_out;
+ snd_ctx->wakefd = wakefd;
+ snd_ctx->num_fds = num_fds;
+
+ pth[num_fds+i] = create_worker(snd_ctx, (void *)sender);
+ }
+
+ /* Close the fds we have left */
+ if (!thread_mode)
+ for (i = 0; i < num_fds; i++)
+ close(snd_ctx->out_fds[i]);
+
+ /* Return number of children to reap */
+ return num_fds * 2;
+}
+
+static const struct option options[] = {
+ OPT_BOOLEAN('p', "pipe", &use_pipes,
+ "Use pipe() instead of socketpair()"),
+ OPT_BOOLEAN('t', "thread", &thread_mode,
+ "Be multi thread instead of multi process"),
+ OPT_UINTEGER('g', "group", &num_groups, "Specify number of groups"),
+ OPT_UINTEGER('l', "loop", &loops, "Specify number of loops"),
+ OPT_END()
+};
+
+static const char * const bench_sched_message_usage[] = {
+ "perf bench sched messaging <options>",
+ NULL
+};
+
+int bench_sched_messaging(int argc, const char **argv,
+ const char *prefix __used)
+{
+ unsigned int i, total_children;
+ struct timeval start, stop, diff;
+ unsigned int num_fds = 20;
+ int readyfds[2], wakefds[2];
+ char dummy;
+ pthread_t *pth_tab;
+
+ argc = parse_options(argc, argv, options,
+ bench_sched_message_usage, 0);
+
+ pth_tab = malloc(num_fds * 2 * num_groups * sizeof(pthread_t));
+ if (!pth_tab)
+ barf("main:malloc()");
+
+ fdpair(readyfds);
+ fdpair(wakefds);
+
+ total_children = 0;
+ for (i = 0; i < num_groups; i++)
+ total_children += group(pth_tab+total_children, num_fds,
+ readyfds[1], wakefds[0]);
+
+ /* Wait for everyone to be ready */
+ for (i = 0; i < total_children; i++)
+ if (read(readyfds[0], &dummy, 1) != 1)
+ barf("Reading for readyfds");
+
+ gettimeofday(&start, NULL);
+
+ /* Kick them off */
+ if (write(wakefds[1], &dummy, 1) != 1)
+ barf("Writing to start them");
+
+ /* Reap them all */
+ for (i = 0; i < total_children; i++)
+ reap_worker(pth_tab[i]);
+
+ gettimeofday(&stop, NULL);
+
+ timersub(&stop, &start, &diff);
+
+ switch (bench_format) {
+ case BENCH_FORMAT_DEFAULT:
+ printf("# %d sender and receiver %s per group\n",
+ num_fds, thread_mode ? "threads" : "processes");
+ printf("# %d groups == %d %s run\n\n",
+ num_groups, num_groups * 2 * num_fds,
+ thread_mode ? "threads" : "processes");
+ printf(" %14s: %lu.%03lu [sec]\n", "Total time",
+ diff.tv_sec,
+ (unsigned long) (diff.tv_usec/1000));
+ break;
+ case BENCH_FORMAT_SIMPLE:
+ printf("%lu.%03lu\n", diff.tv_sec,
+ (unsigned long) (diff.tv_usec/1000));
+ break;
+ default:
+		/* reaching here means something has gone badly wrong */
+ fprintf(stderr, "Unknown format:%d\n", bench_format);
+ exit(1);
+ break;
+ }
+
+ return 0;
+}
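+
+/*
+ * Example invocations (illustrative), built from the options above:
+ *
+ *	perf bench sched messaging
+ *		(defaults: 10 groups of 20 senders/receivers as processes,
+ *		 100 loops, socketpairs)
+ *	perf bench sched messaging -t -g 20 -l 1000
+ *		(threads instead of processes, 20 groups, 1000 loops)
+ *	perf bench sched messaging -p
+ *		(pipes instead of socketpairs)
+ */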
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
new file mode 100644
index 00000000..d9ab3ce4
--- /dev/null
+++ b/tools/perf/bench/sched-pipe.c
@@ -0,0 +1,127 @@
+/*
+ *
+ * sched-pipe.c
+ *
+ * pipe: Benchmark for pipe()
+ *
+ * Based on pipe-test-1m.c by Ingo Molnar <mingo@redhat.com>
+ * http://people.redhat.com/mingo/cfs-scheduler/tools/pipe-test-1m.c
+ * Ported to perf by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
+ *
+ */
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/parse-options.h"
+#include "../builtin.h"
+#include "bench.h"
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <sys/wait.h>
+#include <linux/unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#define LOOPS_DEFAULT 1000000
+static int loops = LOOPS_DEFAULT;
+
+static const struct option options[] = {
+ OPT_INTEGER('l', "loop", &loops,
+ "Specify number of loops"),
+ OPT_END()
+};
+
+static const char * const bench_sched_pipe_usage[] = {
+ "perf bench sched pipe <options>",
+ NULL
+};
+
+int bench_sched_pipe(int argc, const char **argv,
+ const char *prefix __used)
+{
+ int pipe_1[2], pipe_2[2];
+ int m = 0, i;
+ struct timeval start, stop, diff;
+ unsigned long long result_usec = 0;
+
+	/*
+	 * "ret" exists only because discarding the return value of
+	 * read()/write() triggers a warning that is treated as an
+	 * error in the perf build environment.
+	 */
+ int ret, wait_stat;
+ pid_t pid, retpid;
+
+ argc = parse_options(argc, argv, options,
+ bench_sched_pipe_usage, 0);
+
+ assert(!pipe(pipe_1));
+ assert(!pipe(pipe_2));
+
+ pid = fork();
+ assert(pid >= 0);
+
+ gettimeofday(&start, NULL);
+
+ if (!pid) {
+ for (i = 0; i < loops; i++) {
+ ret = read(pipe_1[0], &m, sizeof(int));
+ ret = write(pipe_2[1], &m, sizeof(int));
+ }
+ } else {
+ for (i = 0; i < loops; i++) {
+ ret = write(pipe_1[1], &m, sizeof(int));
+ ret = read(pipe_2[0], &m, sizeof(int));
+ }
+ }
+
+ gettimeofday(&stop, NULL);
+ timersub(&stop, &start, &diff);
+
+ if (pid) {
+ retpid = waitpid(pid, &wait_stat, 0);
+ assert((retpid == pid) && WIFEXITED(wait_stat));
+ } else {
+ exit(0);
+ }
+
+ switch (bench_format) {
+ case BENCH_FORMAT_DEFAULT:
+ printf("# Executed %d pipe operations between two tasks\n\n",
+ loops);
+
+ result_usec = diff.tv_sec * 1000000;
+ result_usec += diff.tv_usec;
+
+ printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
+ diff.tv_sec,
+ (unsigned long) (diff.tv_usec/1000));
+
+ printf(" %14lf usecs/op\n",
+ (double)result_usec / (double)loops);
+ printf(" %14d ops/sec\n",
+ (int)((double)loops /
+ ((double)result_usec / (double)1000000)));
+ break;
+
+ case BENCH_FORMAT_SIMPLE:
+ printf("%lu.%03lu\n",
+ diff.tv_sec,
+ (unsigned long) (diff.tv_usec / 1000));
+ break;
+
+ default:
+		/* reaching here means something has gone badly wrong */
+ fprintf(stderr, "Unknown format:%d\n", bench_format);
+ exit(1);
+ break;
+ }
+
+ return 0;
+}
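+
+/*
+ * Example invocation (illustrative): run 100000 read/write round trips
+ * between the parent and the forked child over the two pipes above:
+ *
+ *	perf bench sched pipe -l 100000
+ */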
diff --git a/tools/perf/build.sh b/tools/perf/build.sh
new file mode 100755
index 00000000..41601e0a
--- /dev/null
+++ b/tools/perf/build.sh
@@ -0,0 +1,10 @@
+#!/bin/sh -x
+
+CROSS_COMPILE="ccache /pub/toolchains/gcc-4.4.1/bin/arm-none-linux-gnueabi-"
+
+OPTIONS="NO_DWARF=yes NO_NEWT=yes"
+#DEBUG="V=2" # V=2 --just-print
+JOBS="-j 4"
+STATIC="LDFLAGS=-static"
+make CC="${CROSS_COMPILE}gcc" ${STATIC} ${OPTIONS} ARCH=arm EXTRA_CFLAGS+="-Iexternal/usr/include -Lexternal/usr/lib" ${DEBUG} ${JOBS}
+${CROSS_COMPILE}strip perf
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
new file mode 100644
index 00000000..1478dc64
--- /dev/null
+++ b/tools/perf/builtin-annotate.c
@@ -0,0 +1,473 @@
+/*
+ * builtin-annotate.c
+ *
+ * Builtin annotate command: Analyze the perf.data input file,
+ * look up and read DSOs and symbol information and display
+ * a histogram of results, along various sorting keys.
+ */
+#include "builtin.h"
+
+#include "util/util.h"
+
+#include "util/color.h"
+#include <linux/list.h>
+#include "util/cache.h"
+#include <linux/rbtree.h>
+#include "util/symbol.h"
+
+#include "perf.h"
+#include "util/debug.h"
+
+#include "util/event.h"
+#include "util/parse-options.h"
+#include "util/parse-events.h"
+#include "util/thread.h"
+#include "util/sort.h"
+#include "util/hist.h"
+#include "util/session.h"
+
+static char const *input_name = "perf.data";
+
+static bool force;
+
+static bool full_paths;
+
+static bool print_line;
+
+static const char *sym_hist_filter;
+
+static int hists__add_entry(struct hists *self, struct addr_location *al)
+{
+ struct hist_entry *he;
+
+ if (sym_hist_filter != NULL &&
+ (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) {
+ /* We're only interested in a symbol named sym_hist_filter */
+ if (al->sym != NULL) {
+ rb_erase(&al->sym->rb_node,
+ &al->map->dso->symbols[al->map->type]);
+ symbol__delete(al->sym);
+ }
+ return 0;
+ }
+
+ he = __hists__add_entry(self, al, NULL, 1);
+ if (he == NULL)
+ return -ENOMEM;
+
+ return hist_entry__inc_addr_samples(he, al->addr);
+}
+
+static int process_sample_event(event_t *event, struct perf_session *session)
+{
+ struct addr_location al;
+ struct sample_data data;
+
+ if (event__preprocess_sample(event, session, &al, &data, NULL) < 0) {
+ pr_warning("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ if (!al.filtered && hists__add_entry(&session->hists, &al)) {
+ pr_warning("problem incrementing symbol count, "
+ "skipping event\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int objdump_line__print(struct objdump_line *self,
+ struct list_head *head,
+ struct hist_entry *he, u64 len)
+{
+ struct symbol *sym = he->ms.sym;
+ static const char *prev_line;
+ static const char *prev_color;
+
+ if (self->offset != -1) {
+ const char *path = NULL;
+ unsigned int hits = 0;
+ double percent = 0.0;
+ const char *color;
+ struct sym_priv *priv = symbol__priv(sym);
+ struct sym_ext *sym_ext = priv->ext;
+ struct sym_hist *h = priv->hist;
+ s64 offset = self->offset;
+ struct objdump_line *next = objdump__get_next_ip_line(head, self);
+
+ while (offset < (s64)len &&
+ (next == NULL || offset < next->offset)) {
+ if (sym_ext) {
+ if (path == NULL)
+ path = sym_ext[offset].path;
+ percent += sym_ext[offset].percent;
+ } else
+ hits += h->ip[offset];
+
+ ++offset;
+ }
+
+ if (sym_ext == NULL && h->sum)
+ percent = 100.0 * hits / h->sum;
+
+ color = get_percent_color(percent);
+
+ /*
+ * Also color the filename and line if needed, with
+		 * the same color as the percentage. Don't print it
+		 * twice for nearby colored IPs with the same filename:line.
+ */
+ if (path) {
+ if (!prev_line || strcmp(prev_line, path)
+ || color != prev_color) {
+ color_fprintf(stdout, color, " %s", path);
+ prev_line = path;
+ prev_color = color;
+ }
+ }
+
+ color_fprintf(stdout, color, " %7.2f", percent);
+ printf(" : ");
+ color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", self->line);
+ } else {
+ if (!*self->line)
+ printf(" :\n");
+ else
+ printf(" : %s\n", self->line);
+ }
+
+ return 0;
+}
+
+static struct rb_root root_sym_ext;
+
+static void insert_source_line(struct sym_ext *sym_ext)
+{
+ struct sym_ext *iter;
+ struct rb_node **p = &root_sym_ext.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct sym_ext, node);
+
+ if (sym_ext->percent > iter->percent)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&sym_ext->node, parent, p);
+ rb_insert_color(&sym_ext->node, &root_sym_ext);
+}
+
+static void free_source_line(struct hist_entry *he, int len)
+{
+ struct sym_priv *priv = symbol__priv(he->ms.sym);
+ struct sym_ext *sym_ext = priv->ext;
+ int i;
+
+ if (!sym_ext)
+ return;
+
+ for (i = 0; i < len; i++)
+ free(sym_ext[i].path);
+ free(sym_ext);
+
+ priv->ext = NULL;
+ root_sym_ext = RB_ROOT;
+}
+
+/* Get the filename:line for the colored entries */
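+/*
+ * This shells out to addr2line: for each hot offset it runs
+ * "addr2line -e <object> <address>" and keeps the single "file:line"
+ * string addr2line prints, e.g. (purely illustrative output):
+ *
+ *	$ addr2line -e ./perf 0x4a1f30
+ *	builtin-annotate.c:123
+ */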
+static void
+get_source_line(struct hist_entry *he, int len, const char *filename)
+{
+ struct symbol *sym = he->ms.sym;
+ u64 start;
+ int i;
+ char cmd[PATH_MAX * 2];
+ struct sym_ext *sym_ext;
+ struct sym_priv *priv = symbol__priv(sym);
+ struct sym_hist *h = priv->hist;
+
+ if (!h->sum)
+ return;
+
+ sym_ext = priv->ext = calloc(len, sizeof(struct sym_ext));
+ if (!priv->ext)
+ return;
+
+ start = he->ms.map->unmap_ip(he->ms.map, sym->start);
+
+ for (i = 0; i < len; i++) {
+ char *path = NULL;
+ size_t line_len;
+ u64 offset;
+ FILE *fp;
+
+ sym_ext[i].percent = 100.0 * h->ip[i] / h->sum;
+ if (sym_ext[i].percent <= 0.5)
+ continue;
+
+ offset = start + i;
+ sprintf(cmd, "addr2line -e %s %016llx", filename, offset);
+ fp = popen(cmd, "r");
+ if (!fp)
+ continue;
+
+ if (getline(&path, &line_len, fp) < 0 || !line_len)
+ goto next;
+
+ sym_ext[i].path = malloc(sizeof(char) * line_len + 1);
+ if (!sym_ext[i].path)
+ goto next;
+
+ strcpy(sym_ext[i].path, path);
+ insert_source_line(&sym_ext[i]);
+
+ next:
+ pclose(fp);
+ }
+}
+
+static void print_summary(const char *filename)
+{
+ struct sym_ext *sym_ext;
+ struct rb_node *node;
+
+ printf("\nSorted summary for file %s\n", filename);
+ printf("----------------------------------------------\n\n");
+
+ if (RB_EMPTY_ROOT(&root_sym_ext)) {
+ printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
+ return;
+ }
+
+ node = rb_first(&root_sym_ext);
+ while (node) {
+ double percent;
+ const char *color;
+ char *path;
+
+ sym_ext = rb_entry(node, struct sym_ext, node);
+ percent = sym_ext->percent;
+ color = get_percent_color(percent);
+ path = sym_ext->path;
+
+ color_fprintf(stdout, color, " %7.2f %s", percent, path);
+ node = rb_next(node);
+ }
+}
+
+static void hist_entry__print_hits(struct hist_entry *self)
+{
+ struct symbol *sym = self->ms.sym;
+ struct sym_priv *priv = symbol__priv(sym);
+ struct sym_hist *h = priv->hist;
+ u64 len = sym->end - sym->start, offset;
+
+ for (offset = 0; offset < len; ++offset)
+ if (h->ip[offset] != 0)
+ printf("%*Lx: %Lu\n", BITS_PER_LONG / 2,
+ sym->start + offset, h->ip[offset]);
+ printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum);
+}
+
+static int hist_entry__tty_annotate(struct hist_entry *he)
+{
+ struct map *map = he->ms.map;
+ struct dso *dso = map->dso;
+ struct symbol *sym = he->ms.sym;
+ const char *filename = dso->long_name, *d_filename;
+ u64 len;
+ LIST_HEAD(head);
+ struct objdump_line *pos, *n;
+
+ if (hist_entry__annotate(he, &head, 0) < 0)
+ return -1;
+
+ if (full_paths)
+ d_filename = filename;
+ else
+ d_filename = basename(filename);
+
+ len = sym->end - sym->start;
+
+ if (print_line) {
+ get_source_line(he, len, filename);
+ print_summary(filename);
+ }
+
+ printf("\n\n------------------------------------------------\n");
+ printf(" Percent | Source code & Disassembly of %s\n", d_filename);
+ printf("------------------------------------------------\n");
+
+ if (verbose)
+ hist_entry__print_hits(he);
+
+ list_for_each_entry_safe(pos, n, &head, node) {
+ objdump_line__print(pos, &head, he, len);
+ list_del(&pos->node);
+ objdump_line__free(pos);
+ }
+
+ if (print_line)
+ free_source_line(he, len);
+
+ return 0;
+}
+
+static void hists__find_annotations(struct hists *self)
+{
+ struct rb_node *first = rb_first(&self->entries), *nd = first;
+ int key = KEY_RIGHT;
+
+ while (nd) {
+ struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
+ struct sym_priv *priv;
+
+ if (he->ms.sym == NULL || he->ms.map->dso->annotate_warned)
+ goto find_next;
+
+ priv = symbol__priv(he->ms.sym);
+ if (priv->hist == NULL) {
+find_next:
+ if (key == KEY_LEFT)
+ nd = rb_prev(nd);
+ else
+ nd = rb_next(nd);
+ continue;
+ }
+
+ if (use_browser > 0) {
+ key = hist_entry__tui_annotate(he);
+ if (is_exit_key(key))
+ break;
+ switch (key) {
+ case KEY_RIGHT:
+ case '\t':
+ nd = rb_next(nd);
+ break;
+ case KEY_LEFT:
+ if (nd == first)
+ continue;
+ nd = rb_prev(nd);
+ default:
+ break;
+ }
+ } else {
+ hist_entry__tty_annotate(he);
+ nd = rb_next(nd);
+ /*
+ * Since we have a hist_entry per IP for the same
+ * symbol, free he->ms.sym->hist to signal we already
+ * processed this symbol.
+ */
+ free(priv->hist);
+ priv->hist = NULL;
+ }
+ }
+}
+
+static struct perf_event_ops event_ops = {
+ .sample = process_sample_event,
+ .mmap = event__process_mmap,
+ .comm = event__process_comm,
+ .fork = event__process_task,
+};
+
+static int __cmd_annotate(void)
+{
+ int ret;
+ struct perf_session *session;
+
+ session = perf_session__new(input_name, O_RDONLY, force, false);
+ if (session == NULL)
+ return -ENOMEM;
+
+ ret = perf_session__process_events(session, &event_ops);
+ if (ret)
+ goto out_delete;
+
+ if (dump_trace) {
+ perf_session__fprintf_nr_events(session, stdout);
+ goto out_delete;
+ }
+
+ if (verbose > 3)
+ perf_session__fprintf(session, stdout);
+
+ if (verbose > 2)
+ perf_session__fprintf_dsos(session, stdout);
+
+ hists__collapse_resort(&session->hists);
+ hists__output_resort(&session->hists);
+ hists__find_annotations(&session->hists);
+out_delete:
+ perf_session__delete(session);
+
+ return ret;
+}
+
+static const char * const annotate_usage[] = {
+ "perf annotate [<options>] <command>",
+ NULL
+};
+
+static const struct option options[] = {
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
+ "only consider symbols in these dsos"),
+ OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
+ "symbol to annotate"),
+ OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
+ "load module symbols - WARNING: use only with -k and LIVE kernel"),
+ OPT_BOOLEAN('l', "print-line", &print_line,
+ "print matching source lines (may be slow)"),
+ OPT_BOOLEAN('P', "full-paths", &full_paths,
+ "Don't shorten the displayed pathnames"),
+ OPT_END()
+};
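+
+/*
+ * Illustrative invocations built from the options above; the symbol name
+ * and the vmlinux path are placeholders:
+ *
+ *	perf annotate -i perf.data -s some_symbol
+ *	perf annotate --print-line --full-paths -k /path/to/vmlinux
+ */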
+
+int cmd_annotate(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, options, annotate_usage, 0);
+
+ setup_browser();
+
+ symbol_conf.priv_size = sizeof(struct sym_priv);
+ symbol_conf.try_vmlinux_path = true;
+
+ if (symbol__init() < 0)
+ return -1;
+
+ setup_sorting(annotate_usage, options);
+
+ if (argc) {
+ /*
+		 * Special case: if there's an argument left then assume that
+ * it's a symbol filter:
+ */
+ if (argc > 1)
+ usage_with_options(annotate_usage, options);
+
+ sym_hist_filter = argv[0];
+ }
+
+ if (field_sep && *field_sep == '.') {
+		pr_err("'.' is the only invalid --field-separator argument\n");
+ return -1;
+ }
+
+ return __cmd_annotate();
+}
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
new file mode 100644
index 00000000..fcb96269
--- /dev/null
+++ b/tools/perf/builtin-bench.c
@@ -0,0 +1,245 @@
+/*
+ *
+ * builtin-bench.c
+ *
+ * General benchmarking subsystem provided by perf
+ *
+ * Copyright (C) 2009, Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
+ *
+ */
+
+/*
+ *
+ * Available subsystem list:
+ * sched ... scheduler and IPC mechanism
+ * mem ... memory access performance
+ *
+ */
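+
+/*
+ * Illustrative invocations (subsystem and suite names come from the
+ * tables below):
+ *
+ *	perf bench sched messaging
+ *	perf bench sched pipe
+ *	perf bench mem memcpy
+ *	perf bench all
+ */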
+
+#include "perf.h"
+#include "util/util.h"
+#include "util/parse-options.h"
+#include "builtin.h"
+#include "bench/bench.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct bench_suite {
+ const char *name;
+ const char *summary;
+ int (*fn)(int, const char **, const char *);
+};
+
+/* sentinel: simplifies the help output */
+#define suite_all { "all", "test all suite (pseudo suite)", NULL }
+
+static struct bench_suite sched_suites[] = {
+ { "messaging",
+ "Benchmark for scheduler and IPC mechanisms",
+ bench_sched_messaging },
+ { "pipe",
+ "Flood of communication over pipe() between two processes",
+ bench_sched_pipe },
+ suite_all,
+ { NULL,
+ NULL,
+ NULL }
+};
+
+static struct bench_suite mem_suites[] = {
+ { "memcpy",
+ "Simple memory copy in various ways",
+ bench_mem_memcpy },
+ suite_all,
+ { NULL,
+ NULL,
+ NULL }
+};
+
+struct bench_subsys {
+ const char *name;
+ const char *summary;
+ struct bench_suite *suites;
+};
+
+static struct bench_subsys subsystems[] = {
+ { "sched",
+ "scheduler and IPC mechanism",
+ sched_suites },
+ { "mem",
+ "memory access performance",
+ mem_suites },
+ { "all", /* sentinel: easy for help */
+ "test all subsystem (pseudo subsystem)",
+ NULL },
+ { NULL,
+ NULL,
+ NULL }
+};
+
+static void dump_suites(int subsys_index)
+{
+ int i;
+
+ printf("# List of available suites for %s...\n\n",
+ subsystems[subsys_index].name);
+
+ for (i = 0; subsystems[subsys_index].suites[i].name; i++)
+ printf("%14s: %s\n",
+ subsystems[subsys_index].suites[i].name,
+ subsystems[subsys_index].suites[i].summary);
+
+ printf("\n");
+ return;
+}
+
+static const char *bench_format_str;
+int bench_format = BENCH_FORMAT_DEFAULT;
+
+static const struct option bench_options[] = {
+ OPT_STRING('f', "format", &bench_format_str, "default",
+ "Specify format style"),
+ OPT_END()
+};
+
+static const char * const bench_usage[] = {
+ "perf bench [<common options>] <subsystem> <suite> [<options>]",
+ NULL
+};
+
+static void print_usage(void)
+{
+ int i;
+
+ printf("Usage: \n");
+ for (i = 0; bench_usage[i]; i++)
+ printf("\t%s\n", bench_usage[i]);
+ printf("\n");
+
+ printf("# List of available subsystems...\n\n");
+
+ for (i = 0; subsystems[i].name; i++)
+ printf("%14s: %s\n",
+ subsystems[i].name, subsystems[i].summary);
+ printf("\n");
+}
+
+static int bench_str2int(const char *str)
+{
+ if (!str)
+ return BENCH_FORMAT_DEFAULT;
+
+ if (!strcmp(str, BENCH_FORMAT_DEFAULT_STR))
+ return BENCH_FORMAT_DEFAULT;
+ else if (!strcmp(str, BENCH_FORMAT_SIMPLE_STR))
+ return BENCH_FORMAT_SIMPLE;
+
+ return BENCH_FORMAT_UNKNOWN;
+}
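+
+/*
+ * Illustrative: assuming BENCH_FORMAT_DEFAULT_STR is "default" and
+ * BENCH_FORMAT_SIMPLE_STR is "simple", a raw-number run would be:
+ *
+ *	perf bench --format=simple sched pipe
+ */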
+
+static void all_suite(struct bench_subsys *subsys)
+{
+ int i;
+ const char *argv[2];
+ struct bench_suite *suites = subsys->suites;
+
+ argv[1] = NULL;
+ /*
+ * TODO:
+ * preparing preset parameters for
+ * embedded, ordinary PC, HPC, etc...
+ * will be helpful
+ */
+ for (i = 0; suites[i].fn; i++) {
+ printf("# Running %s/%s benchmark...\n",
+ subsys->name,
+ suites[i].name);
+
+ argv[1] = suites[i].name;
+ suites[i].fn(1, argv, NULL);
+ printf("\n");
+ }
+}
+
+static void all_subsystem(void)
+{
+ int i;
+ for (i = 0; subsystems[i].suites; i++)
+ all_suite(&subsystems[i]);
+}
+
+int cmd_bench(int argc, const char **argv, const char *prefix __used)
+{
+ int i, j, status = 0;
+
+ if (argc < 2) {
+ /* No subsystem specified. */
+ print_usage();
+ goto end;
+ }
+
+ argc = parse_options(argc, argv, bench_options, bench_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ bench_format = bench_str2int(bench_format_str);
+ if (bench_format == BENCH_FORMAT_UNKNOWN) {
+		printf("Unknown format descriptor: %s\n", bench_format_str);
+ goto end;
+ }
+
+ if (argc < 1) {
+ print_usage();
+ goto end;
+ }
+
+ if (!strcmp(argv[0], "all")) {
+ all_subsystem();
+ goto end;
+ }
+
+ for (i = 0; subsystems[i].name; i++) {
+ if (strcmp(subsystems[i].name, argv[0]))
+ continue;
+
+ if (argc < 2) {
+ /* No suite specified. */
+ dump_suites(i);
+ goto end;
+ }
+
+ if (!strcmp(argv[1], "all")) {
+ all_suite(&subsystems[i]);
+ goto end;
+ }
+
+ for (j = 0; subsystems[i].suites[j].name; j++) {
+ if (strcmp(subsystems[i].suites[j].name, argv[1]))
+ continue;
+
+ if (bench_format == BENCH_FORMAT_DEFAULT)
+ printf("# Running %s/%s benchmark...\n",
+ subsystems[i].name,
+ subsystems[i].suites[j].name);
+ status = subsystems[i].suites[j].fn(argc - 1,
+ argv + 1, prefix);
+ goto end;
+ }
+
+ if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
+ dump_suites(i);
+ goto end;
+ }
+
+		printf("Unknown suite: %s for %s\n", argv[1], argv[0]);
+ status = 1;
+ goto end;
+ }
+
+	printf("Unknown subsystem: %s\n", argv[0]);
+ status = 1;
+
+end:
+ return status;
+}
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
new file mode 100644
index 00000000..29ad20e6
--- /dev/null
+++ b/tools/perf/builtin-buildid-cache.c
@@ -0,0 +1,132 @@
+/*
+ * builtin-buildid-cache.c
+ *
+ * Builtin buildid-cache command: Manages build-id cache
+ *
+ * Copyright (C) 2010, Red Hat Inc.
+ * Copyright (C) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+#include "builtin.h"
+#include "perf.h"
+#include "util/cache.h"
+#include "util/debug.h"
+#include "util/header.h"
+#include "util/parse-options.h"
+#include "util/strlist.h"
+#include "util/symbol.h"
+
+static char const *add_name_list_str, *remove_name_list_str;
+
+static const char * const buildid_cache_usage[] = {
+ "perf buildid-cache [<options>]",
+ NULL
+};
+
+static const struct option buildid_cache_options[] = {
+ OPT_STRING('a', "add", &add_name_list_str,
+ "file list", "file(s) to add"),
+ OPT_STRING('r', "remove", &remove_name_list_str, "file list",
+ "file(s) to remove"),
+ OPT_INCR('v', "verbose", &verbose, "be more verbose"),
+ OPT_END()
+};
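+
+/*
+ * Illustrative usage; the library path is a placeholder:
+ *
+ *	perf buildid-cache -v -a /lib/libc-2.11.so
+ *	perf buildid-cache -r /lib/libc-2.11.so
+ */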
+
+static int build_id_cache__add_file(const char *filename, const char *debugdir)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+ u8 build_id[BUILD_ID_SIZE];
+ int err;
+
+ if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) {
+ pr_debug("Couldn't read a build-id in %s\n", filename);
+ return -1;
+ }
+
+ build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+ err = build_id_cache__add_s(sbuild_id, debugdir, filename, false);
+ if (verbose)
+ pr_info("Adding %s %s: %s\n", sbuild_id, filename,
+ err ? "FAIL" : "Ok");
+ return err;
+}
+
+static int build_id_cache__remove_file(const char *filename,
+				       const char *debugdir)
+{
+	u8 build_id[BUILD_ID_SIZE];
+	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+	int err;
+
+ if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) {
+ pr_debug("Couldn't read a build-id in %s\n", filename);
+ return -1;
+ }
+
+ build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+ err = build_id_cache__remove_s(sbuild_id, debugdir);
+ if (verbose)
+ pr_info("Removing %s %s: %s\n", sbuild_id, filename,
+ err ? "FAIL" : "Ok");
+
+ return err;
+}
+
+static int __cmd_buildid_cache(void)
+{
+ struct strlist *list;
+ struct str_node *pos;
+ char debugdir[PATH_MAX];
+
+ snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
+
+ if (add_name_list_str) {
+ list = strlist__new(true, add_name_list_str);
+ if (list) {
+ strlist__for_each(pos, list)
+ if (build_id_cache__add_file(pos->s, debugdir)) {
+ if (errno == EEXIST) {
+ pr_debug("%s already in the cache\n",
+ pos->s);
+ continue;
+ }
+ pr_warning("Couldn't add %s: %s\n",
+ pos->s, strerror(errno));
+ }
+
+ strlist__delete(list);
+ }
+ }
+
+ if (remove_name_list_str) {
+ list = strlist__new(true, remove_name_list_str);
+ if (list) {
+ strlist__for_each(pos, list)
+ if (build_id_cache__remove_file(pos->s, debugdir)) {
+ if (errno == ENOENT) {
+ pr_debug("%s wasn't in the cache\n",
+ pos->s);
+ continue;
+ }
+ pr_warning("Couldn't remove %s: %s\n",
+ pos->s, strerror(errno));
+ }
+
+ strlist__delete(list);
+ }
+ }
+
+ return 0;
+}
+
+int cmd_buildid_cache(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, buildid_cache_options,
+ buildid_cache_usage, 0);
+
+ if (symbol__init() < 0)
+ return -1;
+
+ setup_pager();
+ return __cmd_buildid_cache();
+}
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
new file mode 100644
index 00000000..44a47e13
--- /dev/null
+++ b/tools/perf/builtin-buildid-list.c
@@ -0,0 +1,60 @@
+/*
+ * builtin-buildid-list.c
+ *
+ * Builtin buildid-list command: list buildids in perf.data
+ *
+ * Copyright (C) 2009, Red Hat Inc.
+ * Copyright (C) 2009, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+#include "builtin.h"
+#include "perf.h"
+#include "util/build-id.h"
+#include "util/cache.h"
+#include "util/debug.h"
+#include "util/parse-options.h"
+#include "util/session.h"
+#include "util/symbol.h"
+
+static char const *input_name = "perf.data";
+static bool force;
+static bool with_hits;
+
+static const char * const buildid_list_usage[] = {
+ "perf buildid-list [<options>]",
+ NULL
+};
+
+static const struct option options[] = {
+ OPT_BOOLEAN('H', "with-hits", &with_hits, "Show only DSOs with hits"),
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose"),
+ OPT_END()
+};
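+
+/*
+ * Illustrative usage:
+ *
+ *	perf buildid-list -i perf.data
+ *	perf buildid-list --with-hits
+ */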
+
+static int __cmd_buildid_list(void)
+{
+	struct perf_session *session;
+
+ session = perf_session__new(input_name, O_RDONLY, force, false);
+ if (session == NULL)
+ return -1;
+
+ if (with_hits)
+ perf_session__process_events(session, &build_id__mark_dso_hit_ops);
+
+ perf_session__fprintf_dsos_buildid(session, stdout, with_hits);
+
+ perf_session__delete(session);
+	return 0;
+}
+
+int cmd_buildid_list(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, options, buildid_list_usage, 0);
+ setup_pager();
+ return __cmd_buildid_list();
+}
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
new file mode 100644
index 00000000..fca1d440
--- /dev/null
+++ b/tools/perf/builtin-diff.c
@@ -0,0 +1,227 @@
+/*
+ * builtin-diff.c
+ *
+ * Builtin diff command: Analyze two perf.data input files, look up and read
+ * DSOs and symbol information, sort them and produce a diff.
+ */
+#include "builtin.h"
+
+#include "util/debug.h"
+#include "util/event.h"
+#include "util/hist.h"
+#include "util/session.h"
+#include "util/sort.h"
+#include "util/symbol.h"
+#include "util/util.h"
+
+#include <stdlib.h>
+
+static char const *input_old = "perf.data.old",
+ *input_new = "perf.data";
+static char diff__default_sort_order[] = "dso,symbol";
+static bool force;
+static bool show_displacement;
+
+static int hists__add_entry(struct hists *self,
+ struct addr_location *al, u64 period)
+{
+ if (__hists__add_entry(self, al, NULL, period) != NULL)
+ return 0;
+ return -ENOMEM;
+}
+
+static int diff__process_sample_event(event_t *event, struct perf_session *session)
+{
+ struct addr_location al;
+ struct sample_data data = { .period = 1, };
+
+ if (event__preprocess_sample(event, session, &al, &data, NULL) < 0) {
+ pr_warning("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ if (al.filtered || al.sym == NULL)
+ return 0;
+
+ if (hists__add_entry(&session->hists, &al, data.period)) {
+ pr_warning("problem incrementing symbol period, skipping event\n");
+ return -1;
+ }
+
+ session->hists.stats.total_period += data.period;
+ return 0;
+}
+
+static struct perf_event_ops event_ops = {
+ .sample = diff__process_sample_event,
+ .mmap = event__process_mmap,
+ .comm = event__process_comm,
+ .exit = event__process_task,
+ .fork = event__process_task,
+ .lost = event__process_lost,
+};
+
+static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
+ struct hist_entry *he)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+ if (hist_entry__cmp(he, iter) < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, root);
+}
+
+static void hists__resort_entries(struct hists *self)
+{
+ unsigned long position = 1;
+ struct rb_root tmp = RB_ROOT;
+ struct rb_node *next = rb_first(&self->entries);
+
+ while (next != NULL) {
+ struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);
+
+ next = rb_next(&n->rb_node);
+ rb_erase(&n->rb_node, &self->entries);
+ n->position = position++;
+ perf_session__insert_hist_entry_by_name(&tmp, n);
+ }
+
+ self->entries = tmp;
+}
+
+static void hists__set_positions(struct hists *self)
+{
+ hists__output_resort(self);
+ hists__resort_entries(self);
+}
+
+static struct hist_entry *hists__find_entry(struct hists *self,
+ struct hist_entry *he)
+{
+ struct rb_node *n = self->entries.rb_node;
+
+ while (n) {
+ struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
+ int64_t cmp = hist_entry__cmp(he, iter);
+
+ if (cmp < 0)
+ n = n->rb_left;
+ else if (cmp > 0)
+ n = n->rb_right;
+ else
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void hists__match(struct hists *older, struct hists *newer)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(&newer->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node);
+ pos->pair = hists__find_entry(older, pos);
+ }
+}
+
+static int __cmd_diff(void)
+{
+ int ret, i;
+ struct perf_session *session[2];
+
+ session[0] = perf_session__new(input_old, O_RDONLY, force, false);
+ session[1] = perf_session__new(input_new, O_RDONLY, force, false);
+ if (session[0] == NULL || session[1] == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < 2; ++i) {
+ ret = perf_session__process_events(session[i], &event_ops);
+ if (ret)
+ goto out_delete;
+ }
+
+ hists__output_resort(&session[1]->hists);
+ if (show_displacement)
+ hists__set_positions(&session[0]->hists);
+
+ hists__match(&session[0]->hists, &session[1]->hists);
+ hists__fprintf(&session[1]->hists, &session[0]->hists,
+ show_displacement, stdout);
+out_delete:
+ for (i = 0; i < 2; ++i)
+ perf_session__delete(session[i]);
+ return ret;
+}
+
+static const char * const diff_usage[] = {
+ "perf diff [<options>] [old_file] [new_file]",
+ NULL,
+};
+
+static const struct option options[] = {
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('m', "displacement", &show_displacement,
+ "Show position displacement relative to baseline"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
+ "load module symbols - WARNING: use only with -k and LIVE kernel"),
+ OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
+ "only consider symbols in these dsos"),
+ OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
+ "only consider symbols in these comms"),
+ OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
+ "only consider these symbols"),
+ OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
+ "sort by key(s): pid, comm, dso, symbol, parent"),
+ OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
+ "separator for columns, no spaces will be added between "
+ "columns '.' is reserved."),
+ OPT_END()
+};
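+
+/*
+ * Illustrative usage; when the file names are omitted they default to
+ * perf.data.old and perf.data:
+ *
+ *	perf diff
+ *	perf diff -m perf.data.old perf.data
+ */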
+
+int cmd_diff(int argc, const char **argv, const char *prefix __used)
+{
+ sort_order = diff__default_sort_order;
+ argc = parse_options(argc, argv, options, diff_usage, 0);
+ if (argc) {
+ if (argc > 2)
+ usage_with_options(diff_usage, options);
+ if (argc == 2) {
+ input_old = argv[0];
+ input_new = argv[1];
+ } else
+ input_new = argv[0];
+ } else if (symbol_conf.default_guest_vmlinux_name ||
+ symbol_conf.default_guest_kallsyms) {
+ input_old = "perf.data.host";
+ input_new = "perf.data.guest";
+ }
+
+ symbol_conf.exclude_other = false;
+ if (symbol__init() < 0)
+ return -1;
+
+ setup_sorting(diff_usage, options);
+ setup_pager();
+
+ sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL);
+ sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", NULL);
+ sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", NULL);
+
+ return __cmd_diff();
+}
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
new file mode 100644
index 00000000..6d5a8a7f
--- /dev/null
+++ b/tools/perf/builtin-help.c
@@ -0,0 +1,459 @@
+/*
+ * builtin-help.c
+ *
+ * Builtin help command
+ */
+#include "perf.h"
+#include "util/cache.h"
+#include "builtin.h"
+#include "util/exec_cmd.h"
+#include "common-cmds.h"
+#include "util/parse-options.h"
+#include "util/run-command.h"
+#include "util/help.h"
+
+static struct man_viewer_list {
+ struct man_viewer_list *next;
+ char name[FLEX_ARRAY];
+} *man_viewer_list;
+
+static struct man_viewer_info_list {
+ struct man_viewer_info_list *next;
+ const char *info;
+ char name[FLEX_ARRAY];
+} *man_viewer_info_list;
+
+enum help_format {
+ HELP_FORMAT_MAN,
+ HELP_FORMAT_INFO,
+ HELP_FORMAT_WEB,
+};
+
+static bool show_all = false;
+static enum help_format help_format = HELP_FORMAT_MAN;
+static struct option builtin_help_options[] = {
+ OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
+ OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN),
+ OPT_SET_UINT('w', "web", &help_format, "show manual in web browser",
+ HELP_FORMAT_WEB),
+ OPT_SET_UINT('i', "info", &help_format, "show info page",
+ HELP_FORMAT_INFO),
+ OPT_END(),
+};
+
+static const char * const builtin_help_usage[] = {
+ "perf help [--all] [--man|--web|--info] [command]",
+ NULL
+};
+
+static enum help_format parse_help_format(const char *format)
+{
+ if (!strcmp(format, "man"))
+ return HELP_FORMAT_MAN;
+ if (!strcmp(format, "info"))
+ return HELP_FORMAT_INFO;
+ if (!strcmp(format, "web") || !strcmp(format, "html"))
+ return HELP_FORMAT_WEB;
+ die("unrecognized help format '%s'", format);
+}
+
+static const char *get_man_viewer_info(const char *name)
+{
+ struct man_viewer_info_list *viewer;
+
+ for (viewer = man_viewer_info_list; viewer; viewer = viewer->next) {
+ if (!strcasecmp(name, viewer->name))
+ return viewer->info;
+ }
+ return NULL;
+}
+
+static int check_emacsclient_version(void)
+{
+ struct strbuf buffer = STRBUF_INIT;
+ struct child_process ec_process;
+ const char *argv_ec[] = { "emacsclient", "--version", NULL };
+ int version;
+
+ /* emacsclient prints its version number on stderr */
+ memset(&ec_process, 0, sizeof(ec_process));
+ ec_process.argv = argv_ec;
+ ec_process.err = -1;
+ ec_process.stdout_to_stderr = 1;
+ if (start_command(&ec_process)) {
+ fprintf(stderr, "Failed to start emacsclient.\n");
+ return -1;
+ }
+ strbuf_read(&buffer, ec_process.err, 20);
+ close(ec_process.err);
+
+ /*
+ * Don't bother checking return value, because "emacsclient --version"
+	 * seems to always exit with code 1.
+ */
+ finish_command(&ec_process);
+
+ if (prefixcmp(buffer.buf, "emacsclient")) {
+ fprintf(stderr, "Failed to parse emacsclient version.\n");
+ strbuf_release(&buffer);
+ return -1;
+ }
+
+ strbuf_remove(&buffer, 0, strlen("emacsclient"));
+ version = atoi(buffer.buf);
+
+ if (version < 22) {
+ fprintf(stderr,
+ "emacsclient version '%d' too old (< 22).\n",
+ version);
+ strbuf_release(&buffer);
+ return -1;
+ }
+
+ strbuf_release(&buffer);
+ return 0;
+}
+
+static void exec_woman_emacs(const char *path, const char *page)
+{
+ if (!check_emacsclient_version()) {
+ /* This works only with emacsclient version >= 22. */
+ struct strbuf man_page = STRBUF_INIT;
+
+ if (!path)
+ path = "emacsclient";
+ strbuf_addf(&man_page, "(woman \"%s\")", page);
+ execlp(path, "emacsclient", "-e", man_page.buf, NULL);
+ warning("failed to exec '%s': %s", path, strerror(errno));
+ }
+}
+
+static void exec_man_konqueror(const char *path, const char *page)
+{
+ const char *display = getenv("DISPLAY");
+ if (display && *display) {
+ struct strbuf man_page = STRBUF_INIT;
+ const char *filename = "kfmclient";
+
+ /* It's simpler to launch konqueror using kfmclient. */
+ if (path) {
+ const char *file = strrchr(path, '/');
+ if (file && !strcmp(file + 1, "konqueror")) {
+ char *new = strdup(path);
+ char *dest = strrchr(new, '/');
+
+ /* strlen("konqueror") == strlen("kfmclient") */
+ strcpy(dest + 1, "kfmclient");
+ path = new;
+ }
+ if (file)
+ filename = file;
+ } else
+ path = "kfmclient";
+ strbuf_addf(&man_page, "man:%s(1)", page);
+ execlp(path, filename, "newTab", man_page.buf, NULL);
+ warning("failed to exec '%s': %s", path, strerror(errno));
+ }
+}
+
+static void exec_man_man(const char *path, const char *page)
+{
+ if (!path)
+ path = "man";
+ execlp(path, "man", page, NULL);
+ warning("failed to exec '%s': %s", path, strerror(errno));
+}
+
+static void exec_man_cmd(const char *cmd, const char *page)
+{
+ struct strbuf shell_cmd = STRBUF_INIT;
+ strbuf_addf(&shell_cmd, "%s %s", cmd, page);
+ execl("/bin/sh", "sh", "-c", shell_cmd.buf, NULL);
+ warning("failed to exec '%s': %s", cmd, strerror(errno));
+}
+
+static void add_man_viewer(const char *name)
+{
+ struct man_viewer_list **p = &man_viewer_list;
+ size_t len = strlen(name);
+
+ while (*p)
+ p = &((*p)->next);
+ *p = zalloc(sizeof(**p) + len + 1);
+ strncpy((*p)->name, name, len);
+}
+
+static int supported_man_viewer(const char *name, size_t len)
+{
+ return (!strncasecmp("man", name, len) ||
+ !strncasecmp("woman", name, len) ||
+ !strncasecmp("konqueror", name, len));
+}
+
+static void do_add_man_viewer_info(const char *name,
+ size_t len,
+ const char *value)
+{
+ struct man_viewer_info_list *new = zalloc(sizeof(*new) + len + 1);
+
+ strncpy(new->name, name, len);
+ new->info = strdup(value);
+ new->next = man_viewer_info_list;
+ man_viewer_info_list = new;
+}
+
+static int add_man_viewer_path(const char *name,
+ size_t len,
+ const char *value)
+{
+ if (supported_man_viewer(name, len))
+ do_add_man_viewer_info(name, len, value);
+ else
+ warning("'%s': path for unsupported man viewer.\n"
+ "Please consider using 'man.<tool>.cmd' instead.",
+ name);
+
+ return 0;
+}
+
+static int add_man_viewer_cmd(const char *name,
+ size_t len,
+ const char *value)
+{
+ if (supported_man_viewer(name, len))
+ warning("'%s': cmd for supported man viewer.\n"
+ "Please consider using 'man.<tool>.path' instead.",
+ name);
+ else
+ do_add_man_viewer_info(name, len, value);
+
+ return 0;
+}
+
+static int add_man_viewer_info(const char *var, const char *value)
+{
+ const char *name = var + 4;
+ const char *subkey = strrchr(name, '.');
+
+ if (!subkey)
+ return error("Config with no key for man viewer: %s", name);
+
+ if (!strcmp(subkey, ".path")) {
+ if (!value)
+ return config_error_nonbool(var);
+ return add_man_viewer_path(name, subkey - name, value);
+ }
+ if (!strcmp(subkey, ".cmd")) {
+ if (!value)
+ return config_error_nonbool(var);
+ return add_man_viewer_cmd(name, subkey - name, value);
+ }
+
+ warning("'%s': unsupported man viewer sub key.", subkey);
+ return 0;
+}
+
+static int perf_help_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "help.format")) {
+ if (!value)
+ return config_error_nonbool(var);
+ help_format = parse_help_format(value);
+ return 0;
+ }
+ if (!strcmp(var, "man.viewer")) {
+ if (!value)
+ return config_error_nonbool(var);
+ add_man_viewer(value);
+ return 0;
+ }
+ if (!prefixcmp(var, "man."))
+ return add_man_viewer_info(var, value);
+
+ return perf_default_config(var, value, cb);
+}
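+
+/*
+ * Illustrative perf config snippet for the keys handled above; the
+ * viewer name and path are placeholders:
+ *
+ *	[help]
+ *		format = man
+ *	[man]
+ *		viewer = konqueror
+ *	[man "konqueror"]
+ *		path = /usr/bin/konqueror
+ */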
+
+static struct cmdnames main_cmds, other_cmds;
+
+void list_common_cmds_help(void)
+{
+ unsigned int i, longest = 0;
+
+ for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
+ if (longest < strlen(common_cmds[i].name))
+ longest = strlen(common_cmds[i].name);
+ }
+
+ puts(" The most commonly used perf commands are:");
+ for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
+ printf(" %-*s ", longest, common_cmds[i].name);
+ puts(common_cmds[i].help);
+ }
+}
+
+static int is_perf_command(const char *s)
+{
+ return is_in_cmdlist(&main_cmds, s) ||
+ is_in_cmdlist(&other_cmds, s);
+}
+
+static const char *prepend(const char *prefix, const char *cmd)
+{
+ size_t pre_len = strlen(prefix);
+ size_t cmd_len = strlen(cmd);
+ char *p = malloc(pre_len + cmd_len + 1);
+ memcpy(p, prefix, pre_len);
+ strcpy(p + pre_len, cmd);
+ return p;
+}
+
+static const char *cmd_to_page(const char *perf_cmd)
+{
+ if (!perf_cmd)
+ return "perf";
+ else if (!prefixcmp(perf_cmd, "perf"))
+ return perf_cmd;
+ else
+ return prepend("perf-", perf_cmd);
+}
+
+static void setup_man_path(void)
+{
+ struct strbuf new_path = STRBUF_INIT;
+ const char *old_path = getenv("MANPATH");
+
+	/* We should always put ':' after our path. If there is no
+	 * old_path, the ':' at the end will let 'man' try system-wide
+	 * paths after ours to find the manual page. If there is an
+	 * old_path, we need ':' as a delimiter. */
+ strbuf_addstr(&new_path, system_path(PERF_MAN_PATH));
+ strbuf_addch(&new_path, ':');
+ if (old_path)
+ strbuf_addstr(&new_path, old_path);
+
+ setenv("MANPATH", new_path.buf, 1);
+
+ strbuf_release(&new_path);
+}
+
+static void exec_viewer(const char *name, const char *page)
+{
+ const char *info = get_man_viewer_info(name);
+
+ if (!strcasecmp(name, "man"))
+ exec_man_man(info, page);
+ else if (!strcasecmp(name, "woman"))
+ exec_woman_emacs(info, page);
+ else if (!strcasecmp(name, "konqueror"))
+ exec_man_konqueror(info, page);
+ else if (info)
+ exec_man_cmd(info, page);
+ else
+ warning("'%s': unknown man viewer.", name);
+}
+
+static void show_man_page(const char *perf_cmd)
+{
+ struct man_viewer_list *viewer;
+ const char *page = cmd_to_page(perf_cmd);
+ const char *fallback = getenv("PERF_MAN_VIEWER");
+
+ setup_man_path();
+ for (viewer = man_viewer_list; viewer; viewer = viewer->next)
+ exec_viewer(viewer->name, page); /* will return when unable */
+
+ if (fallback)
+ exec_viewer(fallback, page);
+ exec_viewer("man", page);
+ die("no man viewer handled the request");
+}
+
+static void show_info_page(const char *perf_cmd)
+{
+ const char *page = cmd_to_page(perf_cmd);
+ setenv("INFOPATH", system_path(PERF_INFO_PATH), 1);
+ execlp("info", "info", "perfman", page, NULL);
+}
+
+static void get_html_page_path(struct strbuf *page_path, const char *page)
+{
+ struct stat st;
+ const char *html_path = system_path(PERF_HTML_PATH);
+
+ /* Check that we have a perf documentation directory. */
+ if (stat(mkpath("%s/perf.html", html_path), &st)
+ || !S_ISREG(st.st_mode))
+ die("'%s': not a documentation directory.", html_path);
+
+ strbuf_init(page_path, 0);
+ strbuf_addf(page_path, "%s/%s.html", html_path, page);
+}
+
+/*
+ * If open_html is not defined in a platform-specific way (see for
+ * example compat/mingw.h), we use the script web--browse to display
+ * HTML.
+ */
+#ifndef open_html
+static void open_html(const char *path)
+{
+ execl_perf_cmd("web--browse", "-c", "help.browser", path, NULL);
+}
+#endif
+
+static void show_html_page(const char *perf_cmd)
+{
+ const char *page = cmd_to_page(perf_cmd);
+	struct strbuf page_path; /* it leaks but we exec below */
+
+ get_html_page_path(&page_path, page);
+
+ open_html(page_path.buf);
+}
+
+int cmd_help(int argc, const char **argv, const char *prefix __used)
+{
+ const char *alias;
+
+ load_command_list("perf-", &main_cmds, &other_cmds);
+
+ perf_config(perf_help_config, NULL);
+
+ argc = parse_options(argc, argv, builtin_help_options,
+ builtin_help_usage, 0);
+
+ if (show_all) {
+ printf("\n usage: %s\n\n", perf_usage_string);
+ list_commands("perf commands", &main_cmds, &other_cmds);
+ printf(" %s\n\n", perf_more_info_string);
+ return 0;
+ }
+
+ if (!argv[0]) {
+ printf("\n usage: %s\n\n", perf_usage_string);
+ list_common_cmds_help();
+ printf("\n %s\n\n", perf_more_info_string);
+ return 0;
+ }
+
+ alias = alias_lookup(argv[0]);
+ if (alias && !is_perf_command(argv[0])) {
+ printf("`perf %s' is aliased to `%s'\n", argv[0], alias);
+ return 0;
+ }
+
+ switch (help_format) {
+ case HELP_FORMAT_MAN:
+ show_man_page(argv[0]);
+ break;
+ case HELP_FORMAT_INFO:
+ show_info_page(argv[0]);
+ break;
+ case HELP_FORMAT_WEB:
+		show_html_page(argv[0]);
+		break;
+	default:
+ break;
+ }
+
+ return 0;
+}
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
new file mode 100644
index 00000000..8e3e47b0
--- /dev/null
+++ b/tools/perf/builtin-inject.c
@@ -0,0 +1,228 @@
+/*
+ * builtin-inject.c
+ *
+ * Builtin inject command: Examine the live mode (stdin) event stream
+ * and repipe it to stdout while optionally injecting additional
+ * events into it.
+ */
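+
+/*
+ * Illustrative pipeline (the workload is a placeholder):
+ *
+ *	perf record -o - -- some_workload | perf inject -b | perf report -i -
+ */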
+#include "builtin.h"
+
+#include "perf.h"
+#include "util/session.h"
+#include "util/debug.h"
+
+#include "util/parse-options.h"
+
+static char const *input_name = "-";
+static bool inject_build_ids;
+
+static int event__repipe(event_t *event __used,
+ struct perf_session *session __used)
+{
+ uint32_t size;
+ void *buf = event;
+
+ size = event->header.size;
+
+ while (size) {
+ int ret = write(STDOUT_FILENO, buf, size);
+ if (ret < 0)
+ return -errno;
+
+ size -= ret;
+ buf += ret;
+ }
+
+ return 0;
+}
+
+static int event__repipe_mmap(event_t *self, struct perf_session *session)
+{
+ int err;
+
+ err = event__process_mmap(self, session);
+ event__repipe(self, session);
+
+ return err;
+}
+
+static int event__repipe_task(event_t *self, struct perf_session *session)
+{
+ int err;
+
+ err = event__process_task(self, session);
+ event__repipe(self, session);
+
+ return err;
+}
+
+static int event__repipe_tracing_data(event_t *self,
+ struct perf_session *session)
+{
+ int err;
+
+ event__repipe(self, session);
+ err = event__process_tracing_data(self, session);
+
+ return err;
+}
+
+static int dso__read_build_id(struct dso *self)
+{
+ if (self->has_build_id)
+ return 0;
+
+ if (filename__read_build_id(self->long_name, self->build_id,
+ sizeof(self->build_id)) > 0) {
+ self->has_build_id = true;
+ return 0;
+ }
+
+ return -1;
+}
+
+static int dso__inject_build_id(struct dso *self, struct perf_session *session)
+{
+ u16 misc = PERF_RECORD_MISC_USER;
+ struct machine *machine;
+ int err;
+
+ if (dso__read_build_id(self) < 0) {
+ pr_debug("no build_id found for %s\n", self->long_name);
+ return -1;
+ }
+
+ machine = perf_session__find_host_machine(session);
+ if (machine == NULL) {
+ pr_err("Can't find machine for session\n");
+ return -1;
+ }
+
+ if (self->kernel)
+ misc = PERF_RECORD_MISC_KERNEL;
+
+ err = event__synthesize_build_id(self, misc, event__repipe,
+ machine, session);
+ if (err) {
+ pr_err("Can't synthesize build_id event for %s\n", self->long_name);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int event__inject_buildid(event_t *event, struct perf_session *session)
+{
+ struct addr_location al;
+ struct thread *thread;
+ u8 cpumode;
+
+ cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ thread = perf_session__findnew(session, event->ip.pid);
+ if (thread == NULL) {
+ pr_err("problem processing %d event, skipping it.\n",
+ event->header.type);
+ goto repipe;
+ }
+
+ thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
+ event->ip.pid, event->ip.ip, &al);
+
+ if (al.map != NULL) {
+ if (!al.map->dso->hit) {
+ al.map->dso->hit = 1;
+ if (map__load(al.map, NULL) >= 0) {
+ dso__inject_build_id(al.map->dso, session);
+ /*
+ * If this fails, too bad, let the other side
+ * account this as unresolved.
+ */
+ } else
+ pr_warning("no symbols found in %s, maybe "
+ "install a debug package?\n",
+ al.map->dso->long_name);
+ }
+ }
+
+repipe:
+ event__repipe(event, session);
+ return 0;
+}
+
+struct perf_event_ops inject_ops = {
+ .sample = event__repipe,
+ .mmap = event__repipe,
+ .comm = event__repipe,
+ .fork = event__repipe,
+ .exit = event__repipe,
+ .lost = event__repipe,
+ .read = event__repipe,
+ .throttle = event__repipe,
+ .unthrottle = event__repipe,
+ .attr = event__repipe,
+ .event_type = event__repipe,
+ .tracing_data = event__repipe,
+ .build_id = event__repipe,
+};
+
+extern volatile int session_done;
+
+static void sig_handler(int sig __attribute__((__unused__)))
+{
+ session_done = 1;
+}
+
+static int __cmd_inject(void)
+{
+ struct perf_session *session;
+ int ret = -EINVAL;
+
+ signal(SIGINT, sig_handler);
+
+ if (inject_build_ids) {
+ inject_ops.sample = event__inject_buildid;
+ inject_ops.mmap = event__repipe_mmap;
+ inject_ops.fork = event__repipe_task;
+ inject_ops.tracing_data = event__repipe_tracing_data;
+ }
+
+ session = perf_session__new(input_name, O_RDONLY, false, true);
+ if (session == NULL)
+ return -ENOMEM;
+
+ ret = perf_session__process_events(session, &inject_ops);
+
+ perf_session__delete(session);
+
+ return ret;
+}
+
+static const char * const report_usage[] = {
+ "perf inject [<options>]",
+ NULL
+};
+
+static const struct option options[] = {
+ OPT_BOOLEAN('b', "build-ids", &inject_build_ids,
+ "Inject build-ids into the output stream"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show build ids, etc)"),
+ OPT_END()
+};
+
+int cmd_inject(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, options, report_usage, 0);
+
+ /*
+ * Any (unrecognized) arguments left?
+ */
+ if (argc)
+ usage_with_options(report_usage, options);
+
+ if (symbol__init() < 0)
+ return -1;
+
+ return __cmd_inject();
+}
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
new file mode 100644
index 00000000..31f60a25
--- /dev/null
+++ b/tools/perf/builtin-kmem.c
@@ -0,0 +1,784 @@
+#include "builtin.h"
+#include "perf.h"
+
+#include "util/util.h"
+#include "util/cache.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/header.h"
+#include "util/session.h"
+
+#include "util/parse-options.h"
+#include "util/trace-event.h"
+
+#include "util/debug.h"
+
+#include <linux/rbtree.h>
+
+struct alloc_stat;
+typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
+
+static char const *input_name = "perf.data";
+
+static int alloc_flag;
+static int caller_flag;
+
+static int alloc_lines = -1;
+static int caller_lines = -1;
+
+static bool raw_ip;
+
+static char default_sort_order[] = "frag,hit,bytes";
+
+static int *cpunode_map;
+static int max_cpu_num;
+
+struct alloc_stat {
+ u64 call_site;
+ u64 ptr;
+ u64 bytes_req;
+ u64 bytes_alloc;
+ u32 hit;
+ u32 pingpong;
+
+ short alloc_cpu;
+
+ struct rb_node node;
+};
+
+static struct rb_root root_alloc_stat;
+static struct rb_root root_alloc_sorted;
+static struct rb_root root_caller_stat;
+static struct rb_root root_caller_sorted;
+
+static unsigned long total_requested, total_allocated;
+static unsigned long nr_allocs, nr_cross_allocs;
+
+#define PATH_SYS_NODE "/sys/devices/system/node"
+
+static void init_cpunode_map(void)
+{
+ FILE *fp;
+ int i;
+
+ fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
+ if (!fp) {
+ max_cpu_num = 4096;
+ return;
+ }
+
+ if (fscanf(fp, "%d", &max_cpu_num) < 1)
+ die("Failed to read 'kernel_max' from sysfs");
+ max_cpu_num++;
+
+ cpunode_map = calloc(max_cpu_num, sizeof(int));
+ if (!cpunode_map)
+ die("calloc");
+ for (i = 0; i < max_cpu_num; i++)
+ cpunode_map[i] = -1;
+ fclose(fp);
+}
+
+static void setup_cpunode_map(void)
+{
+ struct dirent *dent1, *dent2;
+ DIR *dir1, *dir2;
+ unsigned int cpu, mem;
+ char buf[PATH_MAX];
+
+ init_cpunode_map();
+
+ dir1 = opendir(PATH_SYS_NODE);
+ if (!dir1)
+ return;
+
+ while ((dent1 = readdir(dir1)) != NULL) {
+ if (dent1->d_type != DT_DIR ||
+ sscanf(dent1->d_name, "node%u", &mem) < 1)
+ continue;
+
+ snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
+ dir2 = opendir(buf);
+ if (!dir2)
+ continue;
+ while ((dent2 = readdir(dir2)) != NULL) {
+ if (dent2->d_type != DT_LNK ||
+ sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
+ continue;
+ cpunode_map[cpu] = mem;
+ }
+ }
+}
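+
+/*
+ * Illustrative sysfs layout walked above (node and cpu numbers vary by
+ * system):
+ *
+ *	/sys/devices/system/node/node0/cpu0  ->  cpunode_map[0] = 0
+ *	/sys/devices/system/node/node1/cpu4  ->  cpunode_map[4] = 1
+ */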
+
+static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
+ int bytes_req, int bytes_alloc, int cpu)
+{
+ struct rb_node **node = &root_alloc_stat.rb_node;
+ struct rb_node *parent = NULL;
+ struct alloc_stat *data = NULL;
+
+ while (*node) {
+ parent = *node;
+ data = rb_entry(*node, struct alloc_stat, node);
+
+ if (ptr > data->ptr)
+ node = &(*node)->rb_right;
+ else if (ptr < data->ptr)
+ node = &(*node)->rb_left;
+ else
+ break;
+ }
+
+ if (data && data->ptr == ptr) {
+ data->hit++;
+ data->bytes_req += bytes_req;
+ data->bytes_alloc += bytes_alloc;
+ } else {
+ data = malloc(sizeof(*data));
+ if (!data)
+ die("malloc");
+ data->ptr = ptr;
+ data->pingpong = 0;
+ data->hit = 1;
+ data->bytes_req = bytes_req;
+ data->bytes_alloc = bytes_alloc;
+
+ rb_link_node(&data->node, parent, node);
+ rb_insert_color(&data->node, &root_alloc_stat);
+ }
+ data->call_site = call_site;
+ data->alloc_cpu = cpu;
+}
+
+static void insert_caller_stat(unsigned long call_site,
+ int bytes_req, int bytes_alloc)
+{
+ struct rb_node **node = &root_caller_stat.rb_node;
+ struct rb_node *parent = NULL;
+ struct alloc_stat *data = NULL;
+
+ while (*node) {
+ parent = *node;
+ data = rb_entry(*node, struct alloc_stat, node);
+
+ if (call_site > data->call_site)
+ node = &(*node)->rb_right;
+ else if (call_site < data->call_site)
+ node = &(*node)->rb_left;
+ else
+ break;
+ }
+
+ if (data && data->call_site == call_site) {
+ data->hit++;
+ data->bytes_req += bytes_req;
+ data->bytes_alloc += bytes_alloc;
+ } else {
+ data = malloc(sizeof(*data));
+ if (!data)
+ die("malloc");
+ data->call_site = call_site;
+ data->pingpong = 0;
+ data->hit = 1;
+ data->bytes_req = bytes_req;
+ data->bytes_alloc = bytes_alloc;
+
+ rb_link_node(&data->node, parent, node);
+ rb_insert_color(&data->node, &root_caller_stat);
+ }
+}
+
+static void process_alloc_event(void *data,
+ struct event *event,
+ int cpu,
+ u64 timestamp __used,
+ struct thread *thread __used,
+ int node)
+{
+ unsigned long call_site;
+ unsigned long ptr;
+ int bytes_req;
+ int bytes_alloc;
+ int node1, node2;
+
+ ptr = raw_field_value(event, "ptr", data);
+ call_site = raw_field_value(event, "call_site", data);
+ bytes_req = raw_field_value(event, "bytes_req", data);
+ bytes_alloc = raw_field_value(event, "bytes_alloc", data);
+
+ insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
+ insert_caller_stat(call_site, bytes_req, bytes_alloc);
+
+ total_requested += bytes_req;
+ total_allocated += bytes_alloc;
+
+ if (node) {
+ node1 = cpunode_map[cpu];
+ node2 = raw_field_value(event, "node", data);
+ if (node1 != node2)
+ nr_cross_allocs++;
+ }
+ nr_allocs++;
+}
+
+static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
+static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);
+
+static struct alloc_stat *search_alloc_stat(unsigned long ptr,
+ unsigned long call_site,
+ struct rb_root *root,
+ sort_fn_t sort_fn)
+{
+ struct rb_node *node = root->rb_node;
+ struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
+
+ while (node) {
+ struct alloc_stat *data;
+ int cmp;
+
+ data = rb_entry(node, struct alloc_stat, node);
+
+ cmp = sort_fn(&key, data);
+ if (cmp < 0)
+ node = node->rb_left;
+ else if (cmp > 0)
+ node = node->rb_right;
+ else
+ return data;
+ }
+ return NULL;
+}
+
+static void process_free_event(void *data,
+ struct event *event,
+ int cpu,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ unsigned long ptr;
+ struct alloc_stat *s_alloc, *s_caller;
+
+ ptr = raw_field_value(event, "ptr", data);
+
+ s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
+ if (!s_alloc)
+ return;
+
+ if (cpu != s_alloc->alloc_cpu) {
+ s_alloc->pingpong++;
+
+ s_caller = search_alloc_stat(0, s_alloc->call_site,
+ &root_caller_stat, callsite_cmp);
+ assert(s_caller);
+ s_caller->pingpong++;
+ }
+ s_alloc->alloc_cpu = -1;
+}
+
+static void
+process_raw_event(event_t *raw_event __used, void *data,
+ int cpu, u64 timestamp, struct thread *thread)
+{
+ struct event *event;
+ int type;
+
+ type = trace_parse_common_type(data);
+ event = trace_find_event(type);
+
+ if (!strcmp(event->name, "kmalloc") ||
+ !strcmp(event->name, "kmem_cache_alloc")) {
+ process_alloc_event(data, event, cpu, timestamp, thread, 0);
+ return;
+ }
+
+ if (!strcmp(event->name, "kmalloc_node") ||
+ !strcmp(event->name, "kmem_cache_alloc_node")) {
+ process_alloc_event(data, event, cpu, timestamp, thread, 1);
+ return;
+ }
+
+ if (!strcmp(event->name, "kfree") ||
+ !strcmp(event->name, "kmem_cache_free")) {
+ process_free_event(data, event, cpu, timestamp, thread);
+ return;
+ }
+}
+
+static int process_sample_event(event_t *event, struct perf_session *session)
+{
+ struct sample_data data;
+ struct thread *thread;
+
+ memset(&data, 0, sizeof(data));
+ data.time = -1;
+ data.cpu = -1;
+ data.period = 1;
+
+ event__parse_sample(event, session->sample_type, &data);
+
+ dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
+ data.pid, data.tid, data.ip, data.period);
+
+ thread = perf_session__findnew(session, event->ip.pid);
+ if (thread == NULL) {
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+
+ process_raw_event(event, data.raw_data, data.cpu,
+ data.time, thread);
+
+ return 0;
+}
+
+static struct perf_event_ops event_ops = {
+ .sample = process_sample_event,
+ .comm = event__process_comm,
+ .ordered_samples = true,
+};
+
+static double fragmentation(unsigned long n_req, unsigned long n_alloc)
+{
+ if (n_alloc == 0)
+ return 0.0;
+ else
+ return 100.0 - (100.0 * n_req / n_alloc);
+}
+
+static void __print_result(struct rb_root *root, struct perf_session *session,
+ int n_lines, int is_caller)
+{
+ struct rb_node *next;
+ struct machine *machine;
+
+ printf("%.102s\n", graph_dotted_line);
+ printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
+ printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
+ printf("%.102s\n", graph_dotted_line);
+
+ next = rb_first(root);
+
+ machine = perf_session__find_host_machine(session);
+ if (!machine) {
+ pr_err("__print_result: couldn't find kernel information\n");
+ return;
+ }
+ while (next && n_lines--) {
+ struct alloc_stat *data = rb_entry(next, struct alloc_stat,
+ node);
+ struct symbol *sym = NULL;
+ struct map *map;
+ char buf[BUFSIZ];
+ u64 addr;
+
+ if (is_caller) {
+ addr = data->call_site;
+ if (!raw_ip)
+ sym = machine__find_kernel_function(machine, addr, &map, NULL);
+ } else
+ addr = data->ptr;
+
+ if (sym != NULL)
+ snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
+ addr - map->unmap_ip(map, sym->start));
+ else
+ snprintf(buf, sizeof(buf), "%#Lx", addr);
+ printf(" %-34s |", buf);
+
+ printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
+ (unsigned long long)data->bytes_alloc,
+ (unsigned long)data->bytes_alloc / data->hit,
+ (unsigned long long)data->bytes_req,
+ (unsigned long)data->bytes_req / data->hit,
+ (unsigned long)data->hit,
+ (unsigned long)data->pingpong,
+ fragmentation(data->bytes_req, data->bytes_alloc));
+
+ next = rb_next(next);
+ }
+
+ if (n_lines == -1)
+ printf(" ... | ... | ... | ... | ... | ... \n");
+
+ printf("%.102s\n", graph_dotted_line);
+}
+
+static void print_summary(void)
+{
+ printf("\nSUMMARY\n=======\n");
+ printf("Total bytes requested: %lu\n", total_requested);
+ printf("Total bytes allocated: %lu\n", total_allocated);
+ printf("Total bytes wasted on internal fragmentation: %lu\n",
+ total_allocated - total_requested);
+ printf("Internal fragmentation: %f%%\n",
+ fragmentation(total_requested, total_allocated));
+ printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
+}
+
+static void print_result(struct perf_session *session)
+{
+ if (caller_flag)
+ __print_result(&root_caller_sorted, session, caller_lines, 1);
+ if (alloc_flag)
+ __print_result(&root_alloc_sorted, session, alloc_lines, 0);
+ print_summary();
+}
+
+struct sort_dimension {
+ const char name[20];
+ sort_fn_t cmp;
+ struct list_head list;
+};
+
+static LIST_HEAD(caller_sort);
+static LIST_HEAD(alloc_sort);
+
+static void sort_insert(struct rb_root *root, struct alloc_stat *data,
+ struct list_head *sort_list)
+{
+ struct rb_node **new = &(root->rb_node);
+ struct rb_node *parent = NULL;
+ struct sort_dimension *sort;
+
+ while (*new) {
+ struct alloc_stat *this;
+ int cmp = 0;
+
+ this = rb_entry(*new, struct alloc_stat, node);
+ parent = *new;
+
+ list_for_each_entry(sort, sort_list, list) {
+ cmp = sort->cmp(data, this);
+ if (cmp)
+ break;
+ }
+
+ if (cmp > 0)
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
+ }
+
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+}
+
+static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
+ struct list_head *sort_list)
+{
+ struct rb_node *node;
+ struct alloc_stat *data;
+
+ for (;;) {
+ node = rb_first(root);
+ if (!node)
+ break;
+
+ rb_erase(node, root);
+ data = rb_entry(node, struct alloc_stat, node);
+ sort_insert(root_sorted, data, sort_list);
+ }
+}
+
+static void sort_result(void)
+{
+ __sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
+ __sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
+}
+
+static int __cmd_kmem(void)
+{
+ int err = -EINVAL;
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0, false);
+ if (session == NULL)
+ return -ENOMEM;
+
+ if (perf_session__create_kernel_maps(session) < 0)
+ goto out_delete;
+
+ if (!perf_session__has_traces(session, "kmem record"))
+ goto out_delete;
+
+ setup_pager();
+ err = perf_session__process_events(session, &event_ops);
+ if (err != 0)
+ goto out_delete;
+ sort_result();
+ print_result(session);
+out_delete:
+ perf_session__delete(session);
+ return err;
+}
+
+static const char * const kmem_usage[] = {
+ "perf kmem [<options>] {record|stat}",
+ NULL
+};
+
+static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
+{
+ if (l->ptr < r->ptr)
+ return -1;
+ else if (l->ptr > r->ptr)
+ return 1;
+ return 0;
+}
+
+static struct sort_dimension ptr_sort_dimension = {
+ .name = "ptr",
+ .cmp = ptr_cmp,
+};
+
+static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
+{
+ if (l->call_site < r->call_site)
+ return -1;
+ else if (l->call_site > r->call_site)
+ return 1;
+ return 0;
+}
+
+static struct sort_dimension callsite_sort_dimension = {
+ .name = "callsite",
+ .cmp = callsite_cmp,
+};
+
+static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
+{
+ if (l->hit < r->hit)
+ return -1;
+ else if (l->hit > r->hit)
+ return 1;
+ return 0;
+}
+
+static struct sort_dimension hit_sort_dimension = {
+ .name = "hit",
+ .cmp = hit_cmp,
+};
+
+static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
+{
+ if (l->bytes_alloc < r->bytes_alloc)
+ return -1;
+ else if (l->bytes_alloc > r->bytes_alloc)
+ return 1;
+ return 0;
+}
+
+static struct sort_dimension bytes_sort_dimension = {
+ .name = "bytes",
+ .cmp = bytes_cmp,
+};
+
+static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
+{
+ double x, y;
+
+ x = fragmentation(l->bytes_req, l->bytes_alloc);
+ y = fragmentation(r->bytes_req, r->bytes_alloc);
+
+ if (x < y)
+ return -1;
+ else if (x > y)
+ return 1;
+ return 0;
+}
+
+static struct sort_dimension frag_sort_dimension = {
+ .name = "frag",
+ .cmp = frag_cmp,
+};
+
+static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
+{
+ if (l->pingpong < r->pingpong)
+ return -1;
+ else if (l->pingpong > r->pingpong)
+ return 1;
+ return 0;
+}
+
+static struct sort_dimension pingpong_sort_dimension = {
+ .name = "pingpong",
+ .cmp = pingpong_cmp,
+};
+
+static struct sort_dimension *avail_sorts[] = {
+ &ptr_sort_dimension,
+ &callsite_sort_dimension,
+ &hit_sort_dimension,
+ &bytes_sort_dimension,
+ &frag_sort_dimension,
+ &pingpong_sort_dimension,
+};
+
+#define NUM_AVAIL_SORTS \
+ (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))
+
+static int sort_dimension__add(const char *tok, struct list_head *list)
+{
+ struct sort_dimension *sort;
+ int i;
+
+ for (i = 0; i < NUM_AVAIL_SORTS; i++) {
+ if (!strcmp(avail_sorts[i]->name, tok)) {
+ sort = malloc(sizeof(*sort));
+ if (!sort)
+ die("malloc");
+ memcpy(sort, avail_sorts[i], sizeof(*sort));
+ list_add_tail(&sort->list, list);
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static int setup_sorting(struct list_head *sort_list, const char *arg)
+{
+	char *tok;
+	char *str = strdup(arg);
+	char *pos = str; /* strsep() advances pos; str keeps the original pointer for free() */
+
+	if (!str)
+		die("strdup");
+
+	while (true) {
+		tok = strsep(&pos, ",");
+		if (!tok)
+			break;
+		if (sort_dimension__add(tok, sort_list) < 0) {
+			error("Unknown --sort key: '%s'", tok);
+			free(str);
+			return -1;
+		}
+	}
+
+	free(str);
+	return 0;
+}
+
+static int parse_sort_opt(const struct option *opt __used,
+ const char *arg, int unset __used)
+{
+ if (!arg)
+ return -1;
+
+ if (caller_flag > alloc_flag)
+ return setup_sorting(&caller_sort, arg);
+ else
+ return setup_sorting(&alloc_sort, arg);
+
+ return 0;
+}
+
+static int parse_caller_opt(const struct option *opt __used,
+ const char *arg __used, int unset __used)
+{
+ caller_flag = (alloc_flag + 1);
+ return 0;
+}
+
+static int parse_alloc_opt(const struct option *opt __used,
+ const char *arg __used, int unset __used)
+{
+ alloc_flag = (caller_flag + 1);
+ return 0;
+}
+
+static int parse_line_opt(const struct option *opt __used,
+ const char *arg, int unset __used)
+{
+ int lines;
+
+ if (!arg)
+ return -1;
+
+ lines = strtoul(arg, NULL, 10);
+
+ if (caller_flag > alloc_flag)
+ caller_lines = lines;
+ else
+ alloc_lines = lines;
+
+ return 0;
+}
+
+static const struct option kmem_options[] = {
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
+ "show per-callsite statistics",
+ parse_caller_opt),
+ OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
+ "show per-allocation statistics",
+ parse_alloc_opt),
+ OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
+		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag",
+ parse_sort_opt),
+ OPT_CALLBACK('l', "line", NULL, "num",
+ "show n lines",
+ parse_line_opt),
+ OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
+ OPT_END()
+};
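+
+/*
+ * Illustrative usage (the workload is a placeholder):
+ *
+ *	perf kmem record -- some_workload
+ *	perf kmem stat --caller -s frag,hit,bytes -l 20
+ */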
+
+static const char *record_args[] = {
+ "record",
+ "-a",
+ "-R",
+ "-f",
+ "-c", "1",
+ "-e", "kmem:kmalloc",
+ "-e", "kmem:kmalloc_node",
+ "-e", "kmem:kfree",
+ "-e", "kmem:kmem_cache_alloc",
+ "-e", "kmem:kmem_cache_alloc_node",
+ "-e", "kmem:kmem_cache_free",
+};
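+
+/*
+ * For reference, the argument list above corresponds to running:
+ *
+ *	perf record -a -R -f -c 1 \
+ *		-e kmem:kmalloc -e kmem:kmalloc_node -e kmem:kfree \
+ *		-e kmem:kmem_cache_alloc -e kmem:kmem_cache_alloc_node \
+ *		-e kmem:kmem_cache_free <extra args>
+ */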
+
+static int __cmd_record(int argc, const char **argv)
+{
+ unsigned int rec_argc, i, j;
+ const char **rec_argv;
+
+ rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = strdup(record_args[i]);
+
+ for (j = 1; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+int cmd_kmem(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);
+
+ if (!argc)
+ usage_with_options(kmem_usage, kmem_options);
+
+ symbol__init();
+
+ if (!strncmp(argv[0], "rec", 3)) {
+ return __cmd_record(argc, argv);
+ } else if (!strcmp(argv[0], "stat")) {
+ setup_cpunode_map();
+
+ if (list_empty(&caller_sort))
+ setup_sorting(&caller_sort, default_sort_order);
+ if (list_empty(&alloc_sort))
+ setup_sorting(&alloc_sort, default_sort_order);
+
+ return __cmd_kmem();
+ } else
+ usage_with_options(kmem_usage, kmem_options);
+
+ return 0;
+}
+
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
new file mode 100644
index 00000000..34d1e853
--- /dev/null
+++ b/tools/perf/builtin-kvm.c
@@ -0,0 +1,144 @@
+#include "builtin.h"
+#include "perf.h"
+
+#include "util/util.h"
+#include "util/cache.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/header.h"
+#include "util/session.h"
+
+#include "util/parse-options.h"
+#include "util/trace-event.h"
+
+#include "util/debug.h"
+
+#include <sys/prctl.h>
+
+#include <semaphore.h>
+#include <pthread.h>
+#include <math.h>
+
+static const char *file_name;
+static char name_buffer[256];
+
+bool perf_host = 1;
+bool perf_guest;
+
+static const char * const kvm_usage[] = {
+ "perf kvm [<options>] {top|record|report|diff|buildid-list}",
+ NULL
+};
+
+static const struct option kvm_options[] = {
+ OPT_STRING('i', "input", &file_name, "file",
+ "Input file name"),
+ OPT_STRING('o', "output", &file_name, "file",
+ "Output file name"),
+ OPT_BOOLEAN(0, "guest", &perf_guest,
+ "Collect guest os data"),
+ OPT_BOOLEAN(0, "host", &perf_host,
+		    "Collect host os data"),
+ OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
+ "guest mount directory under which every guest os"
+ " instance has a subdir"),
+ OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
+ "file", "file saving guest os vmlinux"),
+ OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
+ "file", "file saving guest os /proc/kallsyms"),
+ OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
+ "file", "file saving guest os /proc/modules"),
+ OPT_END()
+};
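+
+/*
+ * Illustrative usage; the guest kallsyms/modules paths are placeholders:
+ *
+ *	perf kvm --host --guest --guestkallsyms=/tmp/kallsyms \
+ *		--guestmodules=/tmp/modules record -a
+ *	perf kvm --host --guest report
+ */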
+
+static int __cmd_record(int argc, const char **argv)
+{
+ int rec_argc, i = 0, j;
+ const char **rec_argv;
+
+ rec_argc = argc + 2;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ rec_argv[i++] = strdup("record");
+ rec_argv[i++] = strdup("-o");
+ rec_argv[i++] = strdup(file_name);
+ for (j = 1; j < argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+static int __cmd_report(int argc, const char **argv)
+{
+ int rec_argc, i = 0, j;
+ const char **rec_argv;
+
+ rec_argc = argc + 2;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ rec_argv[i++] = strdup("report");
+ rec_argv[i++] = strdup("-i");
+ rec_argv[i++] = strdup(file_name);
+ for (j = 1; j < argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ return cmd_report(i, rec_argv, NULL);
+}
+
+static int __cmd_buildid_list(int argc, const char **argv)
+{
+ int rec_argc, i = 0, j;
+ const char **rec_argv;
+
+ rec_argc = argc + 2;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ rec_argv[i++] = strdup("buildid-list");
+ rec_argv[i++] = strdup("-i");
+ rec_argv[i++] = strdup(file_name);
+ for (j = 1; j < argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ return cmd_buildid_list(i, rec_argv, NULL);
+}
+
+int cmd_kvm(int argc, const char **argv, const char *prefix __used)
+{
+ perf_host = perf_guest = 0;
+
+ argc = parse_options(argc, argv, kvm_options, kvm_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc)
+ usage_with_options(kvm_usage, kvm_options);
+
+ if (!perf_host)
+ perf_guest = 1;
+
+ if (!file_name) {
+ if (perf_host && !perf_guest)
+ sprintf(name_buffer, "perf.data.host");
+ else if (!perf_host && perf_guest)
+ sprintf(name_buffer, "perf.data.guest");
+ else
+ sprintf(name_buffer, "perf.data.kvm");
+ file_name = name_buffer;
+ }
+
+ if (!strncmp(argv[0], "rec", 3))
+ return __cmd_record(argc, argv);
+ else if (!strncmp(argv[0], "rep", 3))
+ return __cmd_report(argc, argv);
+ else if (!strncmp(argv[0], "diff", 4))
+ return cmd_diff(argc, argv, NULL);
+ else if (!strncmp(argv[0], "top", 3))
+ return cmd_top(argc, argv, NULL);
+ else if (!strncmp(argv[0], "buildid-list", 12))
+ return __cmd_buildid_list(argc, argv);
+ else
+ usage_with_options(kvm_usage, kvm_options);
+
+ return 0;
+}
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
new file mode 100644
index 00000000..d88c6961
--- /dev/null
+++ b/tools/perf/builtin-list.c
@@ -0,0 +1,21 @@
+/*
+ * builtin-list.c
+ *
+ * Builtin list command: list all event types
+ *
+ * Copyright (C) 2009, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
+ */
+#include "builtin.h"
+
+#include "perf.h"
+
+#include "util/parse-events.h"
+#include "util/cache.h"
+
+int cmd_list(int argc __used, const char **argv __used, const char *prefix __used)
+{
+ setup_pager();
+ print_events();
+ return 0;
+}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
new file mode 100644
index 00000000..821c1586
--- /dev/null
+++ b/tools/perf/builtin-lock.c
@@ -0,0 +1,1005 @@
+#include "builtin.h"
+#include "perf.h"
+
+#include "util/util.h"
+#include "util/cache.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/header.h"
+
+#include "util/parse-options.h"
+#include "util/trace-event.h"
+
+#include "util/debug.h"
+#include "util/session.h"
+
+#include <sys/types.h>
+#include <sys/prctl.h>
+#include <semaphore.h>
+#include <pthread.h>
+#include <math.h>
+#include <limits.h>
+
+#include <linux/list.h>
+#include <linux/hash.h>
+
+static struct perf_session *session;
+
+/* based on kernel/lockdep.c */
+#define LOCKHASH_BITS 12
+#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
+
+static struct list_head lockhash_table[LOCKHASH_SIZE];
+
+#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
+#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
+
+struct lock_stat {
+ struct list_head hash_entry;
+ struct rb_node rb; /* used for sorting */
+
+ /*
+ * FIXME: raw_field_value() returns unsigned long long,
+ * so the address of lockdep_map should be treated as 64-bit.
+ * Is there a better solution?
+ */
+ void *addr; /* address of lockdep_map, used as ID */
+ char *name; /* for strcpy(), we cannot use const */
+
+ unsigned int nr_acquire;
+ unsigned int nr_acquired;
+ unsigned int nr_contended;
+ unsigned int nr_release;
+
+ unsigned int nr_readlock;
+ unsigned int nr_trylock;
+ /* these times are in nano sec. */
+ u64 wait_time_total;
+ u64 wait_time_min;
+ u64 wait_time_max;
+
+ int discard; /* flag of blacklist */
+};
+
+/*
+ * States of lock_seq_stat
+ *
+ * UNINITIALIZED is required for detecting the first acquire event.
+ * Due to the nature of lock events, there is no guarantee
+ * that the first event seen for a lock is acquire;
+ * it can be acquired, contended or release.
+ */
+#define SEQ_STATE_UNINITIALIZED 0 /* initial state */
+#define SEQ_STATE_RELEASED 1
+#define SEQ_STATE_ACQUIRING 2
+#define SEQ_STATE_ACQUIRED 3
+#define SEQ_STATE_READ_ACQUIRED 4
+#define SEQ_STATE_CONTENDED 5
+
+/*
+ * MAX_LOCK_DEPTH
+ * Imported from include/linux/sched.h.
+ * Should this be kept in sync with it?
+ */
+#define MAX_LOCK_DEPTH 48
+
+/*
+ * struct lock_seq_stat:
+ * Holds the state of one lock sequence
+ * 1) acquire -> acquired -> release
+ * 2) acquire -> contended -> acquired -> release
+ * 3) acquire (with read or try) -> release
+ * 4) Are there other patterns?
+ */
+struct lock_seq_stat {
+ struct list_head list;
+ int state;
+ u64 prev_event_time;
+ void *addr;
+
+ int read_count;
+};
+
+struct thread_stat {
+ struct rb_node rb;
+
+ u32 tid;
+ struct list_head seq_list;
+};
+
+static struct rb_root thread_stats;
+
+static struct thread_stat *thread_stat_find(u32 tid)
+{
+ struct rb_node *node;
+ struct thread_stat *st;
+
+ node = thread_stats.rb_node;
+ while (node) {
+ st = container_of(node, struct thread_stat, rb);
+ if (st->tid == tid)
+ return st;
+ else if (tid < st->tid)
+ node = node->rb_left;
+ else
+ node = node->rb_right;
+ }
+
+ return NULL;
+}
+
+static void thread_stat_insert(struct thread_stat *new)
+{
+ struct rb_node **rb = &thread_stats.rb_node;
+ struct rb_node *parent = NULL;
+ struct thread_stat *p;
+
+ while (*rb) {
+ p = container_of(*rb, struct thread_stat, rb);
+ parent = *rb;
+
+ if (new->tid < p->tid)
+ rb = &(*rb)->rb_left;
+ else if (new->tid > p->tid)
+ rb = &(*rb)->rb_right;
+ else
+ BUG_ON("inserting invalid thread_stat\n");
+ }
+
+ rb_link_node(&new->rb, parent, rb);
+ rb_insert_color(&new->rb, &thread_stats);
+}
+
+static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
+{
+ struct thread_stat *st;
+
+ st = thread_stat_find(tid);
+ if (st)
+ return st;
+
+ st = zalloc(sizeof(struct thread_stat));
+ if (!st)
+ die("memory allocation failed\n");
+
+ st->tid = tid;
+ INIT_LIST_HEAD(&st->seq_list);
+
+ thread_stat_insert(st);
+
+ return st;
+}
+
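+/*
+ * thread_stat_findnew starts out pointing at the _first variant below,
+ * which skips the (still empty) rbtree lookup; after the first insertion
+ * it switches itself to the _after_first variant for all later calls.
+ */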
+static struct thread_stat *thread_stat_findnew_first(u32 tid);
+static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
+ thread_stat_findnew_first;
+
+static struct thread_stat *thread_stat_findnew_first(u32 tid)
+{
+ struct thread_stat *st;
+
+ st = zalloc(sizeof(struct thread_stat));
+ if (!st)
+ die("memory allocation failed\n");
+ st->tid = tid;
+ INIT_LIST_HEAD(&st->seq_list);
+
+ rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
+ rb_insert_color(&st->rb, &thread_stats);
+
+ thread_stat_findnew = thread_stat_findnew_after_first;
+ return st;
+}
+
+/* build a simple key comparison function: returns whether one is bigger than two */
+#define SINGLE_KEY(member) \
+ static int lock_stat_key_ ## member(struct lock_stat *one, \
+ struct lock_stat *two) \
+ { \
+ return one->member > two->member; \
+ }
+
+SINGLE_KEY(nr_acquired)
+SINGLE_KEY(nr_contended)
+SINGLE_KEY(wait_time_total)
+SINGLE_KEY(wait_time_min)
+SINGLE_KEY(wait_time_max)
+
+struct lock_key {
+ /*
+ * name: the value specified by the user;
+ * this should be simpler than the raw member name,
+ * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
+ */
+ const char *name;
+ int (*key)(struct lock_stat*, struct lock_stat*);
+};
+
+static const char *sort_key = "acquired";
+
+static int (*compare)(struct lock_stat *, struct lock_stat *);
+
+static struct rb_root result; /* place to store sorted data */
+
+#define DEF_KEY_LOCK(name, fn_suffix) \
+ { #name, lock_stat_key_ ## fn_suffix }
+struct lock_key keys[] = {
+ DEF_KEY_LOCK(acquired, nr_acquired),
+ DEF_KEY_LOCK(contended, nr_contended),
+ DEF_KEY_LOCK(wait_total, wait_time_total),
+ DEF_KEY_LOCK(wait_min, wait_time_min),
+ DEF_KEY_LOCK(wait_max, wait_time_max),
+
+ /* more complicated extra comparisons should go here */
+
+ { NULL, NULL }
+};
+
+static void select_key(void)
+{
+ int i;
+
+ for (i = 0; keys[i].name; i++) {
+ if (!strcmp(keys[i].name, sort_key)) {
+ compare = keys[i].key;
+ return;
+ }
+ }
+
+ die("Unknown compare key:%s\n", sort_key);
+}
+
+static void insert_to_result(struct lock_stat *st,
+ int (*bigger)(struct lock_stat *, struct lock_stat *))
+{
+ struct rb_node **rb = &result.rb_node;
+ struct rb_node *parent = NULL;
+ struct lock_stat *p;
+
+ while (*rb) {
+ p = container_of(*rb, struct lock_stat, rb);
+ parent = *rb;
+
+ if (bigger(st, p))
+ rb = &(*rb)->rb_left;
+ else
+ rb = &(*rb)->rb_right;
+ }
+
+ rb_link_node(&st->rb, parent, rb);
+ rb_insert_color(&st->rb, &result);
+}
+
+/* returns the leftmost element of result and erases it */
+static struct lock_stat *pop_from_result(void)
+{
+ struct rb_node *node = result.rb_node;
+
+ if (!node)
+ return NULL;
+
+ while (node->rb_left)
+ node = node->rb_left;
+
+ rb_erase(node, &result);
+ return container_of(node, struct lock_stat, rb);
+}
+
+static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
+{
+ struct list_head *entry = lockhashentry(addr);
+ struct lock_stat *ret, *new;
+
+ list_for_each_entry(ret, entry, hash_entry) {
+ if (ret->addr == addr)
+ return ret;
+ }
+
+ new = zalloc(sizeof(struct lock_stat));
+ if (!new)
+ goto alloc_failed;
+
+ new->addr = addr;
+ new->name = zalloc(strlen(name) + 1);
+ if (!new->name)
+ goto alloc_failed;
+ strcpy(new->name, name);
+
+ new->wait_time_min = ULLONG_MAX;
+
+ list_add(&new->hash_entry, entry);
+ return new;
+
+alloc_failed:
+ die("memory allocation failed\n");
+}
+
+static char const *input_name = "perf.data";
+
+struct raw_event_sample {
+ u32 size;
+ char data[0];
+};
+
+struct trace_acquire_event {
+ void *addr;
+ const char *name;
+ int flag;
+};
+
+struct trace_acquired_event {
+ void *addr;
+ const char *name;
+};
+
+struct trace_contended_event {
+ void *addr;
+ const char *name;
+};
+
+struct trace_release_event {
+ void *addr;
+ const char *name;
+};
+
+struct trace_lock_handler {
+ void (*acquire_event)(struct trace_acquire_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*acquired_event)(struct trace_acquired_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*contended_event)(struct trace_contended_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*release_event)(struct trace_release_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+};
+
+static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
+{
+ struct lock_seq_stat *seq;
+
+ list_for_each_entry(seq, &ts->seq_list, list) {
+ if (seq->addr == addr)
+ return seq;
+ }
+
+ seq = zalloc(sizeof(struct lock_seq_stat));
+ if (!seq)
+ die("Not enough memory\n");
+ seq->state = SEQ_STATE_UNINITIALIZED;
+ seq->addr = addr;
+
+ list_add(&seq->list, &ts->seq_list);
+ return seq;
+}
+
+enum broken_state {
+ BROKEN_ACQUIRE,
+ BROKEN_ACQUIRED,
+ BROKEN_CONTENDED,
+ BROKEN_RELEASE,
+ BROKEN_MAX,
+};
+
+static int bad_hist[BROKEN_MAX];
+
+enum acquire_flags {
+ TRY_LOCK = 1,
+ READ_LOCK = 2,
+};
+
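+/*
+ * The report_lock_*_event handlers below drive a small state machine
+ * per (thread, lock) sequence. An event arriving in an impossible state
+ * marks the whole lock_stat as discard and is counted in bad_hist.
+ */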
+static void
+report_lock_acquire_event(struct trace_acquire_event *acquire_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
+
+ ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
+ if (ls->discard)
+ return;
+
+ ts = thread_stat_findnew(thread->pid);
+ seq = get_seq(ts, acquire_event->addr);
+
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ case SEQ_STATE_RELEASED:
+ if (!acquire_event->flag) {
+ seq->state = SEQ_STATE_ACQUIRING;
+ } else {
+ if (acquire_event->flag & TRY_LOCK)
+ ls->nr_trylock++;
+ if (acquire_event->flag & READ_LOCK)
+ ls->nr_readlock++;
+ seq->state = SEQ_STATE_READ_ACQUIRED;
+ seq->read_count = 1;
+ ls->nr_acquired++;
+ }
+ break;
+ case SEQ_STATE_READ_ACQUIRED:
+ if (acquire_event->flag & READ_LOCK) {
+ seq->read_count++;
+ ls->nr_acquired++;
+ goto end;
+ } else {
+ goto broken;
+ }
+ break;
+ case SEQ_STATE_ACQUIRED:
+ case SEQ_STATE_ACQUIRING:
+ case SEQ_STATE_CONTENDED:
+broken:
+ /* broken lock sequence, discard it */
+ ls->discard = 1;
+ bad_hist[BROKEN_ACQUIRE]++;
+ list_del(&seq->list);
+ free(seq);
+ goto end;
+ break;
+ default:
+ BUG_ON("Unknown state of lock sequence found!\n");
+ break;
+ }
+
+ ls->nr_acquire++;
+ seq->prev_event_time = timestamp;
+end:
+ return;
+}
+
+static void
+report_lock_acquired_event(struct trace_acquired_event *acquired_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
+ u64 contended_term;
+
+ ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
+ if (ls->discard)
+ return;
+
+ ts = thread_stat_findnew(thread->pid);
+ seq = get_seq(ts, acquired_event->addr);
+
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ /* orphan event, do nothing */
+ return;
+ case SEQ_STATE_ACQUIRING:
+ break;
+ case SEQ_STATE_CONTENDED:
+ contended_term = timestamp - seq->prev_event_time;
+ ls->wait_time_total += contended_term;
+ if (contended_term < ls->wait_time_min)
+ ls->wait_time_min = contended_term;
+ if (ls->wait_time_max < contended_term)
+ ls->wait_time_max = contended_term;
+ break;
+ case SEQ_STATE_RELEASED:
+ case SEQ_STATE_ACQUIRED:
+ case SEQ_STATE_READ_ACQUIRED:
+ /* broken lock sequence, discard it */
+ ls->discard = 1;
+ bad_hist[BROKEN_ACQUIRED]++;
+ list_del(&seq->list);
+ free(seq);
+ goto end;
+ break;
+
+ default:
+ BUG_ON("Unknown state of lock sequence found!\n");
+ break;
+ }
+
+ seq->state = SEQ_STATE_ACQUIRED;
+ ls->nr_acquired++;
+ seq->prev_event_time = timestamp;
+end:
+ return;
+}
+
+static void
+report_lock_contended_event(struct trace_contended_event *contended_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
+
+ ls = lock_stat_findnew(contended_event->addr, contended_event->name);
+ if (ls->discard)
+ return;
+
+ ts = thread_stat_findnew(thread->pid);
+ seq = get_seq(ts, contended_event->addr);
+
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ /* orphan event, do nothing */
+ return;
+ case SEQ_STATE_ACQUIRING:
+ break;
+ case SEQ_STATE_RELEASED:
+ case SEQ_STATE_ACQUIRED:
+ case SEQ_STATE_READ_ACQUIRED:
+ case SEQ_STATE_CONTENDED:
+ /* broken lock sequence, discard it */
+ ls->discard = 1;
+ bad_hist[BROKEN_CONTENDED]++;
+ list_del(&seq->list);
+ free(seq);
+ goto end;
+ break;
+ default:
+ BUG_ON("Unknown state of lock sequence found!\n");
+ break;
+ }
+
+ seq->state = SEQ_STATE_CONTENDED;
+ ls->nr_contended++;
+ seq->prev_event_time = timestamp;
+end:
+ return;
+}
+
+static void
+report_lock_release_event(struct trace_release_event *release_event,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
+
+ ls = lock_stat_findnew(release_event->addr, release_event->name);
+ if (ls->discard)
+ return;
+
+ ts = thread_stat_findnew(thread->pid);
+ seq = get_seq(ts, release_event->addr);
+
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ goto end;
+ break;
+ case SEQ_STATE_ACQUIRED:
+ break;
+ case SEQ_STATE_READ_ACQUIRED:
+ seq->read_count--;
+ BUG_ON(seq->read_count < 0);
+ if (!seq->read_count) {
+ ls->nr_release++;
+ goto end;
+ }
+ break;
+ case SEQ_STATE_ACQUIRING:
+ case SEQ_STATE_CONTENDED:
+ case SEQ_STATE_RELEASED:
+ /* broken lock sequence, discard it */
+ ls->discard = 1;
+ bad_hist[BROKEN_RELEASE]++;
+ goto free_seq;
+ break;
+ default:
+ BUG_ON("Unknown state of lock sequence found!\n");
+ break;
+ }
+
+ ls->nr_release++;
+free_seq:
+ list_del(&seq->list);
+ free(seq);
+end:
+ return;
+}
+
+/* lock oriented handlers */
+/* TODO: handlers for CPU oriented, thread oriented */
+static struct trace_lock_handler report_lock_ops = {
+ .acquire_event = report_lock_acquire_event,
+ .acquired_event = report_lock_acquired_event,
+ .contended_event = report_lock_contended_event,
+ .release_event = report_lock_release_event,
+};
+
+static struct trace_lock_handler *trace_handler;
+
+static void
+process_lock_acquire_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_acquire_event acquire_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&acquire_event.addr, &tmp, sizeof(void *));
+ acquire_event.name = (char *)raw_field_ptr(event, "name", data);
+ acquire_event.flag = (int)raw_field_value(event, "flag", data);
+
+ if (trace_handler->acquire_event)
+ trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_lock_acquired_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_acquired_event acquired_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&acquired_event.addr, &tmp, sizeof(void *));
+ acquired_event.name = (char *)raw_field_ptr(event, "name", data);
+
+ if (trace_handler->acquired_event)
+ trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_lock_contended_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_contended_event contended_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&contended_event.addr, &tmp, sizeof(void *));
+ contended_event.name = (char *)raw_field_ptr(event, "name", data);
+
+ if (trace_handler->contended_event)
+ trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_lock_release_event(void *data,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_release_event release_event;
+ u64 tmp; /* this is required for casting... */
+
+ tmp = raw_field_value(event, "lockdep_addr", data);
+ memcpy(&release_event.addr, &tmp, sizeof(void *));
+ release_event.name = (char *)raw_field_ptr(event, "name", data);
+
+ if (trace_handler->release_event)
+ trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
+}
+
+static void
+process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
+{
+ struct event *event;
+ int type;
+
+ type = trace_parse_common_type(data);
+ event = trace_find_event(type);
+
+ if (!strcmp(event->name, "lock_acquire"))
+ process_lock_acquire_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_acquired"))
+ process_lock_acquired_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_contended"))
+ process_lock_contended_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "lock_release"))
+ process_lock_release_event(data, event, cpu, timestamp, thread);
+}
+
+static void print_bad_events(int bad, int total)
+{
+ /* Debug output; this has to be removed */
+ int i;
+ const char *name[4] =
+ { "acquire", "acquired", "contended", "release" };
+
+ pr_info("\n=== output for debug===\n\n");
+ pr_info("bad: %d, total: %d\n", bad, total);
+ pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100);
+ pr_info("histogram of events caused bad sequence\n");
+ for (i = 0; i < BROKEN_MAX; i++)
+ pr_info(" %10s: %d\n", name[i], bad_hist[i]);
+}
+
+/* TODO: various ways to print, coloring, nano or milli sec */
+static void print_result(void)
+{
+ struct lock_stat *st;
+ char cut_name[20];
+ int bad, total;
+
+ pr_info("%20s ", "Name");
+ pr_info("%10s ", "acquired");
+ pr_info("%10s ", "contended");
+
+ pr_info("%15s ", "total wait (ns)");
+ pr_info("%15s ", "max wait (ns)");
+ pr_info("%15s ", "min wait (ns)");
+
+ pr_info("\n\n");
+
+ bad = total = 0;
+ while ((st = pop_from_result())) {
+ total++;
+ if (st->discard) {
+ bad++;
+ continue;
+ }
+ bzero(cut_name, 20);
+
+ if (strlen(st->name) < 16) {
+ /* output raw name */
+ pr_info("%20s ", st->name);
+ } else {
+ strncpy(cut_name, st->name, 16);
+ cut_name[16] = '.';
+ cut_name[17] = '.';
+ cut_name[18] = '.';
+ cut_name[19] = '\0';
+ /* truncate the name to keep the output aligned */
+ pr_info("%20s ", cut_name);
+ }
+
+ pr_info("%10u ", st->nr_acquired);
+ pr_info("%10u ", st->nr_contended);
+
+ pr_info("%15llu ", st->wait_time_total);
+ pr_info("%15llu ", st->wait_time_max);
+ pr_info("%15llu ", st->wait_time_min == ULLONG_MAX ?
+ 0 : st->wait_time_min);
+ pr_info("\n");
+ }
+
+ print_bad_events(bad, total);
+}
+
+static bool info_threads, info_map;
+
+static void dump_threads(void)
+{
+ struct thread_stat *st;
+ struct rb_node *node;
+ struct thread *t;
+
+ pr_info("%10s: comm\n", "Thread ID");
+
+ node = rb_first(&thread_stats);
+ while (node) {
+ st = container_of(node, struct thread_stat, rb);
+ t = perf_session__findnew(session, st->tid);
+ pr_info("%10d: %s\n", st->tid, t->comm);
+ node = rb_next(node);
+ }
+}
+
+static void dump_map(void)
+{
+ unsigned int i;
+ struct lock_stat *st;
+
+ pr_info("Address of instance: name of class\n");
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ pr_info(" %p: %s\n", st->addr, st->name);
+ }
+ }
+}
+
+static void dump_info(void)
+{
+ if (info_threads)
+ dump_threads();
+ else if (info_map)
+ dump_map();
+ else
+ die("Unknown type of information\n");
+}
+
+static int process_sample_event(event_t *self, struct perf_session *s)
+{
+ struct sample_data data;
+ struct thread *thread;
+
+ bzero(&data, sizeof(data));
+ event__parse_sample(self, s->sample_type, &data);
+
+ thread = perf_session__findnew(s, data.tid);
+ if (thread == NULL) {
+ pr_debug("problem processing %d event, skipping it.\n",
+ self->header.type);
+ return -1;
+ }
+
+ process_raw_event(data.raw_data, data.cpu, data.time, thread);
+
+ return 0;
+}
+
+static struct perf_event_ops eops = {
+ .sample = process_sample_event,
+ .comm = event__process_comm,
+ .ordered_samples = true,
+};
+
+static int read_events(void)
+{
+ session = perf_session__new(input_name, O_RDONLY, 0, false);
+ if (!session)
+ die("Initializing perf session failed\n");
+
+ return perf_session__process_events(session, &eops);
+}
+
+static void sort_result(void)
+{
+ unsigned int i;
+ struct lock_stat *st;
+
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ insert_to_result(st, compare);
+ }
+ }
+}
+
+static void __cmd_report(void)
+{
+ setup_pager();
+ select_key();
+ read_events();
+ sort_result();
+ print_result();
+}
+
+static const char * const report_usage[] = {
+ "perf lock report [<options>]",
+ NULL
+};
+
+static const struct option report_options[] = {
+ OPT_STRING('k', "key", &sort_key, "acquired",
+ "key for sorting"),
+ /* TODO: type */
+ OPT_END()
+};
+
+static const char * const info_usage[] = {
+ "perf lock info [<options>]",
+ NULL
+};
+
+static const struct option info_options[] = {
+ OPT_BOOLEAN('t', "threads", &info_threads,
+ "dump thread list in perf.data"),
+ OPT_BOOLEAN('m', "map", &info_map,
+ "map of lock instances (name:address table)"),
+ OPT_END()
+};
+
+static const char * const lock_usage[] = {
+ "perf lock [<options>] {record|trace|report}",
+ NULL
+};
+
+static const struct option lock_options[] = {
+ OPT_STRING('i', "input", &input_name, "file", "input file name"),
+ OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
+ OPT_END()
+};
+
+static const char *record_args[] = {
+ "record",
+ "-R",
+ "-f",
+ "-m", "1024",
+ "-c", "1",
+ "-e", "lock:lock_acquire:r",
+ "-e", "lock:lock_acquired:r",
+ "-e", "lock:lock_contended:r",
+ "-e", "lock:lock_release:r",
+};
+
+static int __cmd_record(int argc, const char **argv)
+{
+ unsigned int rec_argc, i, j;
+ const char **rec_argv;
+
+ rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = strdup(record_args[i]);
+
+ for (j = 1; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+int cmd_lock(int argc, const char **argv, const char *prefix __used)
+{
+ unsigned int i;
+
+ symbol__init();
+ for (i = 0; i < LOCKHASH_SIZE; i++)
+ INIT_LIST_HEAD(lockhash_table + i);
+
+ argc = parse_options(argc, argv, lock_options, lock_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc)
+ usage_with_options(lock_usage, lock_options);
+
+ if (!strncmp(argv[0], "rec", 3)) {
+ return __cmd_record(argc, argv);
+ } else if (!strncmp(argv[0], "report", 6)) {
+ trace_handler = &report_lock_ops;
+ if (argc) {
+ argc = parse_options(argc, argv,
+ report_options, report_usage, 0);
+ if (argc)
+ usage_with_options(report_usage, report_options);
+ }
+ __cmd_report();
+ } else if (!strcmp(argv[0], "trace")) {
+ /* Aliased to 'perf trace' */
+ return cmd_trace(argc, argv, prefix);
+ } else if (!strcmp(argv[0], "info")) {
+ if (argc) {
+ argc = parse_options(argc, argv,
+ info_options, info_usage, 0);
+ if (argc)
+ usage_with_options(info_usage, info_options);
+ }
+ /* recycling report_lock_ops */
+ trace_handler = &report_lock_ops;
+ setup_pager();
+ read_events();
+ dump_info();
+ } else {
+ usage_with_options(lock_usage, lock_options);
+ }
+
+ return 0;
+}
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
new file mode 100644
index 00000000..199d5e19
--- /dev/null
+++ b/tools/perf/builtin-probe.c
@@ -0,0 +1,269 @@
+/*
+ * builtin-probe.c
+ *
+ * Builtin probe command: Set up probe events by C expression
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#define _GNU_SOURCE
+#include <sys/utsname.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+
+#undef _GNU_SOURCE
+#include "perf.h"
+#include "builtin.h"
+#include "util/util.h"
+#include "util/strlist.h"
+#include "util/symbol.h"
+#include "util/debug.h"
+#include "util/debugfs.h"
+#include "util/parse-options.h"
+#include "util/probe-finder.h"
+#include "util/probe-event.h"
+
+#define MAX_PATH_LEN 256
+
+/* Session management structure */
+static struct {
+ bool list_events;
+ bool force_add;
+ bool show_lines;
+ int nevents;
+ struct perf_probe_event events[MAX_PROBES];
+ struct strlist *dellist;
+ struct line_range line_range;
+ int max_probe_points;
+} params;
+
+
+/* Parse an event definition. Note that any error must be fatal. */
+static int parse_probe_event(const char *str)
+{
+ struct perf_probe_event *pev = &params.events[params.nevents];
+ int ret;
+
+ pr_debug("probe-definition(%d): %s\n", params.nevents, str);
+ if (++params.nevents == MAX_PROBES) {
+ pr_err("Too many probes (> %d) were specified.", MAX_PROBES);
+ return -1;
+ }
+
+ /* Parse a perf-probe command into event */
+ ret = parse_perf_probe_command(str, pev);
+ pr_debug("%d arguments\n", pev->nargs);
+
+ return ret;
+}
+
+static int parse_probe_event_argv(int argc, const char **argv)
+{
+ int i, len, ret;
+ char *buf;
+
+ /* Join the remaining arguments into one string */
+ len = 0;
+ for (i = 0; i < argc; i++)
+ len += strlen(argv[i]) + 1;
+ buf = zalloc(len + 1);
+ if (buf == NULL)
+ return -ENOMEM;
+ len = 0;
+ for (i = 0; i < argc; i++)
+ len += sprintf(&buf[len], "%s ", argv[i]);
+ ret = parse_probe_event(buf);
+ free(buf);
+ return ret;
+}
+
+static int opt_add_probe_event(const struct option *opt __used,
+ const char *str, int unset __used)
+{
+ if (str)
+ return parse_probe_event(str);
+ else
+ return 0;
+}
+
+static int opt_del_probe_event(const struct option *opt __used,
+ const char *str, int unset __used)
+{
+ if (str) {
+ if (!params.dellist)
+ params.dellist = strlist__new(true, NULL);
+ strlist__add(params.dellist, str);
+ }
+ return 0;
+}
+
+#ifdef DWARF_SUPPORT
+static int opt_show_lines(const struct option *opt __used,
+ const char *str, int unset __used)
+{
+ int ret = 0;
+
+ if (str)
+ ret = parse_line_range_desc(str, &params.line_range);
+ INIT_LIST_HEAD(&params.line_range.line_list);
+ params.show_lines = true;
+
+ return ret;
+}
+#endif
+
+static const char * const probe_usage[] = {
+ "perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]",
+ "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
+ "perf probe [<options>] --del '[GROUP:]EVENT' ...",
+ "perf probe --list",
+#ifdef DWARF_SUPPORT
+ "perf probe --line 'LINEDESC'",
+#endif
+ NULL
+};
+
+static const struct option options[] = {
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show parsed arguments, etc)"),
+ OPT_BOOLEAN('l', "list", &params.list_events,
+ "list up current probe events"),
+ OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
+ opt_del_probe_event),
+ OPT_CALLBACK('a', "add", NULL,
+#ifdef DWARF_SUPPORT
+ "[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT"
+ " [[NAME=]ARG ...]",
+#else
+ "[EVENT=]FUNC[+OFF|%return] [[NAME=]ARG ...]",
+#endif
+ "probe point definition, where\n"
+ "\t\tGROUP:\tGroup name (optional)\n"
+ "\t\tEVENT:\tEvent name\n"
+ "\t\tFUNC:\tFunction name\n"
+ "\t\tOFF:\tOffset from function entry (in byte)\n"
+ "\t\t%return:\tPut the probe at function return\n"
+#ifdef DWARF_SUPPORT
+ "\t\tSRC:\tSource code path\n"
+ "\t\tRL:\tRelative line number from function entry.\n"
+ "\t\tAL:\tAbsolute line number in file.\n"
+ "\t\tPT:\tLazy expression of line code.\n"
+ "\t\tARG:\tProbe argument (local variable name or\n"
+ "\t\t\tkprobe-tracer argument format.)\n",
+#else
+ "\t\tARG:\tProbe argument (kprobe-tracer argument format.)\n",
+#endif
+ opt_add_probe_event),
+ OPT_BOOLEAN('f', "force", &params.force_add, "forcibly add events"
+ " with existing name"),
+#ifdef DWARF_SUPPORT
+ OPT_CALLBACK('L', "line", NULL,
+ "FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]",
+ "Show source code lines.", opt_show_lines),
+ OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_STRING('s', "source", &symbol_conf.source_prefix,
+ "directory", "path to kernel source"),
+#endif
+ OPT__DRY_RUN(&probe_event_dry_run),
+ OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
+ "Set how many probe points can be found for a probe."),
+ OPT_END()
+};
+
+int cmd_probe(int argc, const char **argv, const char *prefix __used)
+{
+ int ret;
+
+ argc = parse_options(argc, argv, options, probe_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (argc > 0) {
+ if (strcmp(argv[0], "-") == 0) {
+ pr_warning(" Error: '-' is not supported.\n");
+ usage_with_options(probe_usage, options);
+ }
+ ret = parse_probe_event_argv(argc, argv);
+ if (ret < 0) {
+ pr_err(" Error: Parse Error. (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ if (params.max_probe_points == 0)
+ params.max_probe_points = MAX_PROBES;
+
+ if ((!params.nevents && !params.dellist && !params.list_events &&
+ !params.show_lines))
+ usage_with_options(probe_usage, options);
+
+ if (params.list_events) {
+ if (params.nevents != 0 || params.dellist) {
+ pr_err(" Error: Don't use --list with --add/--del.\n");
+ usage_with_options(probe_usage, options);
+ }
+ if (params.show_lines) {
+ pr_err(" Error: Don't use --list with --line.\n");
+ usage_with_options(probe_usage, options);
+ }
+ ret = show_perf_probe_events();
+ if (ret < 0)
+ pr_err(" Error: Failed to show event list. (%d)\n",
+ ret);
+ return ret;
+ }
+
+#ifdef DWARF_SUPPORT
+ if (params.show_lines) {
+ if (params.nevents != 0 || params.dellist) {
+ pr_warning(" Error: Don't use --line with"
+ " --add/--del.\n");
+ usage_with_options(probe_usage, options);
+ }
+
+ ret = show_line_range(&params.line_range);
+ if (ret < 0)
+ pr_err(" Error: Failed to show lines. (%d)\n", ret);
+ return ret;
+ }
+#endif
+
+ if (params.dellist) {
+ ret = del_perf_probe_events(params.dellist);
+ strlist__delete(params.dellist);
+ if (ret < 0) {
+ pr_err(" Error: Failed to delete events. (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ if (params.nevents) {
+ ret = add_perf_probe_events(params.events, params.nevents,
+ params.force_add,
+ params.max_probe_points);
+ if (ret < 0) {
+ pr_err(" Error: Failed to add events. (%d)\n", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
new file mode 100644
index 00000000..ff77b805
--- /dev/null
+++ b/tools/perf/builtin-record.c
@@ -0,0 +1,925 @@
+/*
+ * builtin-record.c
+ *
+ * Builtin record command: Record the profile of a workload
+ * (or a CPU, or a PID) into the perf.data output file - for
+ * later analysis via perf report.
+ */
+#define _FILE_OFFSET_BITS 64
+
+#include "builtin.h"
+
+#include "perf.h"
+
+#include "util/build-id.h"
+#include "util/util.h"
+#include "util/parse-options.h"
+#include "util/parse-events.h"
+
+#include "util/header.h"
+#include "util/event.h"
+#include "util/debug.h"
+#include "util/session.h"
+#include "util/symbol.h"
+#include "util/cpumap.h"
+
+#include <unistd.h>
+#include <sched.h>
+#include <sys/mman.h>
+
+enum write_mode_t {
+ WRITE_FORCE,
+ WRITE_APPEND
+};
+
+static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
+
+static u64 user_interval = ULLONG_MAX;
+static u64 default_interval = 0;
+
+static int nr_cpus = 0;
+static unsigned int page_size;
+static unsigned int mmap_pages = 128;
+static unsigned int user_freq = UINT_MAX;
+static int freq = 1000;
+static int output;
+static int pipe_output = 0;
+static const char *output_name = "perf.data";
+static int group = 0;
+static int realtime_prio = 0;
+static bool raw_samples = false;
+static bool system_wide = false;
+static pid_t target_pid = -1;
+static pid_t target_tid = -1;
+static pid_t *all_tids = NULL;
+static int thread_num = 0;
+static pid_t child_pid = -1;
+static bool no_inherit = false;
+static enum write_mode_t write_mode = WRITE_FORCE;
+static bool call_graph = false;
+static bool inherit_stat = false;
+static bool no_samples = false;
+static bool sample_address = false;
+static bool no_buildid = false;
+
+static long samples = 0;
+static u64 bytes_written = 0;
+
+static struct pollfd *event_array;
+
+static int nr_poll = 0;
+static int nr_cpu = 0;
+
+static int file_new = 1;
+static off_t post_processing_offset;
+
+static struct perf_session *session;
+static const char *cpu_list;
+
+struct mmap_data {
+ int counter;
+ void *base;
+ unsigned int mask;
+ unsigned int prev;
+};
+
+static struct mmap_data mmap_array[MAX_NR_CPUS];
+
+static unsigned long mmap_read_head(struct mmap_data *md)
+{
+ struct perf_event_mmap_page *pc = md->base;
+ long head;
+
+ head = pc->data_head;
+ rmb();
+
+ return head;
+}
+
+static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
+{
+ struct perf_event_mmap_page *pc = md->base;
+
+ /*
+ * ensure all reads are done before we write the tail out.
+ */
+ /* mb(); */
+ pc->data_tail = tail;
+}
+
+static void advance_output(size_t size)
+{
+ bytes_written += size;
+}
+
+static void write_output(void *buf, size_t size)
+{
+ while (size) {
+ int ret = write(output, buf, size);
+
+ if (ret < 0)
+ die("failed to write");
+
+ size -= ret;
+ buf += ret;
+
+ bytes_written += ret;
+ }
+}
+
+static int process_synthesized_event(event_t *event,
+ struct perf_session *self __used)
+{
+ write_output(event, event->header.size);
+ return 0;
+}
+
+static void mmap_read(struct mmap_data *md)
+{
+ unsigned int head = mmap_read_head(md);
+ unsigned int old = md->prev;
+ unsigned char *data = md->base + page_size;
+ unsigned long size;
+ void *buf;
+ int diff;
+
+ /*
+ * If we're further behind than half the buffer, there's a chance
+ * the writer will bite our tail and mess up the samples under us.
+ *
+ * If we somehow ended up ahead of the head, we got messed up.
+ *
+ * In either case, truncate and restart at head.
+ */
+ diff = head - old;
+ if (diff < 0) {
+ fprintf(stderr, "WARNING: failed to keep up with mmap data\n");
+ /*
+ * head points to a known good entry, start there.
+ */
+ old = head;
+ }
+
+ if (old != head)
+ samples++;
+
+ size = head - old;
+
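+ /*
+ * The new data may wrap around the end of the ring buffer;
+ * if so, write out the chunk up to the end first, then fall
+ * through to write the remainder from the start of the buffer.
+ */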
+ if ((old & md->mask) + size != (head & md->mask)) {
+ buf = &data[old & md->mask];
+ size = md->mask + 1 - (old & md->mask);
+ old += size;
+
+ write_output(buf, size);
+ }
+
+ buf = &data[old & md->mask];
+ size = head - old;
+ old += size;
+
+ write_output(buf, size);
+
+ md->prev = old;
+ mmap_write_tail(md, old);
+}
+
+static volatile int done = 0;
+static volatile int signr = -1;
+
+static void sig_handler(int sig)
+{
+ done = 1;
+ signr = sig;
+}
+
+static void sig_atexit(void)
+{
+ if (child_pid > 0)
+ kill(child_pid, SIGTERM);
+
+ if (signr == -1)
+ return;
+
+ signal(signr, SIG_DFL);
+ kill(getpid(), signr);
+}
+
+static int group_fd;
+
+static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr)
+{
+ struct perf_header_attr *h_attr;
+
+ if (nr < session->header.attrs) {
+ h_attr = session->header.attr[nr];
+ } else {
+ h_attr = perf_header_attr__new(a);
+ if (h_attr != NULL)
+ if (perf_header__add_attr(&session->header, h_attr) < 0) {
+ perf_header_attr__delete(h_attr);
+ h_attr = NULL;
+ }
+ }
+
+ return h_attr;
+}
+
+static void create_counter(int counter, int cpu)
+{
+ char *filter = filters[counter];
+ struct perf_event_attr *attr = attrs + counter;
+ struct perf_header_attr *h_attr;
+ int track = !counter; /* only the first counter needs these */
+ int thread_index;
+ int ret;
+ struct {
+ u64 count;
+ u64 time_enabled;
+ u64 time_running;
+ u64 id;
+ } read_data;
+
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING |
+ PERF_FORMAT_ID;
+
+ attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+
+ if (nr_counters > 1)
+ attr->sample_type |= PERF_SAMPLE_ID;
+
+ /*
+ * We default some events to a period of 1. But keep
+ * it a weak assumption overridable by the user.
+ */
+ if (!attr->sample_period || (user_freq != UINT_MAX &&
+ user_interval != ULLONG_MAX)) {
+ if (freq) {
+ attr->sample_type |= PERF_SAMPLE_PERIOD;
+ attr->freq = 1;
+ attr->sample_freq = freq;
+ } else {
+ attr->sample_period = default_interval;
+ }
+ }
+
+ if (no_samples)
+ attr->sample_freq = 0;
+
+ if (inherit_stat)
+ attr->inherit_stat = 1;
+
+ if (sample_address) {
+ attr->sample_type |= PERF_SAMPLE_ADDR;
+ attr->mmap_data = track;
+ }
+
+ if (call_graph)
+ attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+
+ if (system_wide)
+ attr->sample_type |= PERF_SAMPLE_CPU;
+
+ if (raw_samples) {
+ attr->sample_type |= PERF_SAMPLE_TIME;
+ attr->sample_type |= PERF_SAMPLE_RAW;
+ attr->sample_type |= PERF_SAMPLE_CPU;
+ }
+
+ attr->mmap = track;
+ attr->comm = track;
+ attr->inherit = !no_inherit;
+ if (target_pid == -1 && target_tid == -1 && !system_wide) {
+ attr->disabled = 1;
+ attr->enable_on_exec = 1;
+ }
+
+ for (thread_index = 0; thread_index < thread_num; thread_index++) {
+try_again:
+ fd[nr_cpu][counter][thread_index] = sys_perf_event_open(attr,
+ all_tids[thread_index], cpu, group_fd, 0);
+
+ if (fd[nr_cpu][counter][thread_index] < 0) {
+ int err = errno;
+
+ if (err == EPERM || err == EACCES)
+ die("Permission error - are you root?\n"
+ "\t Consider tweaking"
+ " /proc/sys/kernel/perf_event_paranoid.\n");
+ else if (err == ENODEV && cpu_list) {
+ die("No such device - did you specify"
+ " an out-of-range profile CPU?\n");
+ }
+
+ /*
+ * If it's cycles then fall back to hrtimer
+ * based cpu-clock-tick sw counter, which
+ * is always available even if no PMU support:
+ */
+ if (attr->type == PERF_TYPE_HARDWARE
+ && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+
+ if (verbose)
+ warning(" ... trying to fall back to cpu-clock-ticks\n");
+ attr->type = PERF_TYPE_SOFTWARE;
+ attr->config = PERF_COUNT_SW_CPU_CLOCK;
+ goto try_again;
+ }
+ printf("\n");
+ error("perfcounter syscall returned with %d (%s)\n",
+ fd[nr_cpu][counter][thread_index], strerror(err));
+
+#if defined(__i386__) || defined(__x86_64__)
+ if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
+ die("No hardware sampling interrupt available."
+ " No APIC? If so then you can boot the kernel"
+ " with the \"lapic\" boot parameter to"
+ " force-enable it.\n");
+#endif
+
+ die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+ exit(-1);
+ }
+
+ h_attr = get_header_attr(attr, counter);
+ if (h_attr == NULL)
+ die("nomem\n");
+
+ if (!file_new) {
+ if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
+ fprintf(stderr, "incompatible append\n");
+ exit(-1);
+ }
+ }
+
+ if (read(fd[nr_cpu][counter][thread_index], &read_data, sizeof(read_data)) == -1) {
+ perror("Unable to read perf file descriptor\n");
+ exit(-1);
+ }
+
+ if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
+ pr_warning("Not enough memory to add id\n");
+ exit(-1);
+ }
+
+ assert(fd[nr_cpu][counter][thread_index] >= 0);
+ fcntl(fd[nr_cpu][counter][thread_index], F_SETFL, O_NONBLOCK);
+
+ /*
+ * First counter acts as the group leader:
+ */
+ if (group && group_fd == -1)
+ group_fd = fd[nr_cpu][counter][thread_index];
+
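+ /*
+ * Only the first counter of the first thread gets its own mmap
+ * ring buffer; every other fd redirects its output into it via
+ * PERF_EVENT_IOC_SET_OUTPUT, so there is a single buffer (and a
+ * single pollfd) per CPU.
+ */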
+ if (counter || thread_index) {
+ ret = ioctl(fd[nr_cpu][counter][thread_index],
+ PERF_EVENT_IOC_SET_OUTPUT,
+ fd[nr_cpu][0][0]);
+ if (ret) {
+ error("failed to set output: %d (%s)\n", errno,
+ strerror(errno));
+ exit(-1);
+ }
+ } else {
+ mmap_array[nr_cpu].counter = counter;
+ mmap_array[nr_cpu].prev = 0;
+ mmap_array[nr_cpu].mask = mmap_pages*page_size - 1;
+ mmap_array[nr_cpu].base = mmap(NULL, (mmap_pages+1)*page_size,
+ PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter][thread_index], 0);
+ if (mmap_array[nr_cpu].base == MAP_FAILED) {
+ error("failed to mmap with %d (%s)\n", errno, strerror(errno));
+ exit(-1);
+ }
+
+ event_array[nr_poll].fd = fd[nr_cpu][counter][thread_index];
+ event_array[nr_poll].events = POLLIN;
+ nr_poll++;
+ }
+
+ if (filter != NULL) {
+ ret = ioctl(fd[nr_cpu][counter][thread_index],
+ PERF_EVENT_IOC_SET_FILTER, filter);
+ if (ret) {
+ error("failed to set filter with %d (%s)\n", errno,
+ strerror(errno));
+ exit(-1);
+ }
+ }
+ }
+}
+
+static void open_counters(int cpu)
+{
+ int counter;
+
+ group_fd = -1;
+ for (counter = 0; counter < nr_counters; counter++)
+ create_counter(counter, cpu);
+
+ nr_cpu++;
+}
+
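+/*
+ * After recording, re-process the events we just wrote (starting at
+ * post_processing_offset) with build_id__mark_dso_hit_ops, so the DSOs
+ * actually hit by samples can be flagged before the header is written
+ * at exit.
+ */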
+static int process_buildids(void)
+{
+ u64 size = lseek(output, 0, SEEK_CUR);
+
+ if (size == 0)
+ return 0;
+
+ session->fd = output;
+ return __perf_session__process_events(session, post_processing_offset,
+ size - post_processing_offset,
+ size, &build_id__mark_dso_hit_ops);
+}
+
+static void atexit_header(void)
+{
+ if (!pipe_output) {
+ session->header.data_size += bytes_written;
+
+ process_buildids();
+ perf_header__write(&session->header, output, true);
+ perf_session__delete(session);
+ symbol__exit();
+ }
+}
+
+static void event__synthesize_guest_os(struct machine *machine, void *data)
+{
+ int err;
+ struct perf_session *psession = data;
+
+ if (machine__is_host(machine))
+ return;
+
+ /*
+ * For guest kernels, when processing the record & report subcommands,
+ * we arrange the module mmaps prior to the guest kernel mmap and trigger
+ * a dso preload, because the default guest module symbols are loaded
+ * from guest kallsyms instead of /lib/modules/XXX/XXX. This
+ * avoids missing symbols when the first address falls in a module
+ * instead of in the guest kernel.
+ */
+ err = event__synthesize_modules(process_synthesized_event,
+ psession, machine);
+ if (err < 0)
+ pr_err("Couldn't record guest kernel [%d]'s reference"
+ " relocation symbol.\n", machine->pid);
+
+ /*
+ * We use _stext for the guest kernel because the guest kernel's
+ * /proc/kallsyms sometimes has no _text.
+ */
+ err = event__synthesize_kernel_mmap(process_synthesized_event,
+ psession, machine, "_text");
+ if (err < 0)
+ err = event__synthesize_kernel_mmap(process_synthesized_event,
+ psession, machine, "_stext");
+ if (err < 0)
+ pr_err("Couldn't record guest kernel [%d]'s reference"
+ " relocation symbol.\n", machine->pid);
+}
+
+static struct perf_event_header finished_round_event = {
+ .size = sizeof(struct perf_event_header),
+ .type = PERF_RECORD_FINISHED_ROUND,
+};
+
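+/*
+ * After each pass over all mmap buffers, a FINISHED_ROUND marker is
+ * emitted (only when tracepoint data is being recorded) so the report
+ * side can use it as a flush point when re-ordering samples by time.
+ */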
+static void mmap_read_all(void)
+{
+ int i;
+
+ for (i = 0; i < nr_cpu; i++) {
+ if (mmap_array[i].base)
+ mmap_read(&mmap_array[i]);
+ }
+
+ if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
+ write_output(&finished_round_event, sizeof(finished_round_event));
+}
+
+static int __cmd_record(int argc, const char **argv)
+{
+ int i, counter;
+ struct stat st;
+ int flags;
+ int err;
+ unsigned long waking = 0;
+ int child_ready_pipe[2], go_pipe[2];
+ const bool forks = argc > 0;
+ char buf;
+ struct machine *machine;
+
+ page_size = sysconf(_SC_PAGE_SIZE);
+
+ atexit(sig_atexit);
+ signal(SIGCHLD, sig_handler);
+ signal(SIGINT, sig_handler);
+
+ if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
+ perror("failed to create pipes");
+ exit(-1);
+ }
+
+ if (!strcmp(output_name, "-"))
+ pipe_output = 1;
+ else if (!stat(output_name, &st) && st.st_size) {
+ if (write_mode == WRITE_FORCE) {
+ char oldname[PATH_MAX];
+ snprintf(oldname, sizeof(oldname), "%s.old",
+ output_name);
+ unlink(oldname);
+ rename(output_name, oldname);
+ }
+ } else if (write_mode == WRITE_APPEND) {
+ write_mode = WRITE_FORCE;
+ }
+
+ flags = O_CREAT|O_RDWR;
+ if (write_mode == WRITE_APPEND)
+ file_new = 0;
+ else
+ flags |= O_TRUNC;
+
+ if (pipe_output)
+ output = STDOUT_FILENO;
+ else
+ output = open(output_name, flags, S_IRUSR | S_IWUSR);
+ if (output < 0) {
+ perror("failed to create output file");
+ exit(-1);
+ }
+
+ session = perf_session__new(output_name, O_WRONLY,
+ write_mode == WRITE_FORCE, false);
+ if (session == NULL) {
+ pr_err("Not enough memory for reading perf file header\n");
+ return -1;
+ }
+
+ if (!file_new) {
+ err = perf_header__read(session, output);
+ if (err < 0)
+ goto out_delete_session;
+ }
+
+ if (have_tracepoints(attrs, nr_counters))
+ perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
+
+ /*
+ * perf_session__delete(session) will be called at atexit_header()
+ */
+ atexit(atexit_header);
+
+ if (forks) {
+ child_pid = fork();
+ if (child_pid < 0) {
+ perror("failed to fork");
+ exit(-1);
+ }
+
+ if (!child_pid) {
+ if (pipe_output)
+ dup2(2, 1);
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+ /*
+ * Do a dummy execvp to get the PLT entry resolved,
+ * so we avoid the resolver overhead on the real
+ * execvp call.
+ */
+ execvp("", (char **)argv);
+
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
+
+ /*
+ * Wait until the parent tells us to go.
+ */
+ if (read(go_pipe[0], &buf, 1) == -1)
+ perror("unable to read pipe");
+
+ execvp(argv[0], (char **)argv);
+
+ perror(argv[0]);
+ exit(-1);
+ }
+
+ if (!system_wide && target_tid == -1 && target_pid == -1)
+ all_tids[0] = child_pid;
+
+ close(child_ready_pipe[1]);
+ close(go_pipe[0]);
+ /*
+ * wait for child to settle
+ */
+ if (read(child_ready_pipe[0], &buf, 1) == -1) {
+ perror("unable to read pipe");
+ exit(-1);
+ }
+ close(child_ready_pipe[0]);
+ }
+
+ nr_cpus = read_cpu_map(cpu_list);
+ if (nr_cpus < 1) {
+ perror("failed to collect number of CPUs\n");
+ return -1;
+ }
+
+ if (!system_wide && no_inherit && !cpu_list) {
+ open_counters(-1);
+ } else {
+ for (i = 0; i < nr_cpus; i++)
+ open_counters(cpumap[i]);
+ }
+
+ if (pipe_output) {
+ err = perf_header__write_pipe(output);
+ if (err < 0)
+ return err;
+ } else if (file_new) {
+ err = perf_header__write(&session->header, output, false);
+ if (err < 0)
+ return err;
+ }
+
+ post_processing_offset = lseek(output, 0, SEEK_CUR);
+
+ if (pipe_output) {
+ err = event__synthesize_attrs(&session->header,
+ process_synthesized_event,
+ session);
+ if (err < 0) {
+ pr_err("Couldn't synthesize attrs.\n");
+ return err;
+ }
+
+ err = event__synthesize_event_types(process_synthesized_event,
+ session);
+ if (err < 0) {
+ pr_err("Couldn't synthesize event_types.\n");
+ return err;
+ }
+
+ if (have_tracepoints(attrs, nr_counters)) {
+ /*
+ * FIXME err <= 0 here actually means that
+ * there were no tracepoints so its not really
+ * an error, just that we don't need to
+ * synthesize anything. We really have to
+ * return this more properly and also
+ * propagate errors that now are calling die()
+ */
+ err = event__synthesize_tracing_data(output, attrs,
+ nr_counters,
+ process_synthesized_event,
+ session);
+ if (err <= 0) {
+ pr_err("Couldn't record tracing data.\n");
+ return err;
+ }
+ advance_output(err);
+ }
+ }
+
+ machine = perf_session__find_host_machine(session);
+ if (!machine) {
+ pr_err("Couldn't find native kernel information.\n");
+ return -1;
+ }
+
+ err = event__synthesize_kernel_mmap(process_synthesized_event,
+ session, machine, "_text");
+ if (err < 0)
+ err = event__synthesize_kernel_mmap(process_synthesized_event,
+ session, machine, "_stext");
+ if (err < 0) {
+ pr_err("Couldn't record kernel reference relocation symbol.\n");
+ return err;
+ }
+
+ err = event__synthesize_modules(process_synthesized_event,
+ session, machine);
+ if (err < 0) {
+ pr_err("Couldn't record kernel reference relocation symbol.\n");
+ return err;
+ }
+ if (perf_guest)
+ perf_session__process_machines(session, event__synthesize_guest_os);
+
+ if (!system_wide)
+ event__synthesize_thread(target_tid, process_synthesized_event,
+ session);
+ else
+ event__synthesize_threads(process_synthesized_event, session);
+
+ if (realtime_prio) {
+ struct sched_param param;
+
+ param.sched_priority = realtime_prio;
+ if (sched_setscheduler(0, SCHED_FIFO, &param)) {
+ pr_err("Could not set realtime priority.\n");
+ exit(-1);
+ }
+ }
+
+ /*
+ * Let the child rip
+ */
+ if (forks)
+ close(go_pipe[1]);
+
+ for (;;) {
+ int hits = samples;
+ int thread;
+
+ mmap_read_all();
+
+ if (hits == samples) {
+ if (done)
+ break;
+ err = poll(event_array, nr_poll, -1);
+ waking++;
+ }
+
+ if (done) {
+ for (i = 0; i < nr_cpu; i++) {
+ for (counter = 0;
+ counter < nr_counters;
+ counter++) {
+ for (thread = 0;
+ thread < thread_num;
+ thread++)
+ ioctl(fd[i][counter][thread],
+ PERF_EVENT_IOC_DISABLE);
+ }
+ }
+ }
+ }
+
+ fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
+
+ /*
+ * Approximate RIP event size: 24 bytes.
+ */
+ fprintf(stderr,
+ "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
+ (double)bytes_written / 1024.0 / 1024.0,
+ output_name,
+ bytes_written / 24);
+
+ return 0;
+
+out_delete_session:
+ perf_session__delete(session);
+ return err;
+}
+
+static const char * const record_usage[] = {
+ "perf record [<options>] [<command>]",
+ "perf record [<options>] -- <command> [<options>]",
+ NULL
+};
+
+static bool force, append_file;
+
+static const struct option options[] = {
+ OPT_CALLBACK('e', "event", NULL, "event",
+ "event selector. use 'perf list' to list available events",
+ parse_events),
+ OPT_CALLBACK(0, "filter", NULL, "filter",
+ "event filter", parse_filter),
+ OPT_INTEGER('p', "pid", &target_pid,
+ "record events on existing process id"),
+ OPT_INTEGER('t', "tid", &target_tid,
+ "record events on existing thread id"),
+ OPT_INTEGER('r', "realtime", &realtime_prio,
+ "collect data with this RT SCHED_FIFO priority"),
+ OPT_BOOLEAN('R', "raw-samples", &raw_samples,
+ "collect raw sample records from all opened counters"),
+ OPT_BOOLEAN('a', "all-cpus", &system_wide,
+ "system-wide collection from all CPUs"),
+ OPT_BOOLEAN('A', "append", &append_file,
+ "append to the output file to do incremental profiling"),
+ OPT_STRING('C', "cpu", &cpu_list, "cpu",
+ "list of cpus to monitor"),
+ OPT_BOOLEAN('f', "force", &force,
+ "overwrite existing data file (deprecated)"),
+ OPT_U64('c', "count", &user_interval, "event period to sample"),
+ OPT_STRING('o', "output", &output_name, "file",
+ "output file name"),
+ OPT_BOOLEAN('i', "no-inherit", &no_inherit,
+ "child tasks do not inherit counters"),
+ OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"),
+ OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
+ OPT_BOOLEAN('g', "call-graph", &call_graph,
+ "do call-graph (stack chain/backtrace) recording"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show counter open errors, etc)"),
+ OPT_BOOLEAN('s', "stat", &inherit_stat,
+ "per thread counts"),
+ OPT_BOOLEAN('d', "data", &sample_address,
+ "Sample addresses"),
+ OPT_BOOLEAN('n', "no-samples", &no_samples,
+ "don't sample"),
+ OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid,
+ "do not update the buildid cache"),
+ OPT_END()
+};
+
+int cmd_record(int argc, const char **argv, const char *prefix __used)
+{
+ int i, j, err = -ENOMEM;
+
+ argc = parse_options(argc, argv, options, record_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc && target_pid == -1 && target_tid == -1 &&
+ !system_wide && !cpu_list)
+ usage_with_options(record_usage, options);
+
+ if (force && append_file) {
+ fprintf(stderr, "Can't overwrite and append at the same time."
+ " You need to choose between -f and -A");
+ usage_with_options(record_usage, options);
+ } else if (append_file) {
+ write_mode = WRITE_APPEND;
+ } else {
+ write_mode = WRITE_FORCE;
+ }
+
+ symbol__init();
+ if (no_buildid)
+ disable_buildid_cache();
+
+ if (!nr_counters) {
+ nr_counters = 1;
+ attrs[0].type = PERF_TYPE_HARDWARE;
+ attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
+ }
+
+ if (target_pid != -1) {
+ target_tid = target_pid;
+ thread_num = find_all_tid(target_pid, &all_tids);
+ if (thread_num <= 0) {
+ fprintf(stderr, "Can't find all threads of pid %d\n",
+ target_pid);
+ usage_with_options(record_usage, options);
+ }
+ } else {
+ all_tids = malloc(sizeof(pid_t));
+ if (!all_tids)
+ goto out_symbol_exit;
+
+ all_tids[0] = target_tid;
+ thread_num = 1;
+ }
+
+ for (i = 0; i < MAX_NR_CPUS; i++) {
+ for (j = 0; j < MAX_COUNTERS; j++) {
+ fd[i][j] = malloc(sizeof(int)*thread_num);
+ if (!fd[i][j])
+ goto out_free_fd;
+ }
+ }
+ event_array = malloc(
+ sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
+ if (!event_array)
+ goto out_free_fd;
+
+ if (user_interval != ULLONG_MAX)
+ default_interval = user_interval;
+ if (user_freq != UINT_MAX)
+ freq = user_freq;
+
+ /*
+ * User specified count overrides default frequency.
+ */
+ if (default_interval)
+ freq = 0;
+ else if (freq) {
+ default_interval = freq;
+ } else {
+ fprintf(stderr, "frequency and count are zero, aborting\n");
+ err = -EINVAL;
+ goto out_free_event_array;
+ }
+
+ err = __cmd_record(argc, argv);
+
+out_free_event_array:
+ free(event_array);
+out_free_fd:
+ for (i = 0; i < MAX_NR_CPUS; i++) {
+ for (j = 0; j < MAX_COUNTERS; j++)
+ free(fd[i][j]);
+ }
+ free(all_tids);
+ all_tids = NULL;
+out_symbol_exit:
+ symbol__exit();
+ return err;
+}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
new file mode 100644
index 00000000..55fc1f46
--- /dev/null
+++ b/tools/perf/builtin-report.c
@@ -0,0 +1,534 @@
+/*
+ * builtin-report.c
+ *
+ * Builtin report command: Analyze the perf.data input file,
+ * look up and read DSOs and symbol information and display
+ * a histogram of results, along various sorting keys.
+ */
+#include "builtin.h"
+
+#include "util/util.h"
+
+#include "util/color.h"
+#include <linux/list.h>
+#include "util/cache.h"
+#include <linux/rbtree.h>
+#include "util/symbol.h"
+#include "util/callchain.h"
+#include "util/strlist.h"
+#include "util/values.h"
+
+#include "perf.h"
+#include "util/debug.h"
+#include "util/header.h"
+#include "util/session.h"
+
+#include "util/parse-options.h"
+#include "util/parse-events.h"
+
+#include "util/thread.h"
+#include "util/sort.h"
+#include "util/hist.h"
+
+static char const *input_name = "perf.data";
+
+static bool force;
+static bool hide_unresolved;
+static bool dont_use_callchains;
+
+static bool show_threads;
+static struct perf_read_values show_threads_values;
+
+static const char default_pretty_printing_style[] = "normal";
+static const char *pretty_printing_style = default_pretty_printing_style;
+
+static char callchain_default_opt[] = "fractal,0.5";
+
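+/*
+ * Find the hists for this event in the session's rbtree, keyed by the
+ * event config; allocate and insert a new zeroed entry when none exists.
+ */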
+static struct hists *perf_session__hists_findnew(struct perf_session *self,
+ u64 event_stream, u32 type,
+ u64 config)
+{
+ struct rb_node **p = &self->hists_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct hists *iter, *new;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hists, rb_node);
+ if (iter->config == config)
+ return iter;
+
+ if (config > iter->config)
+ p = &(*p)->rb_right;
+ else
+ p = &(*p)->rb_left;
+ }
+
+ new = malloc(sizeof(struct hists));
+ if (new == NULL)
+ return NULL;
+ memset(new, 0, sizeof(struct hists));
+ new->event_stream = event_stream;
+ new->config = config;
+ new->type = type;
+ rb_link_node(&new->rb_node, parent, p);
+ rb_insert_color(&new->rb_node, &self->hists_tree);
+ return new;
+}
+
+static int perf_session__add_hist_entry(struct perf_session *self,
+ struct addr_location *al,
+ struct sample_data *data)
+{
+ struct map_symbol *syms = NULL;
+ struct symbol *parent = NULL;
+ int err = -ENOMEM;
+ struct hist_entry *he;
+ struct hists *hists;
+ struct perf_event_attr *attr;
+
+ if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain) {
+ syms = perf_session__resolve_callchain(self, al->thread,
+ data->callchain, &parent);
+ if (syms == NULL)
+ return -ENOMEM;
+ }
+
+ attr = perf_header__find_attr(data->id, &self->header);
+ if (attr)
+ hists = perf_session__hists_findnew(self, data->id, attr->type, attr->config);
+ else
+ hists = perf_session__hists_findnew(self, data->id, 0, 0);
+ if (hists == NULL)
+ goto out_free_syms;
+ he = __hists__add_entry(hists, al, parent, data->period);
+ if (he == NULL)
+ goto out_free_syms;
+ err = 0;
+ if (symbol_conf.use_callchain) {
+ err = append_chain(he->callchain, data->callchain, syms, data->period);
+ if (err)
+ goto out_free_syms;
+ }
+ /*
+ * Only in the newt browser do we do integrated annotation,
+ * so we don't allocate the extra space needed, because the stdio
+ * code will not use it.
+ */
+ if (use_browser > 0)
+ err = hist_entry__inc_addr_samples(he, al->addr);
+out_free_syms:
+ free(syms);
+ return err;
+}
+
+static int add_event_total(struct perf_session *session,
+ struct sample_data *data,
+ struct perf_event_attr *attr)
+{
+ struct hists *hists;
+
+ if (attr)
+ hists = perf_session__hists_findnew(session, data->id,
+ attr->type, attr->config);
+ else
+ hists = perf_session__hists_findnew(session, data->id, 0, 0);
+
+ if (!hists)
+ return -ENOMEM;
+
+ hists->stats.total_period += data->period;
+ /*
+ * FIXME: add_event_total should be moved from here to
+ * perf_session__process_event so that the proper hist is passed to
+ * the event_op methods.
+ */
+ hists__inc_nr_events(hists, PERF_RECORD_SAMPLE);
+ session->hists.stats.total_period += data->period;
+ return 0;
+}
+
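+/*
+ * Resolve a sample to thread/map/symbol and account it into the
+ * per-event hists and the event totals.
+ */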
+static int process_sample_event(event_t *event, struct perf_session *session)
+{
+ struct sample_data data = { .period = 1, };
+ struct addr_location al;
+ struct perf_event_attr *attr;
+
+ if (event__preprocess_sample(event, session, &al, &data, NULL) < 0) {
+ fprintf(stderr, "problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ if (al.filtered || (hide_unresolved && al.sym == NULL))
+ return 0;
+
+ if (perf_session__add_hist_entry(session, &al, &data)) {
+ pr_debug("problem incrementing symbol period, skipping event\n");
+ return -1;
+ }
+
+ attr = perf_header__find_attr(data.id, &session->header);
+
+ if (add_event_total(session, &data, attr)) {
+ pr_debug("problem adding event period\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int process_read_event(event_t *event, struct perf_session *session)
+{
+ struct perf_event_attr *attr;
+
+ attr = perf_header__find_attr(event->read.id, &session->header);
+
+ if (show_threads) {
+ const char *name = attr ? __event_name(attr->type, attr->config)
+ : "unknown";
+ perf_read_values_add_value(&show_threads_values,
+ event->read.pid, event->read.tid,
+ event->read.id,
+ name,
+ event->read.value);
+ }
+
+ dump_printf(": %d %d %s %Lu\n", event->read.pid, event->read.tid,
+ attr ? __event_name(attr->type, attr->config) : "FAIL",
+ event->read.value);
+
+ return 0;
+}
+
+static int perf_session__setup_sample_type(struct perf_session *self)
+{
+ if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ if (sort__has_parent) {
+ fprintf(stderr, "selected --sort parent, but no"
+ " callchain data. Did you call"
+ " perf record without -g?\n");
+ return -EINVAL;
+ }
+ if (symbol_conf.use_callchain) {
+ fprintf(stderr, "selected -g but no callchain data."
+ " Did you call perf record without"
+ " -g?\n");
+ return -1;
+ }
+ } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE &&
+ !symbol_conf.use_callchain) {
+ symbol_conf.use_callchain = true;
+ if (register_callchain_param(&callchain_param) < 0) {
+ fprintf(stderr, "Can't register callchain"
+ " params\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static struct perf_event_ops event_ops = {
+ .sample = process_sample_event,
+ .mmap = event__process_mmap,
+ .comm = event__process_comm,
+ .exit = event__process_task,
+ .fork = event__process_task,
+ .lost = event__process_lost,
+ .read = process_read_event,
+ .attr = event__process_attr,
+ .event_type = event__process_event_type,
+ .tracing_data = event__process_tracing_data,
+ .build_id = event__process_build_id,
+};
+
+extern volatile int session_done;
+
+static void sig_handler(int sig __used)
+{
+ session_done = 1;
+}
+
+static size_t hists__fprintf_nr_sample_events(struct hists *self,
+ const char *evname, FILE *fp)
+{
+ size_t ret;
+ char unit;
+ unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE];
+
+ nr_events = convert_unit(nr_events, &unit);
+ ret = fprintf(fp, "# Events: %lu%c", nr_events, unit);
+ if (evname != NULL)
+ ret += fprintf(fp, " %s", evname);
+ return ret + fprintf(fp, "\n#\n");
+}
+
+static int hists__tty_browse_tree(struct rb_root *tree, const char *help)
+{
+ struct rb_node *next = rb_first(tree);
+
+ while (next) {
+ struct hists *hists = rb_entry(next, struct hists, rb_node);
+ const char *evname = NULL;
+
+ if (rb_first(&hists->entries) != rb_last(&hists->entries))
+ evname = __event_name(hists->type, hists->config);
+
+ hists__fprintf_nr_sample_events(hists, evname, stdout);
+ hists__fprintf(hists, NULL, false, stdout);
+ fprintf(stdout, "\n\n");
+ next = rb_next(&hists->rb_node);
+ }
+
+ if (sort_order == default_sort_order &&
+ parent_pattern == default_parent_pattern) {
+ fprintf(stdout, "#\n# (%s)\n#\n", help);
+
+ if (show_threads) {
+ bool style = !strcmp(pretty_printing_style, "raw");
+ perf_read_values_display(stdout, &show_threads_values,
+ style);
+ perf_read_values_destroy(&show_threads_values);
+ }
+ }
+
+ return 0;
+}
+
+static int __cmd_report(void)
+{
+ int ret = -EINVAL;
+ struct perf_session *session;
+ struct rb_node *next;
+ const char *help = "For a higher level overview, try: perf report --sort comm,dso";
+
+ signal(SIGINT, sig_handler);
+
+ session = perf_session__new(input_name, O_RDONLY, force, false);
+ if (session == NULL)
+ return -ENOMEM;
+
+ if (show_threads)
+ perf_read_values_init(&show_threads_values);
+
+ ret = perf_session__setup_sample_type(session);
+ if (ret)
+ goto out_delete;
+
+ ret = perf_session__process_events(session, &event_ops);
+ if (ret)
+ goto out_delete;
+
+ if (dump_trace) {
+ perf_session__fprintf_nr_events(session, stdout);
+ goto out_delete;
+ }
+
+ if (verbose > 3)
+ perf_session__fprintf(session, stdout);
+
+ if (verbose > 2)
+ perf_session__fprintf_dsos(session, stdout);
+
+ next = rb_first(&session->hists_tree);
+ while (next) {
+ struct hists *hists;
+
+ hists = rb_entry(next, struct hists, rb_node);
+ hists__collapse_resort(hists);
+ hists__output_resort(hists);
+ next = rb_next(&hists->rb_node);
+ }
+
+ if (use_browser > 0)
+ hists__tui_browse_tree(&session->hists_tree, help);
+ else
+ hists__tty_browse_tree(&session->hists_tree, help);
+
+out_delete:
+ /*
+ * Speed up the exit process, for large files this can
+ * take quite a while.
+ *
+ * XXX Enable this when using valgrind or if we ever
+ * librarize this command.
+ *
+ * Also experiment with obstacks to see how much speed
+ * up we'll get here.
+ *
+ * perf_session__delete(session);
+ */
+ return ret;
+}
+
+static int
+parse_callchain_opt(const struct option *opt __used, const char *arg,
+ int unset)
+{
+ char *tok, *tok2;
+ char *endptr;
+
+ /*
+ * --no-call-graph
+ */
+ if (unset) {
+ dont_use_callchains = true;
+ return 0;
+ }
+
+ symbol_conf.use_callchain = true;
+
+ if (!arg)
+ return 0;
+
+ tok = strtok((char *)arg, ",");
+ if (!tok)
+ return -1;
+
+ /* get the output mode */
+ if (!strncmp(tok, "graph", strlen(arg)))
+ callchain_param.mode = CHAIN_GRAPH_ABS;
+
+ else if (!strncmp(tok, "flat", strlen(arg)))
+ callchain_param.mode = CHAIN_FLAT;
+
+ else if (!strncmp(tok, "fractal", strlen(arg)))
+ callchain_param.mode = CHAIN_GRAPH_REL;
+
+ else if (!strncmp(tok, "none", strlen(arg))) {
+ callchain_param.mode = CHAIN_NONE;
+ symbol_conf.use_callchain = false;
+
+ return 0;
+ }
+
+ else
+ return -1;
+
+ /* get the min percentage */
+ tok = strtok(NULL, ",");
+ if (!tok)
+ goto setup;
+
+ tok2 = strtok(NULL, ",");
+ callchain_param.min_percent = strtod(tok, &endptr);
+ if (tok == endptr)
+ return -1;
+
+ if (tok2)
+ callchain_param.print_limit = strtod(tok2, &endptr);
+setup:
+ if (register_callchain_param(&callchain_param) < 0) {
+ fprintf(stderr, "Can't register callchain params\n");
+ return -1;
+ }
+ return 0;
+}
+
+static const char * const report_usage[] = {
+ "perf report [<options>] <command>",
+ NULL
+};
+
+static const struct option options[] = {
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
+ "load module symbols - WARNING: use only with -k and LIVE kernel"),
+ OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
+ "Show a column with the number of samples"),
+ OPT_BOOLEAN('T', "threads", &show_threads,
+ "Show per-thread event counters"),
+ OPT_STRING(0, "pretty", &pretty_printing_style, "key",
+ "pretty printing style key: normal raw"),
+ OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
+ "sort by key(s): pid, comm, dso, symbol, parent"),
+ OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
+ "Show sample percentage for different cpu modes"),
+ OPT_STRING('p', "parent", &parent_pattern, "regex",
+ "regex filter to identify parent, see: '--sort parent'"),
+ OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
+ "Only display entries with parent-match"),
+ OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent",
+ "Display callchains using output_type (graph, flat, fractal, or none) and min percent threshold. "
+ "Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt),
+ OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
+ "only consider symbols in these dsos"),
+ OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
+ "only consider symbols in these comms"),
+ OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
+ "only consider these symbols"),
+ OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
+ "width[,width...]",
+ "don't try to adjust column width, use these fixed values"),
+ OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
+ "separator for columns, no spaces will be added between "
+ "columns '.' is reserved."),
+ OPT_BOOLEAN('U', "hide-unresolved", &hide_unresolved,
+ "Only display entries resolved to a symbol"),
+ OPT_END()
+};
+
+int cmd_report(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, options, report_usage, 0);
+
+ if (strcmp(input_name, "-") != 0)
+ setup_browser();
+ /*
+ * Only in the newt browser do we do integrated annotation,
+ * so don't allocate extra space that won't be used in the stdio
+ * implementation.
+ */
+ if (use_browser > 0) {
+ symbol_conf.priv_size = sizeof(struct sym_priv);
+ /*
+ * For searching by name on the "Browse map details" screen;
+ * provide it only in verbose mode so as not to bloat
+ * struct symbol too much.
+ */
+ if (verbose) {
+ /*
+ * XXX: Need to provide a less kludgy way to ask for
+ * more space per symbol, the u32 is for the index on
+ * the ui browser.
+ * See symbol__browser_index.
+ */
+ symbol_conf.priv_size += sizeof(u32);
+ symbol_conf.sort_by_name = true;
+ }
+ }
+
+ if (symbol__init() < 0)
+ return -1;
+
+ setup_sorting(report_usage, options);
+
+ if (parent_pattern != default_parent_pattern) {
+ if (sort_dimension__add("parent") < 0)
+ return -1;
+ sort_parent.elide = 1;
+ } else
+ symbol_conf.exclude_other = false;
+
+ /*
+ * Any (unrecognized) arguments left?
+ */
+ if (argc)
+ usage_with_options(report_usage, options);
+
+ sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout);
+ sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
+ sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout);
+
+ return __cmd_report();
+}
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
new file mode 100644
index 00000000..55f3b5dc
--- /dev/null
+++ b/tools/perf/builtin-sched.c
@@ -0,0 +1,1925 @@
+#include "builtin.h"
+#include "perf.h"
+
+#include "util/util.h"
+#include "util/cache.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/header.h"
+#include "util/session.h"
+
+#include "util/parse-options.h"
+#include "util/trace-event.h"
+
+#include "util/debug.h"
+
+#include <sys/prctl.h>
+
+#include <semaphore.h>
+#include <pthread.h>
+#include <math.h>
+
+static char const *input_name = "perf.data";
+
+static char default_sort_order[] = "avg, max, switch, runtime";
+static const char *sort_order = default_sort_order;
+
+static int profile_cpu = -1;
+
+#define PR_SET_NAME 15 /* Set process name */
+#define MAX_CPUS 4096
+
+static u64 run_measurement_overhead;
+static u64 sleep_measurement_overhead;
+
+#define COMM_LEN 20
+#define SYM_LEN 129
+
+#define MAX_PID 65536
+
+static unsigned long nr_tasks;
+
+struct sched_atom;
+
+struct task_desc {
+ unsigned long nr;
+ unsigned long pid;
+ char comm[COMM_LEN];
+
+ unsigned long nr_events;
+ unsigned long curr_event;
+ struct sched_atom **atoms;
+
+ pthread_t thread;
+ sem_t sleep_sem;
+
+ sem_t ready_for_work;
+ sem_t work_done_sem;
+
+ u64 cpu_usage;
+};
+
+enum sched_event_type {
+ SCHED_EVENT_RUN,
+ SCHED_EVENT_SLEEP,
+ SCHED_EVENT_WAKEUP,
+ SCHED_EVENT_MIGRATION,
+};
+
+struct sched_atom {
+ enum sched_event_type type;
+ int specific_wait;
+ u64 timestamp;
+ u64 duration;
+ unsigned long nr;
+ sem_t *wait_sem;
+ struct task_desc *wakee;
+};
+
+static struct task_desc *pid_to_task[MAX_PID];
+
+static struct task_desc **tasks;
+
+static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
+static u64 start_time;
+
+static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static unsigned long nr_run_events;
+static unsigned long nr_sleep_events;
+static unsigned long nr_wakeup_events;
+
+static unsigned long nr_sleep_corrections;
+static unsigned long nr_run_events_optimized;
+
+static unsigned long targetless_wakeups;
+static unsigned long multitarget_wakeups;
+
+static u64 cpu_usage;
+static u64 runavg_cpu_usage;
+static u64 parent_cpu_usage;
+static u64 runavg_parent_cpu_usage;
+
+static unsigned long nr_runs;
+static u64 sum_runtime;
+static u64 sum_fluct;
+static u64 run_avg;
+
+static unsigned int replay_repeat = 10;
+static unsigned long nr_timestamps;
+static unsigned long nr_unordered_timestamps;
+static unsigned long nr_state_machine_bugs;
+static unsigned long nr_context_switch_bugs;
+static unsigned long nr_events;
+static unsigned long nr_lost_chunks;
+static unsigned long nr_lost_events;
+
+#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+
+enum thread_state {
+ THREAD_SLEEPING = 0,
+ THREAD_WAIT_CPU,
+ THREAD_SCHED_IN,
+ THREAD_IGNORE
+};
+
+struct work_atom {
+ struct list_head list;
+ enum thread_state state;
+ u64 sched_out_time;
+ u64 wake_up_time;
+ u64 sched_in_time;
+ u64 runtime;
+};
+
+struct work_atoms {
+ struct list_head work_list;
+ struct thread *thread;
+ struct rb_node node;
+ u64 max_lat;
+ u64 max_lat_at;
+ u64 total_lat;
+ u64 nb_atoms;
+ u64 total_runtime;
+};
+
+typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
+
+static struct rb_root atom_root, sorted_atom_root;
+
+static u64 all_runtime;
+static u64 all_count;
+
+
+static u64 get_nsecs(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+
+ return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+}
+
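+/*
+ * Busy-loop for roughly 'nsecs', compensating for the calibrated
+ * run_measurement_overhead.
+ */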
+static void burn_nsecs(u64 nsecs)
+{
+ u64 T0 = get_nsecs(), T1;
+
+ do {
+ T1 = get_nsecs();
+ } while (T1 + run_measurement_overhead < T0 + nsecs);
+}
+
+static void sleep_nsecs(u64 nsecs)
+{
+ struct timespec ts;
+
+ ts.tv_nsec = nsecs % 999999999;
+ ts.tv_sec = nsecs / 999999999;
+
+ nanosleep(&ts, NULL);
+}
+
+static void calibrate_run_measurement_overhead(void)
+{
+ u64 T0, T1, delta, min_delta = 1000000000ULL;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ T0 = get_nsecs();
+ burn_nsecs(0);
+ T1 = get_nsecs();
+ delta = T1-T0;
+ min_delta = min(min_delta, delta);
+ }
+ run_measurement_overhead = min_delta;
+
+ printf("run measurement overhead: %Ld nsecs\n", min_delta);
+}
+
+static void calibrate_sleep_measurement_overhead(void)
+{
+ u64 T0, T1, delta, min_delta = 1000000000ULL;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ T0 = get_nsecs();
+ sleep_nsecs(10000);
+ T1 = get_nsecs();
+ delta = T1-T0;
+ min_delta = min(min_delta, delta);
+ }
+ min_delta -= 10000;
+ sleep_measurement_overhead = min_delta;
+
+ printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
+}
+
+static struct sched_atom *
+get_new_event(struct task_desc *task, u64 timestamp)
+{
+ struct sched_atom *event = zalloc(sizeof(*event));
+ unsigned long idx = task->nr_events;
+ size_t size;
+
+ event->timestamp = timestamp;
+ event->nr = idx;
+
+ task->nr_events++;
+ size = sizeof(struct sched_atom *) * task->nr_events;
+ task->atoms = realloc(task->atoms, size);
+ BUG_ON(!task->atoms);
+
+ task->atoms[idx] = event;
+
+ return event;
+}
+
+static struct sched_atom *last_event(struct task_desc *task)
+{
+ if (!task->nr_events)
+ return NULL;
+
+ return task->atoms[task->nr_events - 1];
+}
+
+static void
+add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
+{
+ struct sched_atom *event, *curr_event = last_event(task);
+
+ /*
+ * optimize an existing RUN event by merging this one
+ * into it:
+ */
+ if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
+ nr_run_events_optimized++;
+ curr_event->duration += duration;
+ return;
+ }
+
+ event = get_new_event(task, timestamp);
+
+ event->type = SCHED_EVENT_RUN;
+ event->duration = duration;
+
+ nr_run_events++;
+}
+
+static void
+add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
+ struct task_desc *wakee)
+{
+ struct sched_atom *event, *wakee_event;
+
+ event = get_new_event(task, timestamp);
+ event->type = SCHED_EVENT_WAKEUP;
+ event->wakee = wakee;
+
+ wakee_event = last_event(wakee);
+ if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
+ targetless_wakeups++;
+ return;
+ }
+ if (wakee_event->wait_sem) {
+ multitarget_wakeups++;
+ return;
+ }
+
+ wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
+ sem_init(wakee_event->wait_sem, 0, 0);
+ wakee_event->specific_wait = 1;
+ event->wait_sem = wakee_event->wait_sem;
+
+ nr_wakeup_events++;
+}
+
+static void
+add_sched_event_sleep(struct task_desc *task, u64 timestamp,
+ u64 task_state __used)
+{
+ struct sched_atom *event = get_new_event(task, timestamp);
+
+ event->type = SCHED_EVENT_SLEEP;
+
+ nr_sleep_events++;
+}
+
+static struct task_desc *register_pid(unsigned long pid, const char *comm)
+{
+ struct task_desc *task;
+
+ BUG_ON(pid >= MAX_PID);
+
+ task = pid_to_task[pid];
+
+ if (task)
+ return task;
+
+ task = zalloc(sizeof(*task));
+ task->pid = pid;
+ task->nr = nr_tasks;
+ strcpy(task->comm, comm);
+ /*
+ * every task starts in sleeping state - this gets ignored
+ * if there's no wakeup pointing to this sleep state:
+ */
+ add_sched_event_sleep(task, 0, 0);
+
+ pid_to_task[pid] = task;
+ nr_tasks++;
+ tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
+ BUG_ON(!tasks);
+ tasks[task->nr] = task;
+
+ if (verbose)
+ printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);
+
+ return task;
+}
+
+
+static void print_task_traces(void)
+{
+ struct task_desc *task;
+ unsigned long i;
+
+ for (i = 0; i < nr_tasks; i++) {
+ task = tasks[i];
+ printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
+ task->nr, task->comm, task->pid, task->nr_events);
+ }
+}
+
+static void add_cross_task_wakeups(void)
+{
+ struct task_desc *task1, *task2;
+ unsigned long i, j;
+
+ for (i = 0; i < nr_tasks; i++) {
+ task1 = tasks[i];
+ j = i + 1;
+ if (j == nr_tasks)
+ j = 0;
+ task2 = tasks[j];
+ add_sched_event_wakeup(task1, 0, task2);
+ }
+}
+
+static void
+process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
+{
+ int ret = 0;
+ u64 now;
+ long long delta;
+
+ now = get_nsecs();
+ delta = start_time + atom->timestamp - now;
+
+ switch (atom->type) {
+ case SCHED_EVENT_RUN:
+ burn_nsecs(atom->duration);
+ break;
+ case SCHED_EVENT_SLEEP:
+ if (atom->wait_sem)
+ ret = sem_wait(atom->wait_sem);
+ BUG_ON(ret);
+ break;
+ case SCHED_EVENT_WAKEUP:
+ if (atom->wait_sem)
+ ret = sem_post(atom->wait_sem);
+ BUG_ON(ret);
+ break;
+ case SCHED_EVENT_MIGRATION:
+ break;
+ default:
+ BUG_ON(1);
+ }
+}
+
+static u64 get_cpu_usage_nsec_parent(void)
+{
+ struct rusage ru;
+ u64 sum;
+ int err;
+
+ err = getrusage(RUSAGE_SELF, &ru);
+ BUG_ON(err);
+
+ sum = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
+ sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;
+
+ return sum;
+}
+
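+/* Open a task-clock software counter on the calling thread itself. */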
+static int self_open_counters(void)
+{
+ struct perf_event_attr attr;
+ int fd;
+
+ memset(&attr, 0, sizeof(attr));
+
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_TASK_CLOCK;
+
+ fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+
+ if (fd < 0)
+ die("Error: sys_perf_event_open() syscall returned "
+ "with %d (%s)\n", fd, strerror(errno));
+ return fd;
+}
+
+static u64 get_cpu_usage_nsec_self(int fd)
+{
+ u64 runtime;
+ int ret;
+
+ ret = read(fd, &runtime, sizeof(runtime));
+ BUG_ON(ret != sizeof(runtime));
+
+ return runtime;
+}
+
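+/*
+ * Per-task replay thread: handshake with the parent via the start/done
+ * mutexes, replay the recorded atoms and account own task-clock usage.
+ */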
+static void *thread_func(void *ctx)
+{
+ struct task_desc *this_task = ctx;
+ u64 cpu_usage_0, cpu_usage_1;
+ unsigned long i, ret;
+ char comm2[22];
+ int fd;
+
+ sprintf(comm2, ":%s", this_task->comm);
+ prctl(PR_SET_NAME, comm2);
+ fd = self_open_counters();
+
+again:
+ ret = sem_post(&this_task->ready_for_work);
+ BUG_ON(ret);
+ ret = pthread_mutex_lock(&start_work_mutex);
+ BUG_ON(ret);
+ ret = pthread_mutex_unlock(&start_work_mutex);
+ BUG_ON(ret);
+
+ cpu_usage_0 = get_cpu_usage_nsec_self(fd);
+
+ for (i = 0; i < this_task->nr_events; i++) {
+ this_task->curr_event = i;
+ process_sched_event(this_task, this_task->atoms[i]);
+ }
+
+ cpu_usage_1 = get_cpu_usage_nsec_self(fd);
+ this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
+ ret = sem_post(&this_task->work_done_sem);
+ BUG_ON(ret);
+
+ ret = pthread_mutex_lock(&work_done_wait_mutex);
+ BUG_ON(ret);
+ ret = pthread_mutex_unlock(&work_done_wait_mutex);
+ BUG_ON(ret);
+
+ goto again;
+}
+
+static void create_tasks(void)
+{
+ struct task_desc *task;
+ pthread_attr_t attr;
+ unsigned long i;
+ int err;
+
+ err = pthread_attr_init(&attr);
+ BUG_ON(err);
+ err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
+ BUG_ON(err);
+ err = pthread_mutex_lock(&start_work_mutex);
+ BUG_ON(err);
+ err = pthread_mutex_lock(&work_done_wait_mutex);
+ BUG_ON(err);
+ for (i = 0; i < nr_tasks; i++) {
+ task = tasks[i];
+ sem_init(&task->sleep_sem, 0, 0);
+ sem_init(&task->ready_for_work, 0, 0);
+ sem_init(&task->work_done_sem, 0, 0);
+ task->curr_event = 0;
+ err = pthread_create(&task->thread, &attr, thread_func, task);
+ BUG_ON(err);
+ }
+}
+
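+/*
+ * Let the worker threads run one replay pass, wait for them to finish
+ * and sum up the per-task CPU usage.
+ */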
+static void wait_for_tasks(void)
+{
+ u64 cpu_usage_0, cpu_usage_1;
+ struct task_desc *task;
+ unsigned long i, ret;
+
+ start_time = get_nsecs();
+ cpu_usage = 0;
+ pthread_mutex_unlock(&work_done_wait_mutex);
+
+ for (i = 0; i < nr_tasks; i++) {
+ task = tasks[i];
+ ret = sem_wait(&task->ready_for_work);
+ BUG_ON(ret);
+ sem_init(&task->ready_for_work, 0, 0);
+ }
+ ret = pthread_mutex_lock(&work_done_wait_mutex);
+ BUG_ON(ret);
+
+ cpu_usage_0 = get_cpu_usage_nsec_parent();
+
+ pthread_mutex_unlock(&start_work_mutex);
+
+ for (i = 0; i < nr_tasks; i++) {
+ task = tasks[i];
+ ret = sem_wait(&task->work_done_sem);
+ BUG_ON(ret);
+ sem_init(&task->work_done_sem, 0, 0);
+ cpu_usage += task->cpu_usage;
+ task->cpu_usage = 0;
+ }
+
+ cpu_usage_1 = get_cpu_usage_nsec_parent();
+ if (!runavg_cpu_usage)
+ runavg_cpu_usage = cpu_usage;
+ runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;
+
+ parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
+ if (!runavg_parent_cpu_usage)
+ runavg_parent_cpu_usage = parent_cpu_usage;
+ runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
+ parent_cpu_usage)/10;
+
+ ret = pthread_mutex_lock(&start_work_mutex);
+ BUG_ON(ret);
+
+ for (i = 0; i < nr_tasks; i++) {
+ task = tasks[i];
+ sem_init(&task->sleep_sem, 0, 0);
+ task->curr_event = 0;
+ }
+}
+
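+/* Run one replay iteration and update the running average and fluctuation stats. */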
+static void run_one_test(void)
+{
+ u64 T0, T1, delta, avg_delta, fluct, std_dev;
+
+ T0 = get_nsecs();
+ wait_for_tasks();
+ T1 = get_nsecs();
+
+ delta = T1 - T0;
+ sum_runtime += delta;
+ nr_runs++;
+
+ avg_delta = sum_runtime / nr_runs;
+ if (delta < avg_delta)
+ fluct = avg_delta - delta;
+ else
+ fluct = delta - avg_delta;
+ sum_fluct += fluct;
+ std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
+ if (!run_avg)
+ run_avg = delta;
+ run_avg = (run_avg*9 + delta)/10;
+
+ printf("#%-3ld: %0.3f, ",
+ nr_runs, (double)delta/1000000.0);
+
+ printf("ravg: %0.2f, ",
+ (double)run_avg/1e6);
+
+ printf("cpu: %0.2f / %0.2f",
+ (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);
+
+#if 0
+ /*
+ * rusage statistics done by the parent, these are less
+ * accurate than the sum_exec_runtime based statistics:
+ */
+ printf(" [%0.2f / %0.2f]",
+ (double)parent_cpu_usage/1e6,
+ (double)runavg_parent_cpu_usage/1e6);
+#endif
+
+ printf("\n");
+
+ if (nr_sleep_corrections)
+ printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
+ nr_sleep_corrections = 0;
+}
+
+static void test_calibrations(void)
+{
+ u64 T0, T1;
+
+ T0 = get_nsecs();
+ burn_nsecs(1e6);
+ T1 = get_nsecs();
+
+ printf("the run test took %Ld nsecs\n", T1-T0);
+
+ T0 = get_nsecs();
+ sleep_nsecs(1e6);
+ T1 = get_nsecs();
+
+ printf("the sleep test took %Ld nsecs\n", T1-T0);
+}
+
+#define FILL_FIELD(ptr, field, event, data) \
+ ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
+
+#define FILL_ARRAY(ptr, array, event, data) \
+do { \
+ void *__array = raw_field_ptr(event, #array, data); \
+ memcpy(ptr.array, __array, sizeof(ptr.array)); \
+} while(0)
+
+#define FILL_COMMON_FIELDS(ptr, event, data) \
+do { \
+ FILL_FIELD(ptr, common_type, event, data); \
+ FILL_FIELD(ptr, common_flags, event, data); \
+ FILL_FIELD(ptr, common_preempt_count, event, data); \
+ FILL_FIELD(ptr, common_pid, event, data); \
+ FILL_FIELD(ptr, common_tgid, event, data); \
+} while (0)
+
+
+
+struct trace_switch_event {
+ u32 size;
+
+ u16 common_type;
+ u8 common_flags;
+ u8 common_preempt_count;
+ u32 common_pid;
+ u32 common_tgid;
+
+ char prev_comm[16];
+ u32 prev_pid;
+ u32 prev_prio;
+ u64 prev_state;
+ char next_comm[16];
+ u32 next_pid;
+ u32 next_prio;
+};
+
+struct trace_runtime_event {
+ u32 size;
+
+ u16 common_type;
+ u8 common_flags;
+ u8 common_preempt_count;
+ u32 common_pid;
+ u32 common_tgid;
+
+ char comm[16];
+ u32 pid;
+ u64 runtime;
+ u64 vruntime;
+};
+
+struct trace_wakeup_event {
+ u32 size;
+
+ u16 common_type;
+ u8 common_flags;
+ u8 common_preempt_count;
+ u32 common_pid;
+ u32 common_tgid;
+
+ char comm[16];
+ u32 pid;
+
+ u32 prio;
+ u32 success;
+ u32 cpu;
+};
+
+struct trace_fork_event {
+ u32 size;
+
+ u16 common_type;
+ u8 common_flags;
+ u8 common_preempt_count;
+ u32 common_pid;
+ u32 common_tgid;
+
+ char parent_comm[16];
+ u32 parent_pid;
+ char child_comm[16];
+ u32 child_pid;
+};
+
+struct trace_migrate_task_event {
+ u32 size;
+
+ u16 common_type;
+ u8 common_flags;
+ u8 common_preempt_count;
+ u32 common_pid;
+ u32 common_tgid;
+
+ char comm[16];
+ u32 pid;
+
+ u32 prio;
+ u32 cpu;
+};
+
+struct trace_sched_handler {
+ void (*switch_event)(struct trace_switch_event *,
+ struct perf_session *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*runtime_event)(struct trace_runtime_event *,
+ struct perf_session *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*wakeup_event)(struct trace_wakeup_event *,
+ struct perf_session *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*fork_event)(struct trace_fork_event *,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+
+ void (*migrate_task_event)(struct trace_migrate_task_event *,
+ struct perf_session *session,
+ struct event *,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread);
+};
+
+
+static void
+replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
+ struct perf_session *session __used,
+ struct event *event,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct task_desc *waker, *wakee;
+
+ if (verbose) {
+ printf("sched_wakeup event %p\n", event);
+
+ printf(" ... pid %d woke up %s/%d\n",
+ wakeup_event->common_pid,
+ wakeup_event->comm,
+ wakeup_event->pid);
+ }
+
+ waker = register_pid(wakeup_event->common_pid, "<unknown>");
+ wakee = register_pid(wakeup_event->pid, wakeup_event->comm);
+
+ add_sched_event_wakeup(waker, timestamp, wakee);
+}
+
+static u64 cpu_last_switched[MAX_CPUS];
+
+static void
+replay_switch_event(struct trace_switch_event *switch_event,
+ struct perf_session *session __used,
+ struct event *event,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct task_desc *prev, *next;
+ u64 timestamp0;
+ s64 delta;
+
+ if (verbose)
+ printf("sched_switch event %p\n", event);
+
+ if (cpu >= MAX_CPUS || cpu < 0)
+ return;
+
+ timestamp0 = cpu_last_switched[cpu];
+ if (timestamp0)
+ delta = timestamp - timestamp0;
+ else
+ delta = 0;
+
+ if (delta < 0)
+ die("hm, delta: %Ld < 0 ?\n", delta);
+
+ if (verbose) {
+ printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
+ switch_event->prev_comm, switch_event->prev_pid,
+ switch_event->next_comm, switch_event->next_pid,
+ delta);
+ }
+
+ prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
+ next = register_pid(switch_event->next_pid, switch_event->next_comm);
+
+ cpu_last_switched[cpu] = timestamp;
+
+ add_sched_event_run(prev, timestamp, delta);
+ add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
+}
+
+
+static void
+replay_fork_event(struct trace_fork_event *fork_event,
+ struct event *event,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ if (verbose) {
+ printf("sched_fork event %p\n", event);
+ printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
+ printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
+ }
+ register_pid(fork_event->parent_pid, fork_event->parent_comm);
+ register_pid(fork_event->child_pid, fork_event->child_comm);
+}
+
+static struct trace_sched_handler replay_ops = {
+ .wakeup_event = replay_wakeup_event,
+ .switch_event = replay_switch_event,
+ .fork_event = replay_fork_event,
+};
+
+struct sort_dimension {
+ const char *name;
+ sort_fn_t cmp;
+ struct list_head list;
+};
+
+static LIST_HEAD(cmp_pid);
+
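+/* Compare two work_atoms entries by each sort key in 'list', in order. */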
+static int
+thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
+{
+ struct sort_dimension *sort;
+ int ret = 0;
+
+ BUG_ON(list_empty(list));
+
+ list_for_each_entry(sort, list, list) {
+ ret = sort->cmp(l, r);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct work_atoms *
+thread_atoms_search(struct rb_root *root, struct thread *thread,
+ struct list_head *sort_list)
+{
+ struct rb_node *node = root->rb_node;
+ struct work_atoms key = { .thread = thread };
+
+ while (node) {
+ struct work_atoms *atoms;
+ int cmp;
+
+ atoms = container_of(node, struct work_atoms, node);
+
+ cmp = thread_lat_cmp(sort_list, &key, atoms);
+ if (cmp > 0)
+ node = node->rb_left;
+ else if (cmp < 0)
+ node = node->rb_right;
+ else {
+ BUG_ON(thread != atoms->thread);
+ return atoms;
+ }
+ }
+ return NULL;
+}
+
+static void
+__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
+ struct list_head *sort_list)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ while (*new) {
+ struct work_atoms *this;
+ int cmp;
+
+ this = container_of(*new, struct work_atoms, node);
+ parent = *new;
+
+ cmp = thread_lat_cmp(sort_list, data, this);
+
+ if (cmp > 0)
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
+ }
+
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+}
+
+static void thread_atoms_insert(struct thread *thread)
+{
+ struct work_atoms *atoms = zalloc(sizeof(*atoms));
+ if (!atoms)
+ die("No memory");
+
+ atoms->thread = thread;
+ INIT_LIST_HEAD(&atoms->work_list);
+ __thread_latency_insert(&atom_root, atoms, &cmp_pid);
+}
+
+static void
+latency_fork_event(struct trace_fork_event *fork_event __used,
+ struct event *event __used,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ /* should insert the newcomer */
+}
+
+__used
+static char sched_out_state(struct trace_switch_event *switch_event)
+{
+ const char *str = TASK_STATE_TO_CHAR_STR;
+
+ return str[switch_event->prev_state];
+}
+
+static void
+add_sched_out_event(struct work_atoms *atoms,
+ char run_state,
+ u64 timestamp)
+{
+ struct work_atom *atom = zalloc(sizeof(*atom));
+ if (!atom)
+ die("No memory");
+
+ atom->sched_out_time = timestamp;
+
+ if (run_state == 'R') {
+ atom->state = THREAD_WAIT_CPU;
+ atom->wake_up_time = atom->sched_out_time;
+ }
+
+ list_add_tail(&atom->list, &atoms->work_list);
+}
+
+static void
+add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
+{
+ struct work_atom *atom;
+
+ BUG_ON(list_empty(&atoms->work_list));
+
+ atom = list_entry(atoms->work_list.prev, struct work_atom, list);
+
+ atom->runtime += delta;
+ atoms->total_runtime += delta;
+}
+
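+/*
+ * The thread got scheduled in: if its last atom was waiting for a CPU,
+ * account the wakeup-to-schedule-in delta as scheduling latency.
+ */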
+static void
+add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
+{
+ struct work_atom *atom;
+ u64 delta;
+
+ if (list_empty(&atoms->work_list))
+ return;
+
+ atom = list_entry(atoms->work_list.prev, struct work_atom, list);
+
+ if (atom->state != THREAD_WAIT_CPU)
+ return;
+
+ if (timestamp < atom->wake_up_time) {
+ atom->state = THREAD_IGNORE;
+ return;
+ }
+
+ atom->state = THREAD_SCHED_IN;
+ atom->sched_in_time = timestamp;
+
+ delta = atom->sched_in_time - atom->wake_up_time;
+ atoms->total_lat += delta;
+ if (delta > atoms->max_lat) {
+ atoms->max_lat = delta;
+ atoms->max_lat_at = timestamp;
+ }
+ atoms->nb_atoms++;
+}
+
+static void
+latency_switch_event(struct trace_switch_event *switch_event,
+ struct perf_session *session,
+ struct event *event __used,
+ int cpu,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct work_atoms *out_events, *in_events;
+ struct thread *sched_out, *sched_in;
+ u64 timestamp0;
+ s64 delta;
+
+ BUG_ON(cpu >= MAX_CPUS || cpu < 0);
+
+ timestamp0 = cpu_last_switched[cpu];
+ cpu_last_switched[cpu] = timestamp;
+ if (timestamp0)
+ delta = timestamp - timestamp0;
+ else
+ delta = 0;
+
+ if (delta < 0)
+ die("hm, delta: %Ld < 0 ?\n", delta);
+
+
+ sched_out = perf_session__findnew(session, switch_event->prev_pid);
+ sched_in = perf_session__findnew(session, switch_event->next_pid);
+
+ out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
+ if (!out_events) {
+ thread_atoms_insert(sched_out);
+ out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
+ if (!out_events)
+ die("out-event: Internal tree error");
+ }
+ add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);
+
+ in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
+ if (!in_events) {
+ thread_atoms_insert(sched_in);
+ in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
+ if (!in_events)
+ die("in-event: Internal tree error");
+ /*
+ * A task came in that we have not heard about yet,
+ * add an initial atom in runnable state:
+ */
+ add_sched_out_event(in_events, 'R', timestamp);
+ }
+ add_sched_in_event(in_events, timestamp);
+}
+
+static void
+latency_runtime_event(struct trace_runtime_event *runtime_event,
+ struct perf_session *session,
+ struct event *event __used,
+ int cpu,
+ u64 timestamp,
+ struct thread *this_thread __used)
+{
+ struct thread *thread = perf_session__findnew(session, runtime_event->pid);
+ struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
+
+ BUG_ON(cpu >= MAX_CPUS || cpu < 0);
+ if (!atoms) {
+ thread_atoms_insert(thread);
+ atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
+ if (!atoms)
+ die("in-event: Internal tree error");
+ add_sched_out_event(atoms, 'R', timestamp);
+ }
+
+ add_runtime_event(atoms, runtime_event->runtime, timestamp);
+}
+
+static void
+latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
+ struct perf_session *session,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct work_atoms *atoms;
+ struct work_atom *atom;
+ struct thread *wakee;
+
+ /* Note for later, it may be interesting to observe the failing cases */
+ if (!wakeup_event->success)
+ return;
+
+ wakee = perf_session__findnew(session, wakeup_event->pid);
+ atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
+ if (!atoms) {
+ thread_atoms_insert(wakee);
+ atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
+ if (!atoms)
+ die("wakeup-event: Internal tree error");
+ add_sched_out_event(atoms, 'S', timestamp);
+ }
+
+ BUG_ON(list_empty(&atoms->work_list));
+
+ atom = list_entry(atoms->work_list.prev, struct work_atom, list);
+
+ /*
+ * You WILL be missing events if you've recorded only
+ * one CPU, or are only looking at one, so don't
+ * make useless noise.
+ */
+ if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
+ nr_state_machine_bugs++;
+
+ nr_timestamps++;
+ if (atom->sched_out_time > timestamp) {
+ nr_unordered_timestamps++;
+ return;
+ }
+
+ atom->state = THREAD_WAIT_CPU;
+ atom->wake_up_time = timestamp;
+}
+
+static void
+latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
+ struct perf_session *session,
+ struct event *__event __used,
+ int cpu __used,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct work_atoms *atoms;
+ struct work_atom *atom;
+ struct thread *migrant;
+
+ /*
+ * Only need to worry about migration when profiling one CPU.
+ */
+ if (profile_cpu == -1)
+ return;
+
+ migrant = perf_session__findnew(session, migrate_task_event->pid);
+ atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+ if (!atoms) {
+ thread_atoms_insert(migrant);
+ register_pid(migrant->pid, migrant->comm);
+ atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
+ if (!atoms)
+ die("migration-event: Internal tree error");
+ add_sched_out_event(atoms, 'R', timestamp);
+ }
+
+ BUG_ON(list_empty(&atoms->work_list));
+
+ atom = list_entry(atoms->work_list.prev, struct work_atom, list);
+ atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
+
+ nr_timestamps++;
+
+ if (atom->sched_out_time > timestamp)
+ nr_unordered_timestamps++;
+}
+
+static struct trace_sched_handler lat_ops = {
+ .wakeup_event = latency_wakeup_event,
+ .switch_event = latency_switch_event,
+ .runtime_event = latency_runtime_event,
+ .fork_event = latency_fork_event,
+ .migrate_task_event = latency_migrate_task_event,
+};
+
+static void output_lat_thread(struct work_atoms *work_list)
+{
+ int i;
+ int ret;
+ u64 avg;
+
+ if (!work_list->nb_atoms)
+ return;
+ /*
+ * Ignore idle threads:
+ */
+ if (!strcmp(work_list->thread->comm, "swapper"))
+ return;
+
+ all_runtime += work_list->total_runtime;
+ all_count += work_list->nb_atoms;
+
+ ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid);
+
+ for (i = 0; i < 24 - ret; i++)
+ printf(" ");
+
+ avg = work_list->total_lat / work_list->nb_atoms;
+
+ printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
+ (double)work_list->total_runtime / 1e6,
+ work_list->nb_atoms, (double)avg / 1e6,
+ (double)work_list->max_lat / 1e6,
+ (double)work_list->max_lat_at / 1e9);
+}
+
+static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
+{
+ if (l->thread->pid < r->thread->pid)
+ return -1;
+ if (l->thread->pid > r->thread->pid)
+ return 1;
+
+ return 0;
+}
+
+static struct sort_dimension pid_sort_dimension = {
+ .name = "pid",
+ .cmp = pid_cmp,
+};
+
+static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
+{
+ u64 avgl, avgr;
+
+ if (!l->nb_atoms)
+ return -1;
+
+ if (!r->nb_atoms)
+ return 1;
+
+ avgl = l->total_lat / l->nb_atoms;
+ avgr = r->total_lat / r->nb_atoms;
+
+ if (avgl < avgr)
+ return -1;
+ if (avgl > avgr)
+ return 1;
+
+ return 0;
+}
+
+static struct sort_dimension avg_sort_dimension = {
+ .name = "avg",
+ .cmp = avg_cmp,
+};
+
+static int max_cmp(struct work_atoms *l, struct work_atoms *r)
+{
+ if (l->max_lat < r->max_lat)
+ return -1;
+ if (l->max_lat > r->max_lat)
+ return 1;
+
+ return 0;
+}
+
+static struct sort_dimension max_sort_dimension = {
+ .name = "max",
+ .cmp = max_cmp,
+};
+
+static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
+{
+ if (l->nb_atoms < r->nb_atoms)
+ return -1;
+ if (l->nb_atoms > r->nb_atoms)
+ return 1;
+
+ return 0;
+}
+
+static struct sort_dimension switch_sort_dimension = {
+ .name = "switch",
+ .cmp = switch_cmp,
+};
+
+static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
+{
+ if (l->total_runtime < r->total_runtime)
+ return -1;
+ if (l->total_runtime > r->total_runtime)
+ return 1;
+
+ return 0;
+}
+
+static struct sort_dimension runtime_sort_dimension = {
+ .name = "runtime",
+ .cmp = runtime_cmp,
+};
+
+static struct sort_dimension *available_sorts[] = {
+ &pid_sort_dimension,
+ &avg_sort_dimension,
+ &max_sort_dimension,
+ &switch_sort_dimension,
+ &runtime_sort_dimension,
+};
+
+#define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
+
+static LIST_HEAD(sort_list);
+
+static int sort_dimension__add(const char *tok, struct list_head *list)
+{
+ int i;
+
+ for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
+ if (!strcmp(available_sorts[i]->name, tok)) {
+ list_add_tail(&available_sorts[i]->list, list);
+
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static void setup_sorting(void);
+
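+/* Re-sort the latency atoms from the pid-keyed tree into the user-selected order. */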
+static void sort_lat(void)
+{
+ struct rb_node *node;
+
+ for (;;) {
+ struct work_atoms *data;
+ node = rb_first(&atom_root);
+ if (!node)
+ break;
+
+ rb_erase(node, &atom_root);
+ data = rb_entry(node, struct work_atoms, node);
+ __thread_latency_insert(&sorted_atom_root, data, &sort_list);
+ }
+}
+
+static struct trace_sched_handler *trace_handler;
+
+static void
+process_sched_wakeup_event(void *data, struct perf_session *session,
+ struct event *event,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_wakeup_event wakeup_event;
+
+ FILL_COMMON_FIELDS(wakeup_event, event, data);
+
+ FILL_ARRAY(wakeup_event, comm, event, data);
+ FILL_FIELD(wakeup_event, pid, event, data);
+ FILL_FIELD(wakeup_event, prio, event, data);
+ FILL_FIELD(wakeup_event, success, event, data);
+ FILL_FIELD(wakeup_event, cpu, event, data);
+
+ if (trace_handler->wakeup_event)
+ trace_handler->wakeup_event(&wakeup_event, session, event,
+ cpu, timestamp, thread);
+}
+
+/*
+ * Track the current task - that way we can know whether there are any
+ * weird events, such as a task being switched away that is not current.
+ */
+static int max_cpu;
+
+static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };
+
+static struct thread *curr_thread[MAX_CPUS];
+
+static char next_shortname1 = 'A';
+static char next_shortname2 = '0';
+
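+/*
+ * Print one map line per switch: which two-character task shortname is
+ * running on each CPU, marking the CPU where this switch happened.
+ */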
+static void
+map_switch_event(struct trace_switch_event *switch_event,
+ struct perf_session *session,
+ struct event *event __used,
+ int this_cpu,
+ u64 timestamp,
+ struct thread *thread __used)
+{
+ struct thread *sched_out, *sched_in;
+ int new_shortname;
+ u64 timestamp0;
+ s64 delta;
+ int cpu;
+
+ BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
+
+ if (this_cpu > max_cpu)
+ max_cpu = this_cpu;
+
+ timestamp0 = cpu_last_switched[this_cpu];
+ cpu_last_switched[this_cpu] = timestamp;
+ if (timestamp0)
+ delta = timestamp - timestamp0;
+ else
+ delta = 0;
+
+ if (delta < 0)
+ die("hm, delta: %Ld < 0 ?\n", delta);
+
+
+ sched_out = perf_session__findnew(session, switch_event->prev_pid);
+ sched_in = perf_session__findnew(session, switch_event->next_pid);
+
+ curr_thread[this_cpu] = sched_in;
+
+ printf(" ");
+
+ new_shortname = 0;
+ if (!sched_in->shortname[0]) {
+ sched_in->shortname[0] = next_shortname1;
+ sched_in->shortname[1] = next_shortname2;
+
+ if (next_shortname1 < 'Z') {
+ next_shortname1++;
+ } else {
+ next_shortname1='A';
+ if (next_shortname2 < '9') {
+ next_shortname2++;
+ } else {
+ next_shortname2='0';
+ }
+ }
+ new_shortname = 1;
+ }
+
+ for (cpu = 0; cpu <= max_cpu; cpu++) {
+ if (cpu != this_cpu)
+ printf(" ");
+ else
+ printf("*");
+
+ if (curr_thread[cpu]) {
+ if (curr_thread[cpu]->pid)
+ printf("%2s ", curr_thread[cpu]->shortname);
+ else
+ printf(". ");
+ } else
+ printf(" ");
+ }
+
+ printf(" %12.6f secs ", (double)timestamp/1e9);
+ if (new_shortname) {
+ printf("%s => %s:%d\n",
+ sched_in->shortname, sched_in->comm, sched_in->pid);
+ } else {
+ printf("\n");
+ }
+}
+
+
+static void
+process_sched_switch_event(void *data, struct perf_session *session,
+ struct event *event,
+ int this_cpu,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_switch_event switch_event;
+
+ FILL_COMMON_FIELDS(switch_event, event, data);
+
+ FILL_ARRAY(switch_event, prev_comm, event, data);
+ FILL_FIELD(switch_event, prev_pid, event, data);
+ FILL_FIELD(switch_event, prev_prio, event, data);
+ FILL_FIELD(switch_event, prev_state, event, data);
+ FILL_ARRAY(switch_event, next_comm, event, data);
+ FILL_FIELD(switch_event, next_pid, event, data);
+ FILL_FIELD(switch_event, next_prio, event, data);
+
+ if (curr_pid[this_cpu] != (u32)-1) {
+ /*
+ * Are we trying to switch away a PID that is
+ * not current?
+ */
+ if (curr_pid[this_cpu] != switch_event.prev_pid)
+ nr_context_switch_bugs++;
+ }
+ if (trace_handler->switch_event)
+ trace_handler->switch_event(&switch_event, session, event,
+ this_cpu, timestamp, thread);
+
+ curr_pid[this_cpu] = switch_event.next_pid;
+}
+
+static void
+process_sched_runtime_event(void *data, struct perf_session *session,
+ struct event *event,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_runtime_event runtime_event;
+
+ FILL_ARRAY(runtime_event, comm, event, data);
+ FILL_FIELD(runtime_event, pid, event, data);
+ FILL_FIELD(runtime_event, runtime, event, data);
+ FILL_FIELD(runtime_event, vruntime, event, data);
+
+ if (trace_handler->runtime_event)
+ trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
+}
+
+static void
+process_sched_fork_event(void *data,
+ struct event *event,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_fork_event fork_event;
+
+ FILL_COMMON_FIELDS(fork_event, event, data);
+
+ FILL_ARRAY(fork_event, parent_comm, event, data);
+ FILL_FIELD(fork_event, parent_pid, event, data);
+ FILL_ARRAY(fork_event, child_comm, event, data);
+ FILL_FIELD(fork_event, child_pid, event, data);
+
+ if (trace_handler->fork_event)
+ trace_handler->fork_event(&fork_event, event,
+ cpu, timestamp, thread);
+}
+
+static void
+process_sched_exit_event(struct event *event,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ if (verbose)
+ printf("sched_exit event %p\n", event);
+}
+
+static void
+process_sched_migrate_task_event(void *data, struct perf_session *session,
+ struct event *event,
+ int cpu __used,
+ u64 timestamp __used,
+ struct thread *thread __used)
+{
+ struct trace_migrate_task_event migrate_task_event;
+
+ FILL_COMMON_FIELDS(migrate_task_event, event, data);
+
+ FILL_ARRAY(migrate_task_event, comm, event, data);
+ FILL_FIELD(migrate_task_event, pid, event, data);
+ FILL_FIELD(migrate_task_event, prio, event, data);
+ FILL_FIELD(migrate_task_event, cpu, event, data);
+
+ if (trace_handler->migrate_task_event)
+ trace_handler->migrate_task_event(&migrate_task_event, session,
+ event, cpu, timestamp, thread);
+}
+
+static void
+process_raw_event(event_t *raw_event __used, struct perf_session *session,
+ void *data, int cpu, u64 timestamp, struct thread *thread)
+{
+ struct event *event;
+ int type;
+
+
+ type = trace_parse_common_type(data);
+ event = trace_find_event(type);
+
+ if (!strcmp(event->name, "sched_switch"))
+ process_sched_switch_event(data, session, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "sched_stat_runtime"))
+ process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "sched_wakeup"))
+ process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "sched_wakeup_new"))
+ process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "sched_process_fork"))
+ process_sched_fork_event(data, event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "sched_process_exit"))
+ process_sched_exit_event(event, cpu, timestamp, thread);
+ if (!strcmp(event->name, "sched_migrate_task"))
+ process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
+}
+
+static int process_sample_event(event_t *event, struct perf_session *session)
+{
+ struct sample_data data;
+ struct thread *thread;
+
+ if (!(session->sample_type & PERF_SAMPLE_RAW))
+ return 0;
+
+ memset(&data, 0, sizeof(data));
+ data.time = -1;
+ data.cpu = -1;
+ data.period = -1;
+
+ event__parse_sample(event, session->sample_type, &data);
+
+ dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
+ data.pid, data.tid, data.ip, data.period);
+
+ thread = perf_session__findnew(session, data.pid);
+ if (thread == NULL) {
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+
+ if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
+ return 0;
+
+ process_raw_event(event, session, data.raw_data, data.cpu, data.time, thread);
+
+ return 0;
+}
+
+static struct perf_event_ops event_ops = {
+ .sample = process_sample_event,
+ .comm = event__process_comm,
+ .lost = event__process_lost,
+ .fork = event__process_task,
+ .ordered_samples = true,
+};
+
+static int read_events(void)
+{
+ int err = -EINVAL;
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0, false);
+ if (session == NULL)
+ return -ENOMEM;
+
+ if (perf_session__has_traces(session, "record -R")) {
+ err = perf_session__process_events(session, &event_ops);
+ nr_events = session->hists.stats.nr_events[0];
+ nr_lost_events = session->hists.stats.total_lost;
+ nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
+ }
+
+ perf_session__delete(session);
+ return err;
+}
+
+static void print_bad_events(void)
+{
+ if (nr_unordered_timestamps && nr_timestamps) {
+ printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
+ (double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
+ nr_unordered_timestamps, nr_timestamps);
+ }
+ if (nr_lost_events && nr_events) {
+ printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
+ (double)nr_lost_events/(double)nr_events*100.0,
+ nr_lost_events, nr_events, nr_lost_chunks);
+ }
+ if (nr_state_machine_bugs && nr_timestamps) {
+ printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)",
+ (double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
+ nr_state_machine_bugs, nr_timestamps);
+ if (nr_lost_events)
+ printf(" (due to lost events?)");
+ printf("\n");
+ }
+ if (nr_context_switch_bugs && nr_timestamps) {
+ printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
+ (double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
+ nr_context_switch_bugs, nr_timestamps);
+ if (nr_lost_events)
+ printf(" (due to lost events?)");
+ printf("\n");
+ }
+}
+
+static void __cmd_lat(void)
+{
+ struct rb_node *next;
+
+ setup_pager();
+ read_events();
+ sort_lat();
+
+ printf("\n ---------------------------------------------------------------------------------------------------------------\n");
+ printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
+ printf(" ---------------------------------------------------------------------------------------------------------------\n");
+
+ next = rb_first(&sorted_atom_root);
+
+ while (next) {
+ struct work_atoms *work_list;
+
+ work_list = rb_entry(next, struct work_atoms, node);
+ output_lat_thread(work_list);
+ next = rb_next(next);
+ }
+
+ printf(" -----------------------------------------------------------------------------------------\n");
+ printf(" TOTAL: |%11.3f ms |%9Ld |\n",
+ (double)all_runtime/1e6, all_count);
+
+ printf(" ---------------------------------------------------\n");
+
+ print_bad_events();
+ printf("\n");
+
+}
+
+static struct trace_sched_handler map_ops = {
+ .wakeup_event = NULL,
+ .switch_event = map_switch_event,
+ .runtime_event = NULL,
+ .fork_event = NULL,
+};
+
+static void __cmd_map(void)
+{
+ max_cpu = sysconf(_SC_NPROCESSORS_CONF);
+
+ setup_pager();
+ read_events();
+ print_bad_events();
+}
+
+static void __cmd_replay(void)
+{
+ unsigned long i;
+
+ calibrate_run_measurement_overhead();
+ calibrate_sleep_measurement_overhead();
+
+ test_calibrations();
+
+ read_events();
+
+ printf("nr_run_events: %ld\n", nr_run_events);
+ printf("nr_sleep_events: %ld\n", nr_sleep_events);
+ printf("nr_wakeup_events: %ld\n", nr_wakeup_events);
+
+ if (targetless_wakeups)
+ printf("target-less wakeups: %ld\n", targetless_wakeups);
+ if (multitarget_wakeups)
+ printf("multi-target wakeups: %ld\n", multitarget_wakeups);
+ if (nr_run_events_optimized)
+ printf("run atoms optimized: %ld\n",
+ nr_run_events_optimized);
+
+ print_task_traces();
+ add_cross_task_wakeups();
+
+ create_tasks();
+ printf("------------------------------------------------------------\n");
+ for (i = 0; i < replay_repeat; i++)
+ run_one_test();
+}
+
+
+static const char * const sched_usage[] = {
+ "perf sched [<options>] {record|latency|map|replay|trace}",
+ NULL
+};
+
+static const struct option sched_options[] = {
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_END()
+};
+
+static const char * const latency_usage[] = {
+ "perf sched latency [<options>]",
+ NULL
+};
+
+static const struct option latency_options[] = {
+ OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
+ "sort by key(s): runtime, switch, avg, max"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_INTEGER('C', "CPU", &profile_cpu,
+ "CPU to profile on"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_END()
+};
+
+static const char * const replay_usage[] = {
+ "perf sched replay [<options>]",
+ NULL
+};
+
+static const struct option replay_options[] = {
+ OPT_UINTEGER('r', "repeat", &replay_repeat,
+ "repeat the workload replay N times (-1: infinite)"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_END()
+};
+
+static void setup_sorting(void)
+{
+ char *tmp, *tok, *str = strdup(sort_order);
+
+ for (tok = strtok_r(str, ", ", &tmp);
+ tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ if (sort_dimension__add(tok, &sort_list) < 0) {
+ error("Unknown --sort key: `%s'", tok);
+ usage_with_options(latency_usage, latency_options);
+ }
+ }
+
+ free(str);
+
+ sort_dimension__add("pid", &cmp_pid);
+}
+
+static const char *record_args[] = {
+ "record",
+ "-a",
+ "-R",
+ "-f",
+ "-m", "1024",
+ "-c", "1",
+ "-e", "sched:sched_switch:r",
+ "-e", "sched:sched_stat_wait:r",
+ "-e", "sched:sched_stat_sleep:r",
+ "-e", "sched:sched_stat_iowait:r",
+ "-e", "sched:sched_stat_runtime:r",
+ "-e", "sched:sched_process_exit:r",
+ "-e", "sched:sched_process_fork:r",
+ "-e", "sched:sched_wakeup:r",
+ "-e", "sched:sched_migrate_task:r",
+};
+
+static int __cmd_record(int argc, const char **argv)
+{
+ unsigned int rec_argc, i, j;
+ const char **rec_argv;
+
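+	/*
+	 * The fixed record arguments plus whatever the user passed after
+	 * the subcommand name (hence the "- 1").
+	 */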
+ rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = strdup(record_args[i]);
+
+ for (j = 1; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+int cmd_sched(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, sched_options, sched_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc)
+ usage_with_options(sched_usage, sched_options);
+
+ /*
+ * Aliased to 'perf trace' for now:
+ */
+ if (!strcmp(argv[0], "trace"))
+ return cmd_trace(argc, argv, prefix);
+
+ symbol__init();
+ if (!strncmp(argv[0], "rec", 3)) {
+ return __cmd_record(argc, argv);
+ } else if (!strncmp(argv[0], "lat", 3)) {
+ trace_handler = &lat_ops;
+ if (argc > 1) {
+ argc = parse_options(argc, argv, latency_options, latency_usage, 0);
+ if (argc)
+ usage_with_options(latency_usage, latency_options);
+ }
+ setup_sorting();
+ __cmd_lat();
+ } else if (!strcmp(argv[0], "map")) {
+ trace_handler = &map_ops;
+ setup_sorting();
+ __cmd_map();
+ } else if (!strncmp(argv[0], "rep", 3)) {
+ trace_handler = &replay_ops;
+ if (argc) {
+ argc = parse_options(argc, argv, replay_options, replay_usage, 0);
+ if (argc)
+ usage_with_options(replay_usage, replay_options);
+ }
+ __cmd_replay();
+ } else {
+ usage_with_options(sched_usage, sched_options);
+ }
+
+ return 0;
+}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
new file mode 100644
index 00000000..a6b4d44f
--- /dev/null
+++ b/tools/perf/builtin-stat.c
@@ -0,0 +1,626 @@
+/*
+ * builtin-stat.c
+ *
+ * Builtin stat command: Give a precise performance counter summary
+ * overview of any workload, CPU or specific PID.
+ *
+ * Sample output:
+
+ $ perf stat ~/hackbench 10
+ Time: 0.104
+
+ Performance counter stats for '/home/mingo/hackbench':
+
+ 1255.538611 task clock ticks # 10.143 CPU utilization factor
+ 54011 context switches # 0.043 M/sec
+ 385 CPU migrations # 0.000 M/sec
+ 17755 pagefaults # 0.014 M/sec
+ 3808323185 CPU cycles # 3033.219 M/sec
+ 1575111190 instructions # 1254.530 M/sec
+ 17367895 cache references # 13.833 M/sec
+ 7674421 cache misses # 6.112 M/sec
+
+ Wall-clock time elapsed: 123.786620 msecs
+
+ *
+ * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
+ *
+ * Improvements and fixes by:
+ *
+ * Arjan van de Ven <arjan@linux.intel.com>
+ * Yanmin Zhang <yanmin.zhang@intel.com>
+ * Wu Fengguang <fengguang.wu@intel.com>
+ * Mike Galbraith <efault@gmx.de>
+ * Paul Mackerras <paulus@samba.org>
+ * Jaswinder Singh Rajput <jaswinder@kernel.org>
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "perf.h"
+#include "builtin.h"
+#include "util/util.h"
+#include "util/parse-options.h"
+#include "util/parse-events.h"
+#include "util/event.h"
+#include "util/debug.h"
+#include "util/header.h"
+#include "util/cpumap.h"
+#include "util/thread.h"
+
+#include <sys/prctl.h>
+#include <math.h>
+#include <locale.h>
+
+static struct perf_event_attr default_attrs[] = {
+
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
+
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES },
+
+};
+
+static bool system_wide = false;
+static int nr_cpus = 0;
+static int run_idx = 0;
+
+static int run_count = 1;
+static bool no_inherit = false;
+static bool scale = true;
+static pid_t target_pid = -1;
+static pid_t target_tid = -1;
+static pid_t *all_tids = NULL;
+static int thread_num = 0;
+static pid_t child_pid = -1;
+static bool null_run = false;
+static bool big_num = false;
+static const char *cpu_list;
+
+
+static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
+
+static int event_scaled[MAX_COUNTERS];
+
+static volatile int done = 0;
+
+struct stats
+{
+ double n, mean, M2;
+};
+
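+/*
+ * Running mean/variance accumulator (Welford's online algorithm):
+ * 'mean' tracks the running average and 'M2' the sum of squared
+ * distances from the current mean, so the variance can be derived
+ * without a second pass over the values.
+ */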
+static void update_stats(struct stats *stats, u64 val)
+{
+ double delta;
+
+ stats->n++;
+ delta = val - stats->mean;
+ stats->mean += delta / stats->n;
+ stats->M2 += delta*(val - stats->mean);
+}
+
+static double avg_stats(struct stats *stats)
+{
+ return stats->mean;
+}
+
+/*
+ * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ *
+ * (\Sum n_i^2) - ((\Sum n_i)^2)/n
+ * s^2 = -------------------------------
+ * n - 1
+ *
+ * http://en.wikipedia.org/wiki/Stddev
+ *
+ * The std dev of the mean is related to the std dev by:
+ *
+ * s
+ * s_mean = -------
+ * sqrt(n)
+ *
+ */
+static double stddev_stats(struct stats *stats)
+{
+ double variance = stats->M2 / (stats->n - 1);
+ double variance_mean = variance / stats->n;
+
+ return sqrt(variance_mean);
+}
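+
+/*
+ * Example: for three runs with values 10, 12 and 14, update_stats()
+ * ends up with mean = 12 and M2 = 8, so avg_stats() returns 12 and
+ * stddev_stats() returns sqrt((8 / 2) / 3) ~= 1.15 - the std dev
+ * of the mean across those runs.
+ */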
+
+struct stats event_res_stats[MAX_COUNTERS][3];
+struct stats runtime_nsecs_stats;
+struct stats walltime_nsecs_stats;
+struct stats runtime_cycles_stats;
+struct stats runtime_branches_stats;
+
+#define MATCH_EVENT(t, c, counter) \
+ (attrs[counter].type == PERF_TYPE_##t && \
+ attrs[counter].config == PERF_COUNT_##c)
+
+#define ERR_PERF_OPEN \
+"Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n"
+
+static int create_perf_stat_counter(int counter)
+{
+ struct perf_event_attr *attr = attrs + counter;
+ int thread;
+ int ncreated = 0;
+
+ if (scale)
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+ if (system_wide) {
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ fd[cpu][counter][0] = sys_perf_event_open(attr,
+ -1, cpumap[cpu], -1, 0);
+ if (fd[cpu][counter][0] < 0)
+ pr_debug(ERR_PERF_OPEN, counter,
+ fd[cpu][counter][0], strerror(errno));
+ else
+ ++ncreated;
+ }
+ } else {
+ attr->inherit = !no_inherit;
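+		/*
+		 * When forking the workload ourselves, create the counters
+		 * disabled and let enable_on_exec start them, so that the
+		 * fork+exec overhead is not counted.
+		 */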
+ if (target_pid == -1 && target_tid == -1) {
+ attr->disabled = 1;
+ attr->enable_on_exec = 1;
+ }
+ for (thread = 0; thread < thread_num; thread++) {
+ fd[0][counter][thread] = sys_perf_event_open(attr,
+ all_tids[thread], -1, -1, 0);
+ if (fd[0][counter][thread] < 0)
+ pr_debug(ERR_PERF_OPEN, counter,
+ fd[0][counter][thread],
+ strerror(errno));
+ else
+ ++ncreated;
+ }
+ }
+
+ return ncreated;
+}
+
+/*
+ * Does the counter have nsecs as a unit?
+ */
+static inline int nsec_counter(int counter)
+{
+ if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) ||
+ MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Read out the results of a single counter:
+ */
+static void read_counter(int counter)
+{
+ u64 count[3], single_count[3];
+ int cpu;
+ size_t res, nv;
+ int scaled;
+ int i, thread;
+
+ count[0] = count[1] = count[2] = 0;
+
+ nv = scale ? 3 : 1;
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ for (thread = 0; thread < thread_num; thread++) {
+ if (fd[cpu][counter][thread] < 0)
+ continue;
+
+ res = read(fd[cpu][counter][thread],
+ single_count, nv * sizeof(u64));
+ assert(res == nv * sizeof(u64));
+
+ close(fd[cpu][counter][thread]);
+ fd[cpu][counter][thread] = -1;
+
+ count[0] += single_count[0];
+ if (scale) {
+ count[1] += single_count[1];
+ count[2] += single_count[2];
+ }
+ }
+ }
+
+ scaled = 0;
+ if (scale) {
+ if (count[2] == 0) {
+ event_scaled[counter] = -1;
+ count[0] = 0;
+ return;
+ }
+
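+		/*
+		 * time_running (count[2]) below time_enabled (count[1]) means
+		 * the counter was multiplexed; scale the raw count up by
+		 * enabled/running to estimate the full value.
+		 */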
+ if (count[2] < count[1]) {
+ event_scaled[counter] = 1;
+ count[0] = (unsigned long long)
+ ((double)count[0] * count[1] / count[2] + 0.5);
+ }
+ }
+
+ for (i = 0; i < 3; i++)
+ update_stats(&event_res_stats[counter][i], count[i]);
+
+ if (verbose) {
+ fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
+ count[0], count[1], count[2]);
+ }
+
+ /*
+ * Save the full runtime - to allow normalization during printout:
+ */
+ if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
+ update_stats(&runtime_nsecs_stats, count[0]);
+ if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
+ update_stats(&runtime_cycles_stats, count[0]);
+ if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
+ update_stats(&runtime_branches_stats, count[0]);
+}
+
+static int run_perf_stat(int argc __used, const char **argv)
+{
+ unsigned long long t0, t1;
+ int status = 0;
+ int counter, ncreated = 0;
+ int child_ready_pipe[2], go_pipe[2];
+ const bool forks = (argc > 0);
+ char buf;
+
+ if (!system_wide)
+ nr_cpus = 1;
+
+ if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
+ perror("failed to create pipes");
+ exit(1);
+ }
+
+ if (forks) {
+ if ((child_pid = fork()) < 0)
+ perror("failed to fork");
+
+ if (!child_pid) {
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
+
+ /*
+ * Do a dummy execvp to get the PLT entry resolved,
+ * so we avoid the resolver overhead on the real
+ * execvp call.
+ */
+ execvp("", (char **)argv);
+
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
+
+ /*
+ * Wait until the parent tells us to go.
+ */
+ if (read(go_pipe[0], &buf, 1) == -1)
+ perror("unable to read pipe");
+
+ execvp(argv[0], (char **)argv);
+
+ perror(argv[0]);
+ exit(-1);
+ }
+
+ if (target_tid == -1 && target_pid == -1 && !system_wide)
+ all_tids[0] = child_pid;
+
+ /*
+ * Wait for the child to be ready to exec.
+ */
+ close(child_ready_pipe[1]);
+ close(go_pipe[0]);
+ if (read(child_ready_pipe[0], &buf, 1) == -1)
+ perror("unable to read pipe");
+ close(child_ready_pipe[0]);
+ }
+
+ for (counter = 0; counter < nr_counters; counter++)
+ ncreated += create_perf_stat_counter(counter);
+
+ if (ncreated == 0) {
+ pr_err("No permission to collect %sstats.\n"
+ "Consider tweaking /proc/sys/kernel/perf_event_paranoid.\n",
+ system_wide ? "system-wide " : "");
+ if (child_pid != -1)
+ kill(child_pid, SIGTERM);
+ return -1;
+ }
+
+ /*
+ * Enable counters and exec the command:
+ */
+ t0 = rdclock();
+
+ if (forks) {
+ close(go_pipe[1]);
+ wait(&status);
+ } else {
+		while (!done)
+			sleep(1);
+ }
+
+ t1 = rdclock();
+
+ update_stats(&walltime_nsecs_stats, t1 - t0);
+
+ for (counter = 0; counter < nr_counters; counter++)
+ read_counter(counter);
+
+ return WEXITSTATUS(status);
+}
+
+static void print_noise(int counter, double avg)
+{
+ if (run_count == 1)
+ return;
+
+ fprintf(stderr, " ( +- %7.3f%% )",
+ 100 * stddev_stats(&event_res_stats[counter][0]) / avg);
+}
+
+static void nsec_printout(int counter, double avg)
+{
+ double msecs = avg / 1e6;
+
+ fprintf(stderr, " %18.6f %-24s", msecs, event_name(counter));
+
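+	/*
+	 * task-clock divided by wall-clock time gives the average number
+	 * of CPUs this workload kept busy.
+	 */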
+ if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
+ fprintf(stderr, " # %10.3f CPUs ",
+ avg / avg_stats(&walltime_nsecs_stats));
+ }
+}
+
+static void abs_printout(int counter, double avg)
+{
+ double total, ratio = 0.0;
+
+ if (big_num)
+ fprintf(stderr, " %'18.0f %-24s", avg, event_name(counter));
+ else
+ fprintf(stderr, " %18.0f %-24s", avg, event_name(counter));
+
+ if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
+ total = avg_stats(&runtime_cycles_stats);
+
+ if (total)
+ ratio = avg / total;
+
+ fprintf(stderr, " # %10.3f IPC ", ratio);
+ } else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter) &&
+ runtime_branches_stats.n != 0) {
+ total = avg_stats(&runtime_branches_stats);
+
+ if (total)
+ ratio = avg * 100 / total;
+
+ fprintf(stderr, " # %10.3f %% ", ratio);
+
+ } else if (runtime_nsecs_stats.n != 0) {
+ total = avg_stats(&runtime_nsecs_stats);
+
+ if (total)
+ ratio = 1000.0 * avg / total;
+
+ fprintf(stderr, " # %10.3f M/sec", ratio);
+ }
+}
+
+/*
+ * Print out the results of a single counter:
+ */
+static void print_counter(int counter)
+{
+ double avg = avg_stats(&event_res_stats[counter][0]);
+ int scaled = event_scaled[counter];
+
+ if (scaled == -1) {
+ fprintf(stderr, " %18s %-24s\n",
+ "<not counted>", event_name(counter));
+ return;
+ }
+
+ if (nsec_counter(counter))
+ nsec_printout(counter, avg);
+ else
+ abs_printout(counter, avg);
+
+ print_noise(counter, avg);
+
+ if (scaled) {
+ double avg_enabled, avg_running;
+
+ avg_enabled = avg_stats(&event_res_stats[counter][1]);
+ avg_running = avg_stats(&event_res_stats[counter][2]);
+
+ fprintf(stderr, " (scaled from %.2f%%)",
+ 100 * avg_running / avg_enabled);
+ }
+
+ fprintf(stderr, "\n");
+}
+
+static void print_stat(int argc, const char **argv)
+{
+ int i, counter;
+
+ fflush(stdout);
+
+ fprintf(stderr, "\n");
+ fprintf(stderr, " Performance counter stats for ");
+	if (target_pid == -1 && target_tid == -1) {
+ fprintf(stderr, "\'%s", argv[0]);
+ for (i = 1; i < argc; i++)
+ fprintf(stderr, " %s", argv[i]);
+ } else if (target_pid != -1)
+ fprintf(stderr, "process id \'%d", target_pid);
+ else
+ fprintf(stderr, "thread id \'%d", target_tid);
+
+ fprintf(stderr, "\'");
+ if (run_count > 1)
+ fprintf(stderr, " (%d runs)", run_count);
+ fprintf(stderr, ":\n\n");
+
+ for (counter = 0; counter < nr_counters; counter++)
+ print_counter(counter);
+
+ fprintf(stderr, "\n");
+ fprintf(stderr, " %18.9f seconds time elapsed",
+ avg_stats(&walltime_nsecs_stats)/1e9);
+ if (run_count > 1) {
+ fprintf(stderr, " ( +- %7.3f%% )",
+ 100*stddev_stats(&walltime_nsecs_stats) /
+ avg_stats(&walltime_nsecs_stats));
+ }
+ fprintf(stderr, "\n\n");
+}
+
+static volatile int signr = -1;
+
+static void skip_signal(int signo)
+{
+	if (child_pid == -1)
+ done = 1;
+
+ signr = signo;
+}
+
+static void sig_atexit(void)
+{
+ if (child_pid != -1)
+ kill(child_pid, SIGTERM);
+
+ if (signr == -1)
+ return;
+
+ signal(signr, SIG_DFL);
+ kill(getpid(), signr);
+}
+
+static const char * const stat_usage[] = {
+ "perf stat [<options>] [<command>]",
+ NULL
+};
+
+static const struct option options[] = {
+ OPT_CALLBACK('e', "event", NULL, "event",
+ "event selector. use 'perf list' to list available events",
+ parse_events),
+ OPT_BOOLEAN('i', "no-inherit", &no_inherit,
+ "child tasks do not inherit counters"),
+ OPT_INTEGER('p', "pid", &target_pid,
+ "stat events on existing process id"),
+ OPT_INTEGER('t', "tid", &target_tid,
+ "stat events on existing thread id"),
+ OPT_BOOLEAN('a', "all-cpus", &system_wide,
+ "system-wide collection from all CPUs"),
+ OPT_BOOLEAN('c', "scale", &scale,
+ "scale/normalize counters"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show counter open errors, etc)"),
+ OPT_INTEGER('r', "repeat", &run_count,
+ "repeat command and print average + stddev (max: 100)"),
+ OPT_BOOLEAN('n', "null", &null_run,
+ "null run - dont start any counters"),
+ OPT_BOOLEAN('B', "big-num", &big_num,
+ "print large numbers with thousands\' separators"),
+ OPT_STRING('C', "cpu", &cpu_list, "cpu",
+ "list of cpus to monitor in system-wide"),
+ OPT_END()
+};
+
+int cmd_stat(int argc, const char **argv, const char *prefix __used)
+{
+ int status;
+	int i, j;
+
+ setlocale(LC_ALL, "");
+
+ argc = parse_options(argc, argv, options, stat_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc && target_pid == -1 && target_tid == -1)
+ usage_with_options(stat_usage, options);
+ if (run_count <= 0)
+ usage_with_options(stat_usage, options);
+
+ /* Set attrs and nr_counters if no event is selected and !null_run */
+ if (!null_run && !nr_counters) {
+ memcpy(attrs, default_attrs, sizeof(default_attrs));
+ nr_counters = ARRAY_SIZE(default_attrs);
+ }
+
+ if (system_wide)
+ nr_cpus = read_cpu_map(cpu_list);
+ else
+ nr_cpus = 1;
+
+ if (nr_cpus < 1)
+ usage_with_options(stat_usage, options);
+
+ if (target_pid != -1) {
+ target_tid = target_pid;
+ thread_num = find_all_tid(target_pid, &all_tids);
+ if (thread_num <= 0) {
+ fprintf(stderr, "Can't find all threads of pid %d\n",
+ target_pid);
+ usage_with_options(stat_usage, options);
+ }
+ } else {
+		all_tids = malloc(sizeof(pid_t));
+ if (!all_tids)
+ return -ENOMEM;
+
+ all_tids[0] = target_tid;
+ thread_num = 1;
+ }
+
+ for (i = 0; i < MAX_NR_CPUS; i++) {
+ for (j = 0; j < MAX_COUNTERS; j++) {
+ fd[i][j] = malloc(sizeof(int)*thread_num);
+ if (!fd[i][j])
+ return -ENOMEM;
+ }
+ }
+
+ /*
+	 * We don't want to block the signals - that would cause
+	 * child tasks to inherit them and Ctrl-C would not work.
+	 * What we want is for Ctrl-C to work in the exec()-ed
+	 * task, while being ignored by perf stat itself:
+ */
+ atexit(sig_atexit);
+ signal(SIGINT, skip_signal);
+ signal(SIGALRM, skip_signal);
+ signal(SIGABRT, skip_signal);
+
+ status = 0;
+ for (run_idx = 0; run_idx < run_count; run_idx++) {
+ if (run_count != 1 && verbose)
+ fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
+ status = run_perf_stat(argc, argv);
+ }
+
+ if (status != -1)
+ print_stat(argc, argv);
+
+ return status;
+}
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
new file mode 100644
index 00000000..035b9fa0
--- /dev/null
+++ b/tools/perf/builtin-test.c
@@ -0,0 +1,281 @@
+/*
+ * builtin-test.c
+ *
+ * Builtin regression testing command: ever-growing number of sanity tests
+ */
+#include "builtin.h"
+
+#include "util/cache.h"
+#include "util/debug.h"
+#include "util/parse-options.h"
+#include "util/session.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+
+static long page_size;
+
+static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
+{
+ bool *visited = symbol__priv(sym);
+ *visited = true;
+ return 0;
+}
+
+static int test__vmlinux_matches_kallsyms(void)
+{
+ int err = -1;
+ struct rb_node *nd;
+ struct symbol *sym;
+ struct map *kallsyms_map, *vmlinux_map;
+ struct machine kallsyms, vmlinux;
+ enum map_type type = MAP__FUNCTION;
+ struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
+
+ /*
+ * Step 1:
+ *
+ * Init the machines that will hold kernel, modules obtained from
+ * both vmlinux + .ko files and from /proc/kallsyms split by modules.
+ */
+ machine__init(&kallsyms, "", HOST_KERNEL_ID);
+ machine__init(&vmlinux, "", HOST_KERNEL_ID);
+
+ /*
+ * Step 2:
+ *
+ * Create the kernel maps for kallsyms and the DSO where we will then
+ * load /proc/kallsyms. Also create the modules maps from /proc/modules
+ * and find the .ko files that match them in /lib/modules/`uname -r`/.
+ */
+ if (machine__create_kernel_maps(&kallsyms) < 0) {
+ pr_debug("machine__create_kernel_maps ");
+ return -1;
+ }
+
+ /*
+ * Step 3:
+ *
+ * Load and split /proc/kallsyms into multiple maps, one per module.
+ */
+ if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
+ pr_debug("dso__load_kallsyms ");
+ goto out;
+ }
+
+ /*
+ * Step 4:
+ *
+	 * kallsyms will be sorted by name internally, on demand, so that we can
+	 * find the reference relocation symbol, i.e. the symbol we will use
+	 * to see if the running kernel was relocated, by checking if it has the
+	 * same value in the vmlinux file we load.
+ */
+ kallsyms_map = machine__kernel_map(&kallsyms, type);
+
+ sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
+ if (sym == NULL) {
+ pr_debug("dso__find_symbol_by_name ");
+ goto out;
+ }
+
+ ref_reloc_sym.addr = sym->start;
+
+ /*
+ * Step 5:
+ *
+ * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
+ */
+ if (machine__create_kernel_maps(&vmlinux) < 0) {
+ pr_debug("machine__create_kernel_maps ");
+ goto out;
+ }
+
+ vmlinux_map = machine__kernel_map(&vmlinux, type);
+ map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;
+
+ /*
+ * Step 6:
+ *
+ * Locate a vmlinux file in the vmlinux path that has a buildid that
+ * matches the one of the running kernel.
+ *
+ * While doing that look if we find the ref reloc symbol, if we find it
+ * we'll have its ref_reloc_symbol.unrelocated_addr and then
+ * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
+ * to fixup the symbols.
+ */
+ if (machine__load_vmlinux_path(&vmlinux, type,
+ vmlinux_matches_kallsyms_filter) <= 0) {
+ pr_debug("machine__load_vmlinux_path ");
+ goto out;
+ }
+
+ err = 0;
+ /*
+ * Step 7:
+ *
+	 * Now look at the symbols in the vmlinux DSO and check if we find all of them
+	 * in the kallsyms DSO. For the ones that are in both, check their names and
+	 * end addresses too.
+ */
+ for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
+ struct symbol *pair;
+
+ sym = rb_entry(nd, struct symbol, rb_node);
+ pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
+
+ if (pair && pair->start == sym->start) {
+next_pair:
+ if (strcmp(sym->name, pair->name) == 0) {
+ /*
+				 * kallsyms doesn't have the symbol end, so we
+				 * set it using the next symbol's start - 1.
+				 * In some cases we get this wrong by up to a
+				 * page; trace_kmalloc, when I was developing
+				 * this code, was one such example, 2106 bytes
+				 * off the real size. More than that and we
+				 * _really_ have a problem.
+ */
+ s64 skew = sym->end - pair->end;
+ if (llabs(skew) < page_size)
+ continue;
+
+ pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n",
+ sym->start, sym->name, sym->end, pair->end);
+ } else {
+ struct rb_node *nnd = rb_prev(&pair->rb_node);
+
+ if (nnd) {
+ struct symbol *next = rb_entry(nnd, struct symbol, rb_node);
+
+ if (next->start == sym->start) {
+ pair = next;
+ goto next_pair;
+ }
+ }
+ pr_debug("%#Lx: diff name v: %s k: %s\n",
+ sym->start, sym->name, pair->name);
+ }
+ } else
+ pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name);
+
+ err = -1;
+ }
+
+ if (!verbose)
+ goto out;
+
+ pr_info("Maps only in vmlinux:\n");
+
+ for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
+ /*
+		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
+		 * the vmlinux kernel map will have the path for the vmlinux file being
+		 * used, so use the short name, less descriptive but the same ("[kernel]")
+		 * in both cases.
+ */
+ pair = map_groups__find_by_name(&kallsyms.kmaps, type,
+ (pos->dso->kernel ?
+ pos->dso->short_name :
+ pos->dso->name));
+ if (pair)
+ pair->priv = 1;
+ else
+ map__fprintf(pos, stderr);
+ }
+
+ pr_info("Maps in vmlinux with a different name in kallsyms:\n");
+
+ for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
+
+ pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
+ if (pair == NULL || pair->priv)
+ continue;
+
+ if (pair->start == pos->start) {
+ pair->priv = 1;
+ pr_info(" %Lx-%Lx %Lx %s in kallsyms as",
+ pos->start, pos->end, pos->pgoff, pos->dso->name);
+ if (pos->pgoff != pair->pgoff || pos->end != pair->end)
+ pr_info(": \n*%Lx-%Lx %Lx",
+ pair->start, pair->end, pair->pgoff);
+ pr_info(" %s\n", pair->dso->name);
+ pair->priv = 1;
+ }
+ }
+
+ pr_info("Maps only in kallsyms:\n");
+
+ for (nd = rb_first(&kallsyms.kmaps.maps[type]);
+ nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+
+ if (!pos->priv)
+ map__fprintf(pos, stderr);
+ }
+out:
+ return err;
+}
+
+static struct test {
+ const char *desc;
+ int (*func)(void);
+} tests[] = {
+ {
+ .desc = "vmlinux symtab matches kallsyms",
+ .func = test__vmlinux_matches_kallsyms,
+ },
+ {
+ .func = NULL,
+ },
+};
+
+static int __cmd_test(void)
+{
+ int i = 0;
+
+ page_size = sysconf(_SC_PAGE_SIZE);
+
+ while (tests[i].func) {
+ int err;
+ pr_info("%2d: %s:", i + 1, tests[i].desc);
+ pr_debug("\n--- start ---\n");
+ err = tests[i].func();
+ pr_debug("---- end ----\n%s:", tests[i].desc);
+ pr_info(" %s\n", err ? "FAILED!\n" : "Ok");
+ ++i;
+ }
+
+ return 0;
+}
+
+static const char * const test_usage[] = {
+ "perf test [<options>]",
+ NULL,
+};
+
+static const struct option test_options[] = {
+ OPT_INTEGER('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_END()
+};
+
+int cmd_test(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, test_options, test_usage, 0);
+ if (argc)
+ usage_with_options(test_usage, test_options);
+
+ symbol_conf.priv_size = sizeof(int);
+ symbol_conf.sort_by_name = true;
+ symbol_conf.try_vmlinux_path = true;
+
+ if (symbol__init() < 0)
+ return -1;
+
+ setup_pager();
+
+ return __cmd_test();
+}
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
new file mode 100644
index 00000000..9bcc38f0
--- /dev/null
+++ b/tools/perf/builtin-timechart.c
@@ -0,0 +1,1040 @@
+/*
+ * builtin-timechart.c - make an svg timechart of system activity
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * Authors:
+ * Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include "builtin.h"
+
+#include "util/util.h"
+
+#include "util/color.h"
+#include <linux/list.h>
+#include "util/cache.h"
+#include <linux/rbtree.h>
+#include "util/symbol.h"
+#include "util/callchain.h"
+#include "util/strlist.h"
+
+#include "perf.h"
+#include "util/header.h"
+#include "util/parse-options.h"
+#include "util/parse-events.h"
+#include "util/event.h"
+#include "util/session.h"
+#include "util/svghelper.h"
+
+static char const *input_name = "perf.data";
+static char const *output_name = "output.svg";
+
+static unsigned int numcpus;
+static u64 min_freq; /* Lowest CPU frequency seen */
+static u64 max_freq; /* Highest CPU frequency seen */
+static u64 turbo_frequency;
+
+static u64 first_time, last_time;
+
+static bool power_only;
+
+
+struct per_pid;
+struct per_pidcomm;
+
+struct cpu_sample;
+struct power_event;
+struct wake_event;
+
+struct sample_wrapper;
+
+/*
+ * Data structure layout:
+ * We keep a list of "pid"s, matching the kernel's notion of a task struct.
+ * Each "pid" entry has a list of "comm"s.
+ * This is because we want to track different programs separately, while
+ * exec will reuse the original pid (by design).
+ * Each comm has a list of samples that will be used to draw
+ * the final graph.
+ */
+
+struct per_pid {
+ struct per_pid *next;
+
+ int pid;
+ int ppid;
+
+ u64 start_time;
+ u64 end_time;
+ u64 total_time;
+ int display;
+
+ struct per_pidcomm *all;
+ struct per_pidcomm *current;
+};
+
+
+struct per_pidcomm {
+ struct per_pidcomm *next;
+
+ u64 start_time;
+ u64 end_time;
+ u64 total_time;
+
+ int Y;
+ int display;
+
+ long state;
+ u64 state_since;
+
+ char *comm;
+
+ struct cpu_sample *samples;
+};
+
+struct sample_wrapper {
+ struct sample_wrapper *next;
+
+ u64 timestamp;
+ unsigned char data[0];
+};
+
+#define TYPE_NONE 0
+#define TYPE_RUNNING 1
+#define TYPE_WAITING 2
+#define TYPE_BLOCKED 3
+
+struct cpu_sample {
+ struct cpu_sample *next;
+
+ u64 start_time;
+ u64 end_time;
+ int type;
+ int cpu;
+};
+
+static struct per_pid *all_data;
+
+#define CSTATE 1
+#define PSTATE 2
+
+struct power_event {
+ struct power_event *next;
+ int type;
+ int state;
+ u64 start_time;
+ u64 end_time;
+ int cpu;
+};
+
+struct wake_event {
+ struct wake_event *next;
+ int waker;
+ int wakee;
+ u64 time;
+};
+
+static struct power_event *power_events;
+static struct wake_event *wake_events;
+
+struct process_filter;
+struct process_filter {
+ char *name;
+ int pid;
+ struct process_filter *next;
+};
+
+static struct process_filter *process_filter;
+
+
+static struct per_pid *find_create_pid(int pid)
+{
+ struct per_pid *cursor = all_data;
+
+ while (cursor) {
+ if (cursor->pid == pid)
+ return cursor;
+ cursor = cursor->next;
+ }
+ cursor = malloc(sizeof(struct per_pid));
+ assert(cursor != NULL);
+ memset(cursor, 0, sizeof(struct per_pid));
+ cursor->pid = pid;
+ cursor->next = all_data;
+ all_data = cursor;
+ return cursor;
+}
+
+static void pid_set_comm(int pid, char *comm)
+{
+ struct per_pid *p;
+ struct per_pidcomm *c;
+ p = find_create_pid(pid);
+ c = p->all;
+ while (c) {
+ if (c->comm && strcmp(c->comm, comm) == 0) {
+ p->current = c;
+ return;
+ }
+ if (!c->comm) {
+ c->comm = strdup(comm);
+ p->current = c;
+ return;
+ }
+ c = c->next;
+ }
+ c = malloc(sizeof(struct per_pidcomm));
+ assert(c != NULL);
+ memset(c, 0, sizeof(struct per_pidcomm));
+ c->comm = strdup(comm);
+ p->current = c;
+ c->next = p->all;
+ p->all = c;
+}
+
+static void pid_fork(int pid, int ppid, u64 timestamp)
+{
+ struct per_pid *p, *pp;
+ p = find_create_pid(pid);
+ pp = find_create_pid(ppid);
+ p->ppid = ppid;
+ if (pp->current && pp->current->comm && !p->current)
+ pid_set_comm(pid, pp->current->comm);
+
+ p->start_time = timestamp;
+ if (p->current) {
+ p->current->start_time = timestamp;
+ p->current->state_since = timestamp;
+ }
+}
+
+static void pid_exit(int pid, u64 timestamp)
+{
+ struct per_pid *p;
+ p = find_create_pid(pid);
+ p->end_time = timestamp;
+ if (p->current)
+ p->current->end_time = timestamp;
+}
+
+static void
+pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
+{
+ struct per_pid *p;
+ struct per_pidcomm *c;
+ struct cpu_sample *sample;
+
+ p = find_create_pid(pid);
+ c = p->current;
+ if (!c) {
+ c = malloc(sizeof(struct per_pidcomm));
+ assert(c != NULL);
+ memset(c, 0, sizeof(struct per_pidcomm));
+ p->current = c;
+ c->next = p->all;
+ p->all = c;
+ }
+
+ sample = malloc(sizeof(struct cpu_sample));
+ assert(sample != NULL);
+ memset(sample, 0, sizeof(struct cpu_sample));
+ sample->start_time = start;
+ sample->end_time = end;
+ sample->type = type;
+ sample->next = c->samples;
+ sample->cpu = cpu;
+ c->samples = sample;
+
+ if (sample->type == TYPE_RUNNING && end > start && start > 0) {
+ c->total_time += (end-start);
+ p->total_time += (end-start);
+ }
+
+ if (c->start_time == 0 || c->start_time > start)
+ c->start_time = start;
+ if (p->start_time == 0 || p->start_time > start)
+ p->start_time = start;
+
+ if (cpu > numcpus)
+ numcpus = cpu;
+}
+
+#define MAX_CPUS 4096
+
+static u64 cpus_cstate_start_times[MAX_CPUS];
+static int cpus_cstate_state[MAX_CPUS];
+static u64 cpus_pstate_start_times[MAX_CPUS];
+static u64 cpus_pstate_state[MAX_CPUS];
+
+static int process_comm_event(event_t *event, struct perf_session *session __used)
+{
+ pid_set_comm(event->comm.tid, event->comm.comm);
+ return 0;
+}
+
+static int process_fork_event(event_t *event, struct perf_session *session __used)
+{
+ pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
+ return 0;
+}
+
+static int process_exit_event(event_t *event, struct perf_session *session __used)
+{
+ pid_exit(event->fork.pid, event->fork.time);
+ return 0;
+}
+
+struct trace_entry {
+ unsigned short type;
+ unsigned char flags;
+ unsigned char preempt_count;
+ int pid;
+ int lock_depth;
+};
+
+struct power_entry {
+ struct trace_entry te;
+ u64 type;
+ u64 value;
+ u64 cpu_id;
+};
+
+#define TASK_COMM_LEN 16
+struct wakeup_entry {
+ struct trace_entry te;
+ char comm[TASK_COMM_LEN];
+ int pid;
+ int prio;
+ int success;
+};
+
+/*
+ * trace_flag_type is an enumeration that holds different
+ * states when a trace occurs. These are:
+ * IRQS_OFF - interrupts were disabled
+ * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
+ * NEED_RESCED - reschedule is requested
+ * HARDIRQ - inside an interrupt handler
+ * SOFTIRQ - inside a softirq handler
+ */
+enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+ TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
+ TRACE_FLAG_NEED_RESCHED = 0x04,
+ TRACE_FLAG_HARDIRQ = 0x08,
+ TRACE_FLAG_SOFTIRQ = 0x10,
+};
+
+
+
+struct sched_switch {
+ struct trace_entry te;
+ char prev_comm[TASK_COMM_LEN];
+ int prev_pid;
+ int prev_prio;
+ long prev_state; /* Arjan weeps. */
+ char next_comm[TASK_COMM_LEN];
+ int next_pid;
+ int next_prio;
+};
+
+static void c_state_start(int cpu, u64 timestamp, int state)
+{
+ cpus_cstate_start_times[cpu] = timestamp;
+ cpus_cstate_state[cpu] = state;
+}
+
+static void c_state_end(int cpu, u64 timestamp)
+{
+ struct power_event *pwr;
+ pwr = malloc(sizeof(struct power_event));
+ if (!pwr)
+ return;
+ memset(pwr, 0, sizeof(struct power_event));
+
+ pwr->state = cpus_cstate_state[cpu];
+ pwr->start_time = cpus_cstate_start_times[cpu];
+ pwr->end_time = timestamp;
+ pwr->cpu = cpu;
+ pwr->type = CSTATE;
+ pwr->next = power_events;
+
+ power_events = pwr;
+}
+
+static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
+{
+ struct power_event *pwr;
+ pwr = malloc(sizeof(struct power_event));
+
+ if (new_freq > 8000000) /* detect invalid data */
+ return;
+
+ if (!pwr)
+ return;
+ memset(pwr, 0, sizeof(struct power_event));
+
+ pwr->state = cpus_pstate_state[cpu];
+ pwr->start_time = cpus_pstate_start_times[cpu];
+ pwr->end_time = timestamp;
+ pwr->cpu = cpu;
+ pwr->type = PSTATE;
+ pwr->next = power_events;
+
+ if (!pwr->start_time)
+ pwr->start_time = first_time;
+
+ power_events = pwr;
+
+ cpus_pstate_state[cpu] = new_freq;
+ cpus_pstate_start_times[cpu] = timestamp;
+
+ if ((u64)new_freq > max_freq)
+ max_freq = new_freq;
+
+ if (new_freq < min_freq || min_freq == 0)
+ min_freq = new_freq;
+
+ if (new_freq == max_freq - 1000)
+ turbo_frequency = max_freq;
+}
+
+static void
+sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
+{
+ struct wake_event *we;
+ struct per_pid *p;
+ struct wakeup_entry *wake = (void *)te;
+
+ we = malloc(sizeof(struct wake_event));
+ if (!we)
+ return;
+
+ memset(we, 0, sizeof(struct wake_event));
+ we->time = timestamp;
+ we->waker = pid;
+
+ if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
+ we->waker = -1;
+
+ we->wakee = wake->pid;
+ we->next = wake_events;
+ wake_events = we;
+ p = find_create_pid(we->wakee);
+
+ if (p && p->current && p->current->state == TYPE_NONE) {
+ p->current->state_since = timestamp;
+ p->current->state = TYPE_WAITING;
+ }
+ if (p && p->current && p->current->state == TYPE_BLOCKED) {
+ pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
+ p->current->state_since = timestamp;
+ p->current->state = TYPE_WAITING;
+ }
+}
+
+static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
+{
+ struct per_pid *p = NULL, *prev_p;
+ struct sched_switch *sw = (void *)te;
+
+
+ prev_p = find_create_pid(sw->prev_pid);
+
+ p = find_create_pid(sw->next_pid);
+
+ if (prev_p->current && prev_p->current->state != TYPE_NONE)
+ pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
+ if (p && p->current) {
+ if (p->current->state != TYPE_NONE)
+ pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);
+
+ p->current->state_since = timestamp;
+ p->current->state = TYPE_RUNNING;
+ }
+
+ if (prev_p->current) {
+ prev_p->current->state = TYPE_NONE;
+ prev_p->current->state_since = timestamp;
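+		/*
+		 * prev_state 0 is TASK_RUNNING: the task was preempted and is
+		 * still waiting for a CPU; the 0x2 bit is TASK_UNINTERRUPTIBLE,
+		 * i.e. the task went to sleep blocked (typically on I/O).
+		 */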
+ if (sw->prev_state & 2)
+ prev_p->current->state = TYPE_BLOCKED;
+ if (sw->prev_state == 0)
+ prev_p->current->state = TYPE_WAITING;
+ }
+}
+
+
+static int process_sample_event(event_t *event, struct perf_session *session)
+{
+ struct sample_data data;
+ struct trace_entry *te;
+
+ memset(&data, 0, sizeof(data));
+
+ event__parse_sample(event, session->sample_type, &data);
+
+ if (session->sample_type & PERF_SAMPLE_TIME) {
+ if (!first_time || first_time > data.time)
+ first_time = data.time;
+ if (last_time < data.time)
+ last_time = data.time;
+ }
+
+ te = (void *)data.raw_data;
+ if (session->sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) {
+ char *event_str;
+ struct power_entry *pe;
+
+ pe = (void *)te;
+
+ event_str = perf_header__find_event(te->type);
+
+ if (!event_str)
+ return 0;
+
+ if (strcmp(event_str, "power:power_start") == 0)
+ c_state_start(pe->cpu_id, data.time, pe->value);
+
+ if (strcmp(event_str, "power:power_end") == 0)
+ c_state_end(pe->cpu_id, data.time);
+
+ if (strcmp(event_str, "power:power_frequency") == 0)
+ p_state_change(pe->cpu_id, data.time, pe->value);
+
+ if (strcmp(event_str, "sched:sched_wakeup") == 0)
+ sched_wakeup(data.cpu, data.time, data.pid, te);
+
+ if (strcmp(event_str, "sched:sched_switch") == 0)
+ sched_switch(data.cpu, data.time, te);
+ }
+ return 0;
+}
+
+/*
+ * After the last sample we need to wrap up the current C/P state
+ * and close it out for each CPU.
+ */
+static void end_sample_processing(void)
+{
+ u64 cpu;
+ struct power_event *pwr;
+
+ for (cpu = 0; cpu <= numcpus; cpu++) {
+ pwr = malloc(sizeof(struct power_event));
+ if (!pwr)
+ return;
+ memset(pwr, 0, sizeof(struct power_event));
+
+ /* C state */
+#if 0
+ pwr->state = cpus_cstate_state[cpu];
+ pwr->start_time = cpus_cstate_start_times[cpu];
+ pwr->end_time = last_time;
+ pwr->cpu = cpu;
+ pwr->type = CSTATE;
+ pwr->next = power_events;
+
+ power_events = pwr;
+#endif
+ /* P state */
+
+ pwr = malloc(sizeof(struct power_event));
+ if (!pwr)
+ return;
+ memset(pwr, 0, sizeof(struct power_event));
+
+ pwr->state = cpus_pstate_state[cpu];
+ pwr->start_time = cpus_pstate_start_times[cpu];
+ pwr->end_time = last_time;
+ pwr->cpu = cpu;
+ pwr->type = PSTATE;
+ pwr->next = power_events;
+
+ if (!pwr->start_time)
+ pwr->start_time = first_time;
+ if (!pwr->state)
+ pwr->state = min_freq;
+ power_events = pwr;
+ }
+}
+
+/*
+ * Sort the pid data structure
+ */
+static void sort_pids(void)
+{
+ struct per_pid *new_list, *p, *cursor, *prev;
+ /* sort by ppid first, then by pid, lowest to highest */
+
+ new_list = NULL;
+
+ while (all_data) {
+ p = all_data;
+ all_data = p->next;
+ p->next = NULL;
+
+ if (new_list == NULL) {
+ new_list = p;
+ p->next = NULL;
+ continue;
+ }
+ prev = NULL;
+ cursor = new_list;
+ while (cursor) {
+ if (cursor->ppid > p->ppid ||
+ (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
+ /* must insert before */
+ if (prev) {
+ p->next = prev->next;
+ prev->next = p;
+ cursor = NULL;
+ continue;
+ } else {
+ p->next = new_list;
+ new_list = p;
+ cursor = NULL;
+ continue;
+ }
+ }
+
+ prev = cursor;
+ cursor = cursor->next;
+ if (!cursor)
+ prev->next = p;
+ }
+ }
+ all_data = new_list;
+}
+
+
+static void draw_c_p_states(void)
+{
+ struct power_event *pwr;
+ pwr = power_events;
+
+ /*
+	 * Two-pass drawing so that the P-state bars are on top of the C-state blocks
+ */
+ while (pwr) {
+ if (pwr->type == CSTATE)
+ svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
+ pwr = pwr->next;
+ }
+
+ pwr = power_events;
+ while (pwr) {
+ if (pwr->type == PSTATE) {
+ if (!pwr->state)
+ pwr->state = min_freq;
+ svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
+ }
+ pwr = pwr->next;
+ }
+}
+
+static void draw_wakeups(void)
+{
+ struct wake_event *we;
+ struct per_pid *p;
+ struct per_pidcomm *c;
+
+ we = wake_events;
+ while (we) {
+ int from = 0, to = 0;
+ char *task_from = NULL, *task_to = NULL;
+
+ /* locate the column of the waker and wakee */
+ p = all_data;
+ while (p) {
+ if (p->pid == we->waker || p->pid == we->wakee) {
+ c = p->all;
+ while (c) {
+ if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
+ if (p->pid == we->waker && !from) {
+ from = c->Y;
+ task_from = strdup(c->comm);
+ }
+ if (p->pid == we->wakee && !to) {
+ to = c->Y;
+ task_to = strdup(c->comm);
+ }
+ }
+ c = c->next;
+ }
+ c = p->all;
+ while (c) {
+ if (p->pid == we->waker && !from) {
+ from = c->Y;
+ task_from = strdup(c->comm);
+ }
+ if (p->pid == we->wakee && !to) {
+ to = c->Y;
+ task_to = strdup(c->comm);
+ }
+ c = c->next;
+ }
+ }
+ p = p->next;
+ }
+
+ if (!task_from) {
+ task_from = malloc(40);
+ sprintf(task_from, "[%i]", we->waker);
+ }
+ if (!task_to) {
+ task_to = malloc(40);
+ sprintf(task_to, "[%i]", we->wakee);
+ }
+
+ if (we->waker == -1)
+ svg_interrupt(we->time, to);
+ else if (from && to && abs(from - to) == 1)
+ svg_wakeline(we->time, from, to);
+ else
+ svg_partial_wakeline(we->time, from, task_from, to, task_to);
+ we = we->next;
+
+ free(task_from);
+ free(task_to);
+ }
+}
+
+static void draw_cpu_usage(void)
+{
+ struct per_pid *p;
+ struct per_pidcomm *c;
+ struct cpu_sample *sample;
+ p = all_data;
+ while (p) {
+ c = p->all;
+ while (c) {
+ sample = c->samples;
+ while (sample) {
+ if (sample->type == TYPE_RUNNING)
+ svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);
+
+ sample = sample->next;
+ }
+ c = c->next;
+ }
+ p = p->next;
+ }
+}
+
+static void draw_process_bars(void)
+{
+ struct per_pid *p;
+ struct per_pidcomm *c;
+ struct cpu_sample *sample;
+ int Y = 0;
+
+ Y = 2 * numcpus + 2;
+
+ p = all_data;
+ while (p) {
+ c = p->all;
+ while (c) {
+ if (!c->display) {
+ c->Y = 0;
+ c = c->next;
+ continue;
+ }
+
+ svg_box(Y, c->start_time, c->end_time, "process");
+ sample = c->samples;
+ while (sample) {
+ if (sample->type == TYPE_RUNNING)
+ svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
+ if (sample->type == TYPE_BLOCKED)
+ svg_box(Y, sample->start_time, sample->end_time, "blocked");
+ if (sample->type == TYPE_WAITING)
+ svg_waiting(Y, sample->start_time, sample->end_time);
+ sample = sample->next;
+ }
+
+ if (c->comm) {
+ char comm[256];
+ if (c->total_time > 5000000000) /* 5 seconds */
+ sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
+ else
+ sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);
+
+ svg_text(Y, c->start_time, comm);
+ }
+ c->Y = Y;
+ Y++;
+ c = c->next;
+ }
+ p = p->next;
+ }
+}
+
+static void add_process_filter(const char *string)
+{
+ struct process_filter *filt;
+ int pid;
+
+ pid = strtoull(string, NULL, 10);
+ filt = malloc(sizeof(struct process_filter));
+ if (!filt)
+ return;
+
+ filt->name = strdup(string);
+ filt->pid = pid;
+ filt->next = process_filter;
+
+ process_filter = filt;
+}
+
+static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
+{
+ struct process_filter *filt;
+ if (!process_filter)
+ return 1;
+
+ filt = process_filter;
+ while (filt) {
+ if (filt->pid && p->pid == filt->pid)
+ return 1;
+ if (strcmp(filt->name, c->comm) == 0)
+ return 1;
+ filt = filt->next;
+ }
+ return 0;
+}
+
+static int determine_display_tasks_filtered(void)
+{
+ struct per_pid *p;
+ struct per_pidcomm *c;
+ int count = 0;
+
+ p = all_data;
+ while (p) {
+ p->display = 0;
+ if (p->start_time == 1)
+ p->start_time = first_time;
+
+ /* no exit marker, task kept running to the end */
+ if (p->end_time == 0)
+ p->end_time = last_time;
+
+ c = p->all;
+
+ while (c) {
+ c->display = 0;
+
+ if (c->start_time == 1)
+ c->start_time = first_time;
+
+ if (passes_filter(p, c)) {
+ c->display = 1;
+ p->display = 1;
+ count++;
+ }
+
+ if (c->end_time == 0)
+ c->end_time = last_time;
+
+ c = c->next;
+ }
+ p = p->next;
+ }
+ return count;
+}
+
+static int determine_display_tasks(u64 threshold)
+{
+ struct per_pid *p;
+ struct per_pidcomm *c;
+ int count = 0;
+
+ if (process_filter)
+ return determine_display_tasks_filtered();
+
+ p = all_data;
+ while (p) {
+ p->display = 0;
+ if (p->start_time == 1)
+ p->start_time = first_time;
+
+ /* no exit marker, task kept running to the end */
+ if (p->end_time == 0)
+ p->end_time = last_time;
+ if (p->total_time >= threshold && !power_only)
+ p->display = 1;
+
+ c = p->all;
+
+ while (c) {
+ c->display = 0;
+
+ if (c->start_time == 1)
+ c->start_time = first_time;
+
+ if (c->total_time >= threshold && !power_only) {
+ c->display = 1;
+ count++;
+ }
+
+ if (c->end_time == 0)
+ c->end_time = last_time;
+
+ c = c->next;
+ }
+ p = p->next;
+ }
+ return count;
+}
+
+
+
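+/* default cutoff: only show tasks that ran for at least 10 ms (value in nanoseconds) */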
+#define TIME_THRESH 10000000
+
+static void write_svg_file(const char *filename)
+{
+ u64 i;
+ int count;
+
+ numcpus++;
+
+
+ count = determine_display_tasks(TIME_THRESH);
+
+ /* We'd like to show at least 15 tasks; be less picky if we have fewer */
+ if (count < 15)
+ count = determine_display_tasks(TIME_THRESH / 10);
+
+ open_svg(filename, numcpus, count, first_time, last_time);
+
+ svg_time_grid();
+ svg_legenda();
+
+ for (i = 0; i < numcpus; i++)
+ svg_cpu_box(i, max_freq, turbo_frequency);
+
+ draw_cpu_usage();
+ draw_process_bars();
+ draw_c_p_states();
+ draw_wakeups();
+
+ svg_close();
+}
+
+static struct perf_event_ops event_ops = {
+ .comm = process_comm_event,
+ .fork = process_fork_event,
+ .exit = process_exit_event,
+ .sample = process_sample_event,
+ .ordered_samples = true,
+};
+
+static int __cmd_timechart(void)
+{
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0, false);
+ int ret = -EINVAL;
+
+ if (session == NULL)
+ return -ENOMEM;
+
+ if (!perf_session__has_traces(session, "timechart record"))
+ goto out_delete;
+
+ ret = perf_session__process_events(session, &event_ops);
+ if (ret)
+ goto out_delete;
+
+ end_sample_processing();
+
+ sort_pids();
+
+ write_svg_file(output_name);
+
+ pr_info("Written %2.1f seconds of trace to %s.\n",
+ (last_time - first_time) / 1000000000.0, output_name);
+out_delete:
+ perf_session__delete(session);
+ return ret;
+}
+
+static const char * const timechart_usage[] = {
+ "perf timechart [<options>] {record}",
+ NULL
+};
+
+static const char *record_args[] = {
+ "record",
+ "-a",
+ "-R",
+ "-f",
+ "-c", "1",
+ "-e", "power:power_start",
+ "-e", "power:power_end",
+ "-e", "power:power_frequency",
+ "-e", "sched:sched_wakeup",
+ "-e", "sched:sched_switch",
+};
+
+static int __cmd_record(int argc, const char **argv)
+{
+ unsigned int rec_argc, i, j;
+ const char **rec_argv;
+
+ rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = strdup(record_args[i]);
+
+ for (j = 1; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ return cmd_record(i, rec_argv, NULL);
+}
+
+static int
+parse_process(const struct option *opt __used, const char *arg, int __used unset)
+{
+ if (arg)
+ add_process_filter(arg);
+ return 0;
+}
+
+static const struct option options[] = {
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_STRING('o', "output", &output_name, "file",
+ "output file name"),
+ OPT_INTEGER('w', "width", &svg_page_width,
+ "page width"),
+ OPT_BOOLEAN('P', "power-only", &power_only,
+ "output power data only"),
+ OPT_CALLBACK('p', "process", NULL, "process",
+ "process selector. Pass a pid or process name.",
+ parse_process),
+ OPT_END()
+};
+
+
+int cmd_timechart(int argc, const char **argv, const char *prefix __used)
+{
+ argc = parse_options(argc, argv, options, timechart_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ symbol__init();
+
+ if (argc && !strncmp(argv[0], "rec", 3))
+ return __cmd_record(argc, argv);
+ else if (argc)
+ usage_with_options(timechart_usage, options);
+
+ setup_pager();
+
+ return __cmd_timechart();
+}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
new file mode 100644
index 00000000..b513e409
--- /dev/null
+++ b/tools/perf/builtin-top.c
@@ -0,0 +1,1470 @@
+/*
+ * builtin-top.c
+ *
+ * Builtin top command: Display a continuously updated profile of
+ * any workload, CPU or specific PID.
+ *
+ * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
+ *
+ * Improvements and fixes by:
+ *
+ * Arjan van de Ven <arjan@linux.intel.com>
+ * Yanmin Zhang <yanmin.zhang@intel.com>
+ * Wu Fengguang <fengguang.wu@intel.com>
+ * Mike Galbraith <efault@gmx.de>
+ * Paul Mackerras <paulus@samba.org>
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+#include "builtin.h"
+
+#include "perf.h"
+
+#include "util/color.h"
+#include "util/session.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/util.h"
+#include <linux/rbtree.h>
+#include "util/parse-options.h"
+#include "util/parse-events.h"
+#include "util/cpumap.h"
+
+#include "util/debug.h"
+
+#include <assert.h>
+#include <fcntl.h>
+
+#include <stdio.h>
+#include <termios.h>
+#include <unistd.h>
+
+#include <errno.h>
+#include <time.h>
+#include <sched.h>
+#include <pthread.h>
+
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <sys/poll.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <sys/uio.h>
+#include <sys/mman.h>
+
+#include <linux/unistd.h>
+#include <linux/types.h>
+
+static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
+
+static bool system_wide = false;
+
+static int default_interval = 0;
+
+static int count_filter = 5;
+static int print_entries;
+
+static int target_pid = -1;
+static int target_tid = -1;
+static pid_t *all_tids = NULL;
+static int thread_num = 0;
+static bool inherit = false;
+static int profile_cpu = -1;
+static int nr_cpus = 0;
+static int realtime_prio = 0;
+static bool group = false;
+static unsigned int page_size;
+static unsigned int mmap_pages = 16;
+static int freq = 1000; /* 1 KHz */
+
+static int delay_secs = 2;
+static bool zero = false;
+static bool dump_symtab = false;
+
+static bool hide_kernel_symbols = false;
+static bool hide_user_symbols = false;
+static struct winsize winsize;
+
+/*
+ * Source
+ */
+
+struct source_line {
+ u64 eip;
+ unsigned long count[MAX_COUNTERS];
+ char *line;
+ struct source_line *next;
+};
+
+static const char *sym_filter = NULL;
+struct sym_entry *sym_filter_entry = NULL;
+struct sym_entry *sym_filter_entry_sched = NULL;
+static int sym_pcnt_filter = 5;
+static int sym_counter = 0;
+static int display_weighted = -1;
+static const char *cpu_list;
+
+/*
+ * Symbols
+ */
+
+struct sym_entry_source {
+ struct source_line *source;
+ struct source_line *lines;
+ struct source_line **lines_tail;
+ pthread_mutex_t lock;
+};
+
+struct sym_entry {
+ struct rb_node rb_node;
+ struct list_head node;
+ unsigned long snap_count;
+ double weight;
+ int skip;
+ u16 name_len;
+ u8 origin;
+ struct map *map;
+ struct sym_entry_source *src;
+ unsigned long count[0];
+};
+
+/*
+ * Source functions
+ */
+
+static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
+{
+ return ((void *)self) + symbol_conf.priv_size;
+}
+
+void get_term_dimensions(struct winsize *ws)
+{
+ char *s = getenv("LINES");
+
+ if (s != NULL) {
+ ws->ws_row = atoi(s);
+ s = getenv("COLUMNS");
+ if (s != NULL) {
+ ws->ws_col = atoi(s);
+ if (ws->ws_row && ws->ws_col)
+ return;
+ }
+ }
+#ifdef TIOCGWINSZ
+ if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
+ ws->ws_row && ws->ws_col)
+ return;
+#endif
+ ws->ws_row = 25;
+ ws->ws_col = 80;
+}
+
+static void update_print_entries(struct winsize *ws)
+{
+ print_entries = ws->ws_row;
+
+ if (print_entries > 9)
+ print_entries -= 9;
+}
+
+static void sig_winch_handler(int sig __used)
+{
+ get_term_dimensions(&winsize);
+ update_print_entries(&winsize);
+}
+
+static int parse_source(struct sym_entry *syme)
+{
+ struct symbol *sym;
+ struct sym_entry_source *source;
+ struct map *map;
+ FILE *file;
+ char command[PATH_MAX*2];
+ const char *path;
+ u64 len;
+
+ if (!syme)
+ return -1;
+
+ sym = sym_entry__symbol(syme);
+ map = syme->map;
+
+ /*
+ * We can't annotate with just /proc/kallsyms
+ */
+ if (map->dso->origin == DSO__ORIG_KERNEL)
+ return -1;
+
+ if (syme->src == NULL) {
+ syme->src = zalloc(sizeof(*source));
+ if (syme->src == NULL)
+ return -1;
+ pthread_mutex_init(&syme->src->lock, NULL);
+ }
+
+ source = syme->src;
+
+ if (source->lines) {
+ pthread_mutex_lock(&source->lock);
+ goto out_assign;
+ }
+ path = map->dso->long_name;
+
+ len = sym->end - sym->start;
+
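+	/*
+	 * Disassemble just this symbol's address range, interleaved with
+	 * source lines (objdump -dS), and parse the output below.
+	 */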
+ sprintf(command,
+ "objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
+ BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
+ BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);
+
+ file = popen(command, "r");
+ if (!file)
+ return -1;
+
+ pthread_mutex_lock(&source->lock);
+ source->lines_tail = &source->lines;
+ while (!feof(file)) {
+ struct source_line *src;
+ size_t dummy = 0;
+ char *c, *sep;
+
+ src = malloc(sizeof(struct source_line));
+ assert(src != NULL);
+ memset(src, 0, sizeof(struct source_line));
+
+ if (getline(&src->line, &dummy, file) < 0)
+ break;
+ if (!src->line)
+ break;
+
+ c = strchr(src->line, '\n');
+ if (c)
+ *c = 0;
+
+ src->next = NULL;
+ *source->lines_tail = src;
+ source->lines_tail = &src->next;
+
+ src->eip = strtoull(src->line, &sep, 16);
+ if (*sep == ':')
+ src->eip = map__objdump_2ip(map, src->eip);
+ else /* this line has no ip info (e.g. source line) */
+ src->eip = 0;
+ }
+ pclose(file);
+out_assign:
+ sym_filter_entry = syme;
+ pthread_mutex_unlock(&source->lock);
+ return 0;
+}
+
+static void __zero_source_counters(struct sym_entry *syme)
+{
+ int i;
+ struct source_line *line;
+
+ line = syme->src->lines;
+ while (line) {
+ for (i = 0; i < nr_counters; i++)
+ line->count[i] = 0;
+ line = line->next;
+ }
+}
+
+static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
+{
+ struct source_line *line;
+
+ if (syme != sym_filter_entry)
+ return;
+
+ if (pthread_mutex_trylock(&syme->src->lock))
+ return;
+
+ if (syme->src == NULL || syme->src->source == NULL)
+ goto out_unlock;
+
+ for (line = syme->src->lines; line; line = line->next) {
+ /* skip lines without IP info */
+ if (line->eip == 0)
+ continue;
+ if (line->eip == ip) {
+ line->count[counter]++;
+ break;
+ }
+ if (line->eip > ip)
+ break;
+ }
+out_unlock:
+ pthread_mutex_unlock(&syme->src->lock);
+}
+
+#define PATTERN_LEN (BITS_PER_LONG / 4 + 2)
+
+static void lookup_sym_source(struct sym_entry *syme)
+{
+ struct symbol *symbol = sym_entry__symbol(syme);
+ struct source_line *line;
+ char pattern[PATTERN_LEN + 1];
+
+ sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
+ map__rip_2objdump(syme->map, symbol->start));
+
+ pthread_mutex_lock(&syme->src->lock);
+ for (line = syme->src->lines; line; line = line->next) {
+ if (memcmp(line->line, pattern, PATTERN_LEN) == 0) {
+ syme->src->source = line;
+ break;
+ }
+ }
+ pthread_mutex_unlock(&syme->src->lock);
+}
+
+static void show_lines(struct source_line *queue, int count, int total)
+{
+ int i;
+ struct source_line *line;
+
+ line = queue;
+ for (i = 0; i < count; i++) {
+ float pcnt = 100.0*(float)line->count[sym_counter]/(float)total;
+
+ printf("%8li %4.1f%%\t%s\n", line->count[sym_counter], pcnt, line->line);
+ line = line->next;
+ }
+}
+
+#define TRACE_COUNT 3
+
+static void show_details(struct sym_entry *syme)
+{
+ struct symbol *symbol;
+ struct source_line *line;
+ struct source_line *line_queue = NULL;
+ int displayed = 0;
+ int line_queue_count = 0, total = 0, more = 0;
+
+ if (!syme)
+ return;
+
+ if (!syme->src->source)
+ lookup_sym_source(syme);
+
+ if (!syme->src->source)
+ return;
+
+ symbol = sym_entry__symbol(syme);
+ printf("Showing %s for %s\n", event_name(sym_counter), symbol->name);
+ printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);
+
+ pthread_mutex_lock(&syme->src->lock);
+ line = syme->src->source;
+ while (line) {
+ total += line->count[sym_counter];
+ line = line->next;
+ }
+
+ line = syme->src->source;
+ while (line) {
+ float pcnt = 0.0;
+
+ if (!line_queue_count)
+ line_queue = line;
+ line_queue_count++;
+
+ if (line->count[sym_counter])
+ pcnt = 100.0 * line->count[sym_counter] / (float)total;
+ if (pcnt >= (float)sym_pcnt_filter) {
+ if (displayed <= print_entries)
+ show_lines(line_queue, line_queue_count, total);
+ else more++;
+ displayed += line_queue_count;
+ line_queue_count = 0;
+ line_queue = NULL;
+ } else if (line_queue_count > TRACE_COUNT) {
+ line_queue = line_queue->next;
+ line_queue_count--;
+ }
+
+ line->count[sym_counter] = zero ? 0 : line->count[sym_counter] * 7 / 8;
+ line = line->next;
+ }
+ pthread_mutex_unlock(&syme->src->lock);
+ if (more)
+ printf("%d lines not displayed, maybe increase display entries [e]\n", more);
+}
+
+/*
+ * Symbols will be added here in event__process_sample and will be
+ * removed after they decay.
+ */
+static LIST_HEAD(active_symbols);
+static pthread_mutex_t active_symbols_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Ordering weight: count-1 * count-2 * ... / count-n
+ */
+static double sym_weight(const struct sym_entry *sym)
+{
+ double weight = sym->snap_count;
+ int counter;
+
+ if (!display_weighted)
+ return weight;
+
+ for (counter = 1; counter < nr_counters-1; counter++)
+ weight *= sym->count[counter];
+
+ weight /= (sym->count[counter] + 1);
+
+ return weight;
+}
+
+static long samples;
+static long kernel_samples, us_samples;
+static long exact_samples;
+static long guest_us_samples, guest_kernel_samples;
+static const char CONSOLE_CLEAR[] = "";
+
+static void __list_insert_active_sym(struct sym_entry *syme)
+{
+ list_add(&syme->node, &active_symbols);
+}
+
+static void list_remove_active_sym(struct sym_entry *syme)
+{
+ pthread_mutex_lock(&active_symbols_lock);
+ list_del_init(&syme->node);
+ pthread_mutex_unlock(&active_symbols_lock);
+}
+
+static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
+{
+ struct rb_node **p = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ struct sym_entry *iter;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct sym_entry, rb_node);
+
+ if (se->weight > iter->weight)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&se->rb_node, parent, p);
+ rb_insert_color(&se->rb_node, tree);
+}
+
+static void print_sym_table(void)
+{
+ int printed = 0, j;
+ int counter, snap = !display_weighted ? sym_counter : 0;
+ float samples_per_sec = samples/delay_secs;
+ float ksamples_per_sec = kernel_samples/delay_secs;
+ float us_samples_per_sec = (us_samples)/delay_secs;
+ float guest_kernel_samples_per_sec = (guest_kernel_samples)/delay_secs;
+ float guest_us_samples_per_sec = (guest_us_samples)/delay_secs;
+ float esamples_percent = (100.0*exact_samples)/samples;
+ float sum_ksamples = 0.0;
+ struct sym_entry *syme, *n;
+ struct rb_root tmp = RB_ROOT;
+ struct rb_node *nd;
+ int sym_width = 0, dso_width = 0, dso_short_width = 0;
+ const int win_width = winsize.ws_col - 1;
+
+ samples = us_samples = kernel_samples = exact_samples = 0;
+ guest_kernel_samples = guest_us_samples = 0;
+
+ /* Sort the active symbols */
+ pthread_mutex_lock(&active_symbols_lock);
+ syme = list_entry(active_symbols.next, struct sym_entry, node);
+ pthread_mutex_unlock(&active_symbols_lock);
+
+ list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
+ syme->snap_count = syme->count[snap];
+ if (syme->snap_count != 0) {
+
+ if ((hide_user_symbols &&
+ syme->origin == PERF_RECORD_MISC_USER) ||
+ (hide_kernel_symbols &&
+ syme->origin == PERF_RECORD_MISC_KERNEL)) {
+ list_remove_active_sym(syme);
+ continue;
+ }
+ syme->weight = sym_weight(syme);
+ rb_insert_active_sym(&tmp, syme);
+ sum_ksamples += syme->snap_count;
+
+ for (j = 0; j < nr_counters; j++)
+ syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
+ } else
+ list_remove_active_sym(syme);
+ }
+
+ puts(CONSOLE_CLEAR);
+
+ printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
+ if (!perf_guest) {
+ printf(" PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
+ " exact: %4.1f%% [",
+ samples_per_sec,
+ 100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
+ samples_per_sec)),
+ esamples_percent);
+ } else {
+ printf(" PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
+ " guest kernel:%4.1f%% guest us:%4.1f%%"
+ " exact: %4.1f%% [",
+ samples_per_sec,
+ 100.0 - (100.0 * ((samples_per_sec-ksamples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec-us_samples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec -
+ guest_kernel_samples_per_sec) /
+ samples_per_sec)),
+ 100.0 - (100.0 * ((samples_per_sec -
+ guest_us_samples_per_sec) /
+ samples_per_sec)),
+ esamples_percent);
+ }
+
+ if (nr_counters == 1 || !display_weighted) {
+ printf("%Ld", (u64)attrs[0].sample_period);
+ if (freq)
+ printf("Hz ");
+ else
+ printf(" ");
+ }
+
+ if (!display_weighted)
+ printf("%s", event_name(sym_counter));
+ else for (counter = 0; counter < nr_counters; counter++) {
+ if (counter)
+ printf("/");
+
+ printf("%s", event_name(counter));
+ }
+
+ printf( "], ");
+
+ if (target_pid != -1)
+ printf(" (target_pid: %d", target_pid);
+ else if (target_tid != -1)
+ printf(" (target_tid: %d", target_tid);
+ else
+ printf(" (all");
+
+ if (profile_cpu != -1)
+ printf(", cpu: %d)\n", profile_cpu);
+ else {
+ if (target_tid != -1)
+ printf(")\n");
+ else
+ printf(", %d CPUs)\n", nr_cpus);
+ }
+
+ printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
+
+ if (sym_filter_entry) {
+ show_details(sym_filter_entry);
+ return;
+ }
+
+ /*
+ * Find the longest symbol name that will be displayed
+ */
+ for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
+ syme = rb_entry(nd, struct sym_entry, rb_node);
+ if (++printed > print_entries ||
+ (int)syme->snap_count < count_filter)
+ continue;
+
+ if (syme->map->dso->long_name_len > dso_width)
+ dso_width = syme->map->dso->long_name_len;
+
+ if (syme->map->dso->short_name_len > dso_short_width)
+ dso_short_width = syme->map->dso->short_name_len;
+
+ if (syme->name_len > sym_width)
+ sym_width = syme->name_len;
+ }
+
+ printed = 0;
+
+ if (sym_width + dso_width > winsize.ws_col - 29) {
+ dso_width = dso_short_width;
+ if (sym_width + dso_width > winsize.ws_col - 29)
+ sym_width = winsize.ws_col - dso_width - 29;
+ }
+ putchar('\n');
+ if (nr_counters == 1)
+ printf(" samples pcnt");
+ else
+ printf(" weight samples pcnt");
+
+ if (verbose)
+ printf(" RIP ");
+ printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
+ printf(" %s _______ _____",
+ nr_counters == 1 ? " " : "______");
+ if (verbose)
+ printf(" ________________");
+ printf(" %-*.*s", sym_width, sym_width, graph_line);
+ printf(" %-*.*s", dso_width, dso_width, graph_line);
+ puts("\n");
+
+ for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
+ struct symbol *sym;
+ double pcnt;
+
+ syme = rb_entry(nd, struct sym_entry, rb_node);
+ sym = sym_entry__symbol(syme);
+ if (++printed > print_entries || (int)syme->snap_count < count_filter)
+ continue;
+
+ pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
+ sum_ksamples));
+
+ if (nr_counters == 1 || !display_weighted)
+ printf("%20.2f ", syme->weight);
+ else
+ printf("%9.1f %10ld ", syme->weight, syme->snap_count);
+
+ percent_color_fprintf(stdout, "%4.1f%%", pcnt);
+ if (verbose)
+ printf(" %016llx", sym->start);
+ printf(" %-*.*s", sym_width, sym_width, sym->name);
+ printf(" %-*.*s\n", dso_width, dso_width,
+ dso_width >= syme->map->dso->long_name_len ?
+ syme->map->dso->long_name :
+ syme->map->dso->short_name);
+ }
+}
+
+static void prompt_integer(int *target, const char *msg)
+{
+ char *buf = malloc(0), *p;
+ size_t dummy = 0;
+ int tmp;
+
+ fprintf(stdout, "\n%s: ", msg);
+ if (getline(&buf, &dummy, stdin) < 0)
+ return;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = 0;
+
+ p = buf;
+ while(*p) {
+ if (!isdigit(*p))
+ goto out_free;
+ p++;
+ }
+ tmp = strtoul(buf, NULL, 10);
+ *target = tmp;
+out_free:
+ free(buf);
+}
+
+static void prompt_percent(int *target, const char *msg)
+{
+ int tmp = 0;
+
+ prompt_integer(&tmp, msg);
+ if (tmp >= 0 && tmp <= 100)
+ *target = tmp;
+}
+
+static void prompt_symbol(struct sym_entry **target, const char *msg)
+{
+ char *buf = malloc(0), *p;
+ struct sym_entry *syme = *target, *n, *found = NULL;
+ size_t dummy = 0;
+
+ /* zero counters of active symbol */
+ if (syme) {
+ pthread_mutex_lock(&syme->src->lock);
+ __zero_source_counters(syme);
+ *target = NULL;
+ pthread_mutex_unlock(&syme->src->lock);
+ }
+
+ fprintf(stdout, "\n%s: ", msg);
+ if (getline(&buf, &dummy, stdin) < 0)
+ goto out_free;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = 0;
+
+ pthread_mutex_lock(&active_symbols_lock);
+ syme = list_entry(active_symbols.next, struct sym_entry, node);
+ pthread_mutex_unlock(&active_symbols_lock);
+
+ list_for_each_entry_safe_from(syme, n, &active_symbols, node) {
+ struct symbol *sym = sym_entry__symbol(syme);
+
+ if (!strcmp(buf, sym->name)) {
+ found = syme;
+ break;
+ }
+ }
+
+ if (!found) {
+ fprintf(stderr, "Sorry, %s is not active.\n", buf);
+ sleep(1);
+ return;
+ } else
+ parse_source(found);
+
+out_free:
+ free(buf);
+}
+
+static void print_mapped_keys(void)
+{
+ char *name = NULL;
+
+ if (sym_filter_entry) {
+ struct symbol *sym = sym_entry__symbol(sym_filter_entry);
+ name = sym->name;
+ }
+
+ fprintf(stdout, "\nMapped keys:\n");
+ fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", delay_secs);
+ fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", print_entries);
+
+ if (nr_counters > 1)
+ fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(sym_counter));
+
+ fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter);
+
+ fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
+ fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
+ fprintf(stdout, "\t[S] stop annotation.\n");
+
+ if (nr_counters > 1)
+ fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);
+
+ fprintf(stdout,
+ "\t[K] hide kernel_symbols symbols. \t(%s)\n",
+ hide_kernel_symbols ? "yes" : "no");
+ fprintf(stdout,
+ "\t[U] hide user symbols. \t(%s)\n",
+ hide_user_symbols ? "yes" : "no");
+ fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", zero ? 1 : 0);
+ fprintf(stdout, "\t[qQ] quit.\n");
+}
+
+static int key_mapped(int c)
+{
+ switch (c) {
+ case 'd':
+ case 'e':
+ case 'f':
+ case 'z':
+ case 'q':
+ case 'Q':
+ case 'K':
+ case 'U':
+ case 'F':
+ case 's':
+ case 'S':
+ return 1;
+ case 'E':
+ case 'w':
+ return nr_counters > 1 ? 1 : 0;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void handle_keypress(struct perf_session *session, int c)
+{
+ if (!key_mapped(c)) {
+ struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
+ struct termios tc, save;
+
+ print_mapped_keys();
+ fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
+ fflush(stdout);
+
+ tcgetattr(0, &save);
+ tc = save;
+ tc.c_lflag &= ~(ICANON | ECHO);
+ tc.c_cc[VMIN] = 0;
+ tc.c_cc[VTIME] = 0;
+ tcsetattr(0, TCSANOW, &tc);
+
+ poll(&stdin_poll, 1, -1);
+ c = getc(stdin);
+
+ tcsetattr(0, TCSAFLUSH, &save);
+ if (!key_mapped(c))
+ return;
+ }
+
+ switch (c) {
+ case 'd':
+ prompt_integer(&delay_secs, "Enter display delay");
+ if (delay_secs < 1)
+ delay_secs = 1;
+ break;
+ case 'e':
+ prompt_integer(&print_entries, "Enter display entries (lines)");
+ if (print_entries == 0) {
+ sig_winch_handler(SIGWINCH);
+ signal(SIGWINCH, sig_winch_handler);
+ } else
+ signal(SIGWINCH, SIG_DFL);
+ break;
+ case 'E':
+ if (nr_counters > 1) {
+ int i;
+
+ fprintf(stderr, "\nAvailable events:");
+ for (i = 0; i < nr_counters; i++)
+ fprintf(stderr, "\n\t%d %s", i, event_name(i));
+
+ prompt_integer(&sym_counter, "Enter details event counter");
+
+ if (sym_counter >= nr_counters) {
+ fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0));
+ sym_counter = 0;
+ sleep(1);
+ }
+ } else sym_counter = 0;
+ break;
+ case 'f':
+ prompt_integer(&count_filter, "Enter display event count filter");
+ break;
+ case 'F':
+ prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
+ break;
+ case 'K':
+ hide_kernel_symbols = !hide_kernel_symbols;
+ break;
+ case 'q':
+ case 'Q':
+ printf("exiting.\n");
+ if (dump_symtab)
+ perf_session__fprintf_dsos(session, stderr);
+ exit(0);
+ case 's':
+ prompt_symbol(&sym_filter_entry, "Enter details symbol");
+ break;
+ case 'S':
+ if (!sym_filter_entry)
+ break;
+ else {
+ struct sym_entry *syme = sym_filter_entry;
+
+ pthread_mutex_lock(&syme->src->lock);
+ sym_filter_entry = NULL;
+ __zero_source_counters(syme);
+ pthread_mutex_unlock(&syme->src->lock);
+ }
+ break;
+ case 'U':
+ hide_user_symbols = !hide_user_symbols;
+ break;
+ case 'w':
+ display_weighted = ~display_weighted;
+ break;
+ case 'z':
+ zero = !zero;
+ break;
+ default:
+ break;
+ }
+}
+
+static void *display_thread(void *arg __used)
+{
+ struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
+ struct termios tc, save;
+ int delay_msecs, c;
+ struct perf_session *session = (struct perf_session *) arg;
+
+ tcgetattr(0, &save);
+ tc = save;
+ tc.c_lflag &= ~(ICANON | ECHO);
+ tc.c_cc[VMIN] = 0;
+ tc.c_cc[VTIME] = 0;
+
+repeat:
+ delay_msecs = delay_secs * 1000;
+ tcsetattr(0, TCSANOW, &tc);
+ /* trash return*/
+ getc(stdin);
+
+ do {
+ print_sym_table();
+ } while (!poll(&stdin_poll, 1, delay_msecs) == 1);
+
+ c = getc(stdin);
+ tcsetattr(0, TCSAFLUSH, &save);
+
+ handle_keypress(session, c);
+ goto repeat;
+
+ return NULL;
+}
+
+/* Tag samples to be skipped. */
+static const char *skip_symbols[] = {
+ "default_idle",
+ "cpu_idle",
+ "enter_idle",
+ "exit_idle",
+ "mwait_idle",
+ "mwait_idle_with_hints",
+ "poll_idle",
+ "ppc64_runlatch_off",
+ "pseries_dedicated_idle_sleep",
+ NULL
+};
+
+static int symbol_filter(struct map *map, struct symbol *sym)
+{
+ struct sym_entry *syme;
+ const char *name = sym->name;
+ int i;
+
+ /*
+ * ppc64 uses function descriptors and appends a '.' to the
+ * start of every instruction address. Remove it.
+ */
+ if (name[0] == '.')
+ name++;
+
+ if (!strcmp(name, "_text") ||
+ !strcmp(name, "_etext") ||
+ !strcmp(name, "_sinittext") ||
+ !strncmp("init_module", name, 11) ||
+ !strncmp("cleanup_module", name, 14) ||
+ strstr(name, "_text_start") ||
+ strstr(name, "_text_end"))
+ return 1;
+
+ syme = symbol__priv(sym);
+ syme->map = map;
+ syme->src = NULL;
+
+ if (!sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
+ /* schedule initial sym_filter_entry setup */
+ sym_filter_entry_sched = syme;
+ sym_filter = NULL;
+ }
+
+ for (i = 0; skip_symbols[i]; i++) {
+ if (!strcmp(skip_symbols[i], name)) {
+ syme->skip = 1;
+ break;
+ }
+ }
+
+ if (!syme->skip)
+ syme->name_len = strlen(sym->name);
+
+ return 0;
+}
+
+static void event__process_sample(const event_t *self,
+ struct perf_session *session, int counter)
+{
+ u64 ip = self->ip.ip;
+ struct sym_entry *syme;
+ struct addr_location al;
+ struct sample_data data;
+ struct machine *machine;
+ u8 origin = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ ++samples;
+
+ switch (origin) {
+ case PERF_RECORD_MISC_USER:
+ ++us_samples;
+ if (hide_user_symbols)
+ return;
+ machine = perf_session__find_host_machine(session);
+ break;
+ case PERF_RECORD_MISC_KERNEL:
+ ++kernel_samples;
+ if (hide_kernel_symbols)
+ return;
+ machine = perf_session__find_host_machine(session);
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ ++guest_kernel_samples;
+ machine = perf_session__find_machine(session, self->ip.pid);
+ break;
+ case PERF_RECORD_MISC_GUEST_USER:
+ ++guest_us_samples;
+ /*
+ * TODO: we don't process guest user samples from the host side,
+ * except for simple counting.
+ */
+ return;
+ default:
+ return;
+ }
+
+ if (!machine && perf_guest) {
+ pr_err("Can't find guest [%d]'s kernel information\n",
+ self->ip.pid);
+ return;
+ }
+
+ if (self->header.misc & PERF_RECORD_MISC_EXACT_IP)
+ exact_samples++;
+
+ if (event__preprocess_sample(self, session, &al, &data,
+ symbol_filter) < 0 ||
+ al.filtered)
+ return;
+
+ if (al.sym == NULL) {
+ /*
+ * As we do lazy loading of symtabs we only will know if the
+ * specified vmlinux file is invalid when we actually have a
+ * hit in kernel space and then try to load it. So if we get
+ * here and there are _no_ symbols in the DSO backing the
+ * kernel map, bail out.
+ *
+ * We may never get here, for instance, if we use -K/
+ * --hide-kernel-symbols, even if the user specifies an
+ * invalid --vmlinux ;-)
+ */
+ if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
+ RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
+ pr_err("The %s file can't be used\n",
+ symbol_conf.vmlinux_name);
+ exit(1);
+ }
+
+ return;
+ }
+
+ /* let's see, whether we need to install initial sym_filter_entry */
+ if (sym_filter_entry_sched) {
+ sym_filter_entry = sym_filter_entry_sched;
+ sym_filter_entry_sched = NULL;
+ if (parse_source(sym_filter_entry) < 0) {
+ struct symbol *sym = sym_entry__symbol(sym_filter_entry);
+
+ pr_err("Can't annotate %s", sym->name);
+ if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
+ pr_err(": No vmlinux file was found in the path:\n");
+ machine__fprintf_vmlinux_path(machine, stderr);
+ } else
+ pr_err(".\n");
+ exit(1);
+ }
+ }
+
+ syme = symbol__priv(al.sym);
+ if (!syme->skip) {
+ syme->count[counter]++;
+ syme->origin = origin;
+ record_precise_ip(syme, counter, ip);
+ pthread_mutex_lock(&active_symbols_lock);
+ if (list_empty(&syme->node) || !syme->node.next)
+ __list_insert_active_sym(syme);
+ pthread_mutex_unlock(&active_symbols_lock);
+ }
+}
+
+struct mmap_data {
+ int counter;
+ void *base;
+ int mask;
+ unsigned int prev;
+};
+
+static unsigned int mmap_read_head(struct mmap_data *md)
+{
+ struct perf_event_mmap_page *pc = md->base;
+ int head;
+
+ head = pc->data_head;
+ rmb();
+
+ return head;
+}
+
+static void perf_session__mmap_read_counter(struct perf_session *self,
+ struct mmap_data *md)
+{
+ unsigned int head = mmap_read_head(md);
+ unsigned int old = md->prev;
+ unsigned char *data = md->base + page_size;
+ int diff;
+
+ /*
+ * If we're further behind than half the buffer, there's a chance
+ * the writer will bite our tail and mess up the samples under us.
+ *
+ * If we somehow ended up ahead of the head, we got messed up.
+ *
+ * In either case, truncate and restart at head.
+ */
+ diff = head - old;
+ if (diff > md->mask / 2 || diff < 0) {
+ fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
+
+ /*
+ * head points to a known good entry, start there.
+ */
+ old = head;
+ }
+
+ for (; old != head;) {
+ event_t *event = (event_t *)&data[old & md->mask];
+
+ event_t event_copy;
+
+ size_t size = event->header.size;
+
+ /*
+ * Event straddles the mmap boundary -- header should always
+ * be inside due to u64 alignment of output.
+ */
+ if ((old & md->mask) + size != ((old + size) & md->mask)) {
+ unsigned int offset = old;
+ unsigned int len = min(sizeof(*event), size), cpy;
+ void *dst = &event_copy;
+
+ do {
+ cpy = min(md->mask + 1 - (offset & md->mask), len);
+ memcpy(dst, &data[offset & md->mask], cpy);
+ offset += cpy;
+ dst += cpy;
+ len -= cpy;
+ } while (len);
+
+ event = &event_copy;
+ }
+
+ if (event->header.type == PERF_RECORD_SAMPLE)
+ event__process_sample(event, self, md->counter);
+ else
+ event__process(event, self);
+ old += size;
+ }
+
+ md->prev = old;
+}
+
+static struct pollfd *event_array;
+static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
+
+static void perf_session__mmap_read(struct perf_session *self)
+{
+ int i, counter, thread_index;
+
+ for (i = 0; i < nr_cpus; i++) {
+ for (counter = 0; counter < nr_counters; counter++)
+ for (thread_index = 0;
+ thread_index < thread_num;
+ thread_index++) {
+ perf_session__mmap_read_counter(self,
+ &mmap_array[i][counter][thread_index]);
+ }
+ }
+}
+
+int nr_poll;
+int group_fd;
+
+static void start_counter(int i, int counter)
+{
+ struct perf_event_attr *attr;
+ int cpu;
+ int thread_index;
+
+ cpu = profile_cpu;
+ if (target_tid == -1 && profile_cpu == -1)
+ cpu = cpumap[i];
+
+ attr = attrs + counter;
+
+ attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+
+ if (freq) {
+ attr->sample_type |= PERF_SAMPLE_PERIOD;
+ attr->freq = 1;
+ attr->sample_freq = freq;
+ }
+
+ attr->inherit = (cpu < 0) && inherit;
+ attr->mmap = 1;
+
+ for (thread_index = 0; thread_index < thread_num; thread_index++) {
+try_again:
+ fd[i][counter][thread_index] = sys_perf_event_open(attr,
+ all_tids[thread_index], cpu, group_fd, 0);
+
+ if (fd[i][counter][thread_index] < 0) {
+ int err = errno;
+
+ if (err == EPERM || err == EACCES)
+ die("No permission - are you root?\n");
+ /*
+ * If it's cycles then fall back to hrtimer
+ * based cpu-clock-tick sw counter, which
+ * is always available even if no PMU support:
+ */
+ if (attr->type == PERF_TYPE_HARDWARE
+ && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+
+ if (verbose)
+ warning(" ... trying to fall back to cpu-clock-ticks\n");
+
+ attr->type = PERF_TYPE_SOFTWARE;
+ attr->config = PERF_COUNT_SW_CPU_CLOCK;
+ goto try_again;
+ }
+ printf("\n");
+ error("perfcounter syscall returned with %d (%s)\n",
+ fd[i][counter][thread_index], strerror(err));
+ die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
+ exit(-1);
+ }
+ assert(fd[i][counter][thread_index] >= 0);
+ fcntl(fd[i][counter][thread_index], F_SETFL, O_NONBLOCK);
+
+ /*
+ * First counter acts as the group leader:
+ */
+ if (group && group_fd == -1)
+ group_fd = fd[i][counter][thread_index];
+
+ event_array[nr_poll].fd = fd[i][counter][thread_index];
+ event_array[nr_poll].events = POLLIN;
+ nr_poll++;
+
+ mmap_array[i][counter][thread_index].counter = counter;
+ mmap_array[i][counter][thread_index].prev = 0;
+ mmap_array[i][counter][thread_index].mask = mmap_pages*page_size - 1;
+ mmap_array[i][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size,
+ PROT_READ, MAP_SHARED, fd[i][counter][thread_index], 0);
+ if (mmap_array[i][counter][thread_index].base == MAP_FAILED)
+ die("failed to mmap with %d (%s)\n", errno, strerror(errno));
+ }
+}
+
+static int __cmd_top(void)
+{
+ pthread_t thread;
+ int i, counter;
+ int ret;
+ /*
+ * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
+ * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
+ */
+ struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false);
+ if (session == NULL)
+ return -ENOMEM;
+
+ if (target_tid != -1)
+ event__synthesize_thread(target_tid, event__process, session);
+ else
+ event__synthesize_threads(event__process, session);
+
+ for (i = 0; i < nr_cpus; i++) {
+ group_fd = -1;
+ for (counter = 0; counter < nr_counters; counter++)
+ start_counter(i, counter);
+ }
+
+ /* Wait for a minimal set of events before starting the snapshot */
+ poll(&event_array[0], nr_poll, 100);
+
+ perf_session__mmap_read(session);
+
+ if (pthread_create(&thread, NULL, display_thread, session)) {
+ printf("Could not create display thread.\n");
+ exit(-1);
+ }
+
+ if (realtime_prio) {
+ struct sched_param param;
+
+ param.sched_priority = realtime_prio;
+ if (sched_setscheduler(0, SCHED_FIFO, &param)) {
+ printf("Could not set realtime priority.\n");
+ exit(-1);
+ }
+ }
+
+ while (1) {
+ int hits = samples;
+
+ perf_session__mmap_read(session);
+
+ if (hits == samples)
+ ret = poll(event_array, nr_poll, 100);
+ }
+
+ return 0;
+}
+
+static const char * const top_usage[] = {
+ "perf top [<options>]",
+ NULL
+};
+
+static const struct option options[] = {
+ OPT_CALLBACK('e', "event", NULL, "event",
+ "event selector. use 'perf list' to list available events",
+ parse_events),
+ OPT_INTEGER('c', "count", &default_interval,
+ "event period to sample"),
+ OPT_INTEGER('p', "pid", &target_pid,
+ "profile events on existing process id"),
+ OPT_INTEGER('t', "tid", &target_tid,
+ "profile events on existing thread id"),
+ OPT_BOOLEAN('a', "all-cpus", &system_wide,
+ "system-wide collection from all CPUs"),
+ OPT_STRING('C', "cpu", &cpu_list, "cpu",
+ "list of cpus to monitor"),
+ OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_BOOLEAN('K', "hide_kernel_symbols", &hide_kernel_symbols,
+ "hide kernel symbols"),
+ OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
+ OPT_INTEGER('r', "realtime", &realtime_prio,
+ "collect data with this RT SCHED_FIFO priority"),
+ OPT_INTEGER('d', "delay", &delay_secs,
+ "number of seconds to delay between refreshes"),
+ OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
+ "dump the symbol table used for profiling"),
+ OPT_INTEGER('f', "count-filter", &count_filter,
+ "only display functions with more events than this"),
+ OPT_BOOLEAN('g', "group", &group,
+ "put the counters into a counter group"),
+ OPT_BOOLEAN('i', "inherit", &inherit,
+ "child tasks inherit counters"),
+ OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
+ "symbol to annotate"),
+ OPT_BOOLEAN('z', "zero", &zero,
+ "zero history across updates"),
+ OPT_INTEGER('F', "freq", &freq,
+ "profile at this frequency"),
+ OPT_INTEGER('E', "entries", &print_entries,
+ "display this many functions"),
+ OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
+ "hide user symbols"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show counter open errors, etc)"),
+ OPT_END()
+};
+
+int cmd_top(int argc, const char **argv, const char *prefix __used)
+{
+ int counter;
+ int i,j;
+
+ page_size = sysconf(_SC_PAGE_SIZE);
+
+ argc = parse_options(argc, argv, options, top_usage, 0);
+ if (argc)
+ usage_with_options(top_usage, options);
+
+ if (target_pid != -1) {
+ target_tid = target_pid;
+ thread_num = find_all_tid(target_pid, &all_tids);
+ if (thread_num <= 0) {
+ fprintf(stderr, "Can't find all threads of pid %d\n",
+ target_pid);
+ usage_with_options(top_usage, options);
+ }
+ } else {
+ all_tids=malloc(sizeof(pid_t));
+ if (!all_tids)
+ return -ENOMEM;
+
+ all_tids[0] = target_tid;
+ thread_num = 1;
+ }
+
+ for (i = 0; i < MAX_NR_CPUS; i++) {
+ for (j = 0; j < MAX_COUNTERS; j++) {
+ fd[i][j] = malloc(sizeof(int)*thread_num);
+ mmap_array[i][j] = zalloc(
+ sizeof(struct mmap_data)*thread_num);
+ if (!fd[i][j] || !mmap_array[i][j])
+ return -ENOMEM;
+ }
+ }
+ event_array = malloc(
+ sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
+ if (!event_array)
+ return -ENOMEM;
+
+ /* CPU and PID are mutually exclusive */
+ if (target_tid > 0 && cpu_list) {
+ printf("WARNING: PID switch overriding CPU\n");
+ sleep(1);
+ cpu_list = NULL;
+ }
+
+ if (!nr_counters)
+ nr_counters = 1;
+
+ symbol_conf.priv_size = (sizeof(struct sym_entry) +
+ (nr_counters + 1) * sizeof(unsigned long));
+
+ symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
+ if (symbol__init() < 0)
+ return -1;
+
+ if (delay_secs < 1)
+ delay_secs = 1;
+
+ /*
+ * User specified count overrides default frequency.
+ */
+ if (default_interval)
+ freq = 0;
+ else if (freq) {
+ default_interval = freq;
+ } else {
+ fprintf(stderr, "frequency and count are zero, aborting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * Fill in the ones not specifically initialized via -c:
+ */
+ for (counter = 0; counter < nr_counters; counter++) {
+ if (attrs[counter].sample_period)
+ continue;
+
+ attrs[counter].sample_period = default_interval;
+ }
+
+ if (target_tid != -1)
+ nr_cpus = 1;
+ else
+ nr_cpus = read_cpu_map(cpu_list);
+
+ if (nr_cpus < 1)
+ usage_with_options(top_usage, options);
+
+ get_term_dimensions(&winsize);
+ if (print_entries == 0) {
+ update_print_entries(&winsize);
+ signal(SIGWINCH, sig_winch_handler);
+ }
+
+ return __cmd_top();
+}
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
new file mode 100644
index 00000000..40a6a299
--- /dev/null
+++ b/tools/perf/builtin-trace.c
@@ -0,0 +1,734 @@
+#include "builtin.h"
+
+#include "perf.h"
+#include "util/cache.h"
+#include "util/debug.h"
+#include "util/exec_cmd.h"
+#include "util/header.h"
+#include "util/parse-options.h"
+#include "util/session.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/trace-event.h"
+#include "util/util.h"
+
+static char const *script_name;
+static char const *generate_script_lang;
+static bool debug_mode;
+static u64 last_timestamp;
+static u64 nr_unordered;
+
+static int default_start_script(const char *script __unused,
+ int argc __unused,
+ const char **argv __unused)
+{
+ return 0;
+}
+
+static int default_stop_script(void)
+{
+ return 0;
+}
+
+static int default_generate_script(const char *outfile __unused)
+{
+ return 0;
+}
+
+static struct scripting_ops default_scripting_ops = {
+ .start_script = default_start_script,
+ .stop_script = default_stop_script,
+ .process_event = print_event,
+ .generate_script = default_generate_script,
+};
+
+static struct scripting_ops *scripting_ops;
+
+static void setup_scripting(void)
+{
+ /* make sure PERF_EXEC_PATH is set for scripts */
+ perf_set_argv_exec_path(perf_exec_path());
+
+ setup_perl_scripting();
+ setup_python_scripting();
+
+ scripting_ops = &default_scripting_ops;
+}
+
+static int cleanup_scripting(void)
+{
+ pr_debug("\nperf trace script stopped\n");
+
+ return scripting_ops->stop_script();
+}
+
+static char const *input_name = "perf.data";
+
+static int process_sample_event(event_t *event, struct perf_session *session)
+{
+ struct sample_data data;
+ struct thread *thread;
+
+ memset(&data, 0, sizeof(data));
+ data.time = -1;
+ data.cpu = -1;
+ data.period = 1;
+
+ event__parse_sample(event, session->sample_type, &data);
+
+ dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
+ data.pid, data.tid, data.ip, data.period);
+
+ thread = perf_session__findnew(session, event->ip.pid);
+ if (thread == NULL) {
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ if (session->sample_type & PERF_SAMPLE_RAW) {
+ if (debug_mode) {
+ if (data.time < last_timestamp) {
+ pr_err("Samples misordered, previous: %llu "
+ "this: %llu\n", last_timestamp,
+ data.time);
+ nr_unordered++;
+ }
+ last_timestamp = data.time;
+ return 0;
+ }
+ /*
+ * FIXME: better to resolve the pid from the struct trace_entry
+ * field, although it should be the same as this perf
+ * event's pid
+ */
+ scripting_ops->process_event(data.cpu, data.raw_data,
+ data.raw_size,
+ data.time, thread->comm);
+ }
+
+ session->hists.stats.total_period += data.period;
+ return 0;
+}
+
+static u64 nr_lost;
+
+static int process_lost_event(event_t *event, struct perf_session *session __used)
+{
+ nr_lost += event->lost.lost;
+
+ return 0;
+}
+
+static struct perf_event_ops event_ops = {
+ .sample = process_sample_event,
+ .comm = event__process_comm,
+ .attr = event__process_attr,
+ .event_type = event__process_event_type,
+ .tracing_data = event__process_tracing_data,
+ .build_id = event__process_build_id,
+ .lost = process_lost_event,
+ .ordered_samples = true,
+};
+
+extern volatile int session_done;
+
+static void sig_handler(int sig __unused)
+{
+ session_done = 1;
+}
+
+static int __cmd_trace(struct perf_session *session)
+{
+ int ret;
+
+ signal(SIGINT, sig_handler);
+
+ ret = perf_session__process_events(session, &event_ops);
+
+ if (debug_mode) {
+ pr_err("Misordered timestamps: %llu\n", nr_unordered);
+ pr_err("Lost events: %llu\n", nr_lost);
+ }
+
+ return ret;
+}
+
+struct script_spec {
+ struct list_head node;
+ struct scripting_ops *ops;
+ char spec[0];
+};
+
+LIST_HEAD(script_specs);
+
+static struct script_spec *script_spec__new(const char *spec,
+ struct scripting_ops *ops)
+{
+ struct script_spec *s = malloc(sizeof(*s) + strlen(spec) + 1);
+
+ if (s != NULL) {
+ strcpy(s->spec, spec);
+ s->ops = ops;
+ }
+
+ return s;
+}
+
+static void script_spec__delete(struct script_spec *s)
+{
+ /* 'spec' is a flexible array member of *s; it is freed together with *s */
+ free(s);
+}
+
+static void script_spec__add(struct script_spec *s)
+{
+ list_add_tail(&s->node, &script_specs);
+}
+
+static struct script_spec *script_spec__find(const char *spec)
+{
+ struct script_spec *s;
+
+ list_for_each_entry(s, &script_specs, node)
+ if (strcasecmp(s->spec, spec) == 0)
+ return s;
+ return NULL;
+}
+
+static struct script_spec *script_spec__findnew(const char *spec,
+ struct scripting_ops *ops)
+{
+ struct script_spec *s = script_spec__find(spec);
+
+ if (s)
+ return s;
+
+ s = script_spec__new(spec, ops);
+ if (!s)
+ goto out_delete_spec;
+
+ script_spec__add(s);
+
+ return s;
+
+out_delete_spec:
+ script_spec__delete(s);
+
+ return NULL;
+}
+
+int script_spec_register(const char *spec, struct scripting_ops *ops)
+{
+ struct script_spec *s;
+
+ s = script_spec__find(spec);
+ if (s)
+ return -1;
+
+ s = script_spec__findnew(spec, ops);
+ if (!s)
+ return -1;
+
+ return 0;
+}
+
+static struct scripting_ops *script_spec__lookup(const char *spec)
+{
+ struct script_spec *s = script_spec__find(spec);
+ if (!s)
+ return NULL;
+
+ return s->ops;
+}
+
+static void list_available_languages(void)
+{
+ struct script_spec *s;
+
+ fprintf(stderr, "\n");
+ fprintf(stderr, "Scripting language extensions (used in "
+ "perf trace -s [spec:]script.[spec]):\n\n");
+
+ list_for_each_entry(s, &script_specs, node)
+ fprintf(stderr, " %-42s [%s]\n", s->spec, s->ops->name);
+
+ fprintf(stderr, "\n");
+}
+
+static int parse_scriptname(const struct option *opt __used,
+ const char *str, int unset __used)
+{
+ char spec[PATH_MAX];
+ const char *script, *ext;
+ int len;
+
+ if (strcmp(str, "lang") == 0) {
+ list_available_languages();
+ exit(0);
+ }
+
+ script = strchr(str, ':');
+ if (script) {
+ len = script - str;
+ if (len >= PATH_MAX) {
+ fprintf(stderr, "invalid language specifier");
+ return -1;
+ }
+ strncpy(spec, str, len);
+ spec[len] = '\0';
+ scripting_ops = script_spec__lookup(spec);
+ if (!scripting_ops) {
+ fprintf(stderr, "invalid language specifier");
+ return -1;
+ }
+ script++;
+ } else {
+ script = str;
+ ext = strchr(script, '.');
+ if (!ext) {
+ fprintf(stderr, "invalid script extension");
+ return -1;
+ }
+ scripting_ops = script_spec__lookup(++ext);
+ if (!scripting_ops) {
+ fprintf(stderr, "invalid script extension");
+ return -1;
+ }
+ }
+
+ script_name = strdup(script);
+
+ return 0;
+}
+
+#define for_each_lang(scripts_dir, lang_dirent, lang_next) \
+ while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) && \
+ lang_next) \
+ if (lang_dirent.d_type == DT_DIR && \
+ (strcmp(lang_dirent.d_name, ".")) && \
+ (strcmp(lang_dirent.d_name, "..")))
+
+#define for_each_script(lang_dir, script_dirent, script_next) \
+ while (!readdir_r(lang_dir, &script_dirent, &script_next) && \
+ script_next) \
+ if (script_dirent.d_type != DT_DIR)
+
+
+#define RECORD_SUFFIX "-record"
+#define REPORT_SUFFIX "-report"
+
+struct script_desc {
+ struct list_head node;
+ char *name;
+ char *half_liner;
+ char *args;
+};
+
+LIST_HEAD(script_descs);
+
+static struct script_desc *script_desc__new(const char *name)
+{
+ struct script_desc *s = zalloc(sizeof(*s));
+
+ if (s != NULL)
+ s->name = strdup(name);
+
+ return s;
+}
+
+static void script_desc__delete(struct script_desc *s)
+{
+ free(s->name);
+ free(s);
+}
+
+static void script_desc__add(struct script_desc *s)
+{
+ list_add_tail(&s->node, &script_descs);
+}
+
+static struct script_desc *script_desc__find(const char *name)
+{
+ struct script_desc *s;
+
+ list_for_each_entry(s, &script_descs, node)
+ if (strcasecmp(s->name, name) == 0)
+ return s;
+ return NULL;
+}
+
+static struct script_desc *script_desc__findnew(const char *name)
+{
+ struct script_desc *s = script_desc__find(name);
+
+ if (s)
+ return s;
+
+ s = script_desc__new(name);
+ if (!s)
+ goto out_delete_desc;
+
+ script_desc__add(s);
+
+ return s;
+
+out_delete_desc:
+ script_desc__delete(s);
+
+ return NULL;
+}
+
+static char *ends_with(char *str, const char *suffix)
+{
+ size_t suffix_len = strlen(suffix);
+ char *p = str;
+
+ if (strlen(str) > suffix_len) {
+ p = str + strlen(str) - suffix_len;
+ if (!strncmp(p, suffix, suffix_len))
+ return p;
+ }
+
+ return NULL;
+}
+
+static char *ltrim(char *str)
+{
+ int len = strlen(str);
+
+ while (len && isspace(*str)) {
+ len--;
+ str++;
+ }
+
+ return str;
+}
+
+static int read_script_info(struct script_desc *desc, const char *filename)
+{
+ char line[BUFSIZ], *p;
+ FILE *fp;
+
+ fp = fopen(filename, "r");
+ if (!fp)
+ return -1;
+
+ while (fgets(line, sizeof(line), fp)) {
+ p = ltrim(line);
+ if (strlen(p) == 0)
+ continue;
+ if (*p != '#')
+ continue;
+ p++;
+ if (strlen(p) && *p == '!')
+ continue;
+
+ p = ltrim(p);
+ if (strlen(p) && p[strlen(p) - 1] == '\n')
+ p[strlen(p) - 1] = '\0';
+
+ if (!strncmp(p, "description:", strlen("description:"))) {
+ p += strlen("description:");
+ desc->half_liner = strdup(ltrim(p));
+ continue;
+ }
+
+ if (!strncmp(p, "args:", strlen("args:"))) {
+ p += strlen("args:");
+ desc->args = strdup(ltrim(p));
+ continue;
+ }
+ }
+
+ fclose(fp);
+
+ return 0;
+}
+
+static int list_available_scripts(const struct option *opt __used,
+ const char *s __used, int unset __used)
+{
+ struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ char scripts_path[MAXPATHLEN];
+ DIR *scripts_dir, *lang_dir;
+ char script_path[MAXPATHLEN];
+ char lang_path[MAXPATHLEN];
+ struct script_desc *desc;
+ char first_half[BUFSIZ];
+ char *script_root;
+ char *str;
+
+ snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
+
+ scripts_dir = opendir(scripts_path);
+ if (!scripts_dir)
+ return -1;
+
+ for_each_lang(scripts_dir, lang_dirent, lang_next) {
+ snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
+ lang_dirent.d_name);
+ lang_dir = opendir(lang_path);
+ if (!lang_dir)
+ continue;
+
+ for_each_script(lang_dir, script_dirent, script_next) {
+ script_root = strdup(script_dirent.d_name);
+ str = ends_with(script_root, REPORT_SUFFIX);
+ if (str) {
+ *str = '\0';
+ desc = script_desc__findnew(script_root);
+ snprintf(script_path, MAXPATHLEN, "%s/%s",
+ lang_path, script_dirent.d_name);
+ read_script_info(desc, script_path);
+ }
+ free(script_root);
+ }
+ }
+
+ fprintf(stdout, "List of available trace scripts:\n");
+ list_for_each_entry(desc, &script_descs, node) {
+ sprintf(first_half, "%s %s", desc->name,
+ desc->args ? desc->args : "");
+ fprintf(stdout, " %-36s %s\n", first_half,
+ desc->half_liner ? desc->half_liner : "");
+ }
+
+ exit(0);
+}
+
+static char *get_script_path(const char *script_root, const char *suffix)
+{
+ struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ char scripts_path[MAXPATHLEN];
+ char script_path[MAXPATHLEN];
+ DIR *scripts_dir, *lang_dir;
+ char lang_path[MAXPATHLEN];
+ char *str, *__script_root;
+ char *path = NULL;
+
+ snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
+
+ scripts_dir = opendir(scripts_path);
+ if (!scripts_dir)
+ return NULL;
+
+ for_each_lang(scripts_dir, lang_dirent, lang_next) {
+ snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
+ lang_dirent.d_name);
+ lang_dir = opendir(lang_path);
+ if (!lang_dir)
+ continue;
+
+ for_each_script(lang_dir, script_dirent, script_next) {
+ __script_root = strdup(script_dirent.d_name);
+ str = ends_with(__script_root, suffix);
+ if (str) {
+ *str = '\0';
+ if (strcmp(__script_root, script_root))
+ continue;
+ snprintf(script_path, MAXPATHLEN, "%s/%s",
+ lang_path, script_dirent.d_name);
+ path = strdup(script_path);
+ free(__script_root);
+ break;
+ }
+ free(__script_root);
+ }
+ }
+
+ return path;
+}
+
+static const char * const trace_usage[] = {
+ "perf trace [<options>] <command>",
+ NULL
+};
+
+static const struct option options[] = {
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('L', "Latency", &latency_format,
+ "show latency attributes (irqs/preemption disabled, etc)"),
+ OPT_CALLBACK_NOOPT('l', "list", NULL, NULL, "list available scripts",
+ list_available_scripts),
+ OPT_CALLBACK('s', "script", NULL, "name",
+ "script file name (lang:script name, script name, or *)",
+ parse_scriptname),
+ OPT_STRING('g', "gen-script", &generate_script_lang, "lang",
+ "generate perf-trace.xx script in specified language"),
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_BOOLEAN('d', "debug-mode", &debug_mode,
+ "do various checks like samples ordering and lost events"),
+
+ OPT_END()
+};
+
+int cmd_trace(int argc, const char **argv, const char *prefix __used)
+{
+ struct perf_session *session;
+ const char *suffix = NULL;
+ const char **__argv;
+ char *script_path;
+ int i, err;
+
+ if (argc >= 2 && strncmp(argv[1], "rec", strlen("rec")) == 0) {
+ if (argc < 3) {
+ fprintf(stderr,
+ "Please specify a record script\n");
+ return -1;
+ }
+ suffix = RECORD_SUFFIX;
+ }
+
+ if (argc >= 2 && strncmp(argv[1], "rep", strlen("rep")) == 0) {
+ if (argc < 3) {
+ fprintf(stderr,
+ "Please specify a report script\n");
+ return -1;
+ }
+ suffix = REPORT_SUFFIX;
+ }
+
+ if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) {
+ char *record_script_path, *report_script_path;
+ int live_pipe[2];
+ pid_t pid;
+
+ record_script_path = get_script_path(argv[1], RECORD_SUFFIX);
+ if (!record_script_path) {
+ fprintf(stderr, "record script not found\n");
+ return -1;
+ }
+
+ report_script_path = get_script_path(argv[1], REPORT_SUFFIX);
+ if (!report_script_path) {
+ fprintf(stderr, "report script not found\n");
+ return -1;
+ }
+
+ if (pipe(live_pipe) < 0) {
+ perror("failed to create pipe");
+ exit(-1);
+ }
+
+ pid = fork();
+ if (pid < 0) {
+ perror("failed to fork");
+ exit(-1);
+ }
+
+ if (!pid) {
+ dup2(live_pipe[1], 1);
+ close(live_pipe[0]);
+
+ __argv = malloc(5 * sizeof(const char *));
+ __argv[0] = "/bin/sh";
+ __argv[1] = record_script_path;
+ __argv[2] = "-o";
+ __argv[3] = "-";
+ __argv[4] = NULL;
+
+ execvp("/bin/sh", (char **)__argv);
+ exit(-1);
+ }
+
+ dup2(live_pipe[0], 0);
+ close(live_pipe[1]);
+
+ __argv = malloc((argc + 3) * sizeof(const char *));
+ __argv[0] = "/bin/sh";
+ __argv[1] = report_script_path;
+ for (i = 2; i < argc; i++)
+ __argv[i] = argv[i];
+ __argv[i++] = "-i";
+ __argv[i++] = "-";
+ __argv[i++] = NULL;
+
+ execvp("/bin/sh", (char **)__argv);
+ exit(-1);
+ }
+
+ if (suffix) {
+ script_path = get_script_path(argv[2], suffix);
+ if (!script_path) {
+ fprintf(stderr, "script not found\n");
+ return -1;
+ }
+
+ __argv = malloc((argc + 1) * sizeof(const char *));
+ __argv[0] = "/bin/sh";
+ __argv[1] = script_path;
+ for (i = 3; i < argc; i++)
+ __argv[i - 1] = argv[i];
+ __argv[argc - 1] = NULL;
+
+ execvp("/bin/sh", (char **)__argv);
+ exit(-1);
+ }
+
+ setup_scripting();
+
+ argc = parse_options(argc, argv, options, trace_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (symbol__init() < 0)
+ return -1;
+ if (!script_name)
+ setup_pager();
+
+ session = perf_session__new(input_name, O_RDONLY, 0, false);
+ if (session == NULL)
+ return -ENOMEM;
+
+ if (strcmp(input_name, "-") &&
+ !perf_session__has_traces(session, "record -R"))
+ return -EINVAL;
+
+ if (generate_script_lang) {
+ struct stat perf_stat;
+
+ int input = open(input_name, O_RDONLY);
+ if (input < 0) {
+ perror("failed to open file");
+ exit(-1);
+ }
+
+ err = fstat(input, &perf_stat);
+ if (err < 0) {
+ perror("failed to stat file");
+ exit(-1);
+ }
+
+ if (!perf_stat.st_size) {
+ fprintf(stderr, "zero-sized file, nothing to do!\n");
+ exit(0);
+ }
+
+ scripting_ops = script_spec__lookup(generate_script_lang);
+ if (!scripting_ops) {
+ fprintf(stderr, "invalid language specifier");
+ return -1;
+ }
+
+ err = scripting_ops->generate_script("perf-trace");
+ goto out;
+ }
+
+ if (script_name) {
+ err = scripting_ops->start_script(script_name, argc, argv);
+ if (err)
+ goto out;
+ pr_debug("perf trace started with script %s\n\n", script_name);
+ }
+
+ err = __cmd_trace(session);
+
+ perf_session__delete(session);
+ cleanup_scripting();
+out:
+ return err;
+}
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
new file mode 100644
index 00000000..921245b2
--- /dev/null
+++ b/tools/perf/builtin.h
@@ -0,0 +1,39 @@
+#ifndef BUILTIN_H
+#define BUILTIN_H
+
+#include "util/util.h"
+#include "util/strbuf.h"
+
+extern const char perf_version_string[];
+extern const char perf_usage_string[];
+extern const char perf_more_info_string[];
+
+extern void list_common_cmds_help(void);
+extern const char *help_unknown_cmd(const char *cmd);
+extern void prune_packed_objects(int);
+extern int read_line_with_nul(char *buf, int size, FILE *file);
+extern int check_pager_config(const char *cmd);
+
+extern int cmd_annotate(int argc, const char **argv, const char *prefix);
+extern int cmd_bench(int argc, const char **argv, const char *prefix);
+extern int cmd_buildid_cache(int argc, const char **argv, const char *prefix);
+extern int cmd_buildid_list(int argc, const char **argv, const char *prefix);
+extern int cmd_diff(int argc, const char **argv, const char *prefix);
+extern int cmd_help(int argc, const char **argv, const char *prefix);
+extern int cmd_sched(int argc, const char **argv, const char *prefix);
+extern int cmd_list(int argc, const char **argv, const char *prefix);
+extern int cmd_record(int argc, const char **argv, const char *prefix);
+extern int cmd_report(int argc, const char **argv, const char *prefix);
+extern int cmd_stat(int argc, const char **argv, const char *prefix);
+extern int cmd_timechart(int argc, const char **argv, const char *prefix);
+extern int cmd_top(int argc, const char **argv, const char *prefix);
+extern int cmd_trace(int argc, const char **argv, const char *prefix);
+extern int cmd_version(int argc, const char **argv, const char *prefix);
+extern int cmd_probe(int argc, const char **argv, const char *prefix);
+extern int cmd_kmem(int argc, const char **argv, const char *prefix);
+extern int cmd_lock(int argc, const char **argv, const char *prefix);
+extern int cmd_kvm(int argc, const char **argv, const char *prefix);
+extern int cmd_test(int argc, const char **argv, const char *prefix);
+extern int cmd_inject(int argc, const char **argv, const char *prefix);
+
+#endif
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
new file mode 100644
index 00000000..949d77fc
--- /dev/null
+++ b/tools/perf/command-list.txt
@@ -0,0 +1,24 @@
+#
+# List of known perf commands.
+# command name category [deprecated] [common]
+#
+perf-annotate mainporcelain common
+perf-archive mainporcelain common
+perf-bench mainporcelain common
+perf-buildid-cache mainporcelain common
+perf-buildid-list mainporcelain common
+perf-diff mainporcelain common
+perf-inject mainporcelain common
+perf-list mainporcelain common
+perf-sched mainporcelain common
+perf-record mainporcelain common
+perf-report mainporcelain common
+perf-stat mainporcelain common
+perf-timechart mainporcelain common
+perf-top mainporcelain common
+perf-trace mainporcelain common
+perf-probe mainporcelain common
+perf-kmem mainporcelain common
+perf-lock mainporcelain common
+perf-kvm mainporcelain common
+perf-test mainporcelain common
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
new file mode 100644
index 00000000..bd0bb1b1
--- /dev/null
+++ b/tools/perf/design.txt
@@ -0,0 +1,462 @@
+
+Performance Counters for Linux
+------------------------------
+
+Performance counters are special hardware registers available on most modern
+CPUs. These registers count the number of certain types of hw events, such
+as instructions executed, cache misses suffered, or branches mis-predicted -
+without slowing down the kernel or applications. These registers can also
+trigger interrupts when a threshold number of events have passed - and can
+thus be used to profile the code that runs on that CPU.
+
+The Linux Performance Counter subsystem provides an abstraction of these
+hardware capabilities. It provides per task and per CPU counters, counter
+groups, and it provides event capabilities on top of those. It
+provides "virtual" 64-bit counters, regardless of the width of the
+underlying hardware counters.
+
+Performance counters are accessed via special file descriptors.
+There's one file descriptor per virtual counter used.
+
+The special file descriptor is opened via the perf_event_open()
+system call:
+
+ int sys_perf_event_open(struct perf_event_attr *hw_event_uptr,
+ pid_t pid, int cpu, int group_fd,
+ unsigned long flags);
+
+The syscall returns the new fd. The fd can be used via the normal
+VFS system calls: read() can be used to read the counter, fcntl()
+can be used to set the blocking mode, etc.
+
+Multiple counters can be kept open at a time, and the counters
+can be poll()ed.
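+
+As a minimal illustration -- a sketch rather than a tested program -- a
+single counting counter for user-space instructions in the current task
+could be opened and read like this. Note that the field names below follow
+the current <linux/perf_event.h>, where the event type and the config are
+separate fields and 'irq_period' has become 'sample_period':
+
+	#include <linux/perf_event.h>
+	#include <sys/syscall.h>
+	#include <sys/ioctl.h>
+	#include <unistd.h>
+	#include <string.h>
+	#include <stdio.h>
+	#include <stdint.h>
+
+	static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
+				    int cpu, int group_fd, unsigned long flags)
+	{
+		return syscall(__NR_perf_event_open, attr, pid, cpu,
+			       group_fd, flags);
+	}
+
+	int main(void)
+	{
+		struct perf_event_attr attr;
+		uint64_t count;
+		int fd;
+
+		memset(&attr, 0, sizeof(attr));
+		attr.size = sizeof(attr);
+		attr.type = PERF_TYPE_HARDWARE;
+		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
+		attr.disabled = 1;		/* start disabled, enable explicitly */
+		attr.exclude_kernel = 1;	/* count user space only */
+
+		/* pid == 0, cpu == -1: this task, on whatever CPU it runs */
+		fd = perf_event_open(&attr, 0, -1, -1, 0);
+		if (fd < 0) {
+			perror("perf_event_open");
+			return 1;
+		}
+
+		ioctl(fd, PERF_EVENT_IOC_ENABLE);
+		/* ... workload to be measured ... */
+		ioctl(fd, PERF_EVENT_IOC_DISABLE);
+
+		/* no read_format bits set: read() returns a single u64 */
+		read(fd, &count, sizeof(count));
+		printf("instructions: %llu\n", (unsigned long long)count);
+		close(fd);
+		return 0;
+	}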
+
+When creating a new counter fd, 'perf_event_attr' is:
+
+struct perf_event_attr {
+ /*
+ * The MSB of the config word signifies if the rest contains cpu
+ * specific (raw) counter configuration data; if unset, the next
+ * 7 bits are an event type and the rest of the bits are the event
+ * identifier.
+ */
+ __u64 config;
+
+ __u64 irq_period;
+ __u32 record_type;
+ __u32 read_format;
+
+ __u64 disabled : 1, /* off by default */
+ inherit : 1, /* children inherit it */
+ pinned : 1, /* must always be on PMU */
+ exclusive : 1, /* only group on PMU */
+ exclude_user : 1, /* don't count user */
+ exclude_kernel : 1, /* ditto kernel */
+ exclude_hv : 1, /* ditto hypervisor */
+ exclude_idle : 1, /* don't count when idle */
+ mmap : 1, /* include mmap data */
+ munmap : 1, /* include munmap data */
+ comm : 1, /* include comm data */
+
+ __reserved_1 : 52;
+
+ __u32 extra_config_len;
+ __u32 wakeup_events; /* wakeup every n events */
+
+ __u64 __reserved_2;
+ __u64 __reserved_3;
+};
+
+The 'config' field specifies what the counter should count. It
+is divided into 3 bit-fields:
+
+raw_type: 1 bit (most significant bit) 0x8000_0000_0000_0000
+type: 7 bits (next most significant) 0x7f00_0000_0000_0000
+event_id: 56 bits (least significant) 0x00ff_ffff_ffff_ffff
+
+If 'raw_type' is 1, then the counter will count a hardware event
+specified by the remaining 63 bits of event_config. The encoding is
+machine-specific.
+
+If 'raw_type' is 0, then the 'type' field says what kind of counter
+this is, with the following encoding:
+
+enum perf_event_types {
+ PERF_TYPE_HARDWARE = 0,
+ PERF_TYPE_SOFTWARE = 1,
+ PERF_TYPE_TRACEPOINT = 2,
+};
+
+A counter of PERF_TYPE_HARDWARE will count the hardware event
+specified by 'event_id':
+
+/*
+ * Generalized performance counter event types, used by the hw_event.event_id
+ * parameter of the sys_perf_event_open() syscall:
+ */
+enum hw_event_ids {
+ /*
+ * Common hardware events, generalized by the kernel:
+ */
+ PERF_COUNT_HW_CPU_CYCLES = 0,
+ PERF_COUNT_HW_INSTRUCTIONS = 1,
+ PERF_COUNT_HW_CACHE_REFERENCES = 2,
+ PERF_COUNT_HW_CACHE_MISSES = 3,
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
+ PERF_COUNT_HW_BRANCH_MISSES = 5,
+ PERF_COUNT_HW_BUS_CYCLES = 6,
+};
+
+These are standardized types of events that work relatively uniformly
+on all CPUs that implement Performance Counters support under Linux,
+although there may be variations (e.g., different CPUs might count
+cache references and misses at different levels of the cache hierarchy).
+If a CPU is not able to count the selected event, then the system call
+will return -EINVAL.
+
+More hw_event_types are supported as well, but they are CPU-specific
+and accessed as raw events. For example, to count "External bus
+cycles while bus lock signal asserted" events on Intel Core CPUs, pass
+in a 0x4064 event_id value and set hw_event.raw_type to 1.
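+
+Purely as an illustration of the bit-field layout above (a sketch; the two
+helper macros are made up for this example, and in the current ABI the type
+is carried in a separate 'type' field instead):
+
+	#define CONFIG_RAW_TYPE		(1ULL << 63)	/* 0x8000_0000_0000_0000 */
+	#define CONFIG_TYPE_SHIFT	56		/* type in bits 56..62 */
+
+	/* generalized hardware event: cycles */
+	__u64 config_hw  = ((__u64)PERF_TYPE_HARDWARE << CONFIG_TYPE_SHIFT) |
+			   PERF_COUNT_HW_CPU_CYCLES;
+
+	/* raw, machine-specific event: the 0x4064 example mentioned above */
+	__u64 config_raw = CONFIG_RAW_TYPE | 0x4064;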
+
+A counter of type PERF_TYPE_SOFTWARE will count one of the available
+software events, selected by 'event_id':
+
+/*
+ * Special "software" counters provided by the kernel, even if the hardware
+ * does not support performance counters. These counters measure various
+ * physical and sw events of the kernel (and allow the profiling of them as
+ * well):
+ */
+enum sw_event_ids {
+ PERF_COUNT_SW_CPU_CLOCK = 0,
+ PERF_COUNT_SW_TASK_CLOCK = 1,
+ PERF_COUNT_SW_PAGE_FAULTS = 2,
+ PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
+ PERF_COUNT_SW_CPU_MIGRATIONS = 4,
+ PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
+ PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
+ PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
+ PERF_COUNT_SW_EMULATION_FAULTS = 8,
+};
+
+Counters of the type PERF_TYPE_TRACEPOINT are available when the ftrace event
+tracer is available, and event_id values can be obtained from
+/debug/tracing/events/*/*/id
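+
+For example, such an id could be plugged into a counter like this (a sketch,
+reusing the 'attr' from the earlier example; the debugfs mount point may
+differ from /debug on a given system):
+
+	FILE *f = fopen("/debug/tracing/events/sched/sched_switch/id", "r");
+	int id;
+
+	if (f && fscanf(f, "%d", &id) == 1) {
+		attr.type = PERF_TYPE_TRACEPOINT;
+		attr.config = id;
+	}
+	if (f)
+		fclose(f);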
+
+
+Counters come in two flavours: counting counters and sampling
+counters. A "counting" counter is one that is used for counting the
+number of events that occur, and is characterised by having
+irq_period = 0.
+
+
+A read() on a counter returns the current value of the counter and possible
+additional values as specified by 'read_format'; each value is a u64 (8 bytes)
+in size.
+
+/*
+ * Bits that can be set in hw_event.read_format to request that
+ * reads on the counter should return the indicated quantities,
+ * in increasing order of bit value, after the counter value.
+ */
+enum perf_event_read_format {
+ PERF_FORMAT_TOTAL_TIME_ENABLED = 1,
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 2,
+};
+
+Using these additional values one can establish the overcommit ratio for a
+particular counter, allowing one to take the round-robin scheduling effect
+into account.
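+
+With both bits set, read() returns the counter value followed by the two
+times, in increasing order of bit value. A sketch (the struct and variable
+names are illustrative only):
+
+	struct read_values {
+		__u64 count;
+		__u64 time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
+		__u64 time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
+	} v;
+	__u64 scaled = 0;
+
+	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+			   PERF_FORMAT_TOTAL_TIME_RUNNING;
+	/* ... open the counter and run the workload ... */
+	if (read(fd, &v, sizeof(v)) == sizeof(v) && v.time_running)
+		scaled = v.count * v.time_enabled / v.time_running;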
+
+
+A "sampling" counter is one that is set up to generate an interrupt
+every N events, where N is given by 'irq_period'. A sampling counter
+has irq_period > 0. The record_type controls what data is recorded on each
+interrupt:
+
+/*
+ * Bits that can be set in hw_event.record_type to request information
+ * in the overflow packets.
+ */
+enum perf_event_record_format {
+ PERF_RECORD_IP = 1U << 0,
+ PERF_RECORD_TID = 1U << 1,
+ PERF_RECORD_TIME = 1U << 2,
+ PERF_RECORD_ADDR = 1U << 3,
+ PERF_RECORD_GROUP = 1U << 4,
+ PERF_RECORD_CALLCHAIN = 1U << 5,
+};
+
+Such (and other) events will be recorded in a ring-buffer, which is
+available to user-space using mmap() (see below).
+
+The 'disabled' bit specifies whether the counter starts out disabled
+or enabled. If it is initially disabled, it can be enabled by ioctl
+or prctl (see below).
+
+The 'inherit' bit, if set, specifies that this counter should count
+events on descendant tasks as well as the task specified. This only
+applies to new descendants, not to any existing descendants at the
+time the counter is created (nor to any new descendants of existing
+descendants).
+
+The 'pinned' bit, if set, specifies that the counter should always be
+on the CPU if at all possible. It only applies to hardware counters
+and only to group leaders. If a pinned counter cannot be put onto the
+CPU (e.g. because there are not enough hardware counters or because of
+a conflict with some other event), then the counter goes into an
+'error' state, where reads return end-of-file (i.e. read() returns 0)
+until the counter is subsequently enabled or disabled.
+
+The 'exclusive' bit, if set, specifies that when this counter's group
+is on the CPU, it should be the only group using the CPU's counters.
+In future, this will allow sophisticated monitoring programs to supply
+extra configuration information via 'extra_config_len' to exploit
+advanced features of the CPU's Performance Monitor Unit (PMU) that are
+not otherwise accessible and that might disrupt other hardware
+counters.
+
+The 'exclude_user', 'exclude_kernel' and 'exclude_hv' bits provide a
+way to request that counting of events be restricted to times when the
+CPU is in user, kernel and/or hypervisor mode.
+
+The 'mmap' and 'munmap' bits allow recording of PROT_EXEC mmap/munmap
+operations. These can be used to relate userspace IP addresses to actual
+code, even after the mapping (or even the whole process) is gone;
+the events are recorded in the ring-buffer (see below).
+
+The 'comm' bit allows tracking of process comm data on process creation.
+This too is recorded in the ring-buffer (see below).
+
+The 'pid' parameter to the perf_event_open() system call allows the
+counter to be specific to a task:
+
+ pid == 0: if the pid parameter is zero, the counter is attached to the
+ current task.
+
+ pid > 0: the counter is attached to a specific task (if the current task
+ has sufficient privilege to do so)
+
+ pid < 0: all tasks are counted (per cpu counters)
+
+The 'cpu' parameter allows a counter to be made specific to a CPU:
+
+ cpu >= 0: the counter is restricted to a specific CPU
+ cpu == -1: the counter counts on all CPUs
+
+(Note: the combination of 'pid == -1' and 'cpu == -1' is not valid.)
+
+A 'pid > 0' and 'cpu == -1' counter is a per task counter that counts
+events of that task and 'follows' that task to whatever CPU the task
+gets scheduled to. Per task counters can be created by any user, for
+their own tasks.
+
+A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts
+all events on CPU-x. Per CPU counters need CAP_SYS_ADMIN privilege.
+
+The 'flags' parameter is currently unused and must be zero.
+
+The 'group_fd' parameter allows counter "groups" to be set up. A
+counter group has one counter which is the group "leader". The leader
+is created first, with group_fd = -1 in the perf_event_open call
+that creates it. The rest of the group members are created
+subsequently, with group_fd giving the fd of the group leader.
+(A single counter on its own is created with group_fd = -1 and is
+considered to be a group with only 1 member.)
+
+A counter group is scheduled onto the CPU as a unit, that is, it will
+only be put onto the CPU if all of the counters in the group can be
+put onto the CPU. This means that the values of the member counters
+can be meaningfully compared, added, divided (to get ratios), etc.,
+with each other, since they have counted events for the same set of
+executed instructions.
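+
+A sketch of setting up a two-counter group, reusing the perf_event_open()
+wrapper from the earlier example:
+
+	struct perf_event_attr attr;
+	int leader, member;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.size = sizeof(attr);
+	attr.type = PERF_TYPE_HARDWARE;
+
+	/* the leader is created with group_fd == -1 ... */
+	attr.config = PERF_COUNT_HW_CPU_CYCLES;
+	leader = perf_event_open(&attr, 0, -1, -1, 0);
+
+	/* ... and each member passes the leader's fd as group_fd */
+	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
+	member = perf_event_open(&attr, 0, -1, leader, 0);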
+
+
+As stated above, asynchronous events, like counter overflow or PROT_EXEC mmap
+tracking, are logged into a ring-buffer. This ring-buffer is created and
+accessed through mmap().
+
+The mmap size should be 1+2^n pages, where the first page is a meta-data page
+(struct perf_event_mmap_page) that contains various bits of information such
+as where the ring-buffer head is.
+
+/*
+ * Structure of the page that can be mapped via mmap
+ */
+struct perf_event_mmap_page {
+ __u32 version; /* version number of this structure */
+ __u32 compat_version; /* lowest version this is compat with */
+
+ /*
+ * Bits needed to read the hw counters in user-space.
+ *
+ * u32 seq;
+ * s64 count;
+ *
+ * do {
+ * seq = pc->lock;
+ *
+ * barrier()
+ * if (pc->index) {
+ * count = pmc_read(pc->index - 1);
+ * count += pc->offset;
+ * } else
+ * goto regular_read;
+ *
+ * barrier();
+ * } while (pc->lock != seq);
+ *
+ * NOTE: for obvious reasons this only works on self-monitoring
+ * processes.
+ */
+ __u32 lock; /* seqlock for synchronization */
+ __u32 index; /* hardware counter identifier */
+ __s64 offset; /* add to hardware counter value */
+
+ /*
+ * Control data for the mmap() data buffer.
+ *
+ * User-space reading this value should issue an rmb(), on SMP-capable
+ * platforms, after reading it -- see perf_event_wakeup().
+ */
+ __u32 data_head; /* head in the data section */
+};
+
+NOTE: the hw-counter userspace bits are arch specific and are currently only
+ implemented on powerpc.
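+
+A sketch of creating the mapping and picking up the current head follows
+(illustration only, reusing 'fd' and the includes from the earlier sketch;
+__sync_synchronize() stands in for the rmb() mentioned above):
+
+ #include <sys/mman.h>
+
+ size_t page = sysconf(_SC_PAGESIZE);
+ void *base = mmap(NULL, (1 + 8) * page, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ struct perf_event_mmap_page *meta = base;
+
+ __u32 head = meta->data_head;
+ __sync_synchronize(); /* user-space stand-in for rmb() */
+ /* events are available in the 8 data pages up to 'head' */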
+
+The following 2^n pages are the ring-buffer which contains events of the form:
+
+#define PERF_RECORD_MISC_KERNEL (1 << 0)
+#define PERF_RECORD_MISC_USER (1 << 1)
+#define PERF_RECORD_MISC_OVERFLOW (1 << 2)
+
+struct perf_event_header {
+ __u32 type;
+ __u16 misc;
+ __u16 size;
+};
+
+enum perf_event_type {
+
+ /*
+ * The MMAP events record the PROT_EXEC mappings so that we can
+ * correlate userspace IPs to code. They have the following structure:
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * char filename[];
+ * };
+ */
+ PERF_RECORD_MMAP = 1,
+ PERF_RECORD_MUNMAP = 2,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * char comm[];
+ * };
+ */
+ PERF_RECORD_COMM = 3,
+
+ /*
+ * When header.misc & PERF_RECORD_MISC_OVERFLOW the event_type field
+ * will be PERF_RECORD_*
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * { u64 ip; } && PERF_RECORD_IP
+ * { u32 pid, tid; } && PERF_RECORD_TID
+ * { u64 time; } && PERF_RECORD_TIME
+ * { u64 addr; } && PERF_RECORD_ADDR
+ *
+ * { u64 nr;
+ * { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP
+ *
+ * { u16 nr,
+ * hv,
+ * kernel,
+ * user;
+ * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN
+ * };
+ */
+};
+
+NOTE: PERF_RECORD_CALLCHAIN is arch specific and currently only implemented
+ on x86.
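+
+A sketch of walking these records (illustration only; 'base', 'page' and
+'head' are from the mapping sketch above, and wrap-around handling is
+omitted):
+
+ unsigned char *data = (unsigned char *)base + page;
+ __u64 offset = 0;
+
+ while (offset < head) {
+ struct perf_event_header *ev = (void *)(data + offset);
+ /* ev->type selects one of the layouts above; ev->size is the
+ size of the whole record, including the header */
+ offset += ev->size;
+ }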
+
+Notification of new events is possible through poll()/select()/epoll() and
+fcntl() managing signals.
+
+Normally a notification is generated for every page filled; however, one can
+additionally set perf_event_attr.wakeup_events to generate one after every
+so many counter overflow events.
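+
+For illustration, a consumer that blocks until new data arrives can simply
+poll the counter fd:
+
+ #include <poll.h>
+
+ struct pollfd pfd = { .fd = fd, .events = POLLIN };
+
+ while (poll(&pfd, 1, -1) > 0) {
+ /* new records are available in the ring-buffer up to data_head */
+ }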
+
+Future work will include a splice() interface to the ring-buffer.
+
+
+Counters can be enabled and disabled in two ways: via ioctl and via
+prctl. When a counter is disabled, it doesn't count or generate
+events but does continue to exist and maintain its count value.
+
+An individual counter or counter group can be enabled with
+
+ ioctl(fd, PERF_EVENT_IOC_ENABLE);
+
+or disabled with
+
+ ioctl(fd, PERF_EVENT_IOC_DISABLE);
+
+Enabling or disabling the leader of a group enables or disables the
+whole group; that is, while the group leader is disabled, none of the
+counters in the group will count. Enabling or disabling a member of a
+group other than the leader only affects that counter - disabling a
+non-leader stops that counter from counting but doesn't affect any
+other counter.
+
+Additionally, non-inherited overflow counters can use
+
+ ioctl(fd, PERF_EVENT_IOC_REFRESH, nr);
+
+to enable a counter for 'nr' events, after which it gets disabled again.
+
+A process can enable or disable all the counter groups that are
+attached to it, using prctl:
+
+ prctl(PR_TASK_PERF_EVENTS_ENABLE);
+
+ prctl(PR_TASK_PERF_EVENTS_DISABLE);
+
+This applies to all counters on the current process, whether created
+by this process or by another, and doesn't affect any counters that
+this process has created on other processes. It only enables or
+disables the group leaders, not any other members in the groups.
+
+
+Arch requirements
+-----------------
+
+If your architecture does not have hardware performance counters, you can
+still use the generic software counters based on hrtimers for sampling.
+
+So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you
+will need at least this:
+ - asm/perf_event.h - a basic stub will suffice at first
+ - support for atomic64 types (and associated helper functions)
+ - set_perf_event_pending() implemented
+
+If your architecture does have hardware capabilities, you can override the
+weak stub hw_perf_event_init() to register hardware counters.
+
+Architectures that have d-cache aliasing issues, such as Sparc and ARM,
+should select PERF_USE_VMALLOC in order to avoid these for perf mmap().
diff --git a/tools/perf/external/usr/include/ansidecl.h b/tools/perf/external/usr/include/ansidecl.h
new file mode 100644
index 00000000..8b766474
--- /dev/null
+++ b/tools/perf/external/usr/include/ansidecl.h
@@ -0,0 +1,423 @@
+/* ANSI and traditional C compatibility macros
+ Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+ 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* ANSI and traditional C compatibility macros
+
+ ANSI C is assumed if __STDC__ is #defined.
+
+ Macro ANSI C definition Traditional C definition
+ ----- ---- - ---------- ----------- - ----------
+ ANSI_PROTOTYPES 1 not defined
+ PTR `void *' `char *'
+ PTRCONST `void *const' `char *'
+ LONG_DOUBLE `long double' `double'
+ const not defined `'
+ volatile not defined `'
+ signed not defined `'
+ VA_START(ap, var) va_start(ap, var) va_start(ap)
+
+ Note that it is safe to write "void foo();" indicating a function
+ with no return value, in all K+R compilers we have been able to test.
+
+ For declaring functions with prototypes, we also provide these:
+
+ PARAMS ((prototype))
+ -- for functions which take a fixed number of arguments. Use this
+ when declaring the function. When defining the function, write a
+ K+R style argument list. For example:
+
+ char *strcpy PARAMS ((char *dest, char *source));
+ ...
+ char *
+ strcpy (dest, source)
+ char *dest;
+ char *source;
+ { ... }
+
+
+ VPARAMS ((prototype, ...))
+ -- for functions which take a variable number of arguments. Use
+ PARAMS to declare the function, VPARAMS to define it. For example:
+
+ int printf PARAMS ((const char *format, ...));
+ ...
+ int
+ printf VPARAMS ((const char *format, ...))
+ {
+ ...
+ }
+
+ For writing functions which take variable numbers of arguments, we
+ also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros. These
+ hide the differences between K+R <varargs.h> and C89 <stdarg.h> more
+ thoroughly than the simple VA_START() macro mentioned above.
+
+ VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end.
+ Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls
+ corresponding to the list of fixed arguments. Then use va_arg
+ normally to get the variable arguments, or pass your va_list object
+ around. You do not declare the va_list yourself; VA_OPEN does it
+ for you.
+
+ Here is a complete example:
+
+ int
+ printf VPARAMS ((const char *format, ...))
+ {
+ int result;
+
+ VA_OPEN (ap, format);
+ VA_FIXEDARG (ap, const char *, format);
+
+ result = vfprintf (stdout, format, ap);
+ VA_CLOSE (ap);
+
+ return result;
+ }
+
+
+ You can declare variables either before or after the VA_OPEN,
+ VA_FIXEDARG sequence. Also, VA_OPEN and VA_CLOSE are the beginning
+ and end of a block. They must appear at the same nesting level,
+ and any variables declared after VA_OPEN go out of scope at
+ VA_CLOSE. Unfortunately, with a K+R compiler, that includes the
+ argument list. You can have multiple instances of VA_OPEN/VA_CLOSE
+ pairs in a single function in case you need to traverse the
+ argument list more than once.
+
+ For ease of writing code which uses GCC extensions but needs to be
+ portable to other compilers, we provide the GCC_VERSION macro that
+ simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various
+ wrappers around __attribute__. Also, __extension__ will be #defined
+ to nothing if it doesn't work. See below.
+
+ This header also defines a lot of obsolete macros:
+ CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID,
+ AND, DOTS, NOARGS. Don't use them. */
+
+#ifndef _ANSIDECL_H
+#define _ANSIDECL_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Every source file includes this file,
+ so they will all get the switch for lint. */
+/* LINTLIBRARY */
+
+/* Using MACRO(x,y) in cpp #if conditionals does not work with some
+ older preprocessors. Thus we can't define something like this:
+
+#define HAVE_GCC_VERSION(MAJOR, MINOR) \
+ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR)))
+
+and then test "#if HAVE_GCC_VERSION(2,7)".
+
+So instead we use the macro below and test it against specific values. */
+
+/* This macro simplifies testing whether we are using gcc, and if it
+ is of a particular minimum version. (Both major & minor numbers are
+ significant.) This macro will evaluate to 0 if we are not using
+ gcc at all. */
+#ifndef GCC_VERSION
+#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__)
+#endif /* GCC_VERSION */
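+/* Illustration, not part of the original header: GCC 3.4 corresponds to
+ GCC_VERSION 3004, so "#if GCC_VERSION >= 3004" means "GCC 3.4 or newer";
+ with a non-GCC compiler the expression evaluates to 0 and the test fails. */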
+
+#if defined (__STDC__) || defined(__cplusplus) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32)
+/* All known AIX compilers implement these things (but don't always
+ define __STDC__). The RISC/OS MIPS compiler defines these things
+ in SVR4 mode, but does not define __STDC__. */
+/* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other
+ C++ compilers, does not define __STDC__, though it acts as if this
+ was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */
+
+#define ANSI_PROTOTYPES 1
+#define PTR void *
+#define PTRCONST void *const
+#define LONG_DOUBLE long double
+
+/* PARAMS is often defined elsewhere (e.g. by libintl.h), so wrap it in
+ a #ifndef. */
+#ifndef PARAMS
+#define PARAMS(ARGS) ARGS
+#endif
+
+#define VPARAMS(ARGS) ARGS
+#define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR)
+
+/* variadic function helper macros */
+/* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's
+ use without inhibiting further decls and without declaring an
+ actual variable. */
+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy
+#define VA_CLOSE(AP) } va_end(AP); }
+#define VA_FIXEDARG(AP, T, N) struct Qdmy
+
+#undef const
+#undef volatile
+#undef signed
+
+/* inline requires special treatment; it's in C99, and GCC >=2.7 supports
+ it too, but it's not in C89. */
+#undef inline
+#if __STDC_VERSION__ >= 199901L || defined(__cplusplus) || (defined(__SUNPRO_C) && defined(__C99FEATURES__))
+/* it's a keyword */
+#else
+# if GCC_VERSION >= 2007
+# define inline __inline__ /* __inline__ prevents -pedantic warnings */
+# else
+# define inline /* nothing */
+# endif
+#endif
+
+/* These are obsolete. Do not use. */
+#ifndef IN_GCC
+#define CONST const
+#define VOLATILE volatile
+#define SIGNED signed
+
+#define PROTO(type, name, arglist) type name arglist
+#define EXFUN(name, proto) name proto
+#define DEFUN(name, arglist, args) name(args)
+#define DEFUN_VOID(name) name(void)
+#define AND ,
+#define DOTS , ...
+#define NOARGS void
+#endif /* ! IN_GCC */
+
+#else /* Not ANSI C. */
+
+#undef ANSI_PROTOTYPES
+#define PTR char *
+#define PTRCONST PTR
+#define LONG_DOUBLE double
+
+#define PARAMS(args) ()
+#define VPARAMS(args) (va_alist) va_dcl
+#define VA_START(va_list, var) va_start(va_list)
+
+#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy
+#define VA_CLOSE(AP) } va_end(AP); }
+#define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE)
+
+/* some systems define these in header files for non-ansi mode */
+#undef const
+#undef volatile
+#undef signed
+#undef inline
+#define const
+#define volatile
+#define signed
+#define inline
+
+#ifndef IN_GCC
+#define CONST
+#define VOLATILE
+#define SIGNED
+
+#define PROTO(type, name, arglist) type name ()
+#define EXFUN(name, proto) name()
+#define DEFUN(name, arglist, args) name arglist args;
+#define DEFUN_VOID(name) name()
+#define AND ;
+#define DOTS
+#define NOARGS
+#endif /* ! IN_GCC */
+
+#endif /* ANSI C. */
+
+/* Define macros for some gcc attributes. This permits us to use the
+ macros freely, and know that they will come into play for the
+ version of gcc in which they are supported. */
+
+#if (GCC_VERSION < 2007)
+# define __attribute__(x)
+#endif
+
+/* Attribute __malloc__ on functions was valid as of gcc 2.96. */
+#ifndef ATTRIBUTE_MALLOC
+# if (GCC_VERSION >= 2096)
+# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__))
+# else
+# define ATTRIBUTE_MALLOC
+# endif /* GNUC >= 2.96 */
+#endif /* ATTRIBUTE_MALLOC */
+
+/* Attributes on labels were valid as of gcc 2.93 and g++ 4.5. For
+ g++ an attribute on a label must be followed by a semicolon. */
+#ifndef ATTRIBUTE_UNUSED_LABEL
+# ifndef __cplusplus
+# if GCC_VERSION >= 2093
+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED
+# else
+# define ATTRIBUTE_UNUSED_LABEL
+# endif
+# else
+# if GCC_VERSION >= 4005
+# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED ;
+# else
+# define ATTRIBUTE_UNUSED_LABEL
+# endif
+# endif
+#endif
+
+#ifndef ATTRIBUTE_UNUSED
+#define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+#endif /* ATTRIBUTE_UNUSED */
+
+/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the
+ identifier name. */
+#if ! defined(__cplusplus) || (GCC_VERSION >= 3004)
+# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED
+#else /* !__cplusplus || GNUC >= 3.4 */
+# define ARG_UNUSED(NAME) NAME
+#endif /* !__cplusplus || GNUC >= 3.4 */
+
+#ifndef ATTRIBUTE_NORETURN
+#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__))
+#endif /* ATTRIBUTE_NORETURN */
+
+/* Attribute `nonnull' was valid as of gcc 3.3. */
+#ifndef ATTRIBUTE_NONNULL
+# if (GCC_VERSION >= 3003)
+# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m)))
+# else
+# define ATTRIBUTE_NONNULL(m)
+# endif /* GNUC >= 3.3 */
+#endif /* ATTRIBUTE_NONNULL */
+
+/* Attribute `pure' was valid as of gcc 3.0. */
+#ifndef ATTRIBUTE_PURE
+# if (GCC_VERSION >= 3000)
+# define ATTRIBUTE_PURE __attribute__ ((__pure__))
+# else
+# define ATTRIBUTE_PURE
+# endif /* GNUC >= 3.0 */
+#endif /* ATTRIBUTE_PURE */
+
+/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL.
+ This was the case for the `printf' format attribute by itself
+ before GCC 3.3, but as of 3.3 we need to add the `nonnull'
+ attribute to retain this behavior. */
+#ifndef ATTRIBUTE_PRINTF
+#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m)
+#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2)
+#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3)
+#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4)
+#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5)
+#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6)
+#endif /* ATTRIBUTE_PRINTF */
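+/* Illustration, not part of the original header: a declaration such as
+
+ extern void error_report (const char *format, ...) ATTRIBUTE_PRINTF_1;
+
+ (error_report being a hypothetical name) asks GCC to check the variable
+ arguments against FORMAT as printf would, and marks FORMAT as non-null. */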
+
+/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on
+ a function pointer. Format attributes were allowed on function
+ pointers as of gcc 3.1. */
+#ifndef ATTRIBUTE_FPTR_PRINTF
+# if (GCC_VERSION >= 3001)
+# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n)
+# else
+# define ATTRIBUTE_FPTR_PRINTF(m, n)
+# endif /* GNUC >= 3.1 */
+# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2)
+# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3)
+# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4)
+# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5)
+# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6)
+#endif /* ATTRIBUTE_FPTR_PRINTF */
+
+/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A
+ NULL format specifier was allowed as of gcc 3.3. */
+#ifndef ATTRIBUTE_NULL_PRINTF
+# if (GCC_VERSION >= 3003)
+# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n)))
+# else
+# define ATTRIBUTE_NULL_PRINTF(m, n)
+# endif /* GNUC >= 3.3 */
+# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2)
+# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3)
+# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4)
+# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5)
+# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6)
+#endif /* ATTRIBUTE_NULL_PRINTF */
+
+/* Attribute `sentinel' was valid as of gcc 3.5. */
+#ifndef ATTRIBUTE_SENTINEL
+# if (GCC_VERSION >= 3005)
+# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__))
+# else
+# define ATTRIBUTE_SENTINEL
+# endif /* GNUC >= 3.5 */
+#endif /* ATTRIBUTE_SENTINEL */
+
+
+#ifndef ATTRIBUTE_ALIGNED_ALIGNOF
+# if (GCC_VERSION >= 3000)
+# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m))))
+# else
+# define ATTRIBUTE_ALIGNED_ALIGNOF(m)
+# endif /* GNUC >= 3.0 */
+#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */
+
+/* Useful for structures whose layout must match some binary specification
+ regardless of the alignment and padding qualities of the compiler. */
+#ifndef ATTRIBUTE_PACKED
+# define ATTRIBUTE_PACKED __attribute__ ((packed))
+#endif
+
+/* Attributes `hot' and `cold' were valid as of gcc 4.3. */
+#ifndef ATTRIBUTE_COLD
+# if (GCC_VERSION >= 4003)
+# define ATTRIBUTE_COLD __attribute__ ((__cold__))
+# else
+# define ATTRIBUTE_COLD
+# endif /* GNUC >= 4.3 */
+#endif /* ATTRIBUTE_COLD */
+#ifndef ATTRIBUTE_HOT
+# if (GCC_VERSION >= 4003)
+# define ATTRIBUTE_HOT __attribute__ ((__hot__))
+# else
+# define ATTRIBUTE_HOT
+# endif /* GNUC >= 4.3 */
+#endif /* ATTRIBUTE_HOT */
+
+/* We use __extension__ in some places to suppress -pedantic warnings
+ about GCC extensions. This feature didn't work properly before
+ gcc 2.8. */
+#if GCC_VERSION < 2008
+#define __extension__
+#endif
+
+/* This is used to declare a const variable which should be visible
+ outside of the current compilation unit. Use it as
+ EXPORTED_CONST int i = 1;
+ This is because the semantics of const are different in C and C++.
+ "extern const" is permitted in C but it looks strange, and gcc
+ warns about it when -Wc++-compat is not used. */
+#ifdef __cplusplus
+#define EXPORTED_CONST extern const
+#else
+#define EXPORTED_CONST const
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ansidecl.h */
diff --git a/tools/perf/external/usr/include/bfd.h b/tools/perf/external/usr/include/bfd.h
new file mode 100644
index 00000000..63178493
--- /dev/null
+++ b/tools/perf/external/usr/include/bfd.h
@@ -0,0 +1,5902 @@
+/* DO NOT EDIT! -*- buffer-read-only: t -*- This file is automatically
+ generated from "bfd-in.h", "init.c", "opncls.c", "libbfd.c",
+ "bfdio.c", "bfdwin.c", "section.c", "archures.c", "reloc.c",
+ "syms.c", "bfd.c", "archive.c", "corefile.c", "targets.c", "format.c",
+ "linker.c", "simple.c" and "compress.c".
+ Run "make headers" in your build bfd/ to regenerate. */
+
+/* Main header file for the bfd library -- portable access to object files.
+
+ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Free Software Foundation, Inc.
+
+ Contributed by Cygnus Support.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef __BFD_H_SEEN__
+#define __BFD_H_SEEN__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "ansidecl.h"
+#include "symcat.h"
+#if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE)
+#ifndef SABER
+/* This hack is to avoid a problem with some strict ANSI C preprocessors.
+ The problem is, "32_" is not a valid preprocessing token, and we don't
+ want extra underscores (e.g., "nlm_32_"). The XCONCAT2 macro will
+ cause the inner CONCAT2 macros to be evaluated first, producing
+ still-valid pp-tokens. Then the final concatenation can be done. */
+#undef CONCAT4
+#define CONCAT4(a,b,c,d) XCONCAT2(CONCAT2(a,b),CONCAT2(c,d))
+#endif
+#endif
+
+/* This is a utility macro to handle the situation where the code
+ wants to place a constant string into the code, followed by a
+ comma and then the length of the string. Doing this by hand
+ is error prone, so using this macro is safer. */
+#define STRING_COMMA_LEN(STR) (STR), (sizeof (STR) - 1)
+/* Unfortunately it is not possible to use the STRING_COMMA_LEN macro
+ to create the arguments to another macro, since the preprocessor
+ will mis-count the number of arguments to the outer macro (by not
+ evaluating STRING_COMMA_LEN and so missing the comma). This is a
+ problem for example when trying to use STRING_COMMA_LEN to build
+ the arguments to the strncmp() macro. Hence this alternative
+ definition of strncmp is provided here.
+
+ Note - these macros do NOT work if STR2 is not a constant string. */
+#define CONST_STRNEQ(STR1,STR2) (strncmp ((STR1), (STR2), sizeof (STR2) - 1) == 0)
+ /* strcpy() can have a similar problem, but since we know we are
+ copying a constant string, we can use memcpy which will be faster
+ since there is no need to check for a NUL byte inside STR. We
+ can also save time if we do not need to copy the terminating NUL. */
+#define LITMEMCPY(DEST,STR2) memcpy ((DEST), (STR2), sizeof (STR2) - 1)
+#define LITSTRCPY(DEST,STR2) memcpy ((DEST), (STR2), sizeof (STR2))
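+/* Illustration, not part of the original header: with the macros above,
+ fwrite (STRING_COMMA_LEN ("magic"), 1, file);
+ expands to fwrite ("magic", sizeof ("magic") - 1, 1, file), and
+ CONST_STRNEQ (name, ".text")
+ is non-zero exactly when NAME starts with ".text". */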
+
+
+#define BFD_SUPPORTS_PLUGINS 1
+
+/* The word size used by BFD on the host. This may be 64 with a 32
+ bit target if the host is 64 bit, or if other 64 bit targets have
+ been selected with --enable-targets, or if --enable-64-bit-bfd. */
+#define BFD_ARCH_SIZE 32
+
+/* The word size of the default bfd target. */
+#define BFD_DEFAULT_TARGET_SIZE 32
+
+#define BFD_HOST_64BIT_LONG 0
+#define BFD_HOST_64BIT_LONG_LONG 1
+#if 1
+#define BFD_HOST_64_BIT long long
+#define BFD_HOST_U_64_BIT unsigned long long
+typedef BFD_HOST_64_BIT bfd_int64_t;
+typedef BFD_HOST_U_64_BIT bfd_uint64_t;
+#endif
+
+#if BFD_ARCH_SIZE >= 64
+#define BFD64
+#endif
+
+#ifndef INLINE
+#if __GNUC__ >= 2
+#define INLINE __inline__
+#else
+#define INLINE
+#endif
+#endif
+
+/* Declaring a type wide enough to hold a host long and a host pointer. */
+#define BFD_HOSTPTR_T unsigned long
+typedef BFD_HOSTPTR_T bfd_hostptr_t;
+
+/* Forward declaration. */
+typedef struct bfd bfd;
+
+/* Boolean type used in bfd. Too many systems define their own
+ versions of "boolean" for us to safely typedef a "boolean" of
+ our own. Using an enum for "bfd_boolean" has its own set of
+ problems, with strange looking casts required to avoid warnings
+ on some older compilers. Thus we just use an int.
+
+ General rule: Functions which are bfd_boolean return TRUE on
+ success and FALSE on failure (unless they're a predicate). */
+
+typedef int bfd_boolean;
+#undef FALSE
+#undef TRUE
+#define FALSE 0
+#define TRUE 1
+
+#ifdef BFD64
+
+#ifndef BFD_HOST_64_BIT
+ #error No 64 bit integer type available
+#endif /* ! defined (BFD_HOST_64_BIT) */
+
+typedef BFD_HOST_U_64_BIT bfd_vma;
+typedef BFD_HOST_64_BIT bfd_signed_vma;
+typedef BFD_HOST_U_64_BIT bfd_size_type;
+typedef BFD_HOST_U_64_BIT symvalue;
+
+#if BFD_HOST_64BIT_LONG
+#define BFD_VMA_FMT "l"
+#elif defined (__MSVCRT__)
+#define BFD_VMA_FMT "I64"
+#else
+#define BFD_VMA_FMT "ll"
+#endif
+
+#ifndef fprintf_vma
+#define sprintf_vma(s,x) sprintf (s, "%016" BFD_VMA_FMT "x", x)
+#define fprintf_vma(f,x) fprintf (f, "%016" BFD_VMA_FMT "x", x)
+#endif
+
+#else /* not BFD64 */
+
+/* Represent a target address. Also used as a generic unsigned type
+ which is guaranteed to be big enough to hold any arithmetic types
+ we need to deal with. */
+typedef unsigned long bfd_vma;
+
+/* A generic signed type which is guaranteed to be big enough to hold any
+ arithmetic types we need to deal with. Can be assumed to be compatible
+ with bfd_vma in the same way that signed and unsigned ints are compatible
+ (as parameters, in assignment, etc). */
+typedef long bfd_signed_vma;
+
+typedef unsigned long symvalue;
+typedef unsigned long bfd_size_type;
+
+/* Print a bfd_vma x on stream s. */
+#define BFD_VMA_FMT "l"
+#define fprintf_vma(s,x) fprintf (s, "%08" BFD_VMA_FMT "x", x)
+#define sprintf_vma(s,x) sprintf (s, "%08" BFD_VMA_FMT "x", x)
+
+#endif /* not BFD64 */
+
+#define HALF_BFD_SIZE_TYPE \
+ (((bfd_size_type) 1) << (8 * sizeof (bfd_size_type) / 2))
+
+#ifndef BFD_HOST_64_BIT
+/* Fall back on a 32 bit type. The idea is to make these types always
+ available for function return types, but in the case that
+ BFD_HOST_64_BIT is undefined such a function should abort or
+ otherwise signal an error. */
+typedef bfd_signed_vma bfd_int64_t;
+typedef bfd_vma bfd_uint64_t;
+#endif
+
+/* An offset into a file. BFD always uses the largest possible offset
+ based on the build time availability of fseek, fseeko, or fseeko64. */
+typedef BFD_HOST_64_BIT file_ptr;
+typedef unsigned BFD_HOST_64_BIT ufile_ptr;
+
+extern void bfd_sprintf_vma (bfd *, char *, bfd_vma);
+extern void bfd_fprintf_vma (bfd *, void *, bfd_vma);
+
+#define printf_vma(x) fprintf_vma(stdout,x)
+#define bfd_printf_vma(abfd,x) bfd_fprintf_vma (abfd,stdout,x)
+
+typedef unsigned int flagword; /* 32 bits of flags */
+typedef unsigned char bfd_byte;
+
+/* File formats. */
+
+typedef enum bfd_format
+{
+ bfd_unknown = 0, /* File format is unknown. */
+ bfd_object, /* Linker/assembler/compiler output. */
+ bfd_archive, /* Object archive file. */
+ bfd_core, /* Core dump. */
+ bfd_type_end /* Marks the end; don't use it! */
+}
+bfd_format;
+
+/* Symbols and relocation. */
+
+/* A count of carsyms (canonical archive symbols). */
+typedef unsigned long symindex;
+
+/* How to perform a relocation. */
+typedef const struct reloc_howto_struct reloc_howto_type;
+
+#define BFD_NO_MORE_SYMBOLS ((symindex) ~0)
+
+/* General purpose part of a symbol X;
+ target specific parts are in libcoff.h, libaout.h, etc. */
+
+#define bfd_get_section(x) ((x)->section)
+#define bfd_get_output_section(x) ((x)->section->output_section)
+#define bfd_set_section(x,y) ((x)->section) = (y)
+#define bfd_asymbol_base(x) ((x)->section->vma)
+#define bfd_asymbol_value(x) (bfd_asymbol_base(x) + (x)->value)
+#define bfd_asymbol_name(x) ((x)->name)
+/*Perhaps future: #define bfd_asymbol_bfd(x) ((x)->section->owner)*/
+#define bfd_asymbol_bfd(x) ((x)->the_bfd)
+#define bfd_asymbol_flavour(x) \
+ (((x)->flags & BSF_SYNTHETIC) != 0 \
+ ? bfd_target_unknown_flavour \
+ : bfd_asymbol_bfd (x)->xvec->flavour)
+
+/* A canonical archive symbol. */
+/* This is a type pun with struct ranlib on purpose! */
+typedef struct carsym
+{
+ char *name;
+ file_ptr file_offset; /* Look here to find the file. */
+}
+carsym; /* To make these you call a carsymogen. */
+
+/* Used in generating armaps (archive tables of contents).
+ Perhaps just a forward definition would do? */
+struct orl /* Output ranlib. */
+{
+ char **name; /* Symbol name. */
+ union
+ {
+ file_ptr pos;
+ bfd *abfd;
+ } u; /* bfd* or file position. */
+ int namidx; /* Index into string table. */
+};
+
+/* Linenumber stuff. */
+typedef struct lineno_cache_entry
+{
+ unsigned int line_number; /* Linenumber from start of function. */
+ union
+ {
+ struct bfd_symbol *sym; /* Function name. */
+ bfd_vma offset; /* Offset into section. */
+ } u;
+}
+alent;
+
+/* Object and core file sections. */
+
+#define align_power(addr, align) \
+ (((addr) + ((bfd_vma) 1 << (align)) - 1) & ((bfd_vma) -1 << (align)))
+
+typedef struct bfd_section *sec_ptr;
+
+#define bfd_get_section_name(bfd, ptr) ((ptr)->name + 0)
+#define bfd_get_section_vma(bfd, ptr) ((ptr)->vma + 0)
+#define bfd_get_section_lma(bfd, ptr) ((ptr)->lma + 0)
+#define bfd_get_section_alignment(bfd, ptr) ((ptr)->alignment_power + 0)
+#define bfd_section_name(bfd, ptr) ((ptr)->name)
+#define bfd_section_size(bfd, ptr) ((ptr)->size)
+#define bfd_get_section_size(ptr) ((ptr)->size)
+#define bfd_section_vma(bfd, ptr) ((ptr)->vma)
+#define bfd_section_lma(bfd, ptr) ((ptr)->lma)
+#define bfd_section_alignment(bfd, ptr) ((ptr)->alignment_power)
+#define bfd_get_section_flags(bfd, ptr) ((ptr)->flags + 0)
+#define bfd_get_section_userdata(bfd, ptr) ((ptr)->userdata)
+
+#define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0)
+
+#define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE)
+#define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE)
+#define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE)
+/* Find the address one past the end of SEC. */
+#define bfd_get_section_limit(bfd, sec) \
+ (((sec)->rawsize ? (sec)->rawsize : (sec)->size) \
+ / bfd_octets_per_byte (bfd))
+
+/* Return TRUE if input section SEC has been discarded. */
+#define elf_discarded_section(sec) \
+ (!bfd_is_abs_section (sec) \
+ && bfd_is_abs_section ((sec)->output_section) \
+ && (sec)->sec_info_type != ELF_INFO_TYPE_MERGE \
+ && (sec)->sec_info_type != ELF_INFO_TYPE_JUST_SYMS)
+
+/* Forward define. */
+struct stat;
+
+typedef enum bfd_print_symbol
+{
+ bfd_print_symbol_name,
+ bfd_print_symbol_more,
+ bfd_print_symbol_all
+} bfd_print_symbol_type;
+
+/* Information about a symbol that nm needs. */
+
+typedef struct _symbol_info
+{
+ symvalue value;
+ char type;
+ const char *name; /* Symbol name. */
+ unsigned char stab_type; /* Stab type. */
+ char stab_other; /* Stab other. */
+ short stab_desc; /* Stab desc. */
+ const char *stab_name; /* String for stab type. */
+} symbol_info;
+
+/* Get the name of a stabs type code. */
+
+extern const char *bfd_get_stab_name (int);
+
+/* Hash table routines. There is no way to free up a hash table. */
+
+/* An element in the hash table. Most uses will actually use a larger
+ structure, and an instance of this will be the first field. */
+
+struct bfd_hash_entry
+{
+ /* Next entry for this hash code. */
+ struct bfd_hash_entry *next;
+ /* String being hashed. */
+ const char *string;
+ /* Hash code. This is the full hash code, not the index into the
+ table. */
+ unsigned long hash;
+};
+
+/* A hash table. */
+
+struct bfd_hash_table
+{
+ /* The hash array. */
+ struct bfd_hash_entry **table;
+ /* A function used to create new elements in the hash table. The
+ first entry is itself a pointer to an element. When this
+ function is first invoked, this pointer will be NULL. However,
+ having the pointer permits a hierarchy of method functions to be
+ built each of which calls the function in the superclass. Thus
+ each function should be written to allocate a new block of memory
+ only if the argument is NULL. */
+ struct bfd_hash_entry *(*newfunc)
+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
+ /* An objalloc for this hash table. This is a struct objalloc *,
+ but we use void * to avoid requiring the inclusion of objalloc.h. */
+ void *memory;
+ /* The number of slots in the hash table. */
+ unsigned int size;
+ /* The number of entries in the hash table. */
+ unsigned int count;
+ /* The size of elements. */
+ unsigned int entsize;
+ /* If non-zero, don't grow the hash table. */
+ unsigned int frozen:1;
+};
+
+/* Initialize a hash table. */
+extern bfd_boolean bfd_hash_table_init
+ (struct bfd_hash_table *,
+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
+ struct bfd_hash_table *,
+ const char *),
+ unsigned int);
+
+/* Initialize a hash table specifying a size. */
+extern bfd_boolean bfd_hash_table_init_n
+ (struct bfd_hash_table *,
+ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
+ struct bfd_hash_table *,
+ const char *),
+ unsigned int, unsigned int);
+
+/* Free up a hash table. */
+extern void bfd_hash_table_free
+ (struct bfd_hash_table *);
+
+/* Look up a string in a hash table. If CREATE is TRUE, a new entry
+ will be created for this string if one does not already exist. The
+ COPY argument must be TRUE if this routine should copy the string
+ into newly allocated memory when adding an entry. */
+extern struct bfd_hash_entry *bfd_hash_lookup
+ (struct bfd_hash_table *, const char *, bfd_boolean create,
+ bfd_boolean copy);
+
+/* Insert an entry in a hash table. */
+extern struct bfd_hash_entry *bfd_hash_insert
+ (struct bfd_hash_table *, const char *, unsigned long);
+
+/* Replace an entry in a hash table. */
+extern void bfd_hash_replace
+ (struct bfd_hash_table *, struct bfd_hash_entry *old,
+ struct bfd_hash_entry *nw);
+
+/* Base method for creating a hash table entry. */
+extern struct bfd_hash_entry *bfd_hash_newfunc
+ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
+
+/* Grab some space for a hash table entry. */
+extern void *bfd_hash_allocate
+ (struct bfd_hash_table *, unsigned int);
+
+/* Traverse a hash table in a random order, calling a function on each
+ element. If the function returns FALSE, the traversal stops. The
+ INFO argument is passed to the function. */
+extern void bfd_hash_traverse
+ (struct bfd_hash_table *,
+ bfd_boolean (*) (struct bfd_hash_entry *, void *),
+ void *info);
+
+/* Allows the default size of a hash table to be configured. New hash
+ tables allocated using bfd_hash_table_init will be created with
+ this size. */
+extern void bfd_hash_set_default_size (bfd_size_type);
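+/* Illustration, not part of the original header: a caller that needs only
+ plain entries typically initializes a table with
+
+ bfd_hash_table_init (&table, bfd_hash_newfunc,
+ sizeof (struct bfd_hash_entry));
+
+ then calls bfd_hash_lookup (&table, name, TRUE, TRUE) to find or create
+ the entry for NAME (copying the string into the table's own storage),
+ and finally releases everything with bfd_hash_table_free (&table). */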
+
+/* This structure is used to keep track of stabs in sections
+ information while linking. */
+
+struct stab_info
+{
+ /* A hash table used to hold stabs strings. */
+ struct bfd_strtab_hash *strings;
+ /* The header file hash table. */
+ struct bfd_hash_table includes;
+ /* The first .stabstr section. */
+ struct bfd_section *stabstr;
+};
+
+#define COFF_SWAP_TABLE (void *) &bfd_coff_std_swap_table
+
+/* User program access to BFD facilities. */
+
+/* Direct I/O routines, for programs which know more about the object
+ file than BFD does. Use higher level routines if possible. */
+
+extern bfd_size_type bfd_bread (void *, bfd_size_type, bfd *);
+extern bfd_size_type bfd_bwrite (const void *, bfd_size_type, bfd *);
+extern int bfd_seek (bfd *, file_ptr, int);
+extern file_ptr bfd_tell (bfd *);
+extern int bfd_flush (bfd *);
+extern int bfd_stat (bfd *, struct stat *);
+
+/* Deprecated old routines. */
+#if __GNUC__
+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_read", __FILE__, __LINE__, __FUNCTION__), \
+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_write", __FILE__, __LINE__, __FUNCTION__), \
+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#else
+#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_read", (const char *) 0, 0, (const char *) 0), \
+ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
+ (warn_deprecated ("bfd_write", (const char *) 0, 0, (const char *) 0),\
+ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
+#endif
+extern void warn_deprecated (const char *, const char *, int, const char *);
+
+/* Cast from const char * to char * so that caller can assign to
+ a char * without a warning. */
+#define bfd_get_filename(abfd) ((char *) (abfd)->filename)
+#define bfd_get_cacheable(abfd) ((abfd)->cacheable)
+#define bfd_get_format(abfd) ((abfd)->format)
+#define bfd_get_target(abfd) ((abfd)->xvec->name)
+#define bfd_get_flavour(abfd) ((abfd)->xvec->flavour)
+#define bfd_family_coff(abfd) \
+ (bfd_get_flavour (abfd) == bfd_target_coff_flavour || \
+ bfd_get_flavour (abfd) == bfd_target_xcoff_flavour)
+#define bfd_big_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
+#define bfd_little_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_LITTLE)
+#define bfd_header_big_endian(abfd) \
+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_BIG)
+#define bfd_header_little_endian(abfd) \
+ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_LITTLE)
+#define bfd_get_file_flags(abfd) ((abfd)->flags)
+#define bfd_applicable_file_flags(abfd) ((abfd)->xvec->object_flags)
+#define bfd_applicable_section_flags(abfd) ((abfd)->xvec->section_flags)
+#define bfd_my_archive(abfd) ((abfd)->my_archive)
+#define bfd_has_map(abfd) ((abfd)->has_armap)
+#define bfd_is_thin_archive(abfd) ((abfd)->is_thin_archive)
+
+#define bfd_valid_reloc_types(abfd) ((abfd)->xvec->valid_reloc_types)
+#define bfd_usrdata(abfd) ((abfd)->usrdata)
+
+#define bfd_get_start_address(abfd) ((abfd)->start_address)
+#define bfd_get_symcount(abfd) ((abfd)->symcount)
+#define bfd_get_outsymbols(abfd) ((abfd)->outsymbols)
+#define bfd_count_sections(abfd) ((abfd)->section_count)
+
+#define bfd_get_dynamic_symcount(abfd) ((abfd)->dynsymcount)
+
+#define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char)
+
+#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE)
+
+extern bfd_boolean bfd_cache_close
+ (bfd *abfd);
+/* NB: This declaration should match the autogenerated one in libbfd.h. */
+
+extern bfd_boolean bfd_cache_close_all (void);
+
+extern bfd_boolean bfd_record_phdr
+ (bfd *, unsigned long, bfd_boolean, flagword, bfd_boolean, bfd_vma,
+ bfd_boolean, bfd_boolean, unsigned int, struct bfd_section **);
+
+/* Byte swapping routines. */
+
+bfd_uint64_t bfd_getb64 (const void *);
+bfd_uint64_t bfd_getl64 (const void *);
+bfd_int64_t bfd_getb_signed_64 (const void *);
+bfd_int64_t bfd_getl_signed_64 (const void *);
+bfd_vma bfd_getb32 (const void *);
+bfd_vma bfd_getl32 (const void *);
+bfd_signed_vma bfd_getb_signed_32 (const void *);
+bfd_signed_vma bfd_getl_signed_32 (const void *);
+bfd_vma bfd_getb16 (const void *);
+bfd_vma bfd_getl16 (const void *);
+bfd_signed_vma bfd_getb_signed_16 (const void *);
+bfd_signed_vma bfd_getl_signed_16 (const void *);
+void bfd_putb64 (bfd_uint64_t, void *);
+void bfd_putl64 (bfd_uint64_t, void *);
+void bfd_putb32 (bfd_vma, void *);
+void bfd_putl32 (bfd_vma, void *);
+void bfd_putb16 (bfd_vma, void *);
+void bfd_putl16 (bfd_vma, void *);
+
+/* Byte swapping routines which take size and endianness as arguments. */
+
+bfd_uint64_t bfd_get_bits (const void *, int, bfd_boolean);
+void bfd_put_bits (bfd_uint64_t, void *, int, bfd_boolean);
+
+extern bfd_boolean bfd_section_already_linked_table_init (void);
+extern void bfd_section_already_linked_table_free (void);
+
+/* Externally visible ECOFF routines. */
+
+#if defined(__STDC__) || defined(ALMOST_STDC)
+struct ecoff_debug_info;
+struct ecoff_debug_swap;
+struct ecoff_extr;
+struct bfd_symbol;
+struct bfd_link_info;
+struct bfd_link_hash_entry;
+struct bfd_elf_version_tree;
+#endif
+extern bfd_vma bfd_ecoff_get_gp_value
+ (bfd * abfd);
+extern bfd_boolean bfd_ecoff_set_gp_value
+ (bfd *abfd, bfd_vma gp_value);
+extern bfd_boolean bfd_ecoff_set_regmasks
+ (bfd *abfd, unsigned long gprmask, unsigned long fprmask,
+ unsigned long *cprmask);
+extern void *bfd_ecoff_debug_init
+ (bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
+extern void bfd_ecoff_debug_free
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_accumulate
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
+ struct ecoff_debug_info *input_debug,
+ const struct ecoff_debug_swap *input_swap, struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_accumulate_other
+ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
+ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
+ struct bfd_link_info *);
+extern bfd_boolean bfd_ecoff_debug_externals
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, bfd_boolean relocatable,
+ bfd_boolean (*get_extr) (struct bfd_symbol *, struct ecoff_extr *),
+ void (*set_index) (struct bfd_symbol *, bfd_size_type));
+extern bfd_boolean bfd_ecoff_debug_one_external
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, const char *name,
+ struct ecoff_extr *esym);
+extern bfd_size_type bfd_ecoff_debug_size
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap);
+extern bfd_boolean bfd_ecoff_write_debug
+ (bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap, file_ptr where);
+extern bfd_boolean bfd_ecoff_write_accumulated_debug
+ (void *handle, bfd *abfd, struct ecoff_debug_info *debug,
+ const struct ecoff_debug_swap *swap,
+ struct bfd_link_info *info, file_ptr where);
+
+/* Externally visible ELF routines. */
+
+struct bfd_link_needed_list
+{
+ struct bfd_link_needed_list *next;
+ bfd *by;
+ const char *name;
+};
+
+enum dynamic_lib_link_class {
+ DYN_NORMAL = 0,
+ DYN_AS_NEEDED = 1,
+ DYN_DT_NEEDED = 2,
+ DYN_NO_ADD_NEEDED = 4,
+ DYN_NO_NEEDED = 8
+};
+
+enum notice_asneeded_action {
+ notice_as_needed,
+ notice_not_needed,
+ notice_needed
+};
+
+extern bfd_boolean bfd_elf_record_link_assignment
+ (bfd *, struct bfd_link_info *, const char *, bfd_boolean,
+ bfd_boolean);
+extern struct bfd_link_needed_list *bfd_elf_get_needed_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_elf_get_bfd_needed_list
+ (bfd *, struct bfd_link_needed_list **);
+extern bfd_boolean bfd_elf_size_dynamic_sections
+ (bfd *, const char *, const char *, const char *, const char *, const char *,
+ const char * const *, struct bfd_link_info *, struct bfd_section **,
+ struct bfd_elf_version_tree *);
+extern bfd_boolean bfd_elf_size_dynsym_hash_dynstr
+ (bfd *, struct bfd_link_info *);
+extern void bfd_elf_set_dt_needed_name
+ (bfd *, const char *);
+extern const char *bfd_elf_get_dt_soname
+ (bfd *);
+extern void bfd_elf_set_dyn_lib_class
+ (bfd *, enum dynamic_lib_link_class);
+extern int bfd_elf_get_dyn_lib_class
+ (bfd *);
+extern struct bfd_link_needed_list *bfd_elf_get_runpath_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_elf_discard_info
+ (bfd *, struct bfd_link_info *);
+extern unsigned int _bfd_elf_default_action_discarded
+ (struct bfd_section *);
+
+/* Return an upper bound on the number of bytes required to store a
+ copy of ABFD's program header table entries. Return -1 if an error
+ occurs; bfd_get_error will return an appropriate code. */
+extern long bfd_get_elf_phdr_upper_bound
+ (bfd *abfd);
+
+/* Copy ABFD's program header table entries to *PHDRS. The entries
+ will be stored as an array of Elf_Internal_Phdr structures, as
+ defined in include/elf/internal.h. To find out how large the
+ buffer needs to be, call bfd_get_elf_phdr_upper_bound.
+
+ Return the number of program header table entries read, or -1 if an
+ error occurs; bfd_get_error will return an appropriate code. */
+extern int bfd_get_elf_phdrs
+ (bfd *abfd, void *phdrs);
+
+/* Create a new BFD as if by bfd_openr. Rather than opening a file,
+ reconstruct an ELF file by reading the segments out of remote memory
+ based on the ELF file header at EHDR_VMA and the ELF program headers it
+ points to. If not null, *LOADBASEP is filled in with the difference
+ between the VMAs from which the segments were read, and the VMAs the
+ file headers (and hence BFD's idea of each section's VMA) put them at.
+
+ The function TARGET_READ_MEMORY is called to copy LEN bytes from the
+ remote memory at target address VMA into the local buffer at MYADDR; it
+ should return zero on success or an `errno' code on failure. TEMPL must
+ be a BFD for an ELF target with the word size and byte order found in
+ the remote memory. */
+extern bfd *bfd_elf_bfd_from_remote_memory
+ (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep,
+ int (*target_read_memory) (bfd_vma vma, bfd_byte *myaddr, int len));
+
+/* Return the arch_size field of an elf bfd, or -1 if not elf. */
+extern int bfd_get_arch_size
+ (bfd *);
+
+/* Return TRUE if address "naturally" sign extends, or -1 if not elf. */
+extern int bfd_get_sign_extend_vma
+ (bfd *);
+
+extern struct bfd_section *_bfd_elf_tls_setup
+ (bfd *, struct bfd_link_info *);
+
+extern void _bfd_fix_excluded_sec_syms
+ (bfd *, struct bfd_link_info *);
+
+extern unsigned bfd_m68k_mach_to_features (int);
+
+extern int bfd_m68k_features_to_mach (unsigned);
+
+extern bfd_boolean bfd_m68k_elf32_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *,
+ char **);
+
+extern void bfd_elf_m68k_set_target_options (struct bfd_link_info *, int);
+
+extern bfd_boolean bfd_bfin_elf32_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *,
+ char **);
+
+extern bfd_boolean bfd_cr16_elf32_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *,
+ char **);
+
+/* SunOS shared library support routines for the linker. */
+
+extern struct bfd_link_needed_list *bfd_sunos_get_needed_list
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_sunos_record_link_assignment
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_sunos_size_dynamic_sections
+ (bfd *, struct bfd_link_info *, struct bfd_section **,
+ struct bfd_section **, struct bfd_section **);
+
+/* Linux shared library support routines for the linker. */
+
+extern bfd_boolean bfd_i386linux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_m68klinux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+extern bfd_boolean bfd_sparclinux_size_dynamic_sections
+ (bfd *, struct bfd_link_info *);
+
+/* mmap hacks */
+
+struct _bfd_window_internal;
+typedef struct _bfd_window_internal bfd_window_internal;
+
+typedef struct _bfd_window
+{
+ /* What the user asked for. */
+ void *data;
+ bfd_size_type size;
+ /* The actual window used by BFD. Small user-requested read-only
+ regions sharing a page may share a single window into the object
+ file. Read-write versions shouldn't until I've fixed things to
+ keep track of which portions have been claimed by the
+ application; don't want to give the same region back when the
+ application wants two writable copies! */
+ struct _bfd_window_internal *i;
+}
+bfd_window;
+
+extern void bfd_init_window
+ (bfd_window *);
+extern void bfd_free_window
+ (bfd_window *);
+extern bfd_boolean bfd_get_file_window
+ (bfd *, file_ptr, bfd_size_type, bfd_window *, bfd_boolean);
+
+/* XCOFF support routines for the linker. */
+
+extern bfd_boolean bfd_xcoff_split_import_path
+ (bfd *, const char *, const char **, const char **);
+extern bfd_boolean bfd_xcoff_set_archive_import_path
+ (struct bfd_link_info *, bfd *, const char *);
+extern bfd_boolean bfd_xcoff_link_record_set
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_size_type);
+extern bfd_boolean bfd_xcoff_import_symbol
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_vma,
+ const char *, const char *, const char *, unsigned int);
+extern bfd_boolean bfd_xcoff_export_symbol
+ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *);
+extern bfd_boolean bfd_xcoff_link_count_reloc
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_xcoff_record_link_assignment
+ (bfd *, struct bfd_link_info *, const char *);
+extern bfd_boolean bfd_xcoff_size_dynamic_sections
+ (bfd *, struct bfd_link_info *, const char *, const char *,
+ unsigned long, unsigned long, unsigned long, bfd_boolean,
+ int, bfd_boolean, unsigned int, struct bfd_section **, bfd_boolean);
+extern bfd_boolean bfd_xcoff_link_generate_rtinit
+ (bfd *, const char *, const char *, bfd_boolean);
+
+/* XCOFF support routines for ar. */
+extern bfd_boolean bfd_xcoff_ar_archive_set_magic
+ (bfd *, char *);
+
+/* Externally visible COFF routines. */
+
+#if defined(__STDC__) || defined(ALMOST_STDC)
+struct internal_syment;
+union internal_auxent;
+#endif
+
+extern bfd_boolean bfd_coff_get_syment
+ (bfd *, struct bfd_symbol *, struct internal_syment *);
+
+extern bfd_boolean bfd_coff_get_auxent
+ (bfd *, struct bfd_symbol *, int, union internal_auxent *);
+
+extern bfd_boolean bfd_coff_set_symbol_class
+ (bfd *, struct bfd_symbol *, unsigned int);
+
+extern bfd_boolean bfd_m68k_coff_create_embedded_relocs
+ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **);
+
+/* ARM VFP11 erratum workaround support. */
+typedef enum
+{
+ BFD_ARM_VFP11_FIX_DEFAULT,
+ BFD_ARM_VFP11_FIX_NONE,
+ BFD_ARM_VFP11_FIX_SCALAR,
+ BFD_ARM_VFP11_FIX_VECTOR
+} bfd_arm_vfp11_fix;
+
+extern void bfd_elf32_arm_init_maps
+ (bfd *);
+
+extern void bfd_elf32_arm_set_vfp11_fix
+ (bfd *, struct bfd_link_info *);
+
+extern void bfd_elf32_arm_set_cortex_a8_fix
+ (bfd *, struct bfd_link_info *);
+
+extern bfd_boolean bfd_elf32_arm_vfp11_erratum_scan
+ (bfd *, struct bfd_link_info *);
+
+extern void bfd_elf32_arm_vfp11_fix_veneer_locations
+ (bfd *, struct bfd_link_info *);
+
+/* ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_arm_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_arm_process_before_allocation
+ (bfd *, struct bfd_link_info *, int);
+
+extern bfd_boolean bfd_arm_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+/* PE ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_arm_pe_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_arm_pe_process_before_allocation
+ (bfd *, struct bfd_link_info *, int);
+
+extern bfd_boolean bfd_arm_pe_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+/* ELF ARM Interworking support. Called from linker. */
+extern bfd_boolean bfd_elf32_arm_allocate_interworking_sections
+ (struct bfd_link_info *);
+
+extern bfd_boolean bfd_elf32_arm_process_before_allocation
+ (bfd *, struct bfd_link_info *);
+
+void bfd_elf32_arm_set_target_relocs
+ (bfd *, struct bfd_link_info *, int, char *, int, int, bfd_arm_vfp11_fix,
+ int, int, int, int);
+
+extern bfd_boolean bfd_elf32_arm_get_bfd_for_interworking
+ (bfd *, struct bfd_link_info *);
+
+extern bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd
+ (bfd *, struct bfd_link_info *);
+
+/* ELF ARM mapping symbol support */
+#define BFD_ARM_SPECIAL_SYM_TYPE_MAP (1 << 0)
+#define BFD_ARM_SPECIAL_SYM_TYPE_TAG (1 << 1)
+#define BFD_ARM_SPECIAL_SYM_TYPE_OTHER (1 << 2)
+#define BFD_ARM_SPECIAL_SYM_TYPE_ANY (~0)
+extern bfd_boolean bfd_is_arm_special_symbol_name
+ (const char * name, int type);
+
+extern void bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *, int);
+
+/* ARM Note section processing. */
+extern bfd_boolean bfd_arm_merge_machines
+ (bfd *, bfd *);
+
+extern bfd_boolean bfd_arm_update_notes
+ (bfd *, const char *);
+
+extern unsigned int bfd_arm_get_mach_from_notes
+ (bfd *, const char *);
+
+/* ARM stub generation support. Called from the linker. */
+extern int elf32_arm_setup_section_lists
+ (bfd *, struct bfd_link_info *);
+extern void elf32_arm_next_input_section
+ (struct bfd_link_info *, struct bfd_section *);
+extern bfd_boolean elf32_arm_size_stubs
+ (bfd *, bfd *, struct bfd_link_info *, bfd_signed_vma,
+ struct bfd_section * (*) (const char *, struct bfd_section *), void (*) (void));
+extern bfd_boolean elf32_arm_build_stubs
+ (struct bfd_link_info *);
+
+/* ARM unwind section editing support. */
+extern bfd_boolean elf32_arm_fix_exidx_coverage
+(struct bfd_section **, unsigned int, struct bfd_link_info *, bfd_boolean);
+
+/* PowerPC @tls opcode transform/validate. */
+extern unsigned int _bfd_elf_ppc_at_tls_transform
+ (unsigned int, unsigned int);
+/* PowerPC @tprel opcode transform/validate. */
+extern unsigned int _bfd_elf_ppc_at_tprel_transform
+ (unsigned int, unsigned int);
+
+/* TI COFF load page support. */
+extern void bfd_ticoff_set_section_load_page
+ (struct bfd_section *, int);
+
+extern int bfd_ticoff_get_section_load_page
+ (struct bfd_section *);
+
+/* H8/300 functions. */
+extern bfd_vma bfd_h8300_pad_address
+ (bfd *, bfd_vma);
+
+/* IA64 Itanium code generation. Called from linker. */
+extern void bfd_elf32_ia64_after_parse
+ (int);
+
+extern void bfd_elf64_ia64_after_parse
+ (int);
+
+/* This structure is used for a comdat section, as in PE. A comdat
+ section is associated with a particular symbol. When the linker
+ sees a comdat section, it keeps only one of the sections with a
+ given name and associated with a given symbol. */
+
+struct coff_comdat_info
+{
+ /* The name of the symbol associated with a comdat section. */
+ const char *name;
+
+ /* The local symbol table index of the symbol associated with a
+ comdat section. This is only meaningful to the object file format
+ specific code; it is not an index into the list returned by
+ bfd_canonicalize_symtab. */
+ long symbol;
+};
+
+extern struct coff_comdat_info *bfd_coff_get_comdat_section
+ (bfd *, struct bfd_section *);
+
+/* Extracted from init.c. */
+void bfd_init (void);
+
+/* Extracted from opncls.c. */
+extern unsigned int bfd_use_reserved_id;
+bfd *bfd_fopen (const char *filename, const char *target,
+ const char *mode, int fd);
+
+bfd *bfd_openr (const char *filename, const char *target);
+
+bfd *bfd_fdopenr (const char *filename, const char *target, int fd);
+
+bfd *bfd_openstreamr (const char *, const char *, void *);
+
+bfd *bfd_openr_iovec (const char *filename, const char *target,
+ void *(*open_func) (struct bfd *nbfd,
+ void *open_closure),
+ void *open_closure,
+ file_ptr (*pread_func) (struct bfd *nbfd,
+ void *stream,
+ void *buf,
+ file_ptr nbytes,
+ file_ptr offset),
+ int (*close_func) (struct bfd *nbfd,
+ void *stream),
+ int (*stat_func) (struct bfd *abfd,
+ void *stream,
+ struct stat *sb));
+
+bfd *bfd_openw (const char *filename, const char *target);
+
+bfd_boolean bfd_close (bfd *abfd);
+
+bfd_boolean bfd_close_all_done (bfd *);
+
+bfd *bfd_create (const char *filename, bfd *templ);
+
+bfd_boolean bfd_make_writable (bfd *abfd);
+
+bfd_boolean bfd_make_readable (bfd *abfd);
+
+void *bfd_alloc (bfd *abfd, bfd_size_type wanted);
+
+void *bfd_zalloc (bfd *abfd, bfd_size_type wanted);
+
+unsigned long bfd_calc_gnu_debuglink_crc32
+ (unsigned long crc, const unsigned char *buf, bfd_size_type len);
+
+char *bfd_follow_gnu_debuglink (bfd *abfd, const char *dir);
+
+struct bfd_section *bfd_create_gnu_debuglink_section
+ (bfd *abfd, const char *filename);
+
+bfd_boolean bfd_fill_in_gnu_debuglink_section
+ (bfd *abfd, struct bfd_section *sect, const char *filename);
+
+/* Extracted from libbfd.c. */
+
+/* Byte swapping macros for user section data. */
+
+#define bfd_put_8(abfd, val, ptr) \
+ ((void) (*((unsigned char *) (ptr)) = (val) & 0xff))
+#define bfd_put_signed_8 \
+ bfd_put_8
+#define bfd_get_8(abfd, ptr) \
+ (*(unsigned char *) (ptr) & 0xff)
+#define bfd_get_signed_8(abfd, ptr) \
+ (((*(unsigned char *) (ptr) & 0xff) ^ 0x80) - 0x80)
+
+#define bfd_put_16(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx16, ((val),(ptr)))
+#define bfd_put_signed_16 \
+ bfd_put_16
+#define bfd_get_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx16, (ptr))
+#define bfd_get_signed_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_16, (ptr))
+
+#define bfd_put_32(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx32, ((val),(ptr)))
+#define bfd_put_signed_32 \
+ bfd_put_32
+#define bfd_get_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx32, (ptr))
+#define bfd_get_signed_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_32, (ptr))
+
+#define bfd_put_64(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_putx64, ((val), (ptr)))
+#define bfd_put_signed_64 \
+ bfd_put_64
+#define bfd_get_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx64, (ptr))
+#define bfd_get_signed_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_getx_signed_64, (ptr))
+
+#define bfd_get(bits, abfd, ptr) \
+ ((bits) == 8 ? (bfd_vma) bfd_get_8 (abfd, ptr) \
+ : (bits) == 16 ? bfd_get_16 (abfd, ptr) \
+ : (bits) == 32 ? bfd_get_32 (abfd, ptr) \
+ : (bits) == 64 ? bfd_get_64 (abfd, ptr) \
+ : (abort (), (bfd_vma) - 1))
+
+#define bfd_put(bits, abfd, val, ptr) \
+ ((bits) == 8 ? bfd_put_8 (abfd, val, ptr) \
+ : (bits) == 16 ? bfd_put_16 (abfd, val, ptr) \
+ : (bits) == 32 ? bfd_put_32 (abfd, val, ptr) \
+ : (bits) == 64 ? bfd_put_64 (abfd, val, ptr) \
+ : (abort (), (void) 0))
+
+
+/* Byte swapping macros for file header data. */
+
+#define bfd_h_put_8(abfd, val, ptr) \
+ bfd_put_8 (abfd, val, ptr)
+#define bfd_h_put_signed_8(abfd, val, ptr) \
+ bfd_put_8 (abfd, val, ptr)
+#define bfd_h_get_8(abfd, ptr) \
+ bfd_get_8 (abfd, ptr)
+#define bfd_h_get_signed_8(abfd, ptr) \
+ bfd_get_signed_8 (abfd, ptr)
+
+#define bfd_h_put_16(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx16, (val, ptr))
+#define bfd_h_put_signed_16 \
+ bfd_h_put_16
+#define bfd_h_get_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx16, (ptr))
+#define bfd_h_get_signed_16(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_16, (ptr))
+
+#define bfd_h_put_32(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx32, (val, ptr))
+#define bfd_h_put_signed_32 \
+ bfd_h_put_32
+#define bfd_h_get_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx32, (ptr))
+#define bfd_h_get_signed_32(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_32, (ptr))
+
+#define bfd_h_put_64(abfd, val, ptr) \
+ BFD_SEND (abfd, bfd_h_putx64, (val, ptr))
+#define bfd_h_put_signed_64 \
+ bfd_h_put_64
+#define bfd_h_get_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx64, (ptr))
+#define bfd_h_get_signed_64(abfd, ptr) \
+ BFD_SEND (abfd, bfd_h_getx_signed_64, (ptr))
+
+/* Aliases for the above, which should eventually go away. */
+
+#define H_PUT_64 bfd_h_put_64
+#define H_PUT_32 bfd_h_put_32
+#define H_PUT_16 bfd_h_put_16
+#define H_PUT_8 bfd_h_put_8
+#define H_PUT_S64 bfd_h_put_signed_64
+#define H_PUT_S32 bfd_h_put_signed_32
+#define H_PUT_S16 bfd_h_put_signed_16
+#define H_PUT_S8 bfd_h_put_signed_8
+#define H_GET_64 bfd_h_get_64
+#define H_GET_32 bfd_h_get_32
+#define H_GET_16 bfd_h_get_16
+#define H_GET_8 bfd_h_get_8
+#define H_GET_S64 bfd_h_get_signed_64
+#define H_GET_S32 bfd_h_get_signed_32
+#define H_GET_S16 bfd_h_get_signed_16
+#define H_GET_S8 bfd_h_get_signed_8
+
+
+/* Extracted from bfdio.c. */
+long bfd_get_mtime (bfd *abfd);
+
+file_ptr bfd_get_size (bfd *abfd);
+
+void *bfd_mmap (bfd *abfd, void *addr, bfd_size_type len,
+ int prot, int flags, file_ptr offset);
+
+/* Extracted from bfdwin.c. */
+/* Extracted from section.c. */
+typedef struct bfd_section
+{
+ /* The name of the section; the name isn't a copy, the pointer is
+ the same as that passed to bfd_make_section. */
+ const char *name;
+
+ /* A unique sequence number. */
+ int id;
+
+ /* Which section in the bfd; 0..n-1 as sections are created in a bfd. */
+ int index;
+
+ /* The next section in the list belonging to the BFD, or NULL. */
+ struct bfd_section *next;
+
+ /* The previous section in the list belonging to the BFD, or NULL. */
+ struct bfd_section *prev;
+
+ /* The field flags contains attributes of the section. Some
+ flags are read in from the object file, and some are
+ synthesized from other information. */
+ flagword flags;
+
+#define SEC_NO_FLAGS 0x000
+
+ /* Tells the OS to allocate space for this section when loading.
+ This is clear for a section containing debug information only. */
+#define SEC_ALLOC 0x001
+
+ /* Tells the OS to load the section from the file when loading.
+ This is clear for a .bss section. */
+#define SEC_LOAD 0x002
+
+ /* The section contains data still to be relocated, so there is
+ some relocation information too. */
+#define SEC_RELOC 0x004
+
+ /* A signal to the OS that the section contains read only data. */
+#define SEC_READONLY 0x008
+
+ /* The section contains code only. */
+#define SEC_CODE 0x010
+
+ /* The section contains data only. */
+#define SEC_DATA 0x020
+
+ /* The section will reside in ROM. */
+#define SEC_ROM 0x040
+
+ /* The section contains constructor information. This section
+ type is used by the linker to create lists of constructors and
+ destructors used by <<g++>>. When a back end sees a symbol
+ which should be used in a constructor list, it creates a new
+ section for the type of name (e.g., <<__CTOR_LIST__>>), attaches
+ the symbol to it, and builds a relocation. To build the lists
+ of constructors, all the linker has to do is catenate all the
+ sections called <<__CTOR_LIST__>> and relocate the data
+ contained within - exactly the operations it would perform on
+ standard data. */
+#define SEC_CONSTRUCTOR 0x080
+
+ /* The section has contents - a data section could be
+ <<SEC_ALLOC>> | <<SEC_HAS_CONTENTS>>; a debug section could be
+ <<SEC_HAS_CONTENTS>>. */
+#define SEC_HAS_CONTENTS 0x100
+
+ /* An instruction to the linker to not output the section
+ even if it has information which would normally be written. */
+#define SEC_NEVER_LOAD 0x200
+
+ /* The section contains thread local data. */
+#define SEC_THREAD_LOCAL 0x400
+
+ /* The section has GOT references. This flag is only for the
+ linker, and is currently only used by the elf32-hppa back end.
+ It will be set if global offset table references were detected
+ in this section, which indicate to the linker that the section
+ contains PIC code, and must be handled specially when doing a
+ static link. */
+#define SEC_HAS_GOT_REF 0x800
+
+ /* The section contains common symbols (symbols may be defined
+ multiple times, the value of a symbol is the amount of
+ space it requires, and the largest symbol value is the one
+ used). Most targets have exactly one of these (which we
+ translate to bfd_com_section_ptr), but ECOFF has two. */
+#define SEC_IS_COMMON 0x1000
+
+ /* The section contains only debugging information. For
+ example, this is set for ELF .debug and .stab sections.
+ strip tests this flag to see if a section can be
+ discarded. */
+#define SEC_DEBUGGING 0x2000
+
+ /* The contents of this section are held in memory pointed to
+ by the contents field. This is checked by bfd_get_section_contents,
+ and the data is retrieved from memory if appropriate. */
+#define SEC_IN_MEMORY 0x4000
+
+ /* The contents of this section are to be excluded by the
+ linker for executable and shared objects unless those
+ objects are to be further relocated. */
+#define SEC_EXCLUDE 0x8000
+
+ /* The contents of this section are to be sorted based on the sum of
+ the symbol and addend values specified by the associated relocation
+ entries. Entries without associated relocation entries will be
+ appended to the end of the section in an unspecified order. */
+#define SEC_SORT_ENTRIES 0x10000
+
+ /* When linking, duplicate sections of the same name should be
+ discarded, rather than being combined into a single section as
+ is usually done. This is similar to how common symbols are
+ handled. See SEC_LINK_DUPLICATES below. */
+#define SEC_LINK_ONCE 0x20000
+
+ /* If SEC_LINK_ONCE is set, this bitfield describes how the linker
+ should handle duplicate sections. */
+#define SEC_LINK_DUPLICATES 0xc0000
+
+ /* This value for SEC_LINK_DUPLICATES means that duplicate
+ sections with the same name should simply be discarded. */
+#define SEC_LINK_DUPLICATES_DISCARD 0x0
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if there are any duplicate sections, although
+ it should still only link one copy. */
+#define SEC_LINK_DUPLICATES_ONE_ONLY 0x40000
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if any duplicate sections are a different size. */
+#define SEC_LINK_DUPLICATES_SAME_SIZE 0x80000
+
+ /* This value for SEC_LINK_DUPLICATES means that the linker
+ should warn if any duplicate sections contain different
+ contents. */
+#define SEC_LINK_DUPLICATES_SAME_CONTENTS \
+ (SEC_LINK_DUPLICATES_ONE_ONLY | SEC_LINK_DUPLICATES_SAME_SIZE)
+
+ /* This section was created by the linker as part of dynamic
+ relocation or other arcane processing. It is skipped when
+ going through the first-pass output, trusting that someone
+ else up the line will take care of it later. */
+#define SEC_LINKER_CREATED 0x100000
+
+ /* This section should not be subject to garbage collection.
+ Also set to inform the linker that this section should not be
+ listed in the link map as discarded. */
+#define SEC_KEEP 0x200000
+
+ /* This section contains "short" data, and should be placed
+ "near" the GP. */
+#define SEC_SMALL_DATA 0x400000
+
+ /* Attempt to merge identical entities in the section.
+ Entity size is given in the entsize field. */
+#define SEC_MERGE 0x800000
+
+ /* If given with SEC_MERGE, entities to merge are zero terminated
+ strings where entsize specifies character size instead of fixed
+ size entries. */
+#define SEC_STRINGS 0x1000000
+
+ /* This section contains data about section groups. */
+#define SEC_GROUP 0x2000000
+
+ /* The section is a COFF shared library section. This flag is
+ only for the linker. If this type of section appears in
+ the input file, the linker must copy it to the output file
+ without changing the vma or size. FIXME: Although this
+ was originally intended to be general, it really is COFF
+ specific (and the flag was renamed to indicate this). It
+ might be cleaner to have some more general mechanism to
+ allow the back end to control what the linker does with
+ sections. */
+#define SEC_COFF_SHARED_LIBRARY 0x4000000
+
+ /* This section contains data which may be shared with other
+ executables or shared objects. This is for COFF only. */
+#define SEC_COFF_SHARED 0x8000000
+
+ /* When a section with this flag is being linked, then if the size of
+ the input section is less than a page, it should not cross a page
+ boundary. If the size of the input section is one page or more,
+ it should be aligned on a page boundary. This is for TI
+ TMS320C54X only. */
+#define SEC_TIC54X_BLOCK 0x10000000
+
+ /* Conditionally link this section; do not link if there are no
+ references found to any symbol in the section. This is for TI
+ TMS320C54X only. */
+#define SEC_TIC54X_CLINK 0x20000000
+
+ /* Indicate that this section has the no-read flag set. This happens
+ when the memory read flag isn't set. */
+#define SEC_COFF_NOREAD 0x40000000
+
+ /* End of section flags. */
+
+ /* Some internal packed boolean fields. */
+
+ /* See the vma field. */
+ unsigned int user_set_vma : 1;
+
+ /* A mark flag used by some of the linker backends. */
+ unsigned int linker_mark : 1;
+
+ /* Another mark flag used by some of the linker backends. Set for
+ output sections that have an input section. */
+ unsigned int linker_has_input : 1;
+
+ /* Mark flag used by some linker backends for garbage collection. */
+ unsigned int gc_mark : 1;
+
+ /* The following flags are used by the ELF linker. */
+
+ /* Mark sections which have been allocated to segments. */
+ unsigned int segment_mark : 1;
+
+ /* Type of sec_info information. */
+ unsigned int sec_info_type:3;
+#define ELF_INFO_TYPE_NONE 0
+#define ELF_INFO_TYPE_STABS 1
+#define ELF_INFO_TYPE_MERGE 2
+#define ELF_INFO_TYPE_EH_FRAME 3
+#define ELF_INFO_TYPE_JUST_SYMS 4
+
+ /* Nonzero if this section uses RELA relocations, rather than REL. */
+ unsigned int use_rela_p:1;
+
+ /* Bits used by various backends. The generic code doesn't touch
+ these fields. */
+
+ unsigned int sec_flg0:1;
+ unsigned int sec_flg1:1;
+ unsigned int sec_flg2:1;
+ unsigned int sec_flg3:1;
+ unsigned int sec_flg4:1;
+ unsigned int sec_flg5:1;
+
+ /* End of internal packed boolean fields. */
+
+ /* The virtual memory address of the section - where it will be
+ at run time. The symbols are relocated against this. The
+ user_set_vma flag is maintained by bfd; if it's not set, the
+ backend can assign addresses (for example, in <<a.out>>, where
+ the default address for <<.data>> is dependent on the specific
+ target and various flags). */
+ bfd_vma vma;
+
+ /* The load address of the section - where it would be in a
+ rom image; really only used for writing section header
+ information. */
+ bfd_vma lma;
+
+ /* The size of the section in octets, as it will be output.
+ Contains a value even if the section has no contents (e.g., the
+ size of <<.bss>>). */
+ bfd_size_type size;
+
+ /* For input sections, the original size on disk of the section, in
+ octets. This field should be set for any section whose size is
+ changed by linker relaxation. It is required for sections where
+ the linker relaxation scheme doesn't cache altered section and
+ reloc contents (stabs, eh_frame, SEC_MERGE, some coff relaxing
+ targets), and thus the original size needs to be kept to read the
+ section multiple times. For output sections, rawsize holds the
+ section size calculated on a previous linker relaxation pass. */
+ bfd_size_type rawsize;
+
+ /* Relaxation table. */
+ struct relax_table *relax;
+
+ /* Count of used relaxation table entries. */
+ int relax_count;
+
+
+ /* If this section is going to be output, then this value is the
+ offset in *bytes* into the output section of the first byte in the
+ input section (byte ==> smallest addressable unit on the
+ target). In most cases, if this was going to start at the
+ 100th octet (8-bit quantity) in the output section, this value
+ would be 100. However, if the target byte size is 16 bits
+ (bfd_octets_per_byte is "2"), this value would be 50. */
+ bfd_vma output_offset;
+
+ /* The output section through which to map on output. */
+ struct bfd_section *output_section;
+
+ /* The alignment requirement of the section, as an exponent of 2 -
+ e.g., 3 aligns to 2^3 (or 8). */
+ unsigned int alignment_power;
+
+ /* If an input section, a pointer to a vector of relocation
+ records for the data in this section. */
+ struct reloc_cache_entry *relocation;
+
+ /* If an output section, a pointer to a vector of pointers to
+ relocation records for the data in this section. */
+ struct reloc_cache_entry **orelocation;
+
+ /* The number of relocation records in one of the above. */
+ unsigned reloc_count;
+
+ /* Information below is back end specific - and not always used
+ or updated. */
+
+ /* File position of section data. */
+ file_ptr filepos;
+
+ /* File position of relocation info. */
+ file_ptr rel_filepos;
+
+ /* File position of line data. */
+ file_ptr line_filepos;
+
+ /* Pointer to data for applications. */
+ void *userdata;
+
+ /* If the SEC_IN_MEMORY flag is set, this points to the actual
+ contents. */
+ unsigned char *contents;
+
+ /* Attached line number information. */
+ alent *lineno;
+
+ /* Number of line number records. */
+ unsigned int lineno_count;
+
+ /* Entity size for merging purposes. */
+ unsigned int entsize;
+
+ /* Points to the kept section if this section is a link-once section,
+ and is discarded. */
+ struct bfd_section *kept_section;
+
+ /* When a section is being output, this value changes as more
+ linenumbers are written out. */
+ file_ptr moving_line_filepos;
+
+ /* What the section number is in the target world. */
+ int target_index;
+
+ void *used_by_bfd;
+
+ /* If this is a constructor section then here is a list of the
+ relocations created to relocate items within it. */
+ struct relent_chain *constructor_chain;
+
+ /* The BFD which owns the section. */
+ bfd *owner;
+
+ /* A symbol which points at this section only. */
+ struct bfd_symbol *symbol;
+ struct bfd_symbol **symbol_ptr_ptr;
+
+ /* Early in the link process, map_head and map_tail are used to build
+ a list of input sections attached to an output section. Later,
+ output sections use these fields for a list of bfd_link_order
+ structs. */
+ union {
+ struct bfd_link_order *link_order;
+ struct bfd_section *s;
+ } map_head, map_tail;
+} asection;
+
+/* Relax table contains information about instructions which can
+ be removed by relaxation -- replacing a long address with a
+ short address. */
+struct relax_table {
+ /* Address where bytes may be deleted. */
+ bfd_vma addr;
+
+ /* Number of bytes to be deleted. */
+ int size;
+};
+
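+/* An illustrative sketch: walking a BFD's section chain through the next
+   pointers.  "abfd->sections" is the list head kept on struct bfd (see
+   the list macros below); printf is used only for the example.
+
+     asection *s;
+     for (s = abfd->sections; s != NULL; s = s->next)
+       printf ("%-20s vma=0x%lx size=0x%lx\n", s->name,
+               (unsigned long) s->vma, (unsigned long) s->size);
+  */
+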
+/* These sections are global, and are managed by BFD. The application
+ and target back end are not permitted to change the values in
+ these sections. New code should use the section_ptr macros rather
+ than referring directly to the const sections. The const sections
+ may eventually vanish. */
+#define BFD_ABS_SECTION_NAME "*ABS*"
+#define BFD_UND_SECTION_NAME "*UND*"
+#define BFD_COM_SECTION_NAME "*COM*"
+#define BFD_IND_SECTION_NAME "*IND*"
+
+/* The absolute section. */
+extern asection bfd_abs_section;
+#define bfd_abs_section_ptr ((asection *) &bfd_abs_section)
+#define bfd_is_abs_section(sec) ((sec) == bfd_abs_section_ptr)
+/* Pointer to the undefined section. */
+extern asection bfd_und_section;
+#define bfd_und_section_ptr ((asection *) &bfd_und_section)
+#define bfd_is_und_section(sec) ((sec) == bfd_und_section_ptr)
+/* Pointer to the common section. */
+extern asection bfd_com_section;
+#define bfd_com_section_ptr ((asection *) &bfd_com_section)
+/* Pointer to the indirect section. */
+extern asection bfd_ind_section;
+#define bfd_ind_section_ptr ((asection *) &bfd_ind_section)
+#define bfd_is_ind_section(sec) ((sec) == bfd_ind_section_ptr)
+
+#define bfd_is_const_section(SEC) \
+ ( ((SEC) == bfd_abs_section_ptr) \
+ || ((SEC) == bfd_und_section_ptr) \
+ || ((SEC) == bfd_com_section_ptr) \
+ || ((SEC) == bfd_ind_section_ptr))
+
+/* Macros to handle insertion and deletion of a bfd's sections. These
+ only handle the list pointers, ie. do not adjust section_count,
+ target_index etc. */
+#define bfd_section_list_remove(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ asection *_next = _s->next; \
+ asection *_prev = _s->prev; \
+ if (_prev) \
+ _prev->next = _next; \
+ else \
+ (ABFD)->sections = _next; \
+ if (_next) \
+ _next->prev = _prev; \
+ else \
+ (ABFD)->section_last = _prev; \
+ } \
+ while (0)
+#define bfd_section_list_append(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ bfd *_abfd = ABFD; \
+ _s->next = NULL; \
+ if (_abfd->section_last) \
+ { \
+ _s->prev = _abfd->section_last; \
+ _abfd->section_last->next = _s; \
+ } \
+ else \
+ { \
+ _s->prev = NULL; \
+ _abfd->sections = _s; \
+ } \
+ _abfd->section_last = _s; \
+ } \
+ while (0)
+#define bfd_section_list_prepend(ABFD, S) \
+ do \
+ { \
+ asection *_s = S; \
+ bfd *_abfd = ABFD; \
+ _s->prev = NULL; \
+ if (_abfd->sections) \
+ { \
+ _s->next = _abfd->sections; \
+ _abfd->sections->prev = _s; \
+ } \
+ else \
+ { \
+ _s->next = NULL; \
+ _abfd->section_last = _s; \
+ } \
+ _abfd->sections = _s; \
+ } \
+ while (0)
+#define bfd_section_list_insert_after(ABFD, A, S) \
+ do \
+ { \
+ asection *_a = A; \
+ asection *_s = S; \
+ asection *_next = _a->next; \
+ _s->next = _next; \
+ _s->prev = _a; \
+ _a->next = _s; \
+ if (_next) \
+ _next->prev = _s; \
+ else \
+ (ABFD)->section_last = _s; \
+ } \
+ while (0)
+#define bfd_section_list_insert_before(ABFD, B, S) \
+ do \
+ { \
+ asection *_b = B; \
+ asection *_s = S; \
+ asection *_prev = _b->prev; \
+ _s->prev = _prev; \
+ _s->next = _b; \
+ _b->prev = _s; \
+ if (_prev) \
+ _prev->next = _s; \
+ else \
+ (ABFD)->sections = _s; \
+ } \
+ while (0)
+#define bfd_section_removed_from_list(ABFD, S) \
+ ((S)->next == NULL ? (ABFD)->section_last != (S) : (S)->next->prev != (S))
+
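+/* An illustrative sketch: the macros above touch only the next/prev links
+   and the bfd's sections/section_last pointers, so this moves a section
+   to the end of the chain without updating section_count or target_index.
+
+     bfd_section_list_remove (abfd, sec);
+     bfd_section_list_append (abfd, sec);
+  */
+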
+#define BFD_FAKE_SECTION(SEC, FLAGS, SYM, NAME, IDX) \
+ /* name, id, index, next, prev, flags, user_set_vma, */ \
+ { NAME, IDX, 0, NULL, NULL, FLAGS, 0, \
+ \
+ /* linker_mark, linker_has_input, gc_mark, segment_mark, */ \
+ 0, 0, 1, 0, \
+ \
+ /* sec_info_type, use_rela_p, */ \
+ 0, 0, \
+ \
+ /* sec_flg0, sec_flg1, sec_flg2, sec_flg3, sec_flg4, sec_flg5, */ \
+ 0, 0, 0, 0, 0, 0, \
+ \
+ /* vma, lma, size, rawsize, relax, relax_count, */ \
+ 0, 0, 0, 0, 0, 0, \
+ \
+ /* output_offset, output_section, alignment_power, */ \
+ 0, (struct bfd_section *) &SEC, 0, \
+ \
+ /* relocation, orelocation, reloc_count, filepos, rel_filepos, */ \
+ NULL, NULL, 0, 0, 0, \
+ \
+ /* line_filepos, userdata, contents, lineno, lineno_count, */ \
+ 0, NULL, NULL, NULL, 0, \
+ \
+ /* entsize, kept_section, moving_line_filepos, */ \
+ 0, NULL, 0, \
+ \
+ /* target_index, used_by_bfd, constructor_chain, owner, */ \
+ 0, NULL, NULL, NULL, \
+ \
+ /* symbol, symbol_ptr_ptr, */ \
+ (struct bfd_symbol *) SYM, &SEC.symbol, \
+ \
+ /* map_head, map_tail */ \
+ { NULL }, { NULL } \
+ }
+
+void bfd_section_list_clear (bfd *);
+
+asection *bfd_get_section_by_name (bfd *abfd, const char *name);
+
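+/* An illustrative sketch: looking a section up by name.
+
+     asection *text = bfd_get_section_by_name (abfd, ".text");
+     if (text != NULL)
+       printf (".text occupies %lu octets\n", (unsigned long) text->size);
+  */
+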
+asection *bfd_get_section_by_name_if
+ (bfd *abfd,
+ const char *name,
+ bfd_boolean (*func) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
+
+char *bfd_get_unique_section_name
+ (bfd *abfd, const char *templat, int *count);
+
+asection *bfd_make_section_old_way (bfd *abfd, const char *name);
+
+asection *bfd_make_section_anyway_with_flags
+ (bfd *abfd, const char *name, flagword flags);
+
+asection *bfd_make_section_anyway (bfd *abfd, const char *name);
+
+asection *bfd_make_section_with_flags
+ (bfd *, const char *name, flagword flags);
+
+asection *bfd_make_section (bfd *, const char *name);
+
+bfd_boolean bfd_set_section_flags
+ (bfd *abfd, asection *sec, flagword flags);
+
+void bfd_map_over_sections
+ (bfd *abfd,
+ void (*func) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
+
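+/* An illustrative sketch: visiting every section with
+   bfd_map_over_sections.  The callback name and the use of "obj" as a
+   counter are choices made for the example only.
+
+     static void
+     count_alloc (bfd *abfd, asection *sect, void *obj)
+     {
+       if (sect->flags & SEC_ALLOC)
+         ++*(unsigned int *) obj;
+     }
+
+     unsigned int n = 0;
+     bfd_map_over_sections (abfd, count_alloc, &n);
+  */
+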
+asection *bfd_sections_find_if
+ (bfd *abfd,
+ bfd_boolean (*operation) (bfd *abfd, asection *sect, void *obj),
+ void *obj);
+
+bfd_boolean bfd_set_section_size
+ (bfd *abfd, asection *sec, bfd_size_type val);
+
+bfd_boolean bfd_set_section_contents
+ (bfd *abfd, asection *section, const void *data,
+ file_ptr offset, bfd_size_type count);
+
+bfd_boolean bfd_get_section_contents
+ (bfd *abfd, asection *section, void *location, file_ptr offset,
+ bfd_size_type count);
+
+bfd_boolean bfd_malloc_and_get_section
+ (bfd *abfd, asection *section, bfd_byte **buf);
+
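+/* An illustrative sketch: reading a whole section.  On success the buffer
+   is assumed to be malloc'd by bfd_malloc_and_get_section and owned (and
+   freed) by the caller.
+
+     bfd_byte *buf = NULL;
+     if (bfd_malloc_and_get_section (abfd, sec, &buf))
+       {
+         // use buf[0 .. sec->size - 1]
+         free (buf);
+       }
+  */
+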
+bfd_boolean bfd_copy_private_section_data
+ (bfd *ibfd, asection *isec, bfd *obfd, asection *osec);
+
+#define bfd_copy_private_section_data(ibfd, isection, obfd, osection) \
+ BFD_SEND (obfd, _bfd_copy_private_section_data, \
+ (ibfd, isection, obfd, osection))
+bfd_boolean bfd_generic_is_group_section (bfd *, const asection *sec);
+
+bfd_boolean bfd_generic_discard_group (bfd *abfd, asection *group);
+
+/* Extracted from archures.c. */
+enum bfd_architecture
+{
+ bfd_arch_unknown, /* File arch not known. */
+ bfd_arch_obscure, /* Arch known, not one of these. */
+ bfd_arch_m68k, /* Motorola 68xxx */
+#define bfd_mach_m68000 1
+#define bfd_mach_m68008 2
+#define bfd_mach_m68010 3
+#define bfd_mach_m68020 4
+#define bfd_mach_m68030 5
+#define bfd_mach_m68040 6
+#define bfd_mach_m68060 7
+#define bfd_mach_cpu32 8
+#define bfd_mach_fido 9
+#define bfd_mach_mcf_isa_a_nodiv 10
+#define bfd_mach_mcf_isa_a 11
+#define bfd_mach_mcf_isa_a_mac 12
+#define bfd_mach_mcf_isa_a_emac 13
+#define bfd_mach_mcf_isa_aplus 14
+#define bfd_mach_mcf_isa_aplus_mac 15
+#define bfd_mach_mcf_isa_aplus_emac 16
+#define bfd_mach_mcf_isa_b_nousp 17
+#define bfd_mach_mcf_isa_b_nousp_mac 18
+#define bfd_mach_mcf_isa_b_nousp_emac 19
+#define bfd_mach_mcf_isa_b 20
+#define bfd_mach_mcf_isa_b_mac 21
+#define bfd_mach_mcf_isa_b_emac 22
+#define bfd_mach_mcf_isa_b_float 23
+#define bfd_mach_mcf_isa_b_float_mac 24
+#define bfd_mach_mcf_isa_b_float_emac 25
+#define bfd_mach_mcf_isa_c 26
+#define bfd_mach_mcf_isa_c_mac 27
+#define bfd_mach_mcf_isa_c_emac 28
+#define bfd_mach_mcf_isa_c_nodiv 29
+#define bfd_mach_mcf_isa_c_nodiv_mac 30
+#define bfd_mach_mcf_isa_c_nodiv_emac 31
+ bfd_arch_vax, /* DEC Vax */
+ bfd_arch_i960, /* Intel 960 */
+ /* The order of the following is important.
+ lower number indicates a machine type that
+ only accepts a subset of the instructions
+ available to machines with higher numbers.
+ The exception is the "ca", which is
+ incompatible with all other machines except
+ "core". */
+
+#define bfd_mach_i960_core 1
+#define bfd_mach_i960_ka_sa 2
+#define bfd_mach_i960_kb_sb 3
+#define bfd_mach_i960_mc 4
+#define bfd_mach_i960_xa 5
+#define bfd_mach_i960_ca 6
+#define bfd_mach_i960_jx 7
+#define bfd_mach_i960_hx 8
+
+ bfd_arch_or32, /* OpenRISC 32 */
+
+ bfd_arch_sparc, /* SPARC */
+#define bfd_mach_sparc 1
+/* The difference between v8plus and v9 is that v9 is a true 64 bit env. */
+#define bfd_mach_sparc_sparclet 2
+#define bfd_mach_sparc_sparclite 3
+#define bfd_mach_sparc_v8plus 4
+#define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */
+#define bfd_mach_sparc_sparclite_le 6
+#define bfd_mach_sparc_v9 7
+#define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */
+#define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */
+#define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */
+/* Nonzero if MACH has the v9 instruction set. */
+#define bfd_mach_sparc_v9_p(mach) \
+ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \
+ && (mach) != bfd_mach_sparc_sparclite_le)
+/* Nonzero if MACH is a 64 bit sparc architecture. */
+#define bfd_mach_sparc_64bit_p(mach) \
+ ((mach) >= bfd_mach_sparc_v9 && (mach) != bfd_mach_sparc_v8plusb)
+ bfd_arch_spu, /* PowerPC SPU */
+#define bfd_mach_spu 256
+ bfd_arch_mips, /* MIPS Rxxxx */
+#define bfd_mach_mips3000 3000
+#define bfd_mach_mips3900 3900
+#define bfd_mach_mips4000 4000
+#define bfd_mach_mips4010 4010
+#define bfd_mach_mips4100 4100
+#define bfd_mach_mips4111 4111
+#define bfd_mach_mips4120 4120
+#define bfd_mach_mips4300 4300
+#define bfd_mach_mips4400 4400
+#define bfd_mach_mips4600 4600
+#define bfd_mach_mips4650 4650
+#define bfd_mach_mips5000 5000
+#define bfd_mach_mips5400 5400
+#define bfd_mach_mips5500 5500
+#define bfd_mach_mips6000 6000
+#define bfd_mach_mips7000 7000
+#define bfd_mach_mips8000 8000
+#define bfd_mach_mips9000 9000
+#define bfd_mach_mips10000 10000
+#define bfd_mach_mips12000 12000
+#define bfd_mach_mips14000 14000
+#define bfd_mach_mips16000 16000
+#define bfd_mach_mips16 16
+#define bfd_mach_mips5 5
+#define bfd_mach_mips_loongson_2e 3001
+#define bfd_mach_mips_loongson_2f 3002
+#define bfd_mach_mips_sb1 12310201 /* octal 'SB', 01 */
+#define bfd_mach_mips_octeon 6501
+#define bfd_mach_mips_xlr 887682 /* decimal 'XLR' */
+#define bfd_mach_mipsisa32 32
+#define bfd_mach_mipsisa32r2 33
+#define bfd_mach_mipsisa64 64
+#define bfd_mach_mipsisa64r2 65
+ bfd_arch_i386, /* Intel 386 */
+#define bfd_mach_i386_i386 1
+#define bfd_mach_i386_i8086 2
+#define bfd_mach_i386_i386_intel_syntax 3
+#define bfd_mach_x86_64 64
+#define bfd_mach_x86_64_intel_syntax 65
+ bfd_arch_l1om, /* Intel L1OM */
+#define bfd_mach_l1om 66
+#define bfd_mach_l1om_intel_syntax 67
+ bfd_arch_we32k, /* AT&T WE32xxx */
+ bfd_arch_tahoe, /* CCI/Harris Tahoe */
+ bfd_arch_i860, /* Intel 860 */
+ bfd_arch_i370, /* IBM 360/370 Mainframes */
+ bfd_arch_romp, /* IBM ROMP PC/RT */
+ bfd_arch_convex, /* Convex */
+ bfd_arch_m88k, /* Motorola 88xxx */
+ bfd_arch_m98k, /* Motorola 98xxx */
+ bfd_arch_pyramid, /* Pyramid Technology */
+ bfd_arch_h8300, /* Renesas H8/300 (formerly Hitachi H8/300) */
+#define bfd_mach_h8300 1
+#define bfd_mach_h8300h 2
+#define bfd_mach_h8300s 3
+#define bfd_mach_h8300hn 4
+#define bfd_mach_h8300sn 5
+#define bfd_mach_h8300sx 6
+#define bfd_mach_h8300sxn 7
+ bfd_arch_pdp11, /* DEC PDP-11 */
+ bfd_arch_plugin,
+ bfd_arch_powerpc, /* PowerPC */
+#define bfd_mach_ppc 32
+#define bfd_mach_ppc64 64
+#define bfd_mach_ppc_403 403
+#define bfd_mach_ppc_403gc 4030
+#define bfd_mach_ppc_405 405
+#define bfd_mach_ppc_505 505
+#define bfd_mach_ppc_601 601
+#define bfd_mach_ppc_602 602
+#define bfd_mach_ppc_603 603
+#define bfd_mach_ppc_ec603e 6031
+#define bfd_mach_ppc_604 604
+#define bfd_mach_ppc_620 620
+#define bfd_mach_ppc_630 630
+#define bfd_mach_ppc_750 750
+#define bfd_mach_ppc_860 860
+#define bfd_mach_ppc_a35 35
+#define bfd_mach_ppc_rs64ii 642
+#define bfd_mach_ppc_rs64iii 643
+#define bfd_mach_ppc_7400 7400
+#define bfd_mach_ppc_e500 500
+#define bfd_mach_ppc_e500mc 5001
+#define bfd_mach_ppc_e500mc64 5005
+#define bfd_mach_ppc_titan 83
+ bfd_arch_rs6000, /* IBM RS/6000 */
+#define bfd_mach_rs6k 6000
+#define bfd_mach_rs6k_rs1 6001
+#define bfd_mach_rs6k_rsc 6003
+#define bfd_mach_rs6k_rs2 6002
+ bfd_arch_hppa, /* HP PA RISC */
+#define bfd_mach_hppa10 10
+#define bfd_mach_hppa11 11
+#define bfd_mach_hppa20 20
+#define bfd_mach_hppa20w 25
+ bfd_arch_d10v, /* Mitsubishi D10V */
+#define bfd_mach_d10v 1
+#define bfd_mach_d10v_ts2 2
+#define bfd_mach_d10v_ts3 3
+ bfd_arch_d30v, /* Mitsubishi D30V */
+ bfd_arch_dlx, /* DLX */
+ bfd_arch_m68hc11, /* Motorola 68HC11 */
+ bfd_arch_m68hc12, /* Motorola 68HC12 */
+#define bfd_mach_m6812_default 0
+#define bfd_mach_m6812 1
+#define bfd_mach_m6812s 2
+ bfd_arch_z8k, /* Zilog Z8000 */
+#define bfd_mach_z8001 1
+#define bfd_mach_z8002 2
+ bfd_arch_h8500, /* Renesas H8/500 (formerly Hitachi H8/500) */
+ bfd_arch_sh, /* Renesas / SuperH SH (formerly Hitachi SH) */
+#define bfd_mach_sh 1
+#define bfd_mach_sh2 0x20
+#define bfd_mach_sh_dsp 0x2d
+#define bfd_mach_sh2a 0x2a
+#define bfd_mach_sh2a_nofpu 0x2b
+#define bfd_mach_sh2a_nofpu_or_sh4_nommu_nofpu 0x2a1
+#define bfd_mach_sh2a_nofpu_or_sh3_nommu 0x2a2
+#define bfd_mach_sh2a_or_sh4 0x2a3
+#define bfd_mach_sh2a_or_sh3e 0x2a4
+#define bfd_mach_sh2e 0x2e
+#define bfd_mach_sh3 0x30
+#define bfd_mach_sh3_nommu 0x31
+#define bfd_mach_sh3_dsp 0x3d
+#define bfd_mach_sh3e 0x3e
+#define bfd_mach_sh4 0x40
+#define bfd_mach_sh4_nofpu 0x41
+#define bfd_mach_sh4_nommu_nofpu 0x42
+#define bfd_mach_sh4a 0x4a
+#define bfd_mach_sh4a_nofpu 0x4b
+#define bfd_mach_sh4al_dsp 0x4d
+#define bfd_mach_sh5 0x50
+ bfd_arch_alpha, /* Dec Alpha */
+#define bfd_mach_alpha_ev4 0x10
+#define bfd_mach_alpha_ev5 0x20
+#define bfd_mach_alpha_ev6 0x30
+ bfd_arch_arm, /* Advanced Risc Machines ARM. */
+#define bfd_mach_arm_unknown 0
+#define bfd_mach_arm_2 1
+#define bfd_mach_arm_2a 2
+#define bfd_mach_arm_3 3
+#define bfd_mach_arm_3M 4
+#define bfd_mach_arm_4 5
+#define bfd_mach_arm_4T 6
+#define bfd_mach_arm_5 7
+#define bfd_mach_arm_5T 8
+#define bfd_mach_arm_5TE 9
+#define bfd_mach_arm_XScale 10
+#define bfd_mach_arm_ep9312 11
+#define bfd_mach_arm_iWMMXt 12
+#define bfd_mach_arm_iWMMXt2 13
+ bfd_arch_ns32k, /* National Semiconductors ns32000 */
+ bfd_arch_w65, /* WDC 65816 */
+ bfd_arch_tic30, /* Texas Instruments TMS320C30 */
+ bfd_arch_tic4x, /* Texas Instruments TMS320C3X/4X */
+#define bfd_mach_tic3x 30
+#define bfd_mach_tic4x 40
+ bfd_arch_tic54x, /* Texas Instruments TMS320C54X */
+ bfd_arch_tic6x, /* Texas Instruments TMS320C6X */
+ bfd_arch_tic80, /* TI TMS320c80 (MVP) */
+ bfd_arch_v850, /* NEC V850 */
+#define bfd_mach_v850 1
+#define bfd_mach_v850e 'E'
+#define bfd_mach_v850e1 '1'
+#define bfd_mach_v850e2 0x4532
+#define bfd_mach_v850e2v3 0x45325633
+ bfd_arch_arc, /* ARC Cores */
+#define bfd_mach_arc_5 5
+#define bfd_mach_arc_6 6
+#define bfd_mach_arc_7 7
+#define bfd_mach_arc_8 8
+ bfd_arch_m32c, /* Renesas M16C/M32C. */
+#define bfd_mach_m16c 0x75
+#define bfd_mach_m32c 0x78
+ bfd_arch_m32r, /* Renesas M32R (formerly Mitsubishi M32R/D) */
+#define bfd_mach_m32r 1 /* For backwards compatibility. */
+#define bfd_mach_m32rx 'x'
+#define bfd_mach_m32r2 '2'
+ bfd_arch_mn10200, /* Matsushita MN10200 */
+ bfd_arch_mn10300, /* Matsushita MN10300 */
+#define bfd_mach_mn10300 300
+#define bfd_mach_am33 330
+#define bfd_mach_am33_2 332
+ bfd_arch_fr30,
+#define bfd_mach_fr30 0x46523330
+ bfd_arch_frv,
+#define bfd_mach_frv 1
+#define bfd_mach_frvsimple 2
+#define bfd_mach_fr300 300
+#define bfd_mach_fr400 400
+#define bfd_mach_fr450 450
+#define bfd_mach_frvtomcat 499 /* fr500 prototype */
+#define bfd_mach_fr500 500
+#define bfd_mach_fr550 550
+ bfd_arch_moxie, /* The moxie processor */
+#define bfd_mach_moxie 1
+ bfd_arch_mcore,
+ bfd_arch_mep,
+#define bfd_mach_mep 1
+#define bfd_mach_mep_h1 0x6831
+#define bfd_mach_mep_c5 0x6335
+ bfd_arch_ia64, /* HP/Intel ia64 */
+#define bfd_mach_ia64_elf64 64
+#define bfd_mach_ia64_elf32 32
+ bfd_arch_ip2k, /* Ubicom IP2K microcontrollers. */
+#define bfd_mach_ip2022 1
+#define bfd_mach_ip2022ext 2
+ bfd_arch_iq2000, /* Vitesse IQ2000. */
+#define bfd_mach_iq2000 1
+#define bfd_mach_iq10 2
+ bfd_arch_mt,
+#define bfd_mach_ms1 1
+#define bfd_mach_mrisc2 2
+#define bfd_mach_ms2 3
+ bfd_arch_pj,
+ bfd_arch_avr, /* Atmel AVR microcontrollers. */
+#define bfd_mach_avr1 1
+#define bfd_mach_avr2 2
+#define bfd_mach_avr25 25
+#define bfd_mach_avr3 3
+#define bfd_mach_avr31 31
+#define bfd_mach_avr35 35
+#define bfd_mach_avr4 4
+#define bfd_mach_avr5 5
+#define bfd_mach_avr51 51
+#define bfd_mach_avr6 6
+ bfd_arch_bfin, /* ADI Blackfin */
+#define bfd_mach_bfin 1
+ bfd_arch_cr16, /* National Semiconductor CompactRISC (ie CR16). */
+#define bfd_mach_cr16 1
+ bfd_arch_cr16c, /* National Semiconductor CompactRISC. */
+#define bfd_mach_cr16c 1
+ bfd_arch_crx, /* National Semiconductor CRX. */
+#define bfd_mach_crx 1
+ bfd_arch_cris, /* Axis CRIS */
+#define bfd_mach_cris_v0_v10 255
+#define bfd_mach_cris_v32 32
+#define bfd_mach_cris_v10_v32 1032
+ bfd_arch_rx, /* Renesas RX. */
+#define bfd_mach_rx 0x75
+ bfd_arch_s390, /* IBM s390 */
+#define bfd_mach_s390_31 31
+#define bfd_mach_s390_64 64
+ bfd_arch_score, /* Sunplus score */
+#define bfd_mach_score3 3
+#define bfd_mach_score7 7
+ bfd_arch_openrisc, /* OpenRISC */
+ bfd_arch_mmix, /* Donald Knuth's educational processor. */
+ bfd_arch_xstormy16,
+#define bfd_mach_xstormy16 1
+ bfd_arch_msp430, /* Texas Instruments MSP430 architecture. */
+#define bfd_mach_msp11 11
+#define bfd_mach_msp110 110
+#define bfd_mach_msp12 12
+#define bfd_mach_msp13 13
+#define bfd_mach_msp14 14
+#define bfd_mach_msp15 15
+#define bfd_mach_msp16 16
+#define bfd_mach_msp21 21
+#define bfd_mach_msp31 31
+#define bfd_mach_msp32 32
+#define bfd_mach_msp33 33
+#define bfd_mach_msp41 41
+#define bfd_mach_msp42 42
+#define bfd_mach_msp43 43
+#define bfd_mach_msp44 44
+ bfd_arch_xc16x, /* Infineon's XC16X Series. */
+#define bfd_mach_xc16x 1
+#define bfd_mach_xc16xl 2
+#define bfd_mach_xc16xs 3
+ bfd_arch_xtensa, /* Tensilica's Xtensa cores. */
+#define bfd_mach_xtensa 1
+ bfd_arch_z80,
+#define bfd_mach_z80strict 1 /* No undocumented opcodes. */
+#define bfd_mach_z80 3 /* With ixl, ixh, iyl, and iyh. */
+#define bfd_mach_z80full 7 /* All undocumented instructions. */
+#define bfd_mach_r800 11 /* R800: successor with multiplication. */
+ bfd_arch_lm32, /* Lattice Mico32 */
+#define bfd_mach_lm32 1
+ bfd_arch_microblaze,/* Xilinx MicroBlaze. */
+ bfd_arch_last
+ };
+
+typedef struct bfd_arch_info
+{
+ int bits_per_word;
+ int bits_per_address;
+ int bits_per_byte;
+ enum bfd_architecture arch;
+ unsigned long mach;
+ const char *arch_name;
+ const char *printable_name;
+ unsigned int section_align_power;
+ /* TRUE if this is the default machine for the architecture.
+ The default arch should be the first entry for an arch so that
+ all the entries for that arch can be accessed via <<next>>. */
+ bfd_boolean the_default;
+ const struct bfd_arch_info * (*compatible)
+ (const struct bfd_arch_info *a, const struct bfd_arch_info *b);
+
+ bfd_boolean (*scan) (const struct bfd_arch_info *, const char *);
+
+ const struct bfd_arch_info *next;
+}
+bfd_arch_info_type;
+
+const char *bfd_printable_name (bfd *abfd);
+
+const bfd_arch_info_type *bfd_scan_arch (const char *string);
+
+const char **bfd_arch_list (void);
+
+const bfd_arch_info_type *bfd_arch_get_compatible
+ (const bfd *abfd, const bfd *bbfd, bfd_boolean accept_unknowns);
+
+void bfd_set_arch_info (bfd *abfd, const bfd_arch_info_type *arg);
+
+enum bfd_architecture bfd_get_arch (bfd *abfd);
+
+unsigned long bfd_get_mach (bfd *abfd);
+
+unsigned int bfd_arch_bits_per_byte (bfd *abfd);
+
+unsigned int bfd_arch_bits_per_address (bfd *abfd);
+
+const bfd_arch_info_type *bfd_get_arch_info (bfd *abfd);
+
+const bfd_arch_info_type *bfd_lookup_arch
+ (enum bfd_architecture arch, unsigned long machine);
+
+const char *bfd_printable_arch_mach
+ (enum bfd_architecture arch, unsigned long machine);
+
+unsigned int bfd_octets_per_byte (bfd *abfd);
+
+unsigned int bfd_arch_mach_octets_per_byte
+ (enum bfd_architecture arch, unsigned long machine);
+
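+/* An illustrative sketch: querying architecture information by name or
+   from an open BFD.
+
+     const bfd_arch_info_type *info = bfd_scan_arch ("i386");
+     if (info != NULL)
+       printf ("%s: %d-bit addresses\n",
+               info->printable_name, info->bits_per_address);
+
+     info = bfd_lookup_arch (bfd_get_arch (abfd), bfd_get_mach (abfd));
+  */
+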
+/* Extracted from reloc.c. */
+typedef enum bfd_reloc_status
+{
+ /* No errors detected. */
+ bfd_reloc_ok,
+
+ /* The relocation was performed, but there was an overflow. */
+ bfd_reloc_overflow,
+
+ /* The address to relocate was not within the section supplied. */
+ bfd_reloc_outofrange,
+
+ /* Used by special functions. */
+ bfd_reloc_continue,
+
+ /* Unsupported relocation size requested. */
+ bfd_reloc_notsupported,
+
+ /* Unused. */
+ bfd_reloc_other,
+
+ /* The symbol to relocate against was undefined. */
+ bfd_reloc_undefined,
+
+ /* The relocation was performed, but may not be ok - presently
+ generated only when linking i960 coff files with i960 b.out
+ symbols. If this type is returned, the error_message argument
+ to bfd_perform_relocation will be set. */
+ bfd_reloc_dangerous
+ }
+ bfd_reloc_status_type;
+
+
+typedef struct reloc_cache_entry
+{
+ /* A pointer into the canonical table of pointers. */
+ struct bfd_symbol **sym_ptr_ptr;
+
+ /* offset in section. */
+ bfd_size_type address;
+
+ /* addend for relocation value. */
+ bfd_vma addend;
+
+ /* Pointer to how to perform the required relocation. */
+ reloc_howto_type *howto;
+
+}
+arelent;
+
+enum complain_overflow
+{
+ /* Do not complain on overflow. */
+ complain_overflow_dont,
+
+ /* Complain if the value overflows when considered as a signed
+ number one bit larger than the field. ie. A bitfield of N bits
+ is allowed to represent -2**N to 2**N-1. */
+ complain_overflow_bitfield,
+
+ /* Complain if the value overflows when considered as a signed
+ number. */
+ complain_overflow_signed,
+
+ /* Complain if the value overflows when considered as an
+ unsigned number. */
+ complain_overflow_unsigned
+};
+
+struct reloc_howto_struct
+{
+ /* The type field has mainly a documentary use - the back end can
+ do what it wants with it, though normally it holds the back
+ end's external idea of what the reloc number is.
+ For example, a PC relative word relocation
+ in a coff environment has the type 023 - because that's
+ what the outside world calls a R_PCRWORD reloc. */
+ unsigned int type;
+
+ /* The value the final relocation is shifted right by. This drops
+ unwanted data from the relocation. */
+ unsigned int rightshift;
+
+ /* The size of the item to be relocated. This is *not* a
+ power-of-two measure. To get the number of bytes operated
+ on by a type of relocation, use bfd_get_reloc_size. */
+ int size;
+
+ /* The number of bits in the item to be relocated. This is used
+ when doing overflow checking. */
+ unsigned int bitsize;
+
+ /* The relocation is relative to the field being relocated. */
+ bfd_boolean pc_relative;
+
+ /* The bit position of the reloc value in the destination.
+ The relocated value is left shifted by this amount. */
+ unsigned int bitpos;
+
+ /* What type of overflow error should be checked for when
+ relocating. */
+ enum complain_overflow complain_on_overflow;
+
+ /* If this field is non null, then the supplied function is
+ called rather than the normal function. This allows really
+ strange relocation methods to be accommodated (e.g., i960 callj
+ instructions). */
+ bfd_reloc_status_type (*special_function)
+ (bfd *, arelent *, struct bfd_symbol *, void *, asection *,
+ bfd *, char **);
+
+ /* The textual name of the relocation type. */
+ char *name;
+
+ /* Some formats record a relocation addend in the section contents
+ rather than with the relocation. For ELF formats this is the
+ distinction between USE_REL and USE_RELA (though the code checks
+ for USE_REL == 1/0). The value of this field is TRUE if the
+ addend is recorded with the section contents; when performing a
+ partial link (ld -r) the section contents (the data) will be
+ modified. The value of this field is FALSE if addends are
+ recorded with the relocation (in arelent.addend); when performing
+ a partial link the relocation will be modified.
+ All relocations for all ELF USE_RELA targets should set this field
+ to FALSE (values of TRUE should be looked on with suspicion).
+ However, the converse is not true: not all relocations of all ELF
+ USE_REL targets set this field to TRUE. Why this is so is peculiar
+ to each particular target. For relocs that aren't used in partial
+ links (e.g. GOT stuff) it doesn't matter what this is set to. */
+ bfd_boolean partial_inplace;
+
+ /* src_mask selects the part of the instruction (or data) to be used
+ in the relocation sum. If the target relocations don't have an
+ addend in the reloc, eg. ELF USE_REL, src_mask will normally equal
+ dst_mask to extract the addend from the section contents. If
+ relocations do have an addend in the reloc, eg. ELF USE_RELA, this
+ field should be zero. Non-zero values for ELF USE_RELA targets are
+ bogus as in those cases the value in the dst_mask part of the
+ section contents should be treated as garbage. */
+ bfd_vma src_mask;
+
+ /* dst_mask selects which parts of the instruction (or data) are
+ replaced with a relocated value. */
+ bfd_vma dst_mask;
+
+ /* When some formats create PC relative instructions, they leave
+ the value of the pc of the place being relocated in the offset
+ slot of the instruction, so that a PC relative relocation can
+ be made just by adding in an ordinary offset (e.g., sun3 a.out).
+ Some formats leave the displacement part of an instruction
+ empty (e.g., m88k bcs); this flag signals the fact. */
+ bfd_boolean pcrel_offset;
+};
+
+#define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \
+ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC }
+#define NEWHOWTO(FUNCTION, NAME, SIZE, REL, IN) \
+ HOWTO (0, 0, SIZE, 0, REL, 0, complain_overflow_dont, FUNCTION, \
+ NAME, FALSE, 0, 0, IN)
+
+#define EMPTY_HOWTO(C) \
+ HOWTO ((C), 0, 0, 0, FALSE, 0, complain_overflow_dont, NULL, \
+ NULL, FALSE, 0, 0, FALSE)
+
+#define HOWTO_PREPARE(relocation, symbol) \
+ { \
+ if (symbol != NULL) \
+ { \
+ if (bfd_is_com_section (symbol->section)) \
+ { \
+ relocation = 0; \
+ } \
+ else \
+ { \
+ relocation = symbol->value; \
+ } \
+ } \
+ }
+
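+/* An illustrative sketch: a howto entry built with the HOWTO macro.  The
+   arguments map, in order, to type, rightshift, size, bitsize,
+   pc_relative, bitpos, complain_on_overflow, special_function, name,
+   partial_inplace, src_mask, dst_mask and pcrel_offset.  The relocation
+   number R_EXAMPLE_32 is hypothetical.
+
+     static reloc_howto_type example_howto =
+       HOWTO (R_EXAMPLE_32,                // type
+              0,                           // rightshift
+              2,                           // size (4 bytes, see bfd_get_reloc_size)
+              32,                          // bitsize
+              FALSE,                       // pc_relative
+              0,                           // bitpos
+              complain_overflow_bitfield,  // complain_on_overflow
+              NULL,                        // special_function
+              "R_EXAMPLE_32",              // name
+              FALSE,                       // partial_inplace
+              0,                           // src_mask
+              0xffffffff,                  // dst_mask
+              FALSE);                      // pcrel_offset
+  */
+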
+unsigned int bfd_get_reloc_size (reloc_howto_type *);
+
+typedef struct relent_chain
+{
+ arelent relent;
+ struct relent_chain *next;
+}
+arelent_chain;
+
+bfd_reloc_status_type bfd_check_overflow
+ (enum complain_overflow how,
+ unsigned int bitsize,
+ unsigned int rightshift,
+ unsigned int addrsize,
+ bfd_vma relocation);
+
+bfd_reloc_status_type bfd_perform_relocation
+ (bfd *abfd,
+ arelent *reloc_entry,
+ void *data,
+ asection *input_section,
+ bfd *output_bfd,
+ char **error_message);
+
+bfd_reloc_status_type bfd_install_relocation
+ (bfd *abfd,
+ arelent *reloc_entry,
+ void *data, bfd_vma data_start,
+ asection *input_section,
+ char **error_message);
+
+enum bfd_reloc_code_real {
+ _dummy_first_bfd_reloc_code_real,
+
+
+/* Basic absolute relocations of N bits. */
+ BFD_RELOC_64,
+ BFD_RELOC_32,
+ BFD_RELOC_26,
+ BFD_RELOC_24,
+ BFD_RELOC_16,
+ BFD_RELOC_14,
+ BFD_RELOC_8,
+
+/* PC-relative relocations. Sometimes these are relative to the address
+of the relocation itself; sometimes they are relative to the start of
+the section containing the relocation. It depends on the specific target.
+
+The 24-bit relocation is used in some Intel 960 configurations. */
+ BFD_RELOC_64_PCREL,
+ BFD_RELOC_32_PCREL,
+ BFD_RELOC_24_PCREL,
+ BFD_RELOC_16_PCREL,
+ BFD_RELOC_12_PCREL,
+ BFD_RELOC_8_PCREL,
+
+/* Section relative relocations. Some targets need this for DWARF2. */
+ BFD_RELOC_32_SECREL,
+
+/* For ELF. */
+ BFD_RELOC_32_GOT_PCREL,
+ BFD_RELOC_16_GOT_PCREL,
+ BFD_RELOC_8_GOT_PCREL,
+ BFD_RELOC_32_GOTOFF,
+ BFD_RELOC_16_GOTOFF,
+ BFD_RELOC_LO16_GOTOFF,
+ BFD_RELOC_HI16_GOTOFF,
+ BFD_RELOC_HI16_S_GOTOFF,
+ BFD_RELOC_8_GOTOFF,
+ BFD_RELOC_64_PLT_PCREL,
+ BFD_RELOC_32_PLT_PCREL,
+ BFD_RELOC_24_PLT_PCREL,
+ BFD_RELOC_16_PLT_PCREL,
+ BFD_RELOC_8_PLT_PCREL,
+ BFD_RELOC_64_PLTOFF,
+ BFD_RELOC_32_PLTOFF,
+ BFD_RELOC_16_PLTOFF,
+ BFD_RELOC_LO16_PLTOFF,
+ BFD_RELOC_HI16_PLTOFF,
+ BFD_RELOC_HI16_S_PLTOFF,
+ BFD_RELOC_8_PLTOFF,
+
+/* Relocations used by 68K ELF. */
+ BFD_RELOC_68K_GLOB_DAT,
+ BFD_RELOC_68K_JMP_SLOT,
+ BFD_RELOC_68K_RELATIVE,
+ BFD_RELOC_68K_TLS_GD32,
+ BFD_RELOC_68K_TLS_GD16,
+ BFD_RELOC_68K_TLS_GD8,
+ BFD_RELOC_68K_TLS_LDM32,
+ BFD_RELOC_68K_TLS_LDM16,
+ BFD_RELOC_68K_TLS_LDM8,
+ BFD_RELOC_68K_TLS_LDO32,
+ BFD_RELOC_68K_TLS_LDO16,
+ BFD_RELOC_68K_TLS_LDO8,
+ BFD_RELOC_68K_TLS_IE32,
+ BFD_RELOC_68K_TLS_IE16,
+ BFD_RELOC_68K_TLS_IE8,
+ BFD_RELOC_68K_TLS_LE32,
+ BFD_RELOC_68K_TLS_LE16,
+ BFD_RELOC_68K_TLS_LE8,
+
+/* Linkage-table relative. */
+ BFD_RELOC_32_BASEREL,
+ BFD_RELOC_16_BASEREL,
+ BFD_RELOC_LO16_BASEREL,
+ BFD_RELOC_HI16_BASEREL,
+ BFD_RELOC_HI16_S_BASEREL,
+ BFD_RELOC_8_BASEREL,
+ BFD_RELOC_RVA,
+
+/* Absolute 8-bit relocation, but used to form an address like 0xFFnn. */
+ BFD_RELOC_8_FFnn,
+
+/* These PC-relative relocations are stored as word displacements --
+i.e., byte displacements shifted right two bits. The 30-bit word
+displacement (<<32_PCREL_S2>> -- 32 bits, shifted 2) is used on the
+SPARC. (SPARC tools generally refer to this as <<WDISP30>>.) The
+signed 16-bit displacement is used on the MIPS, and the 23-bit
+displacement is used on the Alpha. */
+ BFD_RELOC_32_PCREL_S2,
+ BFD_RELOC_16_PCREL_S2,
+ BFD_RELOC_23_PCREL_S2,
+
+/* High 22 bits and low 10 bits of 32-bit value, placed into lower bits of
+the target word. These are used on the SPARC. */
+ BFD_RELOC_HI22,
+ BFD_RELOC_LO10,
+
+/* For systems that allocate a Global Pointer register, these are
+displacements off that register. These relocation types are
+handled specially, because the value the register will have is
+decided relatively late. */
+ BFD_RELOC_GPREL16,
+ BFD_RELOC_GPREL32,
+
+/* Reloc types used for i960/b.out. */
+ BFD_RELOC_I960_CALLJ,
+
+/* SPARC ELF relocations. There is probably some overlap with other
+relocation types already defined. */
+ BFD_RELOC_NONE,
+ BFD_RELOC_SPARC_WDISP22,
+ BFD_RELOC_SPARC22,
+ BFD_RELOC_SPARC13,
+ BFD_RELOC_SPARC_GOT10,
+ BFD_RELOC_SPARC_GOT13,
+ BFD_RELOC_SPARC_GOT22,
+ BFD_RELOC_SPARC_PC10,
+ BFD_RELOC_SPARC_PC22,
+ BFD_RELOC_SPARC_WPLT30,
+ BFD_RELOC_SPARC_COPY,
+ BFD_RELOC_SPARC_GLOB_DAT,
+ BFD_RELOC_SPARC_JMP_SLOT,
+ BFD_RELOC_SPARC_RELATIVE,
+ BFD_RELOC_SPARC_UA16,
+ BFD_RELOC_SPARC_UA32,
+ BFD_RELOC_SPARC_UA64,
+ BFD_RELOC_SPARC_GOTDATA_HIX22,
+ BFD_RELOC_SPARC_GOTDATA_LOX10,
+ BFD_RELOC_SPARC_GOTDATA_OP_HIX22,
+ BFD_RELOC_SPARC_GOTDATA_OP_LOX10,
+ BFD_RELOC_SPARC_GOTDATA_OP,
+ BFD_RELOC_SPARC_JMP_IREL,
+ BFD_RELOC_SPARC_IRELATIVE,
+
+/* I think these are specific to SPARC a.out (e.g., Sun 4). */
+ BFD_RELOC_SPARC_BASE13,
+ BFD_RELOC_SPARC_BASE22,
+
+/* SPARC64 relocations */
+#define BFD_RELOC_SPARC_64 BFD_RELOC_64
+ BFD_RELOC_SPARC_10,
+ BFD_RELOC_SPARC_11,
+ BFD_RELOC_SPARC_OLO10,
+ BFD_RELOC_SPARC_HH22,
+ BFD_RELOC_SPARC_HM10,
+ BFD_RELOC_SPARC_LM22,
+ BFD_RELOC_SPARC_PC_HH22,
+ BFD_RELOC_SPARC_PC_HM10,
+ BFD_RELOC_SPARC_PC_LM22,
+ BFD_RELOC_SPARC_WDISP16,
+ BFD_RELOC_SPARC_WDISP19,
+ BFD_RELOC_SPARC_7,
+ BFD_RELOC_SPARC_6,
+ BFD_RELOC_SPARC_5,
+#define BFD_RELOC_SPARC_DISP64 BFD_RELOC_64_PCREL
+ BFD_RELOC_SPARC_PLT32,
+ BFD_RELOC_SPARC_PLT64,
+ BFD_RELOC_SPARC_HIX22,
+ BFD_RELOC_SPARC_LOX10,
+ BFD_RELOC_SPARC_H44,
+ BFD_RELOC_SPARC_M44,
+ BFD_RELOC_SPARC_L44,
+ BFD_RELOC_SPARC_REGISTER,
+
+/* SPARC little endian relocation */
+ BFD_RELOC_SPARC_REV32,
+
+/* SPARC TLS relocations */
+ BFD_RELOC_SPARC_TLS_GD_HI22,
+ BFD_RELOC_SPARC_TLS_GD_LO10,
+ BFD_RELOC_SPARC_TLS_GD_ADD,
+ BFD_RELOC_SPARC_TLS_GD_CALL,
+ BFD_RELOC_SPARC_TLS_LDM_HI22,
+ BFD_RELOC_SPARC_TLS_LDM_LO10,
+ BFD_RELOC_SPARC_TLS_LDM_ADD,
+ BFD_RELOC_SPARC_TLS_LDM_CALL,
+ BFD_RELOC_SPARC_TLS_LDO_HIX22,
+ BFD_RELOC_SPARC_TLS_LDO_LOX10,
+ BFD_RELOC_SPARC_TLS_LDO_ADD,
+ BFD_RELOC_SPARC_TLS_IE_HI22,
+ BFD_RELOC_SPARC_TLS_IE_LO10,
+ BFD_RELOC_SPARC_TLS_IE_LD,
+ BFD_RELOC_SPARC_TLS_IE_LDX,
+ BFD_RELOC_SPARC_TLS_IE_ADD,
+ BFD_RELOC_SPARC_TLS_LE_HIX22,
+ BFD_RELOC_SPARC_TLS_LE_LOX10,
+ BFD_RELOC_SPARC_TLS_DTPMOD32,
+ BFD_RELOC_SPARC_TLS_DTPMOD64,
+ BFD_RELOC_SPARC_TLS_DTPOFF32,
+ BFD_RELOC_SPARC_TLS_DTPOFF64,
+ BFD_RELOC_SPARC_TLS_TPOFF32,
+ BFD_RELOC_SPARC_TLS_TPOFF64,
+
+/* SPU Relocations. */
+ BFD_RELOC_SPU_IMM7,
+ BFD_RELOC_SPU_IMM8,
+ BFD_RELOC_SPU_IMM10,
+ BFD_RELOC_SPU_IMM10W,
+ BFD_RELOC_SPU_IMM16,
+ BFD_RELOC_SPU_IMM16W,
+ BFD_RELOC_SPU_IMM18,
+ BFD_RELOC_SPU_PCREL9a,
+ BFD_RELOC_SPU_PCREL9b,
+ BFD_RELOC_SPU_PCREL16,
+ BFD_RELOC_SPU_LO16,
+ BFD_RELOC_SPU_HI16,
+ BFD_RELOC_SPU_PPU32,
+ BFD_RELOC_SPU_PPU64,
+ BFD_RELOC_SPU_ADD_PIC,
+
+/* Alpha ECOFF and ELF relocations. Some of these treat the symbol or
+"addend" in some special way.
+For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when
+writing; when reading, it will be the absolute section symbol. The
+addend is the displacement in bytes of the "lda" instruction from
+the "ldah" instruction (which is at the address of this reloc). */
+ BFD_RELOC_ALPHA_GPDISP_HI16,
+
+/* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as
+with GPDISP_HI16 relocs. The addend is ignored when writing the
+relocations out, and is filled in with the file's GP value on
+reading, for convenience. */
+ BFD_RELOC_ALPHA_GPDISP_LO16,
+
+/* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16
+relocation except that there is no accompanying GPDISP_LO16
+relocation. */
+ BFD_RELOC_ALPHA_GPDISP,
+
+/* The Alpha LITERAL/LITUSE relocs are produced by a symbol reference;
+the assembler turns it into a LDQ instruction to load the address of
+the symbol, and then fills in a register in the real instruction.
+
+The LITERAL reloc, at the LDQ instruction, refers to the .lita
+section symbol. The addend is ignored when writing, but is filled
+in with the file's GP value on reading, for convenience, as with the
+GPDISP_LO16 reloc.
+
+The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16.
+It should refer to the symbol to be referenced, as with 16_GOTOFF,
+but it generates output not based on the position within the .got
+section, but relative to the GP value chosen for the file during the
+final link stage.
+
+The LITUSE reloc, on the instruction using the loaded address, gives
+information to the linker that it might be able to use to optimize
+away some literal section references. The symbol is ignored (read
+as the absolute section symbol), and the "addend" indicates the type
+of instruction using the register:
+1 - "memory" fmt insn
+2 - byte-manipulation (byte offset reg)
+3 - jsr (target of branch) */
+ BFD_RELOC_ALPHA_LITERAL,
+ BFD_RELOC_ALPHA_ELF_LITERAL,
+ BFD_RELOC_ALPHA_LITUSE,
+
+/* The HINT relocation indicates a value that should be filled into the
+"hint" field of a jmp/jsr/ret instruction, for possible branch-
+prediction logic which may be provided on some processors. */
+ BFD_RELOC_ALPHA_HINT,
+
+/* The LINKAGE relocation outputs a linkage pair in the object file,
+which is filled by the linker. */
+ BFD_RELOC_ALPHA_LINKAGE,
+
+/* The CODEADDR relocation outputs a STO_CA in the object file,
+which is filled by the linker. */
+ BFD_RELOC_ALPHA_CODEADDR,
+
+/* The GPREL_HI/LO relocations together form a 32-bit offset from the
+GP register. */
+ BFD_RELOC_ALPHA_GPREL_HI16,
+ BFD_RELOC_ALPHA_GPREL_LO16,
+
+/* Like BFD_RELOC_23_PCREL_S2, except that the source and target must
+share a common GP, and the target address is adjusted for
+STO_ALPHA_STD_GPLOAD. */
+ BFD_RELOC_ALPHA_BRSGP,
+
+/* The NOP relocation outputs a NOP if the longword displacement
+between two procedure entry points is < 2^21. */
+ BFD_RELOC_ALPHA_NOP,
+
+/* The BSR relocation outputs a BSR if the longword displacement
+between two procedure entry points is < 2^21. */
+ BFD_RELOC_ALPHA_BSR,
+
+/* The LDA relocation outputs a LDA if the longword displacement
+between two procedure entry points is < 2^16. */
+ BFD_RELOC_ALPHA_LDA,
+
+/* The BOH relocation outputs a BSR if the longword displacement
+between two procedure entry points is < 2^21, or else a hint. */
+ BFD_RELOC_ALPHA_BOH,
+
+/* Alpha thread-local storage relocations. */
+ BFD_RELOC_ALPHA_TLSGD,
+ BFD_RELOC_ALPHA_TLSLDM,
+ BFD_RELOC_ALPHA_DTPMOD64,
+ BFD_RELOC_ALPHA_GOTDTPREL16,
+ BFD_RELOC_ALPHA_DTPREL64,
+ BFD_RELOC_ALPHA_DTPREL_HI16,
+ BFD_RELOC_ALPHA_DTPREL_LO16,
+ BFD_RELOC_ALPHA_DTPREL16,
+ BFD_RELOC_ALPHA_GOTTPREL16,
+ BFD_RELOC_ALPHA_TPREL64,
+ BFD_RELOC_ALPHA_TPREL_HI16,
+ BFD_RELOC_ALPHA_TPREL_LO16,
+ BFD_RELOC_ALPHA_TPREL16,
+
+/* Bits 27..2 of the relocation address shifted right 2 bits;
+simple reloc otherwise. */
+ BFD_RELOC_MIPS_JMP,
+
+/* The MIPS16 jump instruction. */
+ BFD_RELOC_MIPS16_JMP,
+
+/* MIPS16 GP relative reloc. */
+ BFD_RELOC_MIPS16_GPREL,
+
+/* High 16 bits of 32-bit value; simple reloc. */
+ BFD_RELOC_HI16,
+
+/* High 16 bits of 32-bit value but the low 16 bits will be sign
+extended and added to form the final result. If the low 16
+bits form a negative number, we need to add one to the high value
+to compensate for the borrow when the low bits are added. */
+ BFD_RELOC_HI16_S,
+
+/* Low 16 bits. */
+ BFD_RELOC_LO16,
+
+/* High 16 bits of 32-bit pc-relative value */
+ BFD_RELOC_HI16_PCREL,
+
+/* High 16 bits of 32-bit pc-relative value, adjusted */
+ BFD_RELOC_HI16_S_PCREL,
+
+/* Low 16 bits of pc-relative value */
+ BFD_RELOC_LO16_PCREL,
+
+/* Equivalent of BFD_RELOC_MIPS_*, but with the MIPS16 layout of
+16-bit immediate fields */
+ BFD_RELOC_MIPS16_GOT16,
+ BFD_RELOC_MIPS16_CALL16,
+
+/* MIPS16 high 16 bits of 32-bit value. */
+ BFD_RELOC_MIPS16_HI16,
+
+/* MIPS16 high 16 bits of 32-bit value but the low 16 bits will be sign
+extended and added to form the final result. If the low 16
+bits form a negative number, we need to add one to the high value
+to compensate for the borrow when the low bits are added. */
+ BFD_RELOC_MIPS16_HI16_S,
+
+/* MIPS16 low 16 bits. */
+ BFD_RELOC_MIPS16_LO16,
+
+/* Relocation against a MIPS literal section. */
+ BFD_RELOC_MIPS_LITERAL,
+
+/* MIPS ELF relocations. */
+ BFD_RELOC_MIPS_GOT16,
+ BFD_RELOC_MIPS_CALL16,
+ BFD_RELOC_MIPS_GOT_HI16,
+ BFD_RELOC_MIPS_GOT_LO16,
+ BFD_RELOC_MIPS_CALL_HI16,
+ BFD_RELOC_MIPS_CALL_LO16,
+ BFD_RELOC_MIPS_SUB,
+ BFD_RELOC_MIPS_GOT_PAGE,
+ BFD_RELOC_MIPS_GOT_OFST,
+ BFD_RELOC_MIPS_GOT_DISP,
+ BFD_RELOC_MIPS_SHIFT5,
+ BFD_RELOC_MIPS_SHIFT6,
+ BFD_RELOC_MIPS_INSERT_A,
+ BFD_RELOC_MIPS_INSERT_B,
+ BFD_RELOC_MIPS_DELETE,
+ BFD_RELOC_MIPS_HIGHEST,
+ BFD_RELOC_MIPS_HIGHER,
+ BFD_RELOC_MIPS_SCN_DISP,
+ BFD_RELOC_MIPS_REL16,
+ BFD_RELOC_MIPS_RELGOT,
+ BFD_RELOC_MIPS_JALR,
+ BFD_RELOC_MIPS_TLS_DTPMOD32,
+ BFD_RELOC_MIPS_TLS_DTPREL32,
+ BFD_RELOC_MIPS_TLS_DTPMOD64,
+ BFD_RELOC_MIPS_TLS_DTPREL64,
+ BFD_RELOC_MIPS_TLS_GD,
+ BFD_RELOC_MIPS_TLS_LDM,
+ BFD_RELOC_MIPS_TLS_DTPREL_HI16,
+ BFD_RELOC_MIPS_TLS_DTPREL_LO16,
+ BFD_RELOC_MIPS_TLS_GOTTPREL,
+ BFD_RELOC_MIPS_TLS_TPREL32,
+ BFD_RELOC_MIPS_TLS_TPREL64,
+ BFD_RELOC_MIPS_TLS_TPREL_HI16,
+ BFD_RELOC_MIPS_TLS_TPREL_LO16,
+
+
+/* MIPS ELF relocations (VxWorks and PLT extensions). */
+ BFD_RELOC_MIPS_COPY,
+ BFD_RELOC_MIPS_JUMP_SLOT,
+
+
+/* Moxie ELF relocations. */
+ BFD_RELOC_MOXIE_10_PCREL,
+
+
+/* Fujitsu Frv Relocations. */
+ BFD_RELOC_FRV_LABEL16,
+ BFD_RELOC_FRV_LABEL24,
+ BFD_RELOC_FRV_LO16,
+ BFD_RELOC_FRV_HI16,
+ BFD_RELOC_FRV_GPREL12,
+ BFD_RELOC_FRV_GPRELU12,
+ BFD_RELOC_FRV_GPREL32,
+ BFD_RELOC_FRV_GPRELHI,
+ BFD_RELOC_FRV_GPRELLO,
+ BFD_RELOC_FRV_GOT12,
+ BFD_RELOC_FRV_GOTHI,
+ BFD_RELOC_FRV_GOTLO,
+ BFD_RELOC_FRV_FUNCDESC,
+ BFD_RELOC_FRV_FUNCDESC_GOT12,
+ BFD_RELOC_FRV_FUNCDESC_GOTHI,
+ BFD_RELOC_FRV_FUNCDESC_GOTLO,
+ BFD_RELOC_FRV_FUNCDESC_VALUE,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFF12,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFFHI,
+ BFD_RELOC_FRV_FUNCDESC_GOTOFFLO,
+ BFD_RELOC_FRV_GOTOFF12,
+ BFD_RELOC_FRV_GOTOFFHI,
+ BFD_RELOC_FRV_GOTOFFLO,
+ BFD_RELOC_FRV_GETTLSOFF,
+ BFD_RELOC_FRV_TLSDESC_VALUE,
+ BFD_RELOC_FRV_GOTTLSDESC12,
+ BFD_RELOC_FRV_GOTTLSDESCHI,
+ BFD_RELOC_FRV_GOTTLSDESCLO,
+ BFD_RELOC_FRV_TLSMOFF12,
+ BFD_RELOC_FRV_TLSMOFFHI,
+ BFD_RELOC_FRV_TLSMOFFLO,
+ BFD_RELOC_FRV_GOTTLSOFF12,
+ BFD_RELOC_FRV_GOTTLSOFFHI,
+ BFD_RELOC_FRV_GOTTLSOFFLO,
+ BFD_RELOC_FRV_TLSOFF,
+ BFD_RELOC_FRV_TLSDESC_RELAX,
+ BFD_RELOC_FRV_GETTLSOFF_RELAX,
+ BFD_RELOC_FRV_TLSOFF_RELAX,
+ BFD_RELOC_FRV_TLSMOFF,
+
+
+/* This is a 24bit GOT-relative reloc for the mn10300. */
+ BFD_RELOC_MN10300_GOTOFF24,
+
+/* This is a 32bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT32,
+
+/* This is a 24bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT24,
+
+/* This is a 16bit GOT-relative reloc for the mn10300, offset by two bytes
+in the instruction. */
+ BFD_RELOC_MN10300_GOT16,
+
+/* Copy symbol at runtime. */
+ BFD_RELOC_MN10300_COPY,
+
+/* Create GOT entry. */
+ BFD_RELOC_MN10300_GLOB_DAT,
+
+/* Create PLT entry. */
+ BFD_RELOC_MN10300_JMP_SLOT,
+
+/* Adjust by program base. */
+ BFD_RELOC_MN10300_RELATIVE,
+
+/* Together with another reloc targeted at the same location,
+allows for a value that is the difference of two symbols
+in the same section. */
+ BFD_RELOC_MN10300_SYM_DIFF,
+
+/* The addend of this reloc is an alignment power that must
+be honoured at the offset's location, regardless of linker
+relaxation. */
+ BFD_RELOC_MN10300_ALIGN,
+
+
+/* i386/elf relocations */
+ BFD_RELOC_386_GOT32,
+ BFD_RELOC_386_PLT32,
+ BFD_RELOC_386_COPY,
+ BFD_RELOC_386_GLOB_DAT,
+ BFD_RELOC_386_JUMP_SLOT,
+ BFD_RELOC_386_RELATIVE,
+ BFD_RELOC_386_GOTOFF,
+ BFD_RELOC_386_GOTPC,
+ BFD_RELOC_386_TLS_TPOFF,
+ BFD_RELOC_386_TLS_IE,
+ BFD_RELOC_386_TLS_GOTIE,
+ BFD_RELOC_386_TLS_LE,
+ BFD_RELOC_386_TLS_GD,
+ BFD_RELOC_386_TLS_LDM,
+ BFD_RELOC_386_TLS_LDO_32,
+ BFD_RELOC_386_TLS_IE_32,
+ BFD_RELOC_386_TLS_LE_32,
+ BFD_RELOC_386_TLS_DTPMOD32,
+ BFD_RELOC_386_TLS_DTPOFF32,
+ BFD_RELOC_386_TLS_TPOFF32,
+ BFD_RELOC_386_TLS_GOTDESC,
+ BFD_RELOC_386_TLS_DESC_CALL,
+ BFD_RELOC_386_TLS_DESC,
+ BFD_RELOC_386_IRELATIVE,
+
+/* x86-64/elf relocations */
+ BFD_RELOC_X86_64_GOT32,
+ BFD_RELOC_X86_64_PLT32,
+ BFD_RELOC_X86_64_COPY,
+ BFD_RELOC_X86_64_GLOB_DAT,
+ BFD_RELOC_X86_64_JUMP_SLOT,
+ BFD_RELOC_X86_64_RELATIVE,
+ BFD_RELOC_X86_64_GOTPCREL,
+ BFD_RELOC_X86_64_32S,
+ BFD_RELOC_X86_64_DTPMOD64,
+ BFD_RELOC_X86_64_DTPOFF64,
+ BFD_RELOC_X86_64_TPOFF64,
+ BFD_RELOC_X86_64_TLSGD,
+ BFD_RELOC_X86_64_TLSLD,
+ BFD_RELOC_X86_64_DTPOFF32,
+ BFD_RELOC_X86_64_GOTTPOFF,
+ BFD_RELOC_X86_64_TPOFF32,
+ BFD_RELOC_X86_64_GOTOFF64,
+ BFD_RELOC_X86_64_GOTPC32,
+ BFD_RELOC_X86_64_GOT64,
+ BFD_RELOC_X86_64_GOTPCREL64,
+ BFD_RELOC_X86_64_GOTPC64,
+ BFD_RELOC_X86_64_GOTPLT64,
+ BFD_RELOC_X86_64_PLTOFF64,
+ BFD_RELOC_X86_64_GOTPC32_TLSDESC,
+ BFD_RELOC_X86_64_TLSDESC_CALL,
+ BFD_RELOC_X86_64_TLSDESC,
+ BFD_RELOC_X86_64_IRELATIVE,
+
+/* ns32k relocations */
+ BFD_RELOC_NS32K_IMM_8,
+ BFD_RELOC_NS32K_IMM_16,
+ BFD_RELOC_NS32K_IMM_32,
+ BFD_RELOC_NS32K_IMM_8_PCREL,
+ BFD_RELOC_NS32K_IMM_16_PCREL,
+ BFD_RELOC_NS32K_IMM_32_PCREL,
+ BFD_RELOC_NS32K_DISP_8,
+ BFD_RELOC_NS32K_DISP_16,
+ BFD_RELOC_NS32K_DISP_32,
+ BFD_RELOC_NS32K_DISP_8_PCREL,
+ BFD_RELOC_NS32K_DISP_16_PCREL,
+ BFD_RELOC_NS32K_DISP_32_PCREL,
+
+/* PDP11 relocations */
+ BFD_RELOC_PDP11_DISP_8_PCREL,
+ BFD_RELOC_PDP11_DISP_6_PCREL,
+
+/* Picojava relocs. Not all of these appear in object files. */
+ BFD_RELOC_PJ_CODE_HI16,
+ BFD_RELOC_PJ_CODE_LO16,
+ BFD_RELOC_PJ_CODE_DIR16,
+ BFD_RELOC_PJ_CODE_DIR32,
+ BFD_RELOC_PJ_CODE_REL16,
+ BFD_RELOC_PJ_CODE_REL32,
+
+/* Power(rs6000) and PowerPC relocations. */
+ BFD_RELOC_PPC_B26,
+ BFD_RELOC_PPC_BA26,
+ BFD_RELOC_PPC_TOC16,
+ BFD_RELOC_PPC_B16,
+ BFD_RELOC_PPC_B16_BRTAKEN,
+ BFD_RELOC_PPC_B16_BRNTAKEN,
+ BFD_RELOC_PPC_BA16,
+ BFD_RELOC_PPC_BA16_BRTAKEN,
+ BFD_RELOC_PPC_BA16_BRNTAKEN,
+ BFD_RELOC_PPC_COPY,
+ BFD_RELOC_PPC_GLOB_DAT,
+ BFD_RELOC_PPC_JMP_SLOT,
+ BFD_RELOC_PPC_RELATIVE,
+ BFD_RELOC_PPC_LOCAL24PC,
+ BFD_RELOC_PPC_EMB_NADDR32,
+ BFD_RELOC_PPC_EMB_NADDR16,
+ BFD_RELOC_PPC_EMB_NADDR16_LO,
+ BFD_RELOC_PPC_EMB_NADDR16_HI,
+ BFD_RELOC_PPC_EMB_NADDR16_HA,
+ BFD_RELOC_PPC_EMB_SDAI16,
+ BFD_RELOC_PPC_EMB_SDA2I16,
+ BFD_RELOC_PPC_EMB_SDA2REL,
+ BFD_RELOC_PPC_EMB_SDA21,
+ BFD_RELOC_PPC_EMB_MRKREF,
+ BFD_RELOC_PPC_EMB_RELSEC16,
+ BFD_RELOC_PPC_EMB_RELST_LO,
+ BFD_RELOC_PPC_EMB_RELST_HI,
+ BFD_RELOC_PPC_EMB_RELST_HA,
+ BFD_RELOC_PPC_EMB_BIT_FLD,
+ BFD_RELOC_PPC_EMB_RELSDA,
+ BFD_RELOC_PPC64_HIGHER,
+ BFD_RELOC_PPC64_HIGHER_S,
+ BFD_RELOC_PPC64_HIGHEST,
+ BFD_RELOC_PPC64_HIGHEST_S,
+ BFD_RELOC_PPC64_TOC16_LO,
+ BFD_RELOC_PPC64_TOC16_HI,
+ BFD_RELOC_PPC64_TOC16_HA,
+ BFD_RELOC_PPC64_TOC,
+ BFD_RELOC_PPC64_PLTGOT16,
+ BFD_RELOC_PPC64_PLTGOT16_LO,
+ BFD_RELOC_PPC64_PLTGOT16_HI,
+ BFD_RELOC_PPC64_PLTGOT16_HA,
+ BFD_RELOC_PPC64_ADDR16_DS,
+ BFD_RELOC_PPC64_ADDR16_LO_DS,
+ BFD_RELOC_PPC64_GOT16_DS,
+ BFD_RELOC_PPC64_GOT16_LO_DS,
+ BFD_RELOC_PPC64_PLT16_LO_DS,
+ BFD_RELOC_PPC64_SECTOFF_DS,
+ BFD_RELOC_PPC64_SECTOFF_LO_DS,
+ BFD_RELOC_PPC64_TOC16_DS,
+ BFD_RELOC_PPC64_TOC16_LO_DS,
+ BFD_RELOC_PPC64_PLTGOT16_DS,
+ BFD_RELOC_PPC64_PLTGOT16_LO_DS,
+
+/* PowerPC and PowerPC64 thread-local storage relocations. */
+ BFD_RELOC_PPC_TLS,
+ BFD_RELOC_PPC_TLSGD,
+ BFD_RELOC_PPC_TLSLD,
+ BFD_RELOC_PPC_DTPMOD,
+ BFD_RELOC_PPC_TPREL16,
+ BFD_RELOC_PPC_TPREL16_LO,
+ BFD_RELOC_PPC_TPREL16_HI,
+ BFD_RELOC_PPC_TPREL16_HA,
+ BFD_RELOC_PPC_TPREL,
+ BFD_RELOC_PPC_DTPREL16,
+ BFD_RELOC_PPC_DTPREL16_LO,
+ BFD_RELOC_PPC_DTPREL16_HI,
+ BFD_RELOC_PPC_DTPREL16_HA,
+ BFD_RELOC_PPC_DTPREL,
+ BFD_RELOC_PPC_GOT_TLSGD16,
+ BFD_RELOC_PPC_GOT_TLSGD16_LO,
+ BFD_RELOC_PPC_GOT_TLSGD16_HI,
+ BFD_RELOC_PPC_GOT_TLSGD16_HA,
+ BFD_RELOC_PPC_GOT_TLSLD16,
+ BFD_RELOC_PPC_GOT_TLSLD16_LO,
+ BFD_RELOC_PPC_GOT_TLSLD16_HI,
+ BFD_RELOC_PPC_GOT_TLSLD16_HA,
+ BFD_RELOC_PPC_GOT_TPREL16,
+ BFD_RELOC_PPC_GOT_TPREL16_LO,
+ BFD_RELOC_PPC_GOT_TPREL16_HI,
+ BFD_RELOC_PPC_GOT_TPREL16_HA,
+ BFD_RELOC_PPC_GOT_DTPREL16,
+ BFD_RELOC_PPC_GOT_DTPREL16_LO,
+ BFD_RELOC_PPC_GOT_DTPREL16_HI,
+ BFD_RELOC_PPC_GOT_DTPREL16_HA,
+ BFD_RELOC_PPC64_TPREL16_DS,
+ BFD_RELOC_PPC64_TPREL16_LO_DS,
+ BFD_RELOC_PPC64_TPREL16_HIGHER,
+ BFD_RELOC_PPC64_TPREL16_HIGHERA,
+ BFD_RELOC_PPC64_TPREL16_HIGHEST,
+ BFD_RELOC_PPC64_TPREL16_HIGHESTA,
+ BFD_RELOC_PPC64_DTPREL16_DS,
+ BFD_RELOC_PPC64_DTPREL16_LO_DS,
+ BFD_RELOC_PPC64_DTPREL16_HIGHER,
+ BFD_RELOC_PPC64_DTPREL16_HIGHERA,
+ BFD_RELOC_PPC64_DTPREL16_HIGHEST,
+ BFD_RELOC_PPC64_DTPREL16_HIGHESTA,
+
+/* IBM 370/390 relocations */
+ BFD_RELOC_I370_D12,
+
+/* The type of reloc used to build a constructor table - at the moment
+probably a 32 bit wide absolute relocation, but the target can choose.
+It generally does map to one of the other relocation types. */
+ BFD_RELOC_CTOR,
+
+/* ARM 26 bit pc-relative branch. The lowest two bits must be zero and are
+not stored in the instruction. */
+ BFD_RELOC_ARM_PCREL_BRANCH,
+
+/* ARM 26 bit pc-relative branch. The lowest bit must be zero and is
+not stored in the instruction. The 2nd lowest bit comes from a 1 bit
+field in the instruction. */
+ BFD_RELOC_ARM_PCREL_BLX,
+
+/* Thumb 22 bit pc-relative branch. The lowest bit must be zero and is
+not stored in the instruction. The 2nd lowest bit comes from a 1 bit
+field in the instruction. */
+ BFD_RELOC_THUMB_PCREL_BLX,
+
+/* ARM 26-bit pc-relative branch for an unconditional BL or BLX instruction. */
+ BFD_RELOC_ARM_PCREL_CALL,
+
+/* ARM 26-bit pc-relative branch for B or conditional BL instruction. */
+ BFD_RELOC_ARM_PCREL_JUMP,
+
+/* Thumb 7-, 9-, 12-, 20-, 23-, and 25-bit pc-relative branches.
+The lowest bit must be zero and is not stored in the instruction.
+Note that the corresponding ELF R_ARM_THM_JUMPnn constant has an
+"nn" one smaller in all cases. Note further that BRANCH23
+corresponds to R_ARM_THM_CALL. */
+ BFD_RELOC_THUMB_PCREL_BRANCH7,
+ BFD_RELOC_THUMB_PCREL_BRANCH9,
+ BFD_RELOC_THUMB_PCREL_BRANCH12,
+ BFD_RELOC_THUMB_PCREL_BRANCH20,
+ BFD_RELOC_THUMB_PCREL_BRANCH23,
+ BFD_RELOC_THUMB_PCREL_BRANCH25,
+
+/* 12-bit immediate offset, used in ARM-format ldr and str instructions. */
+ BFD_RELOC_ARM_OFFSET_IMM,
+
+/* 5-bit immediate offset, used in Thumb-format ldr and str instructions. */
+ BFD_RELOC_ARM_THUMB_OFFSET,
+
+/* Pc-relative or absolute relocation depending on target. Used for
+entries in .init_array sections. */
+ BFD_RELOC_ARM_TARGET1,
+
+/* Read-only segment base relative address. */
+ BFD_RELOC_ARM_ROSEGREL32,
+
+/* Data segment base relative address. */
+ BFD_RELOC_ARM_SBREL32,
+
+/* This reloc is used for references to RTTI data from exception handling
+tables. The actual definition depends on the target. It may be a
+pc-relative or some form of GOT-indirect relocation. */
+ BFD_RELOC_ARM_TARGET2,
+
+/* 31-bit PC relative address. */
+ BFD_RELOC_ARM_PREL31,
+
+/* Low and High halfword relocations for MOVW and MOVT instructions. */
+ BFD_RELOC_ARM_MOVW,
+ BFD_RELOC_ARM_MOVT,
+ BFD_RELOC_ARM_MOVW_PCREL,
+ BFD_RELOC_ARM_MOVT_PCREL,
+ BFD_RELOC_ARM_THUMB_MOVW,
+ BFD_RELOC_ARM_THUMB_MOVT,
+ BFD_RELOC_ARM_THUMB_MOVW_PCREL,
+ BFD_RELOC_ARM_THUMB_MOVT_PCREL,
+
+/* Relocations for setting up GOTs and PLTs for shared libraries. */
+ BFD_RELOC_ARM_JUMP_SLOT,
+ BFD_RELOC_ARM_GLOB_DAT,
+ BFD_RELOC_ARM_GOT32,
+ BFD_RELOC_ARM_PLT32,
+ BFD_RELOC_ARM_RELATIVE,
+ BFD_RELOC_ARM_GOTOFF,
+ BFD_RELOC_ARM_GOTPC,
+ BFD_RELOC_ARM_GOT_PREL,
+
+/* ARM thread-local storage relocations. */
+ BFD_RELOC_ARM_TLS_GD32,
+ BFD_RELOC_ARM_TLS_LDO32,
+ BFD_RELOC_ARM_TLS_LDM32,
+ BFD_RELOC_ARM_TLS_DTPOFF32,
+ BFD_RELOC_ARM_TLS_DTPMOD32,
+ BFD_RELOC_ARM_TLS_TPOFF32,
+ BFD_RELOC_ARM_TLS_IE32,
+ BFD_RELOC_ARM_TLS_LE32,
+
+/* ARM group relocations. */
+ BFD_RELOC_ARM_ALU_PC_G0_NC,
+ BFD_RELOC_ARM_ALU_PC_G0,
+ BFD_RELOC_ARM_ALU_PC_G1_NC,
+ BFD_RELOC_ARM_ALU_PC_G1,
+ BFD_RELOC_ARM_ALU_PC_G2,
+ BFD_RELOC_ARM_LDR_PC_G0,
+ BFD_RELOC_ARM_LDR_PC_G1,
+ BFD_RELOC_ARM_LDR_PC_G2,
+ BFD_RELOC_ARM_LDRS_PC_G0,
+ BFD_RELOC_ARM_LDRS_PC_G1,
+ BFD_RELOC_ARM_LDRS_PC_G2,
+ BFD_RELOC_ARM_LDC_PC_G0,
+ BFD_RELOC_ARM_LDC_PC_G1,
+ BFD_RELOC_ARM_LDC_PC_G2,
+ BFD_RELOC_ARM_ALU_SB_G0_NC,
+ BFD_RELOC_ARM_ALU_SB_G0,
+ BFD_RELOC_ARM_ALU_SB_G1_NC,
+ BFD_RELOC_ARM_ALU_SB_G1,
+ BFD_RELOC_ARM_ALU_SB_G2,
+ BFD_RELOC_ARM_LDR_SB_G0,
+ BFD_RELOC_ARM_LDR_SB_G1,
+ BFD_RELOC_ARM_LDR_SB_G2,
+ BFD_RELOC_ARM_LDRS_SB_G0,
+ BFD_RELOC_ARM_LDRS_SB_G1,
+ BFD_RELOC_ARM_LDRS_SB_G2,
+ BFD_RELOC_ARM_LDC_SB_G0,
+ BFD_RELOC_ARM_LDC_SB_G1,
+ BFD_RELOC_ARM_LDC_SB_G2,
+
+/* Annotation of BX instructions. */
+ BFD_RELOC_ARM_V4BX,
+
+/* These relocs are only used within the ARM assembler. They are not
+(at present) written to any object files. */
+ BFD_RELOC_ARM_IMMEDIATE,
+ BFD_RELOC_ARM_ADRL_IMMEDIATE,
+ BFD_RELOC_ARM_T32_IMMEDIATE,
+ BFD_RELOC_ARM_T32_ADD_IMM,
+ BFD_RELOC_ARM_T32_IMM12,
+ BFD_RELOC_ARM_T32_ADD_PC12,
+ BFD_RELOC_ARM_SHIFT_IMM,
+ BFD_RELOC_ARM_SMC,
+ BFD_RELOC_ARM_HVC,
+ BFD_RELOC_ARM_SWI,
+ BFD_RELOC_ARM_MULTI,
+ BFD_RELOC_ARM_CP_OFF_IMM,
+ BFD_RELOC_ARM_CP_OFF_IMM_S2,
+ BFD_RELOC_ARM_T32_CP_OFF_IMM,
+ BFD_RELOC_ARM_T32_CP_OFF_IMM_S2,
+ BFD_RELOC_ARM_ADR_IMM,
+ BFD_RELOC_ARM_LDR_IMM,
+ BFD_RELOC_ARM_LITERAL,
+ BFD_RELOC_ARM_IN_POOL,
+ BFD_RELOC_ARM_OFFSET_IMM8,
+ BFD_RELOC_ARM_T32_OFFSET_U8,
+ BFD_RELOC_ARM_T32_OFFSET_IMM,
+ BFD_RELOC_ARM_HWLITERAL,
+ BFD_RELOC_ARM_THUMB_ADD,
+ BFD_RELOC_ARM_THUMB_IMM,
+ BFD_RELOC_ARM_THUMB_SHIFT,
+
+/* Renesas / SuperH SH relocs. Not all of these appear in object files. */
+ BFD_RELOC_SH_PCDISP8BY2,
+ BFD_RELOC_SH_PCDISP12BY2,
+ BFD_RELOC_SH_IMM3,
+ BFD_RELOC_SH_IMM3U,
+ BFD_RELOC_SH_DISP12,
+ BFD_RELOC_SH_DISP12BY2,
+ BFD_RELOC_SH_DISP12BY4,
+ BFD_RELOC_SH_DISP12BY8,
+ BFD_RELOC_SH_DISP20,
+ BFD_RELOC_SH_DISP20BY8,
+ BFD_RELOC_SH_IMM4,
+ BFD_RELOC_SH_IMM4BY2,
+ BFD_RELOC_SH_IMM4BY4,
+ BFD_RELOC_SH_IMM8,
+ BFD_RELOC_SH_IMM8BY2,
+ BFD_RELOC_SH_IMM8BY4,
+ BFD_RELOC_SH_PCRELIMM8BY2,
+ BFD_RELOC_SH_PCRELIMM8BY4,
+ BFD_RELOC_SH_SWITCH16,
+ BFD_RELOC_SH_SWITCH32,
+ BFD_RELOC_SH_USES,
+ BFD_RELOC_SH_COUNT,
+ BFD_RELOC_SH_ALIGN,
+ BFD_RELOC_SH_CODE,
+ BFD_RELOC_SH_DATA,
+ BFD_RELOC_SH_LABEL,
+ BFD_RELOC_SH_LOOP_START,
+ BFD_RELOC_SH_LOOP_END,
+ BFD_RELOC_SH_COPY,
+ BFD_RELOC_SH_GLOB_DAT,
+ BFD_RELOC_SH_JMP_SLOT,
+ BFD_RELOC_SH_RELATIVE,
+ BFD_RELOC_SH_GOTPC,
+ BFD_RELOC_SH_GOT_LOW16,
+ BFD_RELOC_SH_GOT_MEDLOW16,
+ BFD_RELOC_SH_GOT_MEDHI16,
+ BFD_RELOC_SH_GOT_HI16,
+ BFD_RELOC_SH_GOTPLT_LOW16,
+ BFD_RELOC_SH_GOTPLT_MEDLOW16,
+ BFD_RELOC_SH_GOTPLT_MEDHI16,
+ BFD_RELOC_SH_GOTPLT_HI16,
+ BFD_RELOC_SH_PLT_LOW16,
+ BFD_RELOC_SH_PLT_MEDLOW16,
+ BFD_RELOC_SH_PLT_MEDHI16,
+ BFD_RELOC_SH_PLT_HI16,
+ BFD_RELOC_SH_GOTOFF_LOW16,
+ BFD_RELOC_SH_GOTOFF_MEDLOW16,
+ BFD_RELOC_SH_GOTOFF_MEDHI16,
+ BFD_RELOC_SH_GOTOFF_HI16,
+ BFD_RELOC_SH_GOTPC_LOW16,
+ BFD_RELOC_SH_GOTPC_MEDLOW16,
+ BFD_RELOC_SH_GOTPC_MEDHI16,
+ BFD_RELOC_SH_GOTPC_HI16,
+ BFD_RELOC_SH_COPY64,
+ BFD_RELOC_SH_GLOB_DAT64,
+ BFD_RELOC_SH_JMP_SLOT64,
+ BFD_RELOC_SH_RELATIVE64,
+ BFD_RELOC_SH_GOT10BY4,
+ BFD_RELOC_SH_GOT10BY8,
+ BFD_RELOC_SH_GOTPLT10BY4,
+ BFD_RELOC_SH_GOTPLT10BY8,
+ BFD_RELOC_SH_GOTPLT32,
+ BFD_RELOC_SH_SHMEDIA_CODE,
+ BFD_RELOC_SH_IMMU5,
+ BFD_RELOC_SH_IMMS6,
+ BFD_RELOC_SH_IMMS6BY32,
+ BFD_RELOC_SH_IMMU6,
+ BFD_RELOC_SH_IMMS10,
+ BFD_RELOC_SH_IMMS10BY2,
+ BFD_RELOC_SH_IMMS10BY4,
+ BFD_RELOC_SH_IMMS10BY8,
+ BFD_RELOC_SH_IMMS16,
+ BFD_RELOC_SH_IMMU16,
+ BFD_RELOC_SH_IMM_LOW16,
+ BFD_RELOC_SH_IMM_LOW16_PCREL,
+ BFD_RELOC_SH_IMM_MEDLOW16,
+ BFD_RELOC_SH_IMM_MEDLOW16_PCREL,
+ BFD_RELOC_SH_IMM_MEDHI16,
+ BFD_RELOC_SH_IMM_MEDHI16_PCREL,
+ BFD_RELOC_SH_IMM_HI16,
+ BFD_RELOC_SH_IMM_HI16_PCREL,
+ BFD_RELOC_SH_PT_16,
+ BFD_RELOC_SH_TLS_GD_32,
+ BFD_RELOC_SH_TLS_LD_32,
+ BFD_RELOC_SH_TLS_LDO_32,
+ BFD_RELOC_SH_TLS_IE_32,
+ BFD_RELOC_SH_TLS_LE_32,
+ BFD_RELOC_SH_TLS_DTPMOD32,
+ BFD_RELOC_SH_TLS_DTPOFF32,
+ BFD_RELOC_SH_TLS_TPOFF32,
+ BFD_RELOC_SH_GOT20,
+ BFD_RELOC_SH_GOTOFF20,
+ BFD_RELOC_SH_GOTFUNCDESC,
+ BFD_RELOC_SH_GOTFUNCDESC20,
+ BFD_RELOC_SH_GOTOFFFUNCDESC,
+ BFD_RELOC_SH_GOTOFFFUNCDESC20,
+ BFD_RELOC_SH_FUNCDESC,
+
+/* ARC Cores relocs.
+ARC 22 bit pc-relative branch. The lowest two bits must be zero and are
+not stored in the instruction. The high 20 bits are installed in bits 26
+through 7 of the instruction. */
+ BFD_RELOC_ARC_B22_PCREL,
+
+/* ARC 26 bit absolute branch. The lowest two bits must be zero and are not
+stored in the instruction. The high 24 bits are installed in bits 23
+through 0. */
+ BFD_RELOC_ARC_B26,
+
+/* ADI Blackfin 16 bit immediate absolute reloc. */
+ BFD_RELOC_BFIN_16_IMM,
+
+/* ADI Blackfin 16 bit immediate absolute reloc higher 16 bits. */
+ BFD_RELOC_BFIN_16_HIGH,
+
+/* ADI Blackfin 'a' part of LSETUP. */
+ BFD_RELOC_BFIN_4_PCREL,
+
+/* ADI Blackfin. */
+ BFD_RELOC_BFIN_5_PCREL,
+
+/* ADI Blackfin 16 bit immediate absolute reloc lower 16 bits. */
+ BFD_RELOC_BFIN_16_LOW,
+
+/* ADI Blackfin. */
+ BFD_RELOC_BFIN_10_PCREL,
+
+/* ADI Blackfin 'b' part of LSETUP. */
+ BFD_RELOC_BFIN_11_PCREL,
+
+/* ADI Blackfin. */
+ BFD_RELOC_BFIN_12_PCREL_JUMP,
+
+/* ADI Blackfin Short jump, pcrel. */
+ BFD_RELOC_BFIN_12_PCREL_JUMP_S,
+
+/* ADI Blackfin Call.x not implemented. */
+ BFD_RELOC_BFIN_24_PCREL_CALL_X,
+
+/* ADI Blackfin Long Jump pcrel. */
+ BFD_RELOC_BFIN_24_PCREL_JUMP_L,
+
+/* ADI Blackfin FD-PIC relocations. */
+ BFD_RELOC_BFIN_GOT17M4,
+ BFD_RELOC_BFIN_GOTHI,
+ BFD_RELOC_BFIN_GOTLO,
+ BFD_RELOC_BFIN_FUNCDESC,
+ BFD_RELOC_BFIN_FUNCDESC_GOT17M4,
+ BFD_RELOC_BFIN_FUNCDESC_GOTHI,
+ BFD_RELOC_BFIN_FUNCDESC_GOTLO,
+ BFD_RELOC_BFIN_FUNCDESC_VALUE,
+ BFD_RELOC_BFIN_FUNCDESC_GOTOFF17M4,
+ BFD_RELOC_BFIN_FUNCDESC_GOTOFFHI,
+ BFD_RELOC_BFIN_FUNCDESC_GOTOFFLO,
+ BFD_RELOC_BFIN_GOTOFF17M4,
+ BFD_RELOC_BFIN_GOTOFFHI,
+ BFD_RELOC_BFIN_GOTOFFLO,
+
+/* ADI Blackfin GOT relocation. */
+ BFD_RELOC_BFIN_GOT,
+
+/* ADI Blackfin PLTPC relocation. */
+ BFD_RELOC_BFIN_PLTPC,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_PUSH,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_CONST,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_ADD,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_SUB,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_MULT,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_DIV,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_MOD,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_LSHIFT,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_RSHIFT,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_AND,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_OR,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_XOR,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_LAND,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_LOR,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_LEN,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_NEG,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_COMP,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_PAGE,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_HWPAGE,
+
+/* ADI Blackfin arithmetic relocation. */
+ BFD_ARELOC_BFIN_ADDR,
+
+/* Mitsubishi D10V relocs.
+This is a 10-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_10_PCREL_R,
+
+/* Mitsubishi D10V relocs.
+This is a 10-bit reloc with the right 2 bits
+assumed to be 0. This is the same as the previous reloc
+except it is in the left container, i.e.,
+shifted left 15 bits. */
+ BFD_RELOC_D10V_10_PCREL_L,
+
+/* This is an 18-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_18,
+
+/* This is an 18-bit reloc with the right 2 bits
+assumed to be 0. */
+ BFD_RELOC_D10V_18_PCREL,
+
+/* Mitsubishi D30V relocs.
+This is a 6-bit absolute reloc. */
+ BFD_RELOC_D30V_6,
+
+/* This is a 6-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_9_PCREL,
+
+/* This is a 6-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_9_PCREL_R,
+
+/* This is a 12-bit absolute reloc with the
+right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_15,
+
+/* This is a 12-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_15_PCREL,
+
+/* This is a 12-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_15_PCREL_R,
+
+/* This is an 18-bit absolute reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_21,
+
+/* This is an 18-bit pc-relative reloc with
+the right 3 bits assumed to be 0. */
+ BFD_RELOC_D30V_21_PCREL,
+
+/* This is an 18-bit pc-relative reloc with
+the right 3 bits assumed to be 0. Same
+as the previous reloc but on the right side
+of the container. */
+ BFD_RELOC_D30V_21_PCREL_R,
+
+/* This is a 32-bit absolute reloc. */
+ BFD_RELOC_D30V_32,
+
+/* This is a 32-bit pc-relative reloc. */
+ BFD_RELOC_D30V_32_PCREL,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_HI16_S,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_LO16,
+
+/* DLX relocs */
+ BFD_RELOC_DLX_JMP26,
+
+/* Renesas M16C/M32C Relocations. */
+ BFD_RELOC_M32C_HI8,
+ BFD_RELOC_M32C_RL_JUMP,
+ BFD_RELOC_M32C_RL_1ADDR,
+ BFD_RELOC_M32C_RL_2ADDR,
+
+/* Renesas M32R (formerly Mitsubishi M32R) relocs.
+This is a 24 bit absolute address. */
+ BFD_RELOC_M32R_24,
+
+/* This is a 10-bit pc-relative reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_10_PCREL,
+
+/* This is an 18-bit reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_18_PCREL,
+
+/* This is a 26-bit reloc with the right 2 bits assumed to be 0. */
+ BFD_RELOC_M32R_26_PCREL,
+
+/* This is a 16-bit reloc containing the high 16 bits of an address
+used when the lower 16 bits are treated as unsigned. */
+ BFD_RELOC_M32R_HI16_ULO,
+
+/* This is a 16-bit reloc containing the high 16 bits of an address
+used when the lower 16 bits are treated as signed. */
+ BFD_RELOC_M32R_HI16_SLO,
+
+/* This is a 16-bit reloc containing the lower 16 bits of an address. */
+ BFD_RELOC_M32R_LO16,
+
+/* This is a 16-bit reloc containing the small data area offset for use in
+add3, load, and store instructions. */
+ BFD_RELOC_M32R_SDA16,
+
+/* For PIC. */
+ BFD_RELOC_M32R_GOT24,
+ BFD_RELOC_M32R_26_PLTREL,
+ BFD_RELOC_M32R_COPY,
+ BFD_RELOC_M32R_GLOB_DAT,
+ BFD_RELOC_M32R_JMP_SLOT,
+ BFD_RELOC_M32R_RELATIVE,
+ BFD_RELOC_M32R_GOTOFF,
+ BFD_RELOC_M32R_GOTOFF_HI_ULO,
+ BFD_RELOC_M32R_GOTOFF_HI_SLO,
+ BFD_RELOC_M32R_GOTOFF_LO,
+ BFD_RELOC_M32R_GOTPC24,
+ BFD_RELOC_M32R_GOT16_HI_ULO,
+ BFD_RELOC_M32R_GOT16_HI_SLO,
+ BFD_RELOC_M32R_GOT16_LO,
+ BFD_RELOC_M32R_GOTPC_HI_ULO,
+ BFD_RELOC_M32R_GOTPC_HI_SLO,
+ BFD_RELOC_M32R_GOTPC_LO,
+
+/* This is a 9-bit reloc */
+ BFD_RELOC_V850_9_PCREL,
+
+/* This is a 22-bit reloc */
+ BFD_RELOC_V850_22_PCREL,
+
+/* This is a 16 bit offset from the short data area pointer. */
+ BFD_RELOC_V850_SDA_16_16_OFFSET,
+
+/* This is a 16 bit offset (of which only 15 bits are used) from the
+short data area pointer. */
+ BFD_RELOC_V850_SDA_15_16_OFFSET,
+
+/* This is a 16 bit offset from the zero data area pointer. */
+ BFD_RELOC_V850_ZDA_16_16_OFFSET,
+
+/* This is a 16 bit offset (of which only 15 bits are used) from the
+zero data area pointer. */
+ BFD_RELOC_V850_ZDA_15_16_OFFSET,
+
+/* This is an 8 bit offset (of which only 6 bits are used) from the
+tiny data area pointer. */
+ BFD_RELOC_V850_TDA_6_8_OFFSET,
+
+/* This is an 8bit offset (of which only 7 bits are used) from the tiny
+data area pointer. */
+ BFD_RELOC_V850_TDA_7_8_OFFSET,
+
+/* This is a 7 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_7_7_OFFSET,
+
+/* This is a 16 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_16_16_OFFSET,
+
+/* This is a 5 bit offset (of which only 4 bits are used) from the tiny
+data area pointer. */
+ BFD_RELOC_V850_TDA_4_5_OFFSET,
+
+/* This is a 4 bit offset from the tiny data area pointer. */
+ BFD_RELOC_V850_TDA_4_4_OFFSET,
+
+/* This is a 16 bit offset from the short data area pointer, with the
+bits placed non-contiguously in the instruction. */
+ BFD_RELOC_V850_SDA_16_16_SPLIT_OFFSET,
+
+/* This is a 16 bit offset from the zero data area pointer, with the
+bits placed non-contiguously in the instruction. */
+ BFD_RELOC_V850_ZDA_16_16_SPLIT_OFFSET,
+
+/* This is a 6 bit offset from the call table base pointer. */
+ BFD_RELOC_V850_CALLT_6_7_OFFSET,
+
+/* This is a 16 bit offset from the call table base pointer. */
+ BFD_RELOC_V850_CALLT_16_16_OFFSET,
+
+/* Used for relaxing indirect function calls. */
+ BFD_RELOC_V850_LONGCALL,
+
+/* Used for relaxing indirect jumps. */
+ BFD_RELOC_V850_LONGJUMP,
+
+/* Used to maintain alignment whilst relaxing. */
+ BFD_RELOC_V850_ALIGN,
+
+/* This is a variation of BFD_RELOC_LO16 that can be used in v850e ld.bu
+instructions. */
+ BFD_RELOC_V850_LO16_SPLIT_OFFSET,
+
+/* This is a 16-bit reloc. */
+ BFD_RELOC_V850_16_PCREL,
+
+/* This is a 17-bit reloc. */
+ BFD_RELOC_V850_17_PCREL,
+
+/* This is a 23-bit reloc. */
+ BFD_RELOC_V850_23,
+
+/* This is a 32-bit reloc. */
+ BFD_RELOC_V850_32_PCREL,
+
+/* This is a 32-bit reloc. */
+ BFD_RELOC_V850_32_ABS,
+
+/* This is a 16-bit reloc. */
+ BFD_RELOC_V850_16_SPLIT_OFFSET,
+
+/* This is a 16-bit reloc. */
+ BFD_RELOC_V850_16_S1,
+
+/* Low 16 bits. 16 bit shifted by 1. */
+ BFD_RELOC_V850_LO16_S1,
+
+/* This is a 16 bit offset from the call table base pointer. */
+ BFD_RELOC_V850_CALLT_15_16_OFFSET,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_32_GOTPCREL,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_16_GOT,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_32_GOT,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_22_PLT_PCREL,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_32_PLT_PCREL,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_COPY,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_GLOB_DAT,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_JMP_SLOT,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_RELATIVE,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_16_GOTOFF,
+
+/* DSO relocations. */
+ BFD_RELOC_V850_32_GOTOFF,
+
+/* start code. */
+ BFD_RELOC_V850_CODE,
+
+/* start data in text. */
+ BFD_RELOC_V850_DATA,
+
+/* This is a 32bit pcrel reloc for the mn10300, offset by two bytes in the
+instruction. */
+ BFD_RELOC_MN10300_32_PCREL,
+
+/* This is a 16bit pcrel reloc for the mn10300, offset by two bytes in the
+instruction. */
+ BFD_RELOC_MN10300_16_PCREL,
+
+/* This is an 8-bit DP reloc for the tms320c30, where the most
+significant 8 bits of a 24 bit word are placed into the least
+significant 8 bits of the opcode. */
+ BFD_RELOC_TIC30_LDP,
+
+/* This is a 7bit reloc for the tms320c54x, where the least
+significant 7 bits of a 16 bit word are placed into the least
+significant 7 bits of the opcode. */
+ BFD_RELOC_TIC54X_PARTLS7,
+
+/* This is a 9bit DP reloc for the tms320c54x, where the most
+significant 9 bits of a 16 bit word are placed into the least
+significant 9 bits of the opcode. */
+ BFD_RELOC_TIC54X_PARTMS9,
+
+/* This is an extended address 23-bit reloc for the tms320c54x. */
+ BFD_RELOC_TIC54X_23,
+
+/* This is a 16-bit reloc for the tms320c54x, where the least
+significant 16 bits of a 23-bit extended address are placed into
+the opcode. */
+ BFD_RELOC_TIC54X_16_OF_23,
+
+/* This is a reloc for the tms320c54x, where the most
+significant 7 bits of a 23-bit extended address are placed into
+the opcode. */
+ BFD_RELOC_TIC54X_MS7_OF_23,
+
+/* TMS320C6000 relocations. */
+ BFD_RELOC_C6000_PCR_S21,
+ BFD_RELOC_C6000_PCR_S12,
+ BFD_RELOC_C6000_PCR_S10,
+ BFD_RELOC_C6000_PCR_S7,
+ BFD_RELOC_C6000_ABS_S16,
+ BFD_RELOC_C6000_ABS_L16,
+ BFD_RELOC_C6000_ABS_H16,
+ BFD_RELOC_C6000_SBR_U15_B,
+ BFD_RELOC_C6000_SBR_U15_H,
+ BFD_RELOC_C6000_SBR_U15_W,
+ BFD_RELOC_C6000_SBR_S16,
+ BFD_RELOC_C6000_SBR_L16_B,
+ BFD_RELOC_C6000_SBR_L16_H,
+ BFD_RELOC_C6000_SBR_L16_W,
+ BFD_RELOC_C6000_SBR_H16_B,
+ BFD_RELOC_C6000_SBR_H16_H,
+ BFD_RELOC_C6000_SBR_H16_W,
+ BFD_RELOC_C6000_SBR_GOT_U15_W,
+ BFD_RELOC_C6000_SBR_GOT_L16_W,
+ BFD_RELOC_C6000_SBR_GOT_H16_W,
+ BFD_RELOC_C6000_DSBT_INDEX,
+ BFD_RELOC_C6000_PREL31,
+ BFD_RELOC_C6000_COPY,
+ BFD_RELOC_C6000_ALIGN,
+ BFD_RELOC_C6000_FPHEAD,
+ BFD_RELOC_C6000_NOCMP,
+
+/* This is a 48 bit reloc for the FR30 that stores 32 bits. */
+ BFD_RELOC_FR30_48,
+
+/* This is a 32 bit reloc for the FR30 that stores 20 bits split up into
+two sections. */
+ BFD_RELOC_FR30_20,
+
+/* This is a 16 bit reloc for the FR30 that stores a 6 bit word offset in
+4 bits. */
+ BFD_RELOC_FR30_6_IN_4,
+
+/* This is a 16 bit reloc for the FR30 that stores an 8 bit byte offset
+into 8 bits. */
+ BFD_RELOC_FR30_8_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 9 bit short offset
+into 8 bits. */
+ BFD_RELOC_FR30_9_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 10 bit word offset
+into 8 bits. */
+ BFD_RELOC_FR30_10_IN_8,
+
+/* This is a 16 bit reloc for the FR30 that stores a 9 bit pc relative
+short offset into 8 bits. */
+ BFD_RELOC_FR30_9_PCREL,
+
+/* This is a 16 bit reloc for the FR30 that stores a 12 bit pc relative
+short offset into 11 bits. */
+ BFD_RELOC_FR30_12_PCREL,
+
+/* Motorola Mcore relocations. */
+ BFD_RELOC_MCORE_PCREL_IMM8BY4,
+ BFD_RELOC_MCORE_PCREL_IMM11BY2,
+ BFD_RELOC_MCORE_PCREL_IMM4BY2,
+ BFD_RELOC_MCORE_PCREL_32,
+ BFD_RELOC_MCORE_PCREL_JSR_IMM11BY2,
+ BFD_RELOC_MCORE_RVA,
+
+/* Toshiba Media Processor Relocations. */
+ BFD_RELOC_MEP_8,
+ BFD_RELOC_MEP_16,
+ BFD_RELOC_MEP_32,
+ BFD_RELOC_MEP_PCREL8A2,
+ BFD_RELOC_MEP_PCREL12A2,
+ BFD_RELOC_MEP_PCREL17A2,
+ BFD_RELOC_MEP_PCREL24A2,
+ BFD_RELOC_MEP_PCABS24A2,
+ BFD_RELOC_MEP_LOW16,
+ BFD_RELOC_MEP_HI16U,
+ BFD_RELOC_MEP_HI16S,
+ BFD_RELOC_MEP_GPREL,
+ BFD_RELOC_MEP_TPREL,
+ BFD_RELOC_MEP_TPREL7,
+ BFD_RELOC_MEP_TPREL7A2,
+ BFD_RELOC_MEP_TPREL7A4,
+ BFD_RELOC_MEP_UIMM24,
+ BFD_RELOC_MEP_ADDR24A4,
+ BFD_RELOC_MEP_GNU_VTINHERIT,
+ BFD_RELOC_MEP_GNU_VTENTRY,
+
+
+/* These are relocations for the GETA instruction. */
+ BFD_RELOC_MMIX_GETA,
+ BFD_RELOC_MMIX_GETA_1,
+ BFD_RELOC_MMIX_GETA_2,
+ BFD_RELOC_MMIX_GETA_3,
+
+/* These are relocations for a conditional branch instruction. */
+ BFD_RELOC_MMIX_CBRANCH,
+ BFD_RELOC_MMIX_CBRANCH_J,
+ BFD_RELOC_MMIX_CBRANCH_1,
+ BFD_RELOC_MMIX_CBRANCH_2,
+ BFD_RELOC_MMIX_CBRANCH_3,
+
+/* These are relocations for the PUSHJ instruction. */
+ BFD_RELOC_MMIX_PUSHJ,
+ BFD_RELOC_MMIX_PUSHJ_1,
+ BFD_RELOC_MMIX_PUSHJ_2,
+ BFD_RELOC_MMIX_PUSHJ_3,
+ BFD_RELOC_MMIX_PUSHJ_STUBBABLE,
+
+/* These are relocations for the JMP instruction. */
+ BFD_RELOC_MMIX_JMP,
+ BFD_RELOC_MMIX_JMP_1,
+ BFD_RELOC_MMIX_JMP_2,
+ BFD_RELOC_MMIX_JMP_3,
+
+/* This is a relocation for a relative address as in a GETA instruction or
+a branch. */
+ BFD_RELOC_MMIX_ADDR19,
+
+/* This is a relocation for a relative address as in a JMP instruction. */
+ BFD_RELOC_MMIX_ADDR27,
+
+/* This is a relocation for an instruction field that may be a general
+register or a value 0..255. */
+ BFD_RELOC_MMIX_REG_OR_BYTE,
+
+/* This is a relocation for an instruction field that may be a general
+register. */
+ BFD_RELOC_MMIX_REG,
+
+/* This is a relocation for two instruction fields holding a register and
+an offset, the equivalent of the relocation. */
+ BFD_RELOC_MMIX_BASE_PLUS_OFFSET,
+
+/* This relocation is an assertion that the expression is not allocated as
+a global register. It does not modify contents. */
+ BFD_RELOC_MMIX_LOCAL,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit pc relative
+short offset into 7 bits. */
+ BFD_RELOC_AVR_7_PCREL,
+
+/* This is a 16 bit reloc for the AVR that stores 13 bit pc relative
+short offset into 12 bits. */
+ BFD_RELOC_AVR_13_PCREL,
+
+/* This is a 16 bit reloc for the AVR that stores 17 bit value (usually
+program memory address) into 16 bits. */
+ BFD_RELOC_AVR_16_PM,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
+data memory address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_LO8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
+of data memory address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HI8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit
+of program memory address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HH8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit
+of 32 bit value) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_MS8_LDI,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(usually data memory address) into 8 bit immediate value of SUBI insn. */
+ BFD_RELOC_AVR_LO8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 8 bit of data memory address) into 8 bit immediate value of
+SUBI insn. */
+ BFD_RELOC_AVR_HI8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(most high 8 bit of program memory address) into 8 bit immediate value
+of LDI or SUBI insn. */
+ BFD_RELOC_AVR_HH8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value (msb
+of 32 bit value) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_MS8_LDI_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
+command address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_LO8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value
+(command address) into 8 bit immediate value of LDI insn. If the address
+is beyond the 128k boundary, the linker inserts a jump stub for this reloc
+in the lower 128k. */
+ BFD_RELOC_AVR_LO8_LDI_GS,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
+of command address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HI8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
+of command address) into 8 bit immediate value of LDI insn. If the address
+is beyond the 128k boundary, the linker inserts a jump stub for this reloc
+below 128k. */
+ BFD_RELOC_AVR_HI8_LDI_GS,
+
+/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit
+of command address) into 8 bit immediate value of LDI insn. */
+ BFD_RELOC_AVR_HH8_LDI_PM,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(usually command address) into 8 bit immediate value of SUBI insn. */
+ BFD_RELOC_AVR_LO8_LDI_PM_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 8 bit of 16 bit command address) into 8 bit immediate value
+of SUBI insn. */
+ BFD_RELOC_AVR_HI8_LDI_PM_NEG,
+
+/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
+(high 6 bit of 22 bit command address) into 8 bit immediate
+value of SUBI insn. */
+ BFD_RELOC_AVR_HH8_LDI_PM_NEG,
+
+/* This is a 32 bit reloc for the AVR that stores 23 bit value
+into 22 bits. */
+ BFD_RELOC_AVR_CALL,
+
+/* This is a 16 bit reloc for the AVR that stores all needed bits
+for absolute addressing with ldi, with the overflow check deferred to link time. */
+ BFD_RELOC_AVR_LDI,
+
+/* This is a 6 bit reloc for the AVR that stores offset for ldd/std
+instructions */
+ BFD_RELOC_AVR_6,
+
+/* This is a 6 bit reloc for the AVR that stores offset for adiw/sbiw
+instructions */
+ BFD_RELOC_AVR_6_ADIW,
+
+/* Renesas RX Relocations. */
+ BFD_RELOC_RX_NEG8,
+ BFD_RELOC_RX_NEG16,
+ BFD_RELOC_RX_NEG24,
+ BFD_RELOC_RX_NEG32,
+ BFD_RELOC_RX_16_OP,
+ BFD_RELOC_RX_24_OP,
+ BFD_RELOC_RX_32_OP,
+ BFD_RELOC_RX_8U,
+ BFD_RELOC_RX_16U,
+ BFD_RELOC_RX_24U,
+ BFD_RELOC_RX_DIR3U_PCREL,
+ BFD_RELOC_RX_DIFF,
+ BFD_RELOC_RX_GPRELB,
+ BFD_RELOC_RX_GPRELW,
+ BFD_RELOC_RX_GPRELL,
+ BFD_RELOC_RX_SYM,
+ BFD_RELOC_RX_OP_SUBTRACT,
+ BFD_RELOC_RX_ABS8,
+ BFD_RELOC_RX_ABS16,
+ BFD_RELOC_RX_ABS32,
+ BFD_RELOC_RX_ABS16U,
+ BFD_RELOC_RX_ABS16UW,
+ BFD_RELOC_RX_ABS16UL,
+ BFD_RELOC_RX_RELAX,
+
+/* Direct 12 bit. */
+ BFD_RELOC_390_12,
+
+/* 12 bit GOT offset. */
+ BFD_RELOC_390_GOT12,
+
+/* 32 bit PC relative PLT address. */
+ BFD_RELOC_390_PLT32,
+
+/* Copy symbol at runtime. */
+ BFD_RELOC_390_COPY,
+
+/* Create GOT entry. */
+ BFD_RELOC_390_GLOB_DAT,
+
+/* Create PLT entry. */
+ BFD_RELOC_390_JMP_SLOT,
+
+/* Adjust by program base. */
+ BFD_RELOC_390_RELATIVE,
+
+/* 32 bit PC relative offset to GOT. */
+ BFD_RELOC_390_GOTPC,
+
+/* 16 bit GOT offset. */
+ BFD_RELOC_390_GOT16,
+
+/* PC relative 16 bit shifted by 1. */
+ BFD_RELOC_390_PC16DBL,
+
+/* 16 bit PC rel. PLT shifted by 1. */
+ BFD_RELOC_390_PLT16DBL,
+
+/* PC relative 32 bit shifted by 1. */
+ BFD_RELOC_390_PC32DBL,
+
+/* 32 bit PC rel. PLT shifted by 1. */
+ BFD_RELOC_390_PLT32DBL,
+
+/* 32 bit PC rel. GOT shifted by 1. */
+ BFD_RELOC_390_GOTPCDBL,
+
+/* 64 bit GOT offset. */
+ BFD_RELOC_390_GOT64,
+
+/* 64 bit PC relative PLT address. */
+ BFD_RELOC_390_PLT64,
+
+/* 32 bit rel. offset to GOT entry. */
+ BFD_RELOC_390_GOTENT,
+
+/* 64 bit offset to GOT. */
+ BFD_RELOC_390_GOTOFF64,
+
+/* 12-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT12,
+
+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT16,
+
+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT32,
+
+/* 64-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLT64,
+
+/* 32-bit rel. offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_390_GOTPLTENT,
+
+/* 16-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF16,
+
+/* 32-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF32,
+
+/* 64-bit rel. offset from the GOT to a PLT entry. */
+ BFD_RELOC_390_PLTOFF64,
+
+/* s390 tls relocations. */
+ BFD_RELOC_390_TLS_LOAD,
+ BFD_RELOC_390_TLS_GDCALL,
+ BFD_RELOC_390_TLS_LDCALL,
+ BFD_RELOC_390_TLS_GD32,
+ BFD_RELOC_390_TLS_GD64,
+ BFD_RELOC_390_TLS_GOTIE12,
+ BFD_RELOC_390_TLS_GOTIE32,
+ BFD_RELOC_390_TLS_GOTIE64,
+ BFD_RELOC_390_TLS_LDM32,
+ BFD_RELOC_390_TLS_LDM64,
+ BFD_RELOC_390_TLS_IE32,
+ BFD_RELOC_390_TLS_IE64,
+ BFD_RELOC_390_TLS_IEENT,
+ BFD_RELOC_390_TLS_LE32,
+ BFD_RELOC_390_TLS_LE64,
+ BFD_RELOC_390_TLS_LDO32,
+ BFD_RELOC_390_TLS_LDO64,
+ BFD_RELOC_390_TLS_DTPMOD,
+ BFD_RELOC_390_TLS_DTPOFF,
+ BFD_RELOC_390_TLS_TPOFF,
+
+/* Long displacement extension. */
+ BFD_RELOC_390_20,
+ BFD_RELOC_390_GOT20,
+ BFD_RELOC_390_GOTPLT20,
+ BFD_RELOC_390_TLS_GOTIE20,
+
+/* Score relocations
+Low 16 bit for load/store */
+ BFD_RELOC_SCORE_GPREL15,
+
+/* This is a 24-bit reloc with the right 1 bit assumed to be 0 */
+ BFD_RELOC_SCORE_DUMMY2,
+ BFD_RELOC_SCORE_JMP,
+
+/* This is a 19-bit reloc with the right 1 bit assumed to be 0 */
+ BFD_RELOC_SCORE_BRANCH,
+
+/* This is a 32-bit reloc for 48-bit instructions. */
+ BFD_RELOC_SCORE_IMM30,
+
+/* This is a 32-bit reloc for 48-bit instructions. */
+ BFD_RELOC_SCORE_IMM32,
+
+/* This is an 11-bit reloc with the right 1 bit assumed to be 0 */
+ BFD_RELOC_SCORE16_JMP,
+
+/* This is an 8-bit reloc with the right 1 bit assumed to be 0 */
+ BFD_RELOC_SCORE16_BRANCH,
+
+/* This is a 9-bit reloc with the right 1 bit assumed to be 0 */
+ BFD_RELOC_SCORE_BCMP,
+
+/* Undocumented Score relocs */
+ BFD_RELOC_SCORE_GOT15,
+ BFD_RELOC_SCORE_GOT_LO16,
+ BFD_RELOC_SCORE_CALL15,
+ BFD_RELOC_SCORE_DUMMY_HI16,
+
+/* Scenix IP2K - 9-bit register number / data address */
+ BFD_RELOC_IP2K_FR9,
+
+/* Scenix IP2K - 4-bit register/data bank number */
+ BFD_RELOC_IP2K_BANK,
+
+/* Scenix IP2K - low 13 bits of instruction word address */
+ BFD_RELOC_IP2K_ADDR16CJP,
+
+/* Scenix IP2K - high 3 bits of instruction word address */
+ BFD_RELOC_IP2K_PAGE3,
+
+/* Scenix IP2K - ext/low/high 8 bits of data address */
+ BFD_RELOC_IP2K_LO8DATA,
+ BFD_RELOC_IP2K_HI8DATA,
+ BFD_RELOC_IP2K_EX8DATA,
+
+/* Scenix IP2K - low/high 8 bits of instruction word address */
+ BFD_RELOC_IP2K_LO8INSN,
+ BFD_RELOC_IP2K_HI8INSN,
+
+/* Scenix IP2K - even/odd PC modifier to modify snb pcl.0 */
+ BFD_RELOC_IP2K_PC_SKIP,
+
+/* Scenix IP2K - 16 bit word address in text section. */
+ BFD_RELOC_IP2K_TEXT,
+
+/* Scenix IP2K - 7-bit sp or dp offset */
+ BFD_RELOC_IP2K_FR_OFFSET,
+
+/* Scenix VPE4K coprocessor - data/insn-space addressing */
+ BFD_RELOC_VPE4KMATH_DATA,
+ BFD_RELOC_VPE4KMATH_INSN,
+
+/* These two relocations are used by the linker to determine which of
+the entries in a C++ virtual function table are actually used. When
+the --gc-sections option is given, the linker will zero out the entries
+that are not used, so that the code for those functions need not be
+included in the output.
+
+VTABLE_INHERIT is a zero-space relocation used to describe to the
+linker the inheritance tree of a C++ virtual function table. The
+relocation's symbol should be the parent class' vtable, and the
+relocation should be located at the child vtable.
+
+VTABLE_ENTRY is a zero-space relocation that describes the use of a
+virtual function table entry. The reloc's symbol should refer to the
+table of the class mentioned in the code. Off of that base, an offset
+describes the entry that is being used. For Rela hosts, this offset
+is stored in the reloc's addend. For Rel hosts, we are forced to put
+this offset in the reloc's section offset. */
+ BFD_RELOC_VTABLE_INHERIT,
+ BFD_RELOC_VTABLE_ENTRY,
+
+/* Intel IA64 Relocations. */
+ BFD_RELOC_IA64_IMM14,
+ BFD_RELOC_IA64_IMM22,
+ BFD_RELOC_IA64_IMM64,
+ BFD_RELOC_IA64_DIR32MSB,
+ BFD_RELOC_IA64_DIR32LSB,
+ BFD_RELOC_IA64_DIR64MSB,
+ BFD_RELOC_IA64_DIR64LSB,
+ BFD_RELOC_IA64_GPREL22,
+ BFD_RELOC_IA64_GPREL64I,
+ BFD_RELOC_IA64_GPREL32MSB,
+ BFD_RELOC_IA64_GPREL32LSB,
+ BFD_RELOC_IA64_GPREL64MSB,
+ BFD_RELOC_IA64_GPREL64LSB,
+ BFD_RELOC_IA64_LTOFF22,
+ BFD_RELOC_IA64_LTOFF64I,
+ BFD_RELOC_IA64_PLTOFF22,
+ BFD_RELOC_IA64_PLTOFF64I,
+ BFD_RELOC_IA64_PLTOFF64MSB,
+ BFD_RELOC_IA64_PLTOFF64LSB,
+ BFD_RELOC_IA64_FPTR64I,
+ BFD_RELOC_IA64_FPTR32MSB,
+ BFD_RELOC_IA64_FPTR32LSB,
+ BFD_RELOC_IA64_FPTR64MSB,
+ BFD_RELOC_IA64_FPTR64LSB,
+ BFD_RELOC_IA64_PCREL21B,
+ BFD_RELOC_IA64_PCREL21BI,
+ BFD_RELOC_IA64_PCREL21M,
+ BFD_RELOC_IA64_PCREL21F,
+ BFD_RELOC_IA64_PCREL22,
+ BFD_RELOC_IA64_PCREL60B,
+ BFD_RELOC_IA64_PCREL64I,
+ BFD_RELOC_IA64_PCREL32MSB,
+ BFD_RELOC_IA64_PCREL32LSB,
+ BFD_RELOC_IA64_PCREL64MSB,
+ BFD_RELOC_IA64_PCREL64LSB,
+ BFD_RELOC_IA64_LTOFF_FPTR22,
+ BFD_RELOC_IA64_LTOFF_FPTR64I,
+ BFD_RELOC_IA64_LTOFF_FPTR32MSB,
+ BFD_RELOC_IA64_LTOFF_FPTR32LSB,
+ BFD_RELOC_IA64_LTOFF_FPTR64MSB,
+ BFD_RELOC_IA64_LTOFF_FPTR64LSB,
+ BFD_RELOC_IA64_SEGREL32MSB,
+ BFD_RELOC_IA64_SEGREL32LSB,
+ BFD_RELOC_IA64_SEGREL64MSB,
+ BFD_RELOC_IA64_SEGREL64LSB,
+ BFD_RELOC_IA64_SECREL32MSB,
+ BFD_RELOC_IA64_SECREL32LSB,
+ BFD_RELOC_IA64_SECREL64MSB,
+ BFD_RELOC_IA64_SECREL64LSB,
+ BFD_RELOC_IA64_REL32MSB,
+ BFD_RELOC_IA64_REL32LSB,
+ BFD_RELOC_IA64_REL64MSB,
+ BFD_RELOC_IA64_REL64LSB,
+ BFD_RELOC_IA64_LTV32MSB,
+ BFD_RELOC_IA64_LTV32LSB,
+ BFD_RELOC_IA64_LTV64MSB,
+ BFD_RELOC_IA64_LTV64LSB,
+ BFD_RELOC_IA64_IPLTMSB,
+ BFD_RELOC_IA64_IPLTLSB,
+ BFD_RELOC_IA64_COPY,
+ BFD_RELOC_IA64_LTOFF22X,
+ BFD_RELOC_IA64_LDXMOV,
+ BFD_RELOC_IA64_TPREL14,
+ BFD_RELOC_IA64_TPREL22,
+ BFD_RELOC_IA64_TPREL64I,
+ BFD_RELOC_IA64_TPREL64MSB,
+ BFD_RELOC_IA64_TPREL64LSB,
+ BFD_RELOC_IA64_LTOFF_TPREL22,
+ BFD_RELOC_IA64_DTPMOD64MSB,
+ BFD_RELOC_IA64_DTPMOD64LSB,
+ BFD_RELOC_IA64_LTOFF_DTPMOD22,
+ BFD_RELOC_IA64_DTPREL14,
+ BFD_RELOC_IA64_DTPREL22,
+ BFD_RELOC_IA64_DTPREL64I,
+ BFD_RELOC_IA64_DTPREL32MSB,
+ BFD_RELOC_IA64_DTPREL32LSB,
+ BFD_RELOC_IA64_DTPREL64MSB,
+ BFD_RELOC_IA64_DTPREL64LSB,
+ BFD_RELOC_IA64_LTOFF_DTPREL22,
+
+/* Motorola 68HC11 reloc.
+This is the 8 bit high part of an absolute address. */
+ BFD_RELOC_M68HC11_HI8,
+
+/* Motorola 68HC11 reloc.
+This is the 8 bit low part of an absolute address. */
+ BFD_RELOC_M68HC11_LO8,
+
+/* Motorola 68HC11 reloc.
+This is the 3-bit part of a value. */
+ BFD_RELOC_M68HC11_3B,
+
+/* Motorola 68HC11 reloc.
+This reloc marks the beginning of a jump/call instruction.
+It is used for linker relaxation to correctly identify beginning
+of instruction and change some branches to use PC-relative
+addressing mode. */
+ BFD_RELOC_M68HC11_RL_JUMP,
+
+/* Motorola 68HC11 reloc.
+This reloc marks a group of several instructions that gcc generates
+and for which the linker relaxation pass can modify and/or remove
+some of them. */
+ BFD_RELOC_M68HC11_RL_GROUP,
+
+/* Motorola 68HC11 reloc.
+This is the 16-bit lower part of an address. It is used by the 'call'
+instruction to specify the symbol address without any special
+transformation (due to the memory bank window). */
+ BFD_RELOC_M68HC11_LO16,
+
+/* Motorola 68HC11 reloc.
+This is an 8-bit reloc that specifies the page number of an address.
+It is used by the 'call' instruction to specify the page number of
+the symbol. */
+ BFD_RELOC_M68HC11_PAGE,
+
+/* Motorola 68HC11 reloc.
+This is a 24-bit reloc that represents the address with a 16-bit
+value and an 8-bit page number. The symbol address is transformed
+to follow the 16K memory bank of 68HC12 (seen as mapped in the window). */
+ BFD_RELOC_M68HC11_24,
+
+/* Motorola 68HC12 reloc.
+This is the 5-bit part of a value. */
+ BFD_RELOC_M68HC12_5B,
+
+/* NS CR16C Relocations. */
+ BFD_RELOC_16C_NUM08,
+ BFD_RELOC_16C_NUM08_C,
+ BFD_RELOC_16C_NUM16,
+ BFD_RELOC_16C_NUM16_C,
+ BFD_RELOC_16C_NUM32,
+ BFD_RELOC_16C_NUM32_C,
+ BFD_RELOC_16C_DISP04,
+ BFD_RELOC_16C_DISP04_C,
+ BFD_RELOC_16C_DISP08,
+ BFD_RELOC_16C_DISP08_C,
+ BFD_RELOC_16C_DISP16,
+ BFD_RELOC_16C_DISP16_C,
+ BFD_RELOC_16C_DISP24,
+ BFD_RELOC_16C_DISP24_C,
+ BFD_RELOC_16C_DISP24a,
+ BFD_RELOC_16C_DISP24a_C,
+ BFD_RELOC_16C_REG04,
+ BFD_RELOC_16C_REG04_C,
+ BFD_RELOC_16C_REG04a,
+ BFD_RELOC_16C_REG04a_C,
+ BFD_RELOC_16C_REG14,
+ BFD_RELOC_16C_REG14_C,
+ BFD_RELOC_16C_REG16,
+ BFD_RELOC_16C_REG16_C,
+ BFD_RELOC_16C_REG20,
+ BFD_RELOC_16C_REG20_C,
+ BFD_RELOC_16C_ABS20,
+ BFD_RELOC_16C_ABS20_C,
+ BFD_RELOC_16C_ABS24,
+ BFD_RELOC_16C_ABS24_C,
+ BFD_RELOC_16C_IMM04,
+ BFD_RELOC_16C_IMM04_C,
+ BFD_RELOC_16C_IMM16,
+ BFD_RELOC_16C_IMM16_C,
+ BFD_RELOC_16C_IMM20,
+ BFD_RELOC_16C_IMM20_C,
+ BFD_RELOC_16C_IMM24,
+ BFD_RELOC_16C_IMM24_C,
+ BFD_RELOC_16C_IMM32,
+ BFD_RELOC_16C_IMM32_C,
+
+/* NS CR16 Relocations. */
+ BFD_RELOC_CR16_NUM8,
+ BFD_RELOC_CR16_NUM16,
+ BFD_RELOC_CR16_NUM32,
+ BFD_RELOC_CR16_NUM32a,
+ BFD_RELOC_CR16_REGREL0,
+ BFD_RELOC_CR16_REGREL4,
+ BFD_RELOC_CR16_REGREL4a,
+ BFD_RELOC_CR16_REGREL14,
+ BFD_RELOC_CR16_REGREL14a,
+ BFD_RELOC_CR16_REGREL16,
+ BFD_RELOC_CR16_REGREL20,
+ BFD_RELOC_CR16_REGREL20a,
+ BFD_RELOC_CR16_ABS20,
+ BFD_RELOC_CR16_ABS24,
+ BFD_RELOC_CR16_IMM4,
+ BFD_RELOC_CR16_IMM8,
+ BFD_RELOC_CR16_IMM16,
+ BFD_RELOC_CR16_IMM20,
+ BFD_RELOC_CR16_IMM24,
+ BFD_RELOC_CR16_IMM32,
+ BFD_RELOC_CR16_IMM32a,
+ BFD_RELOC_CR16_DISP4,
+ BFD_RELOC_CR16_DISP8,
+ BFD_RELOC_CR16_DISP16,
+ BFD_RELOC_CR16_DISP20,
+ BFD_RELOC_CR16_DISP24,
+ BFD_RELOC_CR16_DISP24a,
+ BFD_RELOC_CR16_SWITCH8,
+ BFD_RELOC_CR16_SWITCH16,
+ BFD_RELOC_CR16_SWITCH32,
+ BFD_RELOC_CR16_GOT_REGREL20,
+ BFD_RELOC_CR16_GOTC_REGREL20,
+ BFD_RELOC_CR16_GLOB_DAT,
+
+/* NS CRX Relocations. */
+ BFD_RELOC_CRX_REL4,
+ BFD_RELOC_CRX_REL8,
+ BFD_RELOC_CRX_REL8_CMP,
+ BFD_RELOC_CRX_REL16,
+ BFD_RELOC_CRX_REL24,
+ BFD_RELOC_CRX_REL32,
+ BFD_RELOC_CRX_REGREL12,
+ BFD_RELOC_CRX_REGREL22,
+ BFD_RELOC_CRX_REGREL28,
+ BFD_RELOC_CRX_REGREL32,
+ BFD_RELOC_CRX_ABS16,
+ BFD_RELOC_CRX_ABS32,
+ BFD_RELOC_CRX_NUM8,
+ BFD_RELOC_CRX_NUM16,
+ BFD_RELOC_CRX_NUM32,
+ BFD_RELOC_CRX_IMM16,
+ BFD_RELOC_CRX_IMM32,
+ BFD_RELOC_CRX_SWITCH8,
+ BFD_RELOC_CRX_SWITCH16,
+ BFD_RELOC_CRX_SWITCH32,
+
+/* These relocs are only used within the CRIS assembler. They are not
+(at present) written to any object files. */
+ BFD_RELOC_CRIS_BDISP8,
+ BFD_RELOC_CRIS_UNSIGNED_5,
+ BFD_RELOC_CRIS_SIGNED_6,
+ BFD_RELOC_CRIS_UNSIGNED_6,
+ BFD_RELOC_CRIS_SIGNED_8,
+ BFD_RELOC_CRIS_UNSIGNED_8,
+ BFD_RELOC_CRIS_SIGNED_16,
+ BFD_RELOC_CRIS_UNSIGNED_16,
+ BFD_RELOC_CRIS_LAPCQ_OFFSET,
+ BFD_RELOC_CRIS_UNSIGNED_4,
+
+/* Relocs used in ELF shared libraries for CRIS. */
+ BFD_RELOC_CRIS_COPY,
+ BFD_RELOC_CRIS_GLOB_DAT,
+ BFD_RELOC_CRIS_JUMP_SLOT,
+ BFD_RELOC_CRIS_RELATIVE,
+
+/* 32-bit offset to symbol-entry within GOT. */
+ BFD_RELOC_CRIS_32_GOT,
+
+/* 16-bit offset to symbol-entry within GOT. */
+ BFD_RELOC_CRIS_16_GOT,
+
+/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_CRIS_32_GOTPLT,
+
+/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
+ BFD_RELOC_CRIS_16_GOTPLT,
+
+/* 32-bit offset to symbol, relative to GOT. */
+ BFD_RELOC_CRIS_32_GOTREL,
+
+/* 32-bit offset to symbol with PLT entry, relative to GOT. */
+ BFD_RELOC_CRIS_32_PLT_GOTREL,
+
+/* 32-bit offset to symbol with PLT entry, relative to this relocation. */
+ BFD_RELOC_CRIS_32_PLT_PCREL,
+
+/* Relocs used in TLS code for CRIS. */
+ BFD_RELOC_CRIS_32_GOT_GD,
+ BFD_RELOC_CRIS_16_GOT_GD,
+ BFD_RELOC_CRIS_32_GD,
+ BFD_RELOC_CRIS_DTP,
+ BFD_RELOC_CRIS_32_DTPREL,
+ BFD_RELOC_CRIS_16_DTPREL,
+ BFD_RELOC_CRIS_32_GOT_TPREL,
+ BFD_RELOC_CRIS_16_GOT_TPREL,
+ BFD_RELOC_CRIS_32_TPREL,
+ BFD_RELOC_CRIS_16_TPREL,
+ BFD_RELOC_CRIS_DTPMOD,
+ BFD_RELOC_CRIS_32_IE,
+
+/* Intel i860 Relocations. */
+ BFD_RELOC_860_COPY,
+ BFD_RELOC_860_GLOB_DAT,
+ BFD_RELOC_860_JUMP_SLOT,
+ BFD_RELOC_860_RELATIVE,
+ BFD_RELOC_860_PC26,
+ BFD_RELOC_860_PLT26,
+ BFD_RELOC_860_PC16,
+ BFD_RELOC_860_LOW0,
+ BFD_RELOC_860_SPLIT0,
+ BFD_RELOC_860_LOW1,
+ BFD_RELOC_860_SPLIT1,
+ BFD_RELOC_860_LOW2,
+ BFD_RELOC_860_SPLIT2,
+ BFD_RELOC_860_LOW3,
+ BFD_RELOC_860_LOGOT0,
+ BFD_RELOC_860_SPGOT0,
+ BFD_RELOC_860_LOGOT1,
+ BFD_RELOC_860_SPGOT1,
+ BFD_RELOC_860_LOGOTOFF0,
+ BFD_RELOC_860_SPGOTOFF0,
+ BFD_RELOC_860_LOGOTOFF1,
+ BFD_RELOC_860_SPGOTOFF1,
+ BFD_RELOC_860_LOGOTOFF2,
+ BFD_RELOC_860_LOGOTOFF3,
+ BFD_RELOC_860_LOPC,
+ BFD_RELOC_860_HIGHADJ,
+ BFD_RELOC_860_HAGOT,
+ BFD_RELOC_860_HAGOTOFF,
+ BFD_RELOC_860_HAPC,
+ BFD_RELOC_860_HIGH,
+ BFD_RELOC_860_HIGOT,
+ BFD_RELOC_860_HIGOTOFF,
+
+/* OpenRISC Relocations. */
+ BFD_RELOC_OPENRISC_ABS_26,
+ BFD_RELOC_OPENRISC_REL_26,
+
+/* H8 elf Relocations. */
+ BFD_RELOC_H8_DIR16A8,
+ BFD_RELOC_H8_DIR16R8,
+ BFD_RELOC_H8_DIR24A8,
+ BFD_RELOC_H8_DIR24R8,
+ BFD_RELOC_H8_DIR32A16,
+
+/* Sony Xstormy16 Relocations. */
+ BFD_RELOC_XSTORMY16_REL_12,
+ BFD_RELOC_XSTORMY16_12,
+ BFD_RELOC_XSTORMY16_24,
+ BFD_RELOC_XSTORMY16_FPTR16,
+
+/* Self-describing complex relocations. */
+ BFD_RELOC_RELC,
+
+
+/* Infineon Relocations. */
+ BFD_RELOC_XC16X_PAG,
+ BFD_RELOC_XC16X_POF,
+ BFD_RELOC_XC16X_SEG,
+ BFD_RELOC_XC16X_SOF,
+
+/* Relocations used by VAX ELF. */
+ BFD_RELOC_VAX_GLOB_DAT,
+ BFD_RELOC_VAX_JMP_SLOT,
+ BFD_RELOC_VAX_RELATIVE,
+
+/* Morpho MT - 16 bit immediate relocation. */
+ BFD_RELOC_MT_PC16,
+
+/* Morpho MT - Hi 16 bits of an address. */
+ BFD_RELOC_MT_HI16,
+
+/* Morpho MT - Low 16 bits of an address. */
+ BFD_RELOC_MT_LO16,
+
+/* Morpho MT - Used to tell the linker which vtable entries are used. */
+ BFD_RELOC_MT_GNU_VTINHERIT,
+
+/* Morpho MT - Used to tell the linker which vtable entries are used. */
+ BFD_RELOC_MT_GNU_VTENTRY,
+
+/* Morpho MT - 8 bit immediate relocation. */
+ BFD_RELOC_MT_PCINSN8,
+
+/* msp430-specific relocation codes */
+ BFD_RELOC_MSP430_10_PCREL,
+ BFD_RELOC_MSP430_16_PCREL,
+ BFD_RELOC_MSP430_16,
+ BFD_RELOC_MSP430_16_PCREL_BYTE,
+ BFD_RELOC_MSP430_16_BYTE,
+ BFD_RELOC_MSP430_2X_PCREL,
+ BFD_RELOC_MSP430_RL_PCREL,
+
+/* IQ2000 Relocations. */
+ BFD_RELOC_IQ2000_OFFSET_16,
+ BFD_RELOC_IQ2000_OFFSET_21,
+ BFD_RELOC_IQ2000_UHI16,
+
+/* Special Xtensa relocation used only by PLT entries in ELF shared
+objects to indicate that the runtime linker should set the value
+to one of its own internal functions or data structures. */
+ BFD_RELOC_XTENSA_RTLD,
+
+/* Xtensa relocations for ELF shared objects. */
+ BFD_RELOC_XTENSA_GLOB_DAT,
+ BFD_RELOC_XTENSA_JMP_SLOT,
+ BFD_RELOC_XTENSA_RELATIVE,
+
+/* Xtensa relocation used in ELF object files for symbols that may require
+PLT entries. Otherwise, this is just a generic 32-bit relocation. */
+ BFD_RELOC_XTENSA_PLT,
+
+/* Xtensa relocations to mark the difference of two local symbols.
+These are only needed to support linker relaxation and can be ignored
+when not relaxing. The field is set to the value of the difference
+assuming no relaxation. The relocation encodes the position of the
+first symbol so the linker can determine whether to adjust the field
+value. */
+ BFD_RELOC_XTENSA_DIFF8,
+ BFD_RELOC_XTENSA_DIFF16,
+ BFD_RELOC_XTENSA_DIFF32,
+
+/* Generic Xtensa relocations for instruction operands. Only the slot
+number is encoded in the relocation. The relocation applies to the
+last PC-relative immediate operand, or if there are no PC-relative
+immediates, to the last immediate operand. */
+ BFD_RELOC_XTENSA_SLOT0_OP,
+ BFD_RELOC_XTENSA_SLOT1_OP,
+ BFD_RELOC_XTENSA_SLOT2_OP,
+ BFD_RELOC_XTENSA_SLOT3_OP,
+ BFD_RELOC_XTENSA_SLOT4_OP,
+ BFD_RELOC_XTENSA_SLOT5_OP,
+ BFD_RELOC_XTENSA_SLOT6_OP,
+ BFD_RELOC_XTENSA_SLOT7_OP,
+ BFD_RELOC_XTENSA_SLOT8_OP,
+ BFD_RELOC_XTENSA_SLOT9_OP,
+ BFD_RELOC_XTENSA_SLOT10_OP,
+ BFD_RELOC_XTENSA_SLOT11_OP,
+ BFD_RELOC_XTENSA_SLOT12_OP,
+ BFD_RELOC_XTENSA_SLOT13_OP,
+ BFD_RELOC_XTENSA_SLOT14_OP,
+
+/* Alternate Xtensa relocations. Only the slot is encoded in the
+relocation. The meaning of these relocations is opcode-specific. */
+ BFD_RELOC_XTENSA_SLOT0_ALT,
+ BFD_RELOC_XTENSA_SLOT1_ALT,
+ BFD_RELOC_XTENSA_SLOT2_ALT,
+ BFD_RELOC_XTENSA_SLOT3_ALT,
+ BFD_RELOC_XTENSA_SLOT4_ALT,
+ BFD_RELOC_XTENSA_SLOT5_ALT,
+ BFD_RELOC_XTENSA_SLOT6_ALT,
+ BFD_RELOC_XTENSA_SLOT7_ALT,
+ BFD_RELOC_XTENSA_SLOT8_ALT,
+ BFD_RELOC_XTENSA_SLOT9_ALT,
+ BFD_RELOC_XTENSA_SLOT10_ALT,
+ BFD_RELOC_XTENSA_SLOT11_ALT,
+ BFD_RELOC_XTENSA_SLOT12_ALT,
+ BFD_RELOC_XTENSA_SLOT13_ALT,
+ BFD_RELOC_XTENSA_SLOT14_ALT,
+
+/* Xtensa relocations for backward compatibility. These have all been
+replaced by BFD_RELOC_XTENSA_SLOT0_OP. */
+ BFD_RELOC_XTENSA_OP0,
+ BFD_RELOC_XTENSA_OP1,
+ BFD_RELOC_XTENSA_OP2,
+
+/* Xtensa relocation to mark that the assembler expanded the
+instructions from an original target. The expansion size is
+encoded in the reloc size. */
+ BFD_RELOC_XTENSA_ASM_EXPAND,
+
+/* Xtensa relocation to mark that the linker should simplify
+assembler-expanded instructions. This is commonly used
+internally by the linker after analysis of a
+BFD_RELOC_XTENSA_ASM_EXPAND. */
+ BFD_RELOC_XTENSA_ASM_SIMPLIFY,
+
+/* Xtensa TLS relocations. */
+ BFD_RELOC_XTENSA_TLSDESC_FN,
+ BFD_RELOC_XTENSA_TLSDESC_ARG,
+ BFD_RELOC_XTENSA_TLS_DTPOFF,
+ BFD_RELOC_XTENSA_TLS_TPOFF,
+ BFD_RELOC_XTENSA_TLS_FUNC,
+ BFD_RELOC_XTENSA_TLS_ARG,
+ BFD_RELOC_XTENSA_TLS_CALL,
+
+/* 8 bit signed offset in (ix+d) or (iy+d). */
+ BFD_RELOC_Z80_DISP8,
+
+/* DJNZ offset. */
+ BFD_RELOC_Z8K_DISP7,
+
+/* CALR offset. */
+ BFD_RELOC_Z8K_CALLR,
+
+/* 4 bit value. */
+ BFD_RELOC_Z8K_IMM4L,
+
+/* Lattice Mico32 relocations. */
+ BFD_RELOC_LM32_CALL,
+ BFD_RELOC_LM32_BRANCH,
+ BFD_RELOC_LM32_16_GOT,
+ BFD_RELOC_LM32_GOTOFF_HI16,
+ BFD_RELOC_LM32_GOTOFF_LO16,
+ BFD_RELOC_LM32_COPY,
+ BFD_RELOC_LM32_GLOB_DAT,
+ BFD_RELOC_LM32_JMP_SLOT,
+ BFD_RELOC_LM32_RELATIVE,
+
+/* Difference between two section addresses. Must be followed by a
+BFD_RELOC_MACH_O_PAIR. */
+ BFD_RELOC_MACH_O_SECTDIFF,
+
+/* Pair relocation. Contains the first symbol. */
+ BFD_RELOC_MACH_O_PAIR,
+
+/* PCREL relocations. They are marked as branch to create PLT entry if
+required. */
+ BFD_RELOC_MACH_O_X86_64_BRANCH32,
+ BFD_RELOC_MACH_O_X86_64_BRANCH8,
+
+/* Used when referencing a GOT entry. */
+ BFD_RELOC_MACH_O_X86_64_GOT,
+
+/* Used when loading a GOT entry with movq. It is specially marked so that
+the linker can optimize the movq to a leaq if possible. */
+ BFD_RELOC_MACH_O_X86_64_GOT_LOAD,
+
+/* Symbol will be subtracted. Must be followed by a BFD_RELOC_64. */
+ BFD_RELOC_MACH_O_X86_64_SUBTRACTOR32,
+
+/* Symbol will be subtracted. Must be followed by a BFD_RELOC_64. */
+ BFD_RELOC_MACH_O_X86_64_SUBTRACTOR64,
+
+/* Same as BFD_RELOC_32_PCREL but with an implicit -1 addend. */
+ BFD_RELOC_MACH_O_X86_64_PCREL32_1,
+
+/* Same as BFD_RELOC_32_PCREL but with an implicit -2 addend. */
+ BFD_RELOC_MACH_O_X86_64_PCREL32_2,
+
+/* Same as BFD_RELOC_32_PCREL but with an implicit -4 addend. */
+ BFD_RELOC_MACH_O_X86_64_PCREL32_4,
+
+/* This is a 32 bit reloc for the microblaze that stores the
+low 16 bits of a value */
+ BFD_RELOC_MICROBLAZE_32_LO,
+
+/* This is a 32 bit pc-relative reloc for the microblaze that
+stores the low 16 bits of a value */
+ BFD_RELOC_MICROBLAZE_32_LO_PCREL,
+
+/* This is a 32 bit reloc for the microblaze that stores a
+value relative to the read-only small data area anchor */
+ BFD_RELOC_MICROBLAZE_32_ROSDA,
+
+/* This is a 32 bit reloc for the microblaze that stores a
+value relative to the read-write small data area anchor */
+ BFD_RELOC_MICROBLAZE_32_RWSDA,
+
+/* This is a 32 bit reloc for the microblaze to handle
+expressions of the form "Symbol Op Symbol" */
+ BFD_RELOC_MICROBLAZE_32_SYM_OP_SYM,
+
+/* This is a 64 bit reloc that stores the 32 bit pc relative
+value in two words (with an imm instruction). No relocation is
+done here - only used for relaxing */
+ BFD_RELOC_MICROBLAZE_64_NONE,
+
+/* This is a 64 bit reloc that stores the 32 bit pc relative
+value in two words (with an imm instruction). The relocation is
+PC-relative GOT offset */
+ BFD_RELOC_MICROBLAZE_64_GOTPC,
+
+/* This is a 64 bit reloc that stores the 32 bit pc relative
+value in two words (with an imm instruction). The relocation is
+GOT offset */
+ BFD_RELOC_MICROBLAZE_64_GOT,
+
+/* This is a 64 bit reloc that stores the 32 bit pc relative
+value in two words (with an imm instruction). The relocation is
+PC-relative offset into PLT */
+ BFD_RELOC_MICROBLAZE_64_PLT,
+
+/* This is a 64 bit reloc that stores the 32 bit GOT relative
+value in two words (with an imm instruction). The relocation is
+relative offset from _GLOBAL_OFFSET_TABLE_ */
+ BFD_RELOC_MICROBLAZE_64_GOTOFF,
+
+/* This is a 32 bit reloc that stores the 32 bit GOT relative
+value in a word. The relocation is a relative offset from _GLOBAL_OFFSET_TABLE_. */
+ BFD_RELOC_MICROBLAZE_32_GOTOFF,
+
+/* This is used to tell the dynamic linker to copy the value out of
+the dynamic object into the runtime process image. */
+ BFD_RELOC_MICROBLAZE_COPY,
+ BFD_RELOC_UNUSED };
+typedef enum bfd_reloc_code_real bfd_reloc_code_real_type;
+reloc_howto_type *bfd_reloc_type_lookup
+ (bfd *abfd, bfd_reloc_code_real_type code);
+reloc_howto_type *bfd_reloc_name_lookup
+ (bfd *abfd, const char *reloc_name);
+
+const char *bfd_get_reloc_code_name (bfd_reloc_code_real_type code);
+
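The three lookup entry points above map between the target-independent bfd_reloc_code_real_type codes and the target's howto table. A minimal sketch of how a client might use them follows; it is not part of the header, and it assumes the usual BFD lifecycle calls (bfd_init, bfd_openr, bfd_check_format, bfd_close) and an illustrative file path.

#include <stdio.h>
#include "bfd.h"

/* Sketch: translate the generic BFD_RELOC_32 code into the howto entry
   (and thus the target-specific relocation name) for one object file.  */
static void show_reloc_howto (const char *path)
{
  bfd_init ();
  bfd *abfd = bfd_openr (path, NULL);               /* NULL target = autodetect */
  if (abfd == NULL || !bfd_check_format (abfd, bfd_object))
    return;

  reloc_howto_type *howto = bfd_reloc_type_lookup (abfd, BFD_RELOC_32);
  if (howto != NULL)
    printf ("%s is implemented as %s on this target\n",
            bfd_get_reloc_code_name (BFD_RELOC_32), howto->name);

  bfd_close (abfd);
}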
+/* Extracted from syms.c. */
+
+typedef struct bfd_symbol
+{
+ /* A pointer to the BFD which owns the symbol. This information
+ is necessary so that a back end can work out what additional
+ information (invisible to the application writer) is carried
+ with the symbol.
+
+ This field is *almost* redundant, since you can use section->owner
+ instead, except that some symbols point to the global sections
+ bfd_{abs,com,und}_section. This could be fixed by making
+ these globals be per-bfd (or per-target-flavor). FIXME. */
+ struct bfd *the_bfd; /* Use bfd_asymbol_bfd(sym) to access this field. */
+
+ /* The text of the symbol. The name is left alone, and not copied; the
+ application may not alter it. */
+ const char *name;
+
+ /* The value of the symbol. This really should be a union of a
+ numeric value with a pointer, since some flags indicate that
+ a pointer to another symbol is stored here. */
+ symvalue value;
+
+ /* Attributes of a symbol. */
+#define BSF_NO_FLAGS 0x00
+
+ /* The symbol has local scope; <<static>> in <<C>>. The value
+ is the offset into the section of the data. */
+#define BSF_LOCAL (1 << 0)
+
+ /* The symbol has global scope; initialized data in <<C>>. The
+ value is the offset into the section of the data. */
+#define BSF_GLOBAL (1 << 1)
+
+ /* The symbol has global scope and is exported. The value is
+ the offset into the section of the data. */
+#define BSF_EXPORT BSF_GLOBAL /* No real difference. */
+
+ /* A normal C symbol would be one of:
+ <<BSF_LOCAL>>, <<BSF_COMMON>>, <<BSF_UNDEFINED>> or
+ <<BSF_GLOBAL>>. */
+
+ /* The symbol is a debugging record. The value has an arbitrary
+ meaning, unless BSF_DEBUGGING_RELOC is also set. */
+#define BSF_DEBUGGING (1 << 2)
+
+ /* The symbol denotes a function entry point. Used in ELF,
+ perhaps others someday. */
+#define BSF_FUNCTION (1 << 3)
+
+ /* Used by the linker. */
+#define BSF_KEEP (1 << 5)
+#define BSF_KEEP_G (1 << 6)
+
+ /* A weak global symbol, overridable without warnings by
+ a regular global symbol of the same name. */
+#define BSF_WEAK (1 << 7)
+
+ /* This symbol was created to point to a section, e.g. ELF's
+ STT_SECTION symbols. */
+#define BSF_SECTION_SYM (1 << 8)
+
+ /* The symbol used to be a common symbol, but now it is
+ allocated. */
+#define BSF_OLD_COMMON (1 << 9)
+
+ /* In some files the type of a symbol sometimes alters its
+ location in an output file - i.e. in coff a <<ISFCN>> symbol
+ which is also a <<C_EXT>> symbol appears where it was
+ declared and not at the end of a section. This bit is set
+ by the target BFD part to convey this information. */
+#define BSF_NOT_AT_END (1 << 10)
+
+ /* Signal that the symbol is the label of constructor section. */
+#define BSF_CONSTRUCTOR (1 << 11)
+
+ /* Signal that the symbol is a warning symbol. The name is a
+ warning. The name of the next symbol is the one to warn about;
+ if a reference is made to a symbol with the same name as the next
+ symbol, a warning is issued by the linker. */
+#define BSF_WARNING (1 << 12)
+
+ /* Signal that the symbol is indirect. This symbol is an indirect
+ pointer to the symbol with the same name as the next symbol. */
+#define BSF_INDIRECT (1 << 13)
+
+ /* BSF_FILE marks symbols that contain a file name. This is used
+ for ELF STT_FILE symbols. */
+#define BSF_FILE (1 << 14)
+
+ /* Symbol is from dynamic linking information. */
+#define BSF_DYNAMIC (1 << 15)
+
+ /* The symbol denotes a data object. Used in ELF, and perhaps
+ others someday. */
+#define BSF_OBJECT (1 << 16)
+
+ /* This symbol is a debugging symbol. The value is the offset
+ into the section of the data. BSF_DEBUGGING should be set
+ as well. */
+#define BSF_DEBUGGING_RELOC (1 << 17)
+
+ /* This symbol is thread local. Used in ELF. */
+#define BSF_THREAD_LOCAL (1 << 18)
+
+ /* This symbol represents a complex relocation expression,
+ with the expression tree serialized in the symbol name. */
+#define BSF_RELC (1 << 19)
+
+ /* This symbol represents a signed complex relocation expression,
+ with the expression tree serialized in the symbol name. */
+#define BSF_SRELC (1 << 20)
+
+ /* This symbol was created by bfd_get_synthetic_symtab. */
+#define BSF_SYNTHETIC (1 << 21)
+
+ /* This symbol is an indirect code object. Unrelated to BSF_INDIRECT.
+ The dynamic linker will compute the value of this symbol by
+ calling the function that it points to. BSF_FUNCTION must
+ also be set. */
+#define BSF_GNU_INDIRECT_FUNCTION (1 << 22)
+ /* This symbol is a globally unique data object. The dynamic linker
+ will make sure that in the entire process there is just one symbol
+ with this name and type in use. BSF_OBJECT must also be set. */
+#define BSF_GNU_UNIQUE (1 << 23)
+
+ flagword flags;
+
+ /* A pointer to the section to which this symbol is
+ relative. This will always be non-NULL; there are special
+ sections for undefined and absolute symbols. */
+ struct bfd_section *section;
+
+ /* Back end special data. */
+ union
+ {
+ void *p;
+ bfd_vma i;
+ }
+ udata;
+}
+asymbol;
+
+#define bfd_get_symtab_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_symtab_upper_bound, (abfd))
+
+bfd_boolean bfd_is_local_label (bfd *abfd, asymbol *sym);
+
+bfd_boolean bfd_is_local_label_name (bfd *abfd, const char *name);
+
+#define bfd_is_local_label_name(abfd, name) \
+ BFD_SEND (abfd, _bfd_is_local_label_name, (abfd, name))
+
+bfd_boolean bfd_is_target_special_symbol (bfd *abfd, asymbol *sym);
+
+#define bfd_is_target_special_symbol(abfd, sym) \
+ BFD_SEND (abfd, _bfd_is_target_special_symbol, (abfd, sym))
+
+#define bfd_canonicalize_symtab(abfd, location) \
+ BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location))
+
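+/* Illustrative sketch, not part of the upstream header, of the canonical
+   symbol-table reading sequence: query the upper bound, allocate,
+   canonicalize, then inspect the BSF_ flags. Assumes <stdio.h> and
+   <stdlib.h>; guarded out. */
+#if 0
+static void
+example_list_global_symbols (bfd *abfd)
+{
+  long storage = bfd_get_symtab_upper_bound (abfd);
+  asymbol **syms;
+  long count, i;
+
+  if (storage <= 0)
+    return;
+  syms = (asymbol **) malloc (storage);
+  count = bfd_canonicalize_symtab (abfd, syms);
+  for (i = 0; i < count; i++)
+    if (syms[i]->flags & BSF_GLOBAL)
+      printf ("%s\n", syms[i]->name);
+  free (syms);
+}
+#endif
+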
+bfd_boolean bfd_set_symtab
+ (bfd *abfd, asymbol **location, unsigned int count);
+
+void bfd_print_symbol_vandf (bfd *abfd, void *file, asymbol *symbol);
+
+#define bfd_make_empty_symbol(abfd) \
+ BFD_SEND (abfd, _bfd_make_empty_symbol, (abfd))
+
+asymbol *_bfd_generic_make_empty_symbol (bfd *);
+
+#define bfd_make_debug_symbol(abfd,ptr,size) \
+ BFD_SEND (abfd, _bfd_make_debug_symbol, (abfd, ptr, size))
+
+int bfd_decode_symclass (asymbol *symbol);
+
+bfd_boolean bfd_is_undefined_symclass (int symclass);
+
+void bfd_symbol_info (asymbol *symbol, symbol_info *ret);
+
+bfd_boolean bfd_copy_private_symbol_data
+ (bfd *ibfd, asymbol *isym, bfd *obfd, asymbol *osym);
+
+#define bfd_copy_private_symbol_data(ibfd, isymbol, obfd, osymbol) \
+ BFD_SEND (obfd, _bfd_copy_private_symbol_data, \
+ (ibfd, isymbol, obfd, osymbol))
+
+/* Extracted from bfd.c. */
+enum bfd_direction
+ {
+ no_direction = 0,
+ read_direction = 1,
+ write_direction = 2,
+ both_direction = 3
+ };
+
+struct bfd
+{
+ /* A unique identifier of the BFD */
+ unsigned int id;
+
+ /* The filename the application opened the BFD with. */
+ const char *filename;
+
+ /* A pointer to the target jump table. */
+ const struct bfd_target *xvec;
+
+ /* The IOSTREAM, and corresponding IO vector that provide access
+ to the file backing the BFD. */
+ void *iostream;
+ const struct bfd_iovec *iovec;
+
+ /* The caching routines use these to maintain a
+ least-recently-used list of BFDs. */
+ struct bfd *lru_prev, *lru_next;
+
+ /* When a file is closed by the caching routines, BFD retains
+ state information on the file here... */
+ ufile_ptr where;
+
+ /* File modified time, if mtime_set is TRUE. */
+ long mtime;
+
+ /* Reserved for an unimplemented file locking extension. */
+ int ifd;
+
+ /* The format which belongs to the BFD. (object, core, etc.) */
+ bfd_format format;
+
+ /* The direction with which the BFD was opened. */
+ enum bfd_direction direction;
+
+ /* Format_specific flags. */
+ flagword flags;
+
+ /* Values that may appear in the flags field of a BFD. These also
+ appear in the object_flags field of the bfd_target structure, where
+ they indicate the set of flags used by that backend (not all flags
+ are meaningful for all object file formats) (FIXME: at the moment,
+ the object_flags values have mostly just been copied from one backend
+ to another, and are not necessarily correct). */
+
+#define BFD_NO_FLAGS 0x00
+
+ /* BFD contains relocation entries. */
+#define HAS_RELOC 0x01
+
+ /* BFD is directly executable. */
+#define EXEC_P 0x02
+
+ /* BFD has line number information (basically used for F_LNNO in a
+ COFF header). */
+#define HAS_LINENO 0x04
+
+ /* BFD has debugging information. */
+#define HAS_DEBUG 0x08
+
+ /* BFD has symbols. */
+#define HAS_SYMS 0x10
+
+ /* BFD has local symbols (basically used for F_LSYMS in a COFF
+ header). */
+#define HAS_LOCALS 0x20
+
+ /* BFD is a dynamic object. */
+#define DYNAMIC 0x40
+
+ /* Text section is write protected (if D_PAGED is not set, this is
+ like an a.out NMAGIC file) (the linker sets this by default, but
+ clears it for -r or -N). */
+#define WP_TEXT 0x80
+
+ /* BFD is dynamically paged (this is like an a.out ZMAGIC file) (the
+ linker sets this by default, but clears it for -r or -n or -N). */
+#define D_PAGED 0x100
+
+ /* BFD is relaxable (this means that bfd_relax_section may be able to
+ do something) (sometimes bfd_relax_section can do something even if
+ this is not set). */
+#define BFD_IS_RELAXABLE 0x200
+
+ /* This may be set before writing out a BFD to request using a
+ traditional format. For example, this is used to request that when
+ writing out an a.out object the symbols not be hashed to eliminate
+ duplicates. */
+#define BFD_TRADITIONAL_FORMAT 0x400
+
+ /* This flag indicates that the BFD contents are actually cached
+ in memory. If this is set, iostream points to a bfd_in_memory
+ struct. */
+#define BFD_IN_MEMORY 0x800
+
+ /* The sections in this BFD specify a memory page. */
+#define HAS_LOAD_PAGE 0x1000
+
+ /* This BFD has been created by the linker and doesn't correspond
+ to any input file. */
+#define BFD_LINKER_CREATED 0x2000
+
+ /* This may be set before writing out a BFD to request that it
+ be written using values for UIDs, GIDs, timestamps, etc. that
+ will be consistent from run to run. */
+#define BFD_DETERMINISTIC_OUTPUT 0x4000
+
+ /* Currently my_archive is tested before adding origin to
+ anything. I believe that this can always become an add of
+ origin, with origin set to 0 for non-archive files. */
+ ufile_ptr origin;
+
+ /* The origin in the archive of the proxy entry. This will
+ normally be the same as origin, except for thin archives,
+ when it will contain the current offset of the proxy in the
+ thin archive rather than the offset of the bfd in its actual
+ container. */
+ ufile_ptr proxy_origin;
+
+ /* A hash table for section names. */
+ struct bfd_hash_table section_htab;
+
+ /* Pointer to linked list of sections. */
+ struct bfd_section *sections;
+
+ /* The last section on the section list. */
+ struct bfd_section *section_last;
+
+ /* The number of sections. */
+ unsigned int section_count;
+
+ /* Stuff only useful for object files:
+ The start address. */
+ bfd_vma start_address;
+
+ /* Used for input and output. */
+ unsigned int symcount;
+
+ /* Symbol table for output BFD (with symcount entries).
+ Also used by the linker to cache input BFD symbols. */
+ struct bfd_symbol **outsymbols;
+
+ /* Used for slurped dynamic symbol tables. */
+ unsigned int dynsymcount;
+
+ /* Pointer to structure which contains architecture information. */
+ const struct bfd_arch_info *arch_info;
+
+ /* Stuff only useful for archives. */
+ void *arelt_data;
+ struct bfd *my_archive; /* The containing archive BFD. */
+ struct bfd *archive_next; /* The next BFD in the archive. */
+ struct bfd *archive_head; /* The first BFD in the archive. */
+ struct bfd *nested_archives; /* List of nested archives in a flattened
+ thin archive. */
+
+ /* A chain of BFD structures involved in a link. */
+ struct bfd *link_next;
+
+ /* A field used by _bfd_generic_link_add_archive_symbols. This will
+ be used only for archive elements. */
+ int archive_pass;
+
+ /* Used by the back end to hold private data. */
+ union
+ {
+ struct aout_data_struct *aout_data;
+ struct artdata *aout_ar_data;
+ struct _oasys_data *oasys_obj_data;
+ struct _oasys_ar_data *oasys_ar_data;
+ struct coff_tdata *coff_obj_data;
+ struct pe_tdata *pe_obj_data;
+ struct xcoff_tdata *xcoff_obj_data;
+ struct ecoff_tdata *ecoff_obj_data;
+ struct ieee_data_struct *ieee_data;
+ struct ieee_ar_data_struct *ieee_ar_data;
+ struct srec_data_struct *srec_data;
+ struct verilog_data_struct *verilog_data;
+ struct ihex_data_struct *ihex_data;
+ struct tekhex_data_struct *tekhex_data;
+ struct elf_obj_tdata *elf_obj_data;
+ struct nlm_obj_tdata *nlm_obj_data;
+ struct bout_data_struct *bout_data;
+ struct mmo_data_struct *mmo_data;
+ struct sun_core_struct *sun_core_data;
+ struct sco5_core_struct *sco5_core_data;
+ struct trad_core_struct *trad_core_data;
+ struct som_data_struct *som_data;
+ struct hpux_core_struct *hpux_core_data;
+ struct hppabsd_core_struct *hppabsd_core_data;
+ struct sgi_core_struct *sgi_core_data;
+ struct lynx_core_struct *lynx_core_data;
+ struct osf_core_struct *osf_core_data;
+ struct cisco_core_struct *cisco_core_data;
+ struct versados_data_struct *versados_data;
+ struct netbsd_core_struct *netbsd_core_data;
+ struct mach_o_data_struct *mach_o_data;
+ struct mach_o_fat_data_struct *mach_o_fat_data;
+ struct plugin_data_struct *plugin_data;
+ struct bfd_pef_data_struct *pef_data;
+ struct bfd_pef_xlib_data_struct *pef_xlib_data;
+ struct bfd_sym_data_struct *sym_data;
+ void *any;
+ }
+ tdata;
+
+ /* Used by the application to hold private data. */
+ void *usrdata;
+
+ /* Where all the allocated stuff under this BFD goes. This is a
+ struct objalloc *, but we use void * to avoid requiring the inclusion
+ of objalloc.h. */
+ void *memory;
+
+ /* Is the file descriptor being cached? That is, can it be closed as
+ needed, and re-opened when accessed later? */
+ unsigned int cacheable : 1;
+
+ /* Marks whether there was a default target specified when the
+ BFD was opened. This is used to select which matching algorithm
+ to use to choose the back end. */
+ unsigned int target_defaulted : 1;
+
+ /* ... and here: (``once'' means at least once). */
+ unsigned int opened_once : 1;
+
+ /* Set if we have a locally maintained mtime value, rather than
+ getting it from the file each time. */
+ unsigned int mtime_set : 1;
+
+ /* Flag set if symbols from this BFD should not be exported. */
+ unsigned int no_export : 1;
+
+ /* Remember when output has begun, to stop strange things
+ from happening. */
+ unsigned int output_has_begun : 1;
+
+ /* Have archive map. */
+ unsigned int has_armap : 1;
+
+ /* Set if this is a thin archive. */
+ unsigned int is_thin_archive : 1;
+
+ /* Set if only required symbols should be added in the link hash table for
+ this object. Used by VMS linkers. */
+ unsigned int selective_search : 1;
+};
+
+typedef enum bfd_error
+{
+ bfd_error_no_error = 0,
+ bfd_error_system_call,
+ bfd_error_invalid_target,
+ bfd_error_wrong_format,
+ bfd_error_wrong_object_format,
+ bfd_error_invalid_operation,
+ bfd_error_no_memory,
+ bfd_error_no_symbols,
+ bfd_error_no_armap,
+ bfd_error_no_more_archived_files,
+ bfd_error_malformed_archive,
+ bfd_error_file_not_recognized,
+ bfd_error_file_ambiguously_recognized,
+ bfd_error_no_contents,
+ bfd_error_nonrepresentable_section,
+ bfd_error_no_debug_section,
+ bfd_error_bad_value,
+ bfd_error_file_truncated,
+ bfd_error_file_too_big,
+ bfd_error_on_input,
+ bfd_error_invalid_error_code
+}
+bfd_error_type;
+
+bfd_error_type bfd_get_error (void);
+
+void bfd_set_error (bfd_error_type error_tag, ...);
+
+const char *bfd_errmsg (bfd_error_type error_tag);
+
+void bfd_perror (const char *message);
+
+typedef void (*bfd_error_handler_type) (const char *, ...);
+
+bfd_error_handler_type bfd_set_error_handler (bfd_error_handler_type);
+
+void bfd_set_error_program_name (const char *);
+
+bfd_error_handler_type bfd_get_error_handler (void);
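+
+/* Illustrative error-reporting sketch, not part of the upstream header:
+   the usual idiom is to call bfd_get_error after a BFD call fails and
+   turn the code into a message with bfd_errmsg. Assumes <stdio.h>;
+   guarded out. */
+#if 0
+static void
+example_report_error (const char *operation)
+{
+  bfd_error_type err = bfd_get_error ();
+
+  if (err != bfd_error_no_error)
+    fprintf (stderr, "%s failed: %s\n", operation, bfd_errmsg (err));
+}
+#endif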
+
+long bfd_get_reloc_upper_bound (bfd *abfd, asection *sect);
+
+long bfd_canonicalize_reloc
+ (bfd *abfd, asection *sec, arelent **loc, asymbol **syms);
+
+void bfd_set_reloc
+ (bfd *abfd, asection *sec, arelent **rel, unsigned int count);
+
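+/* Illustrative sketch, not part of the upstream header, of reading a
+   section's relocations: size the buffer, allocate, then canonicalize.
+   SYMS is assumed to be the symbol table previously obtained from
+   bfd_canonicalize_symtab; assumes <stdio.h> and <stdlib.h>; guarded out. */
+#if 0
+static void
+example_count_relocs (bfd *abfd, asection *sec, asymbol **syms)
+{
+  long size = bfd_get_reloc_upper_bound (abfd, sec);
+  arelent **relocs;
+  long count;
+
+  if (size <= 0)
+    return;
+  relocs = (arelent **) malloc (size);
+  count = bfd_canonicalize_reloc (abfd, sec, relocs, syms);
+  printf ("%ld relocations in section %s\n", count, sec->name);
+  free (relocs);
+}
+#endif
+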
+bfd_boolean bfd_set_file_flags (bfd *abfd, flagword flags);
+
+int bfd_get_arch_size (bfd *abfd);
+
+int bfd_get_sign_extend_vma (bfd *abfd);
+
+bfd_boolean bfd_set_start_address (bfd *abfd, bfd_vma vma);
+
+unsigned int bfd_get_gp_size (bfd *abfd);
+
+void bfd_set_gp_size (bfd *abfd, unsigned int i);
+
+bfd_vma bfd_scan_vma (const char *string, const char **end, int base);
+
+bfd_boolean bfd_copy_private_header_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_copy_private_header_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_copy_private_header_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_copy_private_bfd_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_copy_private_bfd_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_copy_private_bfd_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_merge_private_bfd_data (bfd *ibfd, bfd *obfd);
+
+#define bfd_merge_private_bfd_data(ibfd, obfd) \
+ BFD_SEND (obfd, _bfd_merge_private_bfd_data, \
+ (ibfd, obfd))
+bfd_boolean bfd_set_private_flags (bfd *abfd, flagword flags);
+
+#define bfd_set_private_flags(abfd, flags) \
+ BFD_SEND (abfd, _bfd_set_private_flags, (abfd, flags))
+#define bfd_sizeof_headers(abfd, info) \
+ BFD_SEND (abfd, _bfd_sizeof_headers, (abfd, info))
+
+#define bfd_find_nearest_line(abfd, sec, syms, off, file, func, line) \
+ BFD_SEND (abfd, _bfd_find_nearest_line, \
+ (abfd, sec, syms, off, file, func, line))
+
+#define bfd_find_line(abfd, syms, sym, file, line) \
+ BFD_SEND (abfd, _bfd_find_line, \
+ (abfd, syms, sym, file, line))
+
+#define bfd_find_inliner_info(abfd, file, func, line) \
+ BFD_SEND (abfd, _bfd_find_inliner_info, \
+ (abfd, file, func, line))
+
+#define bfd_debug_info_start(abfd) \
+ BFD_SEND (abfd, _bfd_debug_info_start, (abfd))
+
+#define bfd_debug_info_end(abfd) \
+ BFD_SEND (abfd, _bfd_debug_info_end, (abfd))
+
+#define bfd_debug_info_accumulate(abfd, section) \
+ BFD_SEND (abfd, _bfd_debug_info_accumulate, (abfd, section))
+
+#define bfd_stat_arch_elt(abfd, stat) \
+ BFD_SEND (abfd, _bfd_stat_arch_elt,(abfd, stat))
+
+#define bfd_update_armap_timestamp(abfd) \
+ BFD_SEND (abfd, _bfd_update_armap_timestamp, (abfd))
+
+#define bfd_set_arch_mach(abfd, arch, mach)\
+ BFD_SEND ( abfd, _bfd_set_arch_mach, (abfd, arch, mach))
+
+#define bfd_relax_section(abfd, section, link_info, again) \
+ BFD_SEND (abfd, _bfd_relax_section, (abfd, section, link_info, again))
+
+#define bfd_gc_sections(abfd, link_info) \
+ BFD_SEND (abfd, _bfd_gc_sections, (abfd, link_info))
+
+#define bfd_merge_sections(abfd, link_info) \
+ BFD_SEND (abfd, _bfd_merge_sections, (abfd, link_info))
+
+#define bfd_is_group_section(abfd, sec) \
+ BFD_SEND (abfd, _bfd_is_group_section, (abfd, sec))
+
+#define bfd_discard_group(abfd, sec) \
+ BFD_SEND (abfd, _bfd_discard_group, (abfd, sec))
+
+#define bfd_link_hash_table_create(abfd) \
+ BFD_SEND (abfd, _bfd_link_hash_table_create, (abfd))
+
+#define bfd_link_hash_table_free(abfd, hash) \
+ BFD_SEND (abfd, _bfd_link_hash_table_free, (hash))
+
+#define bfd_link_add_symbols(abfd, info) \
+ BFD_SEND (abfd, _bfd_link_add_symbols, (abfd, info))
+
+#define bfd_link_just_syms(abfd, sec, info) \
+ BFD_SEND (abfd, _bfd_link_just_syms, (sec, info))
+
+#define bfd_final_link(abfd, info) \
+ BFD_SEND (abfd, _bfd_final_link, (abfd, info))
+
+#define bfd_free_cached_info(abfd) \
+ BFD_SEND (abfd, _bfd_free_cached_info, (abfd))
+
+#define bfd_get_dynamic_symtab_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_dynamic_symtab_upper_bound, (abfd))
+
+#define bfd_print_private_bfd_data(abfd, file)\
+ BFD_SEND (abfd, _bfd_print_private_bfd_data, (abfd, file))
+
+#define bfd_canonicalize_dynamic_symtab(abfd, asymbols) \
+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_symtab, (abfd, asymbols))
+
+#define bfd_get_synthetic_symtab(abfd, count, syms, dyncount, dynsyms, ret) \
+ BFD_SEND (abfd, _bfd_get_synthetic_symtab, (abfd, count, syms, \
+ dyncount, dynsyms, ret))
+
+#define bfd_get_dynamic_reloc_upper_bound(abfd) \
+ BFD_SEND (abfd, _bfd_get_dynamic_reloc_upper_bound, (abfd))
+
+#define bfd_canonicalize_dynamic_reloc(abfd, arels, asyms) \
+ BFD_SEND (abfd, _bfd_canonicalize_dynamic_reloc, (abfd, arels, asyms))
+
+extern bfd_byte *bfd_get_relocated_section_contents
+ (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *,
+ bfd_boolean, asymbol **);
+
+bfd_boolean bfd_alt_mach_code (bfd *abfd, int alternative);
+
+struct bfd_preserve
+{
+ void *marker;
+ void *tdata;
+ flagword flags;
+ const struct bfd_arch_info *arch_info;
+ struct bfd_section *sections;
+ struct bfd_section *section_last;
+ unsigned int section_count;
+ struct bfd_hash_table section_htab;
+};
+
+bfd_boolean bfd_preserve_save (bfd *, struct bfd_preserve *);
+
+void bfd_preserve_restore (bfd *, struct bfd_preserve *);
+
+void bfd_preserve_finish (bfd *, struct bfd_preserve *);
+
+bfd_vma bfd_emul_get_maxpagesize (const char *);
+
+void bfd_emul_set_maxpagesize (const char *, bfd_vma);
+
+bfd_vma bfd_emul_get_commonpagesize (const char *);
+
+void bfd_emul_set_commonpagesize (const char *, bfd_vma);
+
+char *bfd_demangle (bfd *, const char *, int);
+
+/* Extracted from archive.c. */
+symindex bfd_get_next_mapent
+ (bfd *abfd, symindex previous, carsym **sym);
+
+bfd_boolean bfd_set_archive_head (bfd *output, bfd *new_head);
+
+bfd *bfd_openr_next_archived_file (bfd *archive, bfd *previous);
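+
+/* Illustrative sketch, not part of the upstream header, of walking an
+   archive: each call to bfd_openr_next_archived_file returns the element
+   after PREVIOUS, starting from NULL. Assumes bfd_openr, bfd_check_format
+   and bfd_close declared elsewhere in this header, plus <stdio.h>;
+   guarded out. */
+#if 0
+static void
+example_list_archive_members (const char *path)
+{
+  bfd *archive = bfd_openr (path, NULL);
+  bfd *element = NULL;
+
+  if (archive == NULL)
+    return;
+  if (bfd_check_format (archive, bfd_archive))
+    while ((element = bfd_openr_next_archived_file (archive, element)) != NULL)
+      printf ("%s\n", element->filename);
+  bfd_close (archive);
+}
+#endif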
+
+/* Extracted from corefile.c. */
+const char *bfd_core_file_failing_command (bfd *abfd);
+
+int bfd_core_file_failing_signal (bfd *abfd);
+
+int bfd_core_file_pid (bfd *abfd);
+
+bfd_boolean core_file_matches_executable_p
+ (bfd *core_bfd, bfd *exec_bfd);
+
+bfd_boolean generic_core_file_matches_executable_p
+ (bfd *core_bfd, bfd *exec_bfd);
+
+/* Extracted from targets.c. */
+#define BFD_SEND(bfd, message, arglist) \
+ ((*((bfd)->xvec->message)) arglist)
+
+#ifdef DEBUG_BFD_SEND
+#undef BFD_SEND
+#define BFD_SEND(bfd, message, arglist) \
+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
+ ((*((bfd)->xvec->message)) arglist) : \
+ (bfd_assert (__FILE__,__LINE__), NULL))
+#endif
+#define BFD_SEND_FMT(bfd, message, arglist) \
+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist)
+
+#ifdef DEBUG_BFD_SEND
+#undef BFD_SEND_FMT
+#define BFD_SEND_FMT(bfd, message, arglist) \
+ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
+ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) : \
+ (bfd_assert (__FILE__,__LINE__), NULL))
+#endif
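+
+/* For illustration, not part of the upstream header: with this definition
+   the public wrapper macros defined earlier dispatch through the per-target
+   jump table, e.g.
+
+     bfd_canonicalize_symtab (abfd, location)
+       => BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location))
+       => (*(abfd)->xvec->_bfd_canonicalize_symtab) (abfd, location)
+
+   so each back end supplies its own implementation in its bfd_target
+   structure below. */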
+
+enum bfd_flavour
+{
+ bfd_target_unknown_flavour,
+ bfd_target_aout_flavour,
+ bfd_target_coff_flavour,
+ bfd_target_ecoff_flavour,
+ bfd_target_xcoff_flavour,
+ bfd_target_elf_flavour,
+ bfd_target_ieee_flavour,
+ bfd_target_nlm_flavour,
+ bfd_target_oasys_flavour,
+ bfd_target_tekhex_flavour,
+ bfd_target_srec_flavour,
+ bfd_target_verilog_flavour,
+ bfd_target_ihex_flavour,
+ bfd_target_som_flavour,
+ bfd_target_os9k_flavour,
+ bfd_target_versados_flavour,
+ bfd_target_msdos_flavour,
+ bfd_target_ovax_flavour,
+ bfd_target_evax_flavour,
+ bfd_target_mmo_flavour,
+ bfd_target_mach_o_flavour,
+ bfd_target_pef_flavour,
+ bfd_target_pef_xlib_flavour,
+ bfd_target_sym_flavour
+};
+
+enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN };
+
+/* Forward declaration. */
+typedef struct bfd_link_info _bfd_link_info;
+
+typedef struct bfd_target
+{
+ /* Identifies the kind of target, e.g., SunOS4, Ultrix, etc. */
+ char *name;
+
+ /* The "flavour" of a back end is a general indication about
+ the contents of a file. */
+ enum bfd_flavour flavour;
+
+ /* The order of bytes within the data area of a file. */
+ enum bfd_endian byteorder;
+
+ /* The order of bytes within the header parts of a file. */
+ enum bfd_endian header_byteorder;
+
+ /* A mask of all the flags which an executable may have set -
+ from the set <<BFD_NO_FLAGS>>, <<HAS_RELOC>>, ...<<D_PAGED>>. */
+ flagword object_flags;
+
+ /* A mask of all the flags which a section may have set - from
+ the set <<SEC_NO_FLAGS>>, <<SEC_ALLOC>>, ...<<SEC_NEVER_LOAD>>. */
+ flagword section_flags;
+
+ /* The character normally found at the front of a symbol
+ (if any), perhaps `_'. */
+ char symbol_leading_char;
+
+ /* The pad character for file names within an archive header. */
+ char ar_pad_char;
+
+ /* The maximum number of characters in an archive header. */
+ unsigned short ar_max_namelen;
+
+ /* Entries for byte swapping for data. These are different from the
+ other entry points, since they don't take a BFD as the first argument.
+ Certain other handlers could do the same. */
+ bfd_uint64_t (*bfd_getx64) (const void *);
+ bfd_int64_t (*bfd_getx_signed_64) (const void *);
+ void (*bfd_putx64) (bfd_uint64_t, void *);
+ bfd_vma (*bfd_getx32) (const void *);
+ bfd_signed_vma (*bfd_getx_signed_32) (const void *);
+ void (*bfd_putx32) (bfd_vma, void *);
+ bfd_vma (*bfd_getx16) (const void *);
+ bfd_signed_vma (*bfd_getx_signed_16) (const void *);
+ void (*bfd_putx16) (bfd_vma, void *);
+
+ /* Byte swapping for the headers. */
+ bfd_uint64_t (*bfd_h_getx64) (const void *);
+ bfd_int64_t (*bfd_h_getx_signed_64) (const void *);
+ void (*bfd_h_putx64) (bfd_uint64_t, void *);
+ bfd_vma (*bfd_h_getx32) (const void *);
+ bfd_signed_vma (*bfd_h_getx_signed_32) (const void *);
+ void (*bfd_h_putx32) (bfd_vma, void *);
+ bfd_vma (*bfd_h_getx16) (const void *);
+ bfd_signed_vma (*bfd_h_getx_signed_16) (const void *);
+ void (*bfd_h_putx16) (bfd_vma, void *);
+
+ /* Format dependent routines: these are vectors of entry points
+ within the target vector structure, one for each format to check. */
+
+ /* Check the format of a file being read. Return a <<bfd_target *>> or zero. */
+ const struct bfd_target *(*_bfd_check_format[bfd_type_end]) (bfd *);
+
+ /* Set the format of a file being written. */
+ bfd_boolean (*_bfd_set_format[bfd_type_end]) (bfd *);
+
+ /* Write cached information into a file being written, at <<bfd_close>>. */
+ bfd_boolean (*_bfd_write_contents[bfd_type_end]) (bfd *);
+
+
+ /* Generic entry points. */
+#define BFD_JUMP_TABLE_GENERIC(NAME) \
+ NAME##_close_and_cleanup, \
+ NAME##_bfd_free_cached_info, \
+ NAME##_new_section_hook, \
+ NAME##_get_section_contents, \
+ NAME##_get_section_contents_in_window
+
+ /* Called when the BFD is being closed to do any necessary cleanup. */
+ bfd_boolean (*_close_and_cleanup) (bfd *);
+ /* Ask the BFD to free all cached information. */
+ bfd_boolean (*_bfd_free_cached_info) (bfd *);
+ /* Called when a new section is created. */
+ bfd_boolean (*_new_section_hook) (bfd *, sec_ptr);
+ /* Read the contents of a section. */
+ bfd_boolean (*_bfd_get_section_contents)
+ (bfd *, sec_ptr, void *, file_ptr, bfd_size_type);
+ bfd_boolean (*_bfd_get_section_contents_in_window)
+ (bfd *, sec_ptr, bfd_window *, file_ptr, bfd_size_type);
+
+ /* Entry points to copy private data. */
+#define BFD_JUMP_TABLE_COPY(NAME) \
+ NAME##_bfd_copy_private_bfd_data, \
+ NAME##_bfd_merge_private_bfd_data, \
+ _bfd_generic_init_private_section_data, \
+ NAME##_bfd_copy_private_section_data, \
+ NAME##_bfd_copy_private_symbol_data, \
+ NAME##_bfd_copy_private_header_data, \
+ NAME##_bfd_set_private_flags, \
+ NAME##_bfd_print_private_bfd_data
+
+ /* Called to copy BFD general private data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_bfd_data) (bfd *, bfd *);
+ /* Called to merge BFD general private data from one object file
+ to a common output file when linking. */
+ bfd_boolean (*_bfd_merge_private_bfd_data) (bfd *, bfd *);
+ /* Called to initialize BFD private section data from one object file
+ to another. */
+#define bfd_init_private_section_data(ibfd, isec, obfd, osec, link_info) \
+ BFD_SEND (obfd, _bfd_init_private_section_data, (ibfd, isec, obfd, osec, link_info))
+ bfd_boolean (*_bfd_init_private_section_data)
+ (bfd *, sec_ptr, bfd *, sec_ptr, struct bfd_link_info *);
+ /* Called to copy BFD private section data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_section_data)
+ (bfd *, sec_ptr, bfd *, sec_ptr);
+ /* Called to copy BFD private symbol data from one symbol
+ to another. */
+ bfd_boolean (*_bfd_copy_private_symbol_data)
+ (bfd *, asymbol *, bfd *, asymbol *);
+ /* Called to copy BFD private header data from one object file
+ to another. */
+ bfd_boolean (*_bfd_copy_private_header_data)
+ (bfd *, bfd *);
+ /* Called to set private backend flags. */
+ bfd_boolean (*_bfd_set_private_flags) (bfd *, flagword);
+
+ /* Called to print private BFD data. */
+ bfd_boolean (*_bfd_print_private_bfd_data) (bfd *, void *);
+
+ /* Core file entry points. */
+#define BFD_JUMP_TABLE_CORE(NAME) \
+ NAME##_core_file_failing_command, \
+ NAME##_core_file_failing_signal, \
+ NAME##_core_file_matches_executable_p, \
+ NAME##_core_file_pid
+
+ char * (*_core_file_failing_command) (bfd *);
+ int (*_core_file_failing_signal) (bfd *);
+ bfd_boolean (*_core_file_matches_executable_p) (bfd *, bfd *);
+ int (*_core_file_pid) (bfd *);
+
+ /* Archive entry points. */
+#define BFD_JUMP_TABLE_ARCHIVE(NAME) \
+ NAME##_slurp_armap, \
+ NAME##_slurp_extended_name_table, \
+ NAME##_construct_extended_name_table, \
+ NAME##_truncate_arname, \
+ NAME##_write_armap, \
+ NAME##_read_ar_hdr, \
+ NAME##_write_ar_hdr, \
+ NAME##_openr_next_archived_file, \
+ NAME##_get_elt_at_index, \
+ NAME##_generic_stat_arch_elt, \
+ NAME##_update_armap_timestamp
+
+ bfd_boolean (*_bfd_slurp_armap) (bfd *);
+ bfd_boolean (*_bfd_slurp_extended_name_table) (bfd *);
+ bfd_boolean (*_bfd_construct_extended_name_table)
+ (bfd *, char **, bfd_size_type *, const char **);
+ void (*_bfd_truncate_arname) (bfd *, const char *, char *);
+ bfd_boolean (*write_armap)
+ (bfd *, unsigned int, struct orl *, unsigned int, int);
+ void * (*_bfd_read_ar_hdr_fn) (bfd *);
+ bfd_boolean (*_bfd_write_ar_hdr_fn) (bfd *, bfd *);
+ bfd * (*openr_next_archived_file) (bfd *, bfd *);
+#define bfd_get_elt_at_index(b,i) BFD_SEND (b, _bfd_get_elt_at_index, (b,i))
+ bfd * (*_bfd_get_elt_at_index) (bfd *, symindex);
+ int (*_bfd_stat_arch_elt) (bfd *, struct stat *);
+ bfd_boolean (*_bfd_update_armap_timestamp) (bfd *);
+
+ /* Entry points used for symbols. */
+#define BFD_JUMP_TABLE_SYMBOLS(NAME) \
+ NAME##_get_symtab_upper_bound, \
+ NAME##_canonicalize_symtab, \
+ NAME##_make_empty_symbol, \
+ NAME##_print_symbol, \
+ NAME##_get_symbol_info, \
+ NAME##_bfd_is_local_label_name, \
+ NAME##_bfd_is_target_special_symbol, \
+ NAME##_get_lineno, \
+ NAME##_find_nearest_line, \
+ _bfd_generic_find_line, \
+ NAME##_find_inliner_info, \
+ NAME##_bfd_make_debug_symbol, \
+ NAME##_read_minisymbols, \
+ NAME##_minisymbol_to_symbol
+
+ long (*_bfd_get_symtab_upper_bound) (bfd *);
+ long (*_bfd_canonicalize_symtab)
+ (bfd *, struct bfd_symbol **);
+ struct bfd_symbol *
+ (*_bfd_make_empty_symbol) (bfd *);
+ void (*_bfd_print_symbol)
+ (bfd *, void *, struct bfd_symbol *, bfd_print_symbol_type);
+#define bfd_print_symbol(b,p,s,e) BFD_SEND (b, _bfd_print_symbol, (b,p,s,e))
+ void (*_bfd_get_symbol_info)
+ (bfd *, struct bfd_symbol *, symbol_info *);
+#define bfd_get_symbol_info(b,p,e) BFD_SEND (b, _bfd_get_symbol_info, (b,p,e))
+ bfd_boolean (*_bfd_is_local_label_name) (bfd *, const char *);
+ bfd_boolean (*_bfd_is_target_special_symbol) (bfd *, asymbol *);
+ alent * (*_get_lineno) (bfd *, struct bfd_symbol *);
+ bfd_boolean (*_bfd_find_nearest_line)
+ (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma,
+ const char **, const char **, unsigned int *);
+ bfd_boolean (*_bfd_find_line)
+ (bfd *, struct bfd_symbol **, struct bfd_symbol *,
+ const char **, unsigned int *);
+ bfd_boolean (*_bfd_find_inliner_info)
+ (bfd *, const char **, const char **, unsigned int *);
+ /* Back-door to allow format-aware applications to create debug symbols
+ while using BFD for everything else. Currently used by the assembler
+ when creating COFF files. */
+ asymbol * (*_bfd_make_debug_symbol)
+ (bfd *, void *, unsigned long size);
+#define bfd_read_minisymbols(b, d, m, s) \
+ BFD_SEND (b, _read_minisymbols, (b, d, m, s))
+ long (*_read_minisymbols)
+ (bfd *, bfd_boolean, void **, unsigned int *);
+#define bfd_minisymbol_to_symbol(b, d, m, f) \
+ BFD_SEND (b, _minisymbol_to_symbol, (b, d, m, f))
+ asymbol * (*_minisymbol_to_symbol)
+ (bfd *, bfd_boolean, const void *, asymbol *);
+
+ /* Routines for relocs. */
+#define BFD_JUMP_TABLE_RELOCS(NAME) \
+ NAME##_get_reloc_upper_bound, \
+ NAME##_canonicalize_reloc, \
+ NAME##_bfd_reloc_type_lookup, \
+ NAME##_bfd_reloc_name_lookup
+
+ long (*_get_reloc_upper_bound) (bfd *, sec_ptr);
+ long (*_bfd_canonicalize_reloc)
+ (bfd *, sec_ptr, arelent **, struct bfd_symbol **);
+ /* See documentation on reloc types. */
+ reloc_howto_type *
+ (*reloc_type_lookup) (bfd *, bfd_reloc_code_real_type);
+ reloc_howto_type *
+ (*reloc_name_lookup) (bfd *, const char *);
+
+
+ /* Routines used when writing an object file. */
+#define BFD_JUMP_TABLE_WRITE(NAME) \
+ NAME##_set_arch_mach, \
+ NAME##_set_section_contents
+
+ bfd_boolean (*_bfd_set_arch_mach)
+ (bfd *, enum bfd_architecture, unsigned long);
+ bfd_boolean (*_bfd_set_section_contents)
+ (bfd *, sec_ptr, const void *, file_ptr, bfd_size_type);
+
+ /* Routines used by the linker. */
+#define BFD_JUMP_TABLE_LINK(NAME) \
+ NAME##_sizeof_headers, \
+ NAME##_bfd_get_relocated_section_contents, \
+ NAME##_bfd_relax_section, \
+ NAME##_bfd_link_hash_table_create, \
+ NAME##_bfd_link_hash_table_free, \
+ NAME##_bfd_link_add_symbols, \
+ NAME##_bfd_link_just_syms, \
+ NAME##_bfd_copy_link_hash_symbol_type, \
+ NAME##_bfd_final_link, \
+ NAME##_bfd_link_split_section, \
+ NAME##_bfd_gc_sections, \
+ NAME##_bfd_merge_sections, \
+ NAME##_bfd_is_group_section, \
+ NAME##_bfd_discard_group, \
+ NAME##_section_already_linked, \
+ NAME##_bfd_define_common_symbol
+
+ int (*_bfd_sizeof_headers) (bfd *, struct bfd_link_info *);
+ bfd_byte * (*_bfd_get_relocated_section_contents)
+ (bfd *, struct bfd_link_info *, struct bfd_link_order *,
+ bfd_byte *, bfd_boolean, struct bfd_symbol **);
+
+ bfd_boolean (*_bfd_relax_section)
+ (bfd *, struct bfd_section *, struct bfd_link_info *, bfd_boolean *);
+
+ /* Create a hash table for the linker. Different backends store
+ different information in this table. */
+ struct bfd_link_hash_table *
+ (*_bfd_link_hash_table_create) (bfd *);
+
+ /* Release the memory associated with the linker hash table. */
+ void (*_bfd_link_hash_table_free) (struct bfd_link_hash_table *);
+
+ /* Add symbols from this object file into the hash table. */
+ bfd_boolean (*_bfd_link_add_symbols) (bfd *, struct bfd_link_info *);
+
+ /* Indicate that we are only retrieving symbol values from this section. */
+ void (*_bfd_link_just_syms) (asection *, struct bfd_link_info *);
+
+ /* Copy the symbol type of a linker hash table entry. */
+#define bfd_copy_link_hash_symbol_type(b, t, f) \
+ BFD_SEND (b, _bfd_copy_link_hash_symbol_type, (b, t, f))
+ void (*_bfd_copy_link_hash_symbol_type)
+ (bfd *, struct bfd_link_hash_entry *, struct bfd_link_hash_entry *);
+
+ /* Do a link based on the link_order structures attached to each
+ section of the BFD. */
+ bfd_boolean (*_bfd_final_link) (bfd *, struct bfd_link_info *);
+
+ /* Should this section be split up into smaller pieces during linking? */
+ bfd_boolean (*_bfd_link_split_section) (bfd *, struct bfd_section *);
+
+ /* Remove sections that are not referenced from the output. */
+ bfd_boolean (*_bfd_gc_sections) (bfd *, struct bfd_link_info *);
+
+ /* Attempt to merge SEC_MERGE sections. */
+ bfd_boolean (*_bfd_merge_sections) (bfd *, struct bfd_link_info *);
+
+ /* Is this section a member of a group? */
+ bfd_boolean (*_bfd_is_group_section) (bfd *, const struct bfd_section *);
+
+ /* Discard members of a group. */
+ bfd_boolean (*_bfd_discard_group) (bfd *, struct bfd_section *);
+
+ /* Check if SEC has already been linked during a relocatable or
+ final link. */
+ void (*_section_already_linked) (bfd *, struct bfd_section *,
+ struct bfd_link_info *);
+
+ /* Define a common symbol. */
+ bfd_boolean (*_bfd_define_common_symbol) (bfd *, struct bfd_link_info *,
+ struct bfd_link_hash_entry *);
+
+ /* Routines to handle dynamic symbols and relocs. */
+#define BFD_JUMP_TABLE_DYNAMIC(NAME) \
+ NAME##_get_dynamic_symtab_upper_bound, \
+ NAME##_canonicalize_dynamic_symtab, \
+ NAME##_get_synthetic_symtab, \
+ NAME##_get_dynamic_reloc_upper_bound, \
+ NAME##_canonicalize_dynamic_reloc
+
+ /* Get the amount of memory required to hold the dynamic symbols. */
+ long (*_bfd_get_dynamic_symtab_upper_bound) (bfd *);
+ /* Read in the dynamic symbols. */
+ long (*_bfd_canonicalize_dynamic_symtab)
+ (bfd *, struct bfd_symbol **);
+ /* Create synthesized symbols. */
+ long (*_bfd_get_synthetic_symtab)
+ (bfd *, long, struct bfd_symbol **, long, struct bfd_symbol **,
+ struct bfd_symbol **);
+ /* Get the amount of memory required to hold the dynamic relocs. */
+ long (*_bfd_get_dynamic_reloc_upper_bound) (bfd *);
+ /* Read in the dynamic relocs. */
+ long (*_bfd_canonicalize_dynamic_reloc)
+ (bfd *, arelent **, struct bfd_symbol **);
+
+ /* Opposite endian version of this target. */
+ const struct bfd_target * alternative_target;
+
+ /* Data for use by back-end routines, which isn't
+ generic enough to belong in this structure. */
+ const void *backend_data;
+
+} bfd_target;
+
+bfd_boolean bfd_set_default_target (const char *name);
+
+const bfd_target *bfd_find_target (const char *target_name, bfd *abfd);
+
+const bfd_target *bfd_get_target_info (const char *target_name,
+ bfd *abfd,
+ bfd_boolean *is_bigendian,
+ int *underscoring,
+ const char **def_target_arch);
+const char ** bfd_target_list (void);
+
+const bfd_target *bfd_search_for_target
+ (int (*search_func) (const bfd_target *, void *),
+ void *);
+
+/* Extracted from format.c. */
+bfd_boolean bfd_check_format (bfd *abfd, bfd_format format);
+
+bfd_boolean bfd_check_format_matches
+ (bfd *abfd, bfd_format format, char ***matching);
+
+bfd_boolean bfd_set_format (bfd *abfd, bfd_format format);
+
+const char *bfd_format_string (bfd_format format);
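+
+/* Illustrative sketch, not part of the upstream header, of the usual
+   open-then-check idiom for object files. Assumes bfd_openr, bfd_perror
+   and bfd_close declared elsewhere in this header; guarded out. */
+#if 0
+static bfd *
+example_open_object (const char *path)
+{
+  bfd *abfd = bfd_openr (path, NULL);
+
+  if (abfd == NULL)
+    return NULL;
+  if (!bfd_check_format (abfd, bfd_object))
+    {
+      bfd_perror (path);
+      bfd_close (abfd);
+      return NULL;
+    }
+  return abfd;
+}
+#endif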
+
+/* Extracted from linker.c. */
+bfd_boolean bfd_link_split_section (bfd *abfd, asection *sec);
+
+#define bfd_link_split_section(abfd, sec) \
+ BFD_SEND (abfd, _bfd_link_split_section, (abfd, sec))
+
+void bfd_section_already_linked (bfd *abfd, asection *sec,
+ struct bfd_link_info *info);
+
+#define bfd_section_already_linked(abfd, sec, info) \
+ BFD_SEND (abfd, _section_already_linked, (abfd, sec, info))
+
+bfd_boolean bfd_generic_define_common_symbol
+ (bfd *output_bfd, struct bfd_link_info *info,
+ struct bfd_link_hash_entry *h);
+
+#define bfd_define_common_symbol(output_bfd, info, h) \
+ BFD_SEND (output_bfd, _bfd_define_common_symbol, (output_bfd, info, h))
+
+struct bfd_elf_version_tree * bfd_find_version_for_sym
+ (struct bfd_elf_version_tree *verdefs,
+ const char *sym_name, bfd_boolean *hide);
+
+/* Extracted from simple.c. */
+bfd_byte *bfd_simple_get_relocated_section_contents
+ (bfd *abfd, asection *sec, bfd_byte *outbuf, asymbol **symbol_table);
+
+/* Extracted from compress.c. */
+bfd_boolean bfd_uncompress_section_contents
+ (bfd_byte **buffer, bfd_size_type *size);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/tools/perf/external/usr/include/bfdlink.h b/tools/perf/external/usr/include/bfdlink.h
new file mode 100644
index 00000000..0d6e9f8a
--- /dev/null
+++ b/tools/perf/external/usr/include/bfdlink.h
@@ -0,0 +1,786 @@
+/* bfdlink.h -- header file for BFD link routines
+ Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+ 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+ Written by Steve Chamberlain and Ian Lance Taylor, Cygnus Support.
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+#ifndef BFDLINK_H
+#define BFDLINK_H
+
+/* Which symbols to strip during a link. */
+enum bfd_link_strip
+{
+ strip_none, /* Don't strip any symbols. */
+ strip_debugger, /* Strip debugging symbols. */
+ strip_some, /* keep_hash is the list of symbols to keep. */
+ strip_all /* Strip all symbols. */
+};
+
+/* Which local symbols to discard during a link. This is irrelevant
+ if strip_all is used. */
+enum bfd_link_discard
+{
+ discard_sec_merge, /* Discard local temporary symbols in SEC_MERGE
+ sections. */
+ discard_none, /* Don't discard any locals. */
+ discard_l, /* Discard local temporary symbols. */
+ discard_all /* Discard all locals. */
+};
+
+/* Describes the type of hash table entry structure being used.
+ Different hash table structures have different fields and so
+ support different linking features. */
+enum bfd_link_hash_table_type
+ {
+ bfd_link_generic_hash_table,
+ bfd_link_elf_hash_table
+ };
+
+/* These are the possible types of an entry in the BFD link hash
+ table. */
+
+enum bfd_link_hash_type
+{
+ bfd_link_hash_new, /* Symbol is new. */
+ bfd_link_hash_undefined, /* Symbol seen before, but undefined. */
+ bfd_link_hash_undefweak, /* Symbol is weak and undefined. */
+ bfd_link_hash_defined, /* Symbol is defined. */
+ bfd_link_hash_defweak, /* Symbol is weak and defined. */
+ bfd_link_hash_common, /* Symbol is common. */
+ bfd_link_hash_indirect, /* Symbol is an indirect link. */
+ bfd_link_hash_warning /* Like indirect, but warn if referenced. */
+};
+
+enum bfd_link_common_skip_ar_symbols
+{
+ bfd_link_common_skip_none,
+ bfd_link_common_skip_text,
+ bfd_link_common_skip_data,
+ bfd_link_common_skip_all
+};
+
+struct bfd_link_hash_common_entry
+ {
+ unsigned int alignment_power; /* Alignment. */
+ asection *section; /* Symbol section. */
+ };
+
+/* The linking routines use a hash table which uses this structure for
+ its elements. */
+
+struct bfd_link_hash_entry
+{
+ /* Base hash table entry structure. */
+ struct bfd_hash_entry root;
+
+ /* Type of this entry. */
+ enum bfd_link_hash_type type;
+
+ /* A union of information depending upon the type. */
+ union
+ {
+ /* Nothing is kept for bfd_hash_new. */
+ /* bfd_link_hash_undefined, bfd_link_hash_undefweak. */
+ struct
+ {
+ /* Undefined and common symbols are kept in a linked list through
+ this field. This field is present in all of the union elements
+ so that we don't need to remove entries from the list when we
+ change their type. Removing entries would either require the
+ list to be doubly linked, which would waste more memory, or
+ require a traversal. When an undefined or common symbol is
+ created, it should be added to this list, the head of which is in
+ the link hash table itself. As symbols are defined, they need
+ not be removed from the list; anything which reads the list must
+ doublecheck the symbol type.
+
+ Weak symbols are not kept on this list.
+
+ Defined and defweak symbols use this field as a reference marker.
+ If the field is not NULL, or this structure is the tail of the
+ undefined symbol list, the symbol has been referenced. If the
+ symbol is undefined and becomes defined, this field will
+ automatically be non-NULL since the symbol will have been on the
+ undefined symbol list. */
+ struct bfd_link_hash_entry *next;
+ bfd *abfd; /* BFD symbol was found in. */
+ bfd *weak; /* BFD weak symbol was found in. */
+ } undef;
+ /* bfd_link_hash_defined, bfd_link_hash_defweak. */
+ struct
+ {
+ struct bfd_link_hash_entry *next;
+ asection *section; /* Symbol section. */
+ bfd_vma value; /* Symbol value. */
+ } def;
+ /* bfd_link_hash_indirect, bfd_link_hash_warning. */
+ struct
+ {
+ struct bfd_link_hash_entry *next;
+ struct bfd_link_hash_entry *link; /* Real symbol. */
+ const char *warning; /* Warning (bfd_link_hash_warning only). */
+ } i;
+ /* bfd_link_hash_common. */
+ struct
+ {
+ struct bfd_link_hash_entry *next;
+ /* The linker needs to know three things about common
+ symbols: the size, the alignment, and the section in
+ which the symbol should be placed. We store the size
+ here, and we allocate a small structure to hold the
+ section and the alignment. The alignment is stored as a
+ power of two. We don't store all the information
+ directly because we don't want to increase the size of
+ the union; this structure is a major space user in the
+ linker. */
+ struct bfd_link_hash_common_entry *p;
+ bfd_size_type size; /* Common symbol size. */
+ } c;
+ } u;
+};
+
+/* This is the link hash table. It is a derived class of
+ bfd_hash_table. */
+
+struct bfd_link_hash_table
+{
+ /* The hash table itself. */
+ struct bfd_hash_table table;
+ /* A linked list of undefined and common symbols, linked through the
+ next field in the bfd_link_hash_entry structure. */
+ struct bfd_link_hash_entry *undefs;
+ /* Entries are added to the tail of the undefs list. */
+ struct bfd_link_hash_entry *undefs_tail;
+ /* The type of the link hash table. */
+ enum bfd_link_hash_table_type type;
+};
+
+/* Look up an entry in a link hash table. If FOLLOW is TRUE, this
+ follows bfd_link_hash_indirect and bfd_link_hash_warning links to
+ the real symbol. */
+extern struct bfd_link_hash_entry *bfd_link_hash_lookup
+ (struct bfd_link_hash_table *, const char *, bfd_boolean create,
+ bfd_boolean copy, bfd_boolean follow);
+
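+/* Illustrative sketch, not part of the upstream header: checks whether
+   NAME already has a definition, looking the symbol up without creating
+   or copying the entry and following indirect/warning links. Guarded
+   out. */
+#if 0
+static bfd_boolean
+example_symbol_is_defined (struct bfd_link_hash_table *table,
+                           const char *name)
+{
+  struct bfd_link_hash_entry *h
+    = bfd_link_hash_lookup (table, name, FALSE, FALSE, TRUE);
+
+  return (h != NULL
+          && (h->type == bfd_link_hash_defined
+              || h->type == bfd_link_hash_defweak));
+}
+#endif
+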
+/* Look up an entry in the main linker hash table if the symbol might
+ be wrapped. This should only be used for references to an
+ undefined symbol, not for definitions of a symbol. */
+
+extern struct bfd_link_hash_entry *bfd_wrapped_link_hash_lookup
+ (bfd *, struct bfd_link_info *, const char *, bfd_boolean,
+ bfd_boolean, bfd_boolean);
+
+/* Traverse a link hash table. */
+extern void bfd_link_hash_traverse
+ (struct bfd_link_hash_table *,
+ bfd_boolean (*) (struct bfd_link_hash_entry *, void *),
+ void *);
+
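+/* Illustrative traversal-callback sketch, not part of the upstream header.
+   The callback returns TRUE to keep traversing; DATA is assumed to point
+   to an unsigned int counter. Guarded out. */
+#if 0
+static bfd_boolean
+example_count_undefined (struct bfd_link_hash_entry *h, void *data)
+{
+  if (h->type == bfd_link_hash_undefined)
+    ++*(unsigned int *) data;
+  return TRUE;
+}
+
+/* Typical call:
+     unsigned int n = 0;
+     bfd_link_hash_traverse (table, example_count_undefined, &n); */
+#endif
+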
+/* Add an entry to the undefs list. */
+extern void bfd_link_add_undef
+ (struct bfd_link_hash_table *, struct bfd_link_hash_entry *);
+
+/* Remove symbols from the undefs list that don't belong there. */
+extern void bfd_link_repair_undef_list
+ (struct bfd_link_hash_table *table);
+
+/* Read symbols and cache symbol pointer array in outsymbols. */
+extern bfd_boolean bfd_generic_link_read_symbols (bfd *);
+
+struct bfd_sym_chain
+{
+ struct bfd_sym_chain *next;
+ const char *name;
+};
+
+/* How to handle unresolved symbols.
+ There are four possibilities which are enumerated below: */
+enum report_method
+{
+ /* This is the initial value when the link_info structure is created.
+ It allows the various stages of the linker to determine whether they
+ are allowed to set the value. */
+ RM_NOT_YET_SET = 0,
+ RM_IGNORE,
+ RM_GENERATE_WARNING,
+ RM_GENERATE_ERROR
+};
+
+struct bfd_elf_dynamic_list;
+
+/* This structure holds all the information needed to communicate
+ between BFD and the linker when doing a link. */
+
+struct bfd_link_info
+{
+ /* TRUE if BFD should generate a relocatable object file. */
+ unsigned int relocatable: 1;
+
+ /* TRUE if BFD should generate relocation information in the final
+ executable. */
+ unsigned int emitrelocations: 1;
+
+ /* TRUE if BFD should generate a "task linked" object file,
+ similar to relocatable but also with globals converted to
+ statics. */
+ unsigned int task_link: 1;
+
+ /* TRUE if BFD should generate a shared object. */
+ unsigned int shared: 1;
+
+ /* TRUE if BFD should pre-bind symbols in a shared object. */
+ unsigned int symbolic: 1;
+
+ /* TRUE if BFD should export all symbols in the dynamic symbol table
+ of an executable, rather than only those used. */
+ unsigned int export_dynamic: 1;
+
+ /* TRUE if shared objects should be linked directly, not shared. */
+ unsigned int static_link: 1;
+
+ /* TRUE if the output file should be in a traditional format. This
+ is equivalent to the setting of the BFD_TRADITIONAL_FORMAT flag
+ on the output file, but may be checked when reading the input
+ files. */
+ unsigned int traditional_format: 1;
+
+ /* TRUE if we want to produce optimized output files. This might
+ need much more time and therefore must be explicitly selected. */
+ unsigned int optimize: 1;
+
+ /* TRUE if ok to have multiple definitions. */
+ unsigned int allow_multiple_definition: 1;
+
+ /* TRUE if ok to have version with no definition. */
+ unsigned int allow_undefined_version: 1;
+
+ /* TRUE if a default symbol version should be created and used for
+ exported symbols. */
+ unsigned int create_default_symver: 1;
+
+ /* TRUE if a default symbol version should be created and used for
+ imported symbols. */
+ unsigned int default_imported_symver: 1;
+
+ /* TRUE if symbols should be retained in memory, FALSE if they
+ should be freed and reread. */
+ unsigned int keep_memory: 1;
+
+ /* TRUE if every symbol should be reported back via the notice
+ callback. */
+ unsigned int notice_all: 1;
+
+ /* TRUE if executable should not contain copy relocs.
+ Setting this true may result in a non-sharable text segment. */
+ unsigned int nocopyreloc: 1;
+
+ /* TRUE if the new ELF dynamic tags are enabled. */
+ unsigned int new_dtags: 1;
+
+ /* TRUE if non-PLT relocs should be merged into one reloc section
+ and sorted so that relocs against the same symbol come together. */
+ unsigned int combreloc: 1;
+
+ /* TRUE if .eh_frame_hdr section and PT_GNU_EH_FRAME ELF segment
+ should be created. */
+ unsigned int eh_frame_hdr: 1;
+
+ /* TRUE if global symbols in discarded sections should be stripped. */
+ unsigned int strip_discarded: 1;
+
+ /* TRUE if generating a position independent executable. */
+ unsigned int pie: 1;
+
+ /* TRUE if generating an executable, position independent or not. */
+ unsigned int executable : 1;
+
+ /* TRUE if PT_GNU_STACK segment should be created with PF_R|PF_W|PF_X
+ flags. */
+ unsigned int execstack: 1;
+
+ /* TRUE if PT_GNU_STACK segment should be created with PF_R|PF_W
+ flags. */
+ unsigned int noexecstack: 1;
+
+ /* TRUE if PT_GNU_RELRO segment should be created. */
+ unsigned int relro: 1;
+
+ /* TRUE if we should warn when adding a DT_TEXTREL to a shared object. */
+ unsigned int warn_shared_textrel: 1;
+
+ /* TRUE if we should warn about alternate ELF machine code. */
+ unsigned int warn_alternate_em: 1;
+
+ /* TRUE if unreferenced sections should be removed. */
+ unsigned int gc_sections: 1;
+
+ /* TRUE if the user should be informed of removed unreferenced sections. */
+ unsigned int print_gc_sections: 1;
+
+ /* TRUE if .hash section should be created. */
+ unsigned int emit_hash: 1;
+
+ /* TRUE if .gnu.hash section should be created. */
+ unsigned int emit_gnu_hash: 1;
+
+ /* If TRUE reduce memory overheads, at the expense of speed. This will
+ cause map file generation to use an O(N^2) algorithm and disable
+ caching of the ELF symbol buffer. */
+ unsigned int reduce_memory_overheads: 1;
+
+ /* TRUE if all data symbols should be dynamic. */
+ unsigned int dynamic_data: 1;
+
+ /* TRUE if some symbols have to be dynamic, controlled by
+ --dynamic-list command line options. */
+ unsigned int dynamic: 1;
+
+ /* Non-NULL if .note.gnu.build-id section should be created. */
+ char *emit_note_gnu_build_id;
+
+ /* What to do with unresolved symbols in an object file.
+ When producing executables the default is GENERATE_ERROR.
+ When producing shared libraries the default is IGNORE. The
+ assumption with shared libraries is that the reference will be
+ resolved at load/execution time. */
+ enum report_method unresolved_syms_in_objects;
+
+ /* What to do with unresolved symbols in a shared library.
+ The same defaults apply. */
+ enum report_method unresolved_syms_in_shared_libs;
+
+ /* Which symbols to strip. */
+ enum bfd_link_strip strip;
+
+ /* Which local symbols to discard. */
+ enum bfd_link_discard discard;
+
+ /* Criteria for skipping symbols when determining
+ whether to include an object from an archive. */
+ enum bfd_link_common_skip_ar_symbols common_skip_ar_symbols;
+
+ /* Char that may appear as the first char of a symbol, but should be
+ skipped (like symbol_leading_char) when looking up symbols in
+ wrap_hash. Used by PowerPC Linux for 'dot' symbols. */
+ char wrap_char;
+
+ /* Separator between archive and filename in linker script filespecs. */
+ char path_separator;
+
+ /* Function callbacks. */
+ const struct bfd_link_callbacks *callbacks;
+
+ /* Hash table handled by BFD. */
+ struct bfd_link_hash_table *hash;
+
+ /* Hash table of symbols to keep. This is NULL unless strip is
+ strip_some. */
+ struct bfd_hash_table *keep_hash;
+
+ /* Hash table of symbols to report back via the notice callback. If
+ this is NULL, and notice_all is FALSE, then no symbols are
+ reported back. */
+ struct bfd_hash_table *notice_hash;
+
+ /* Hash table of symbols which are being wrapped (the --wrap linker
+ option). If this is NULL, no symbols are being wrapped. */
+ struct bfd_hash_table *wrap_hash;
+
+ /* The output BFD. */
+ bfd *output_bfd;
+
+ /* The list of input BFD's involved in the link. These are chained
+ together via the link_next field. */
+ bfd *input_bfds;
+ bfd **input_bfds_tail;
+
+ /* If a symbol should be created for each input BFD, this is the section
+ where those symbols should be placed. It must be a section in
+ the output BFD. It may be NULL, in which case no such symbols
+ will be created. This is to support CREATE_OBJECT_SYMBOLS in the
+ linker command language. */
+ asection *create_object_symbols_section;
+
+ /* List of global symbol names that are starting points for marking
+ sections against garbage collection. */
+ struct bfd_sym_chain *gc_sym_list;
+
+ /* If a base output file is wanted, then this points to it. */
+ void *base_file;
+
+ /* The function to call when the executable or shared object is
+ loaded. */
+ const char *init_function;
+
+ /* The function to call when the executable or shared object is
+ unloaded. */
+ const char *fini_function;
+
+ /* Number of relaxation passes. Usually only one relaxation pass
+ is needed. But a backend can have as many relaxation passes as
+ necessary. During bfd_relax_section call, it is set to the
+ current pass, starting from 0. */
+ int relax_pass;
+
+ /* Number of relaxation trips. This number is incremented every
+ time the relaxation pass is restarted due to a previous
+ relaxation returning true in *AGAIN. */
+ int relax_trip;
+
+ /* Non-zero if auto-import thunks for DATA items in pei386 DLLs
+ should be generated/linked against. Set to 1 if this feature
+ is explicitly requested by the user, -1 if enabled by default. */
+ int pei386_auto_import;
+
+ /* Non-zero if runtime relocs for DATA items with non-zero addends
+ in pei386 DLLs should be generated. Set to 1 if this feature
+ is explicitly requested by the user, -1 if enabled by default. */
+ int pei386_runtime_pseudo_reloc;
+
+ /* How many spare .dynamic DT_NULL entries should be added? */
+ unsigned int spare_dynamic_tags;
+
+ /* May be used to set DT_FLAGS for ELF. */
+ bfd_vma flags;
+
+ /* May be used to set DT_FLAGS_1 for ELF. */
+ bfd_vma flags_1;
+
+ /* Start and end of RELRO region. */
+ bfd_vma relro_start, relro_end;
+
+ /* List of symbols that should be dynamic. */
+ struct bfd_elf_dynamic_list *dynamic_list;
+};
+
+/* This structure holds a set of callback functions. These are called
+ by the BFD linker routines. Except for the info functions, the first
+ argument to each callback function is the bfd_link_info structure
+ being used and each function returns a boolean value. If the
+ function returns FALSE, then the BFD function which called it should
+ return with a failure indication. */
+
+struct bfd_link_callbacks
+{
+ /* A function which is called when an object is added from an
+ archive. ABFD is the archive element being added. NAME is the
+ name of the symbol which caused the archive element to be pulled
+ in. This function may set *SUBSBFD to point to an alternative
+ BFD from which symbols should in fact be added in place of the
+ original BFD's symbols. */
+ bfd_boolean (*add_archive_element)
+ (struct bfd_link_info *, bfd *abfd, const char *name, bfd **subsbfd);
+ /* A function which is called when a symbol is found with multiple
+ definitions. NAME is the symbol which is defined multiple times.
+ OBFD is the old BFD, OSEC is the old section, OVAL is the old
+ value, NBFD is the new BFD, NSEC is the new section, and NVAL is
+ the new value. OBFD may be NULL. OSEC and NSEC may be
+ bfd_com_section or bfd_ind_section. */
+ bfd_boolean (*multiple_definition)
+ (struct bfd_link_info *, const char *name,
+ bfd *obfd, asection *osec, bfd_vma oval,
+ bfd *nbfd, asection *nsec, bfd_vma nval);
+ /* A function which is called when a common symbol is defined
+ multiple times. NAME is the symbol appearing multiple times.
+ OBFD is the BFD of the existing symbol; it may be NULL if this is
+ not known. OTYPE is the type of the existing symbol, which may
+ be bfd_link_hash_defined, bfd_link_hash_defweak,
+ bfd_link_hash_common, or bfd_link_hash_indirect. If OTYPE is
+ bfd_link_hash_common, OSIZE is the size of the existing symbol.
+ NBFD is the BFD of the new symbol. NTYPE is the type of the new
+ symbol, one of bfd_link_hash_defined, bfd_link_hash_common, or
+ bfd_link_hash_indirect. If NTYPE is bfd_link_hash_common, NSIZE
+ is the size of the new symbol. */
+ bfd_boolean (*multiple_common)
+ (struct bfd_link_info *, const char *name,
+ bfd *obfd, enum bfd_link_hash_type otype, bfd_vma osize,
+ bfd *nbfd, enum bfd_link_hash_type ntype, bfd_vma nsize);
+ /* A function which is called to add a symbol to a set. ENTRY is
+ the link hash table entry for the set itself (e.g.,
+ __CTOR_LIST__). RELOC is the relocation to use for an entry in
+ the set when generating a relocatable file, and is also used to
+ get the size of the entry when generating an executable file.
+ ABFD, SEC and VALUE identify the value to add to the set. */
+ bfd_boolean (*add_to_set)
+ (struct bfd_link_info *, struct bfd_link_hash_entry *entry,
+ bfd_reloc_code_real_type reloc, bfd *abfd, asection *sec, bfd_vma value);
+ /* A function which is called when the name of a g++ constructor or
+ destructor is found. This is only called by some object file
+ formats. CONSTRUCTOR is TRUE for a constructor, FALSE for a
+ destructor. This will use BFD_RELOC_CTOR when generating a
+ relocatable file. NAME is the name of the symbol found. ABFD,
+ SECTION and VALUE are the value of the symbol. */
+ bfd_boolean (*constructor)
+ (struct bfd_link_info *, bfd_boolean constructor, const char *name,
+ bfd *abfd, asection *sec, bfd_vma value);
+ /* A function which is called to issue a linker warning. For
+ example, this is called when there is a reference to a warning
+ symbol. WARNING is the warning to be issued. SYMBOL is the name
+ of the symbol which triggered the warning; it may be NULL if
+ there is none. ABFD, SECTION and ADDRESS identify the location
+ which triggered the warning; either ABFD or SECTION or both may
+ be NULL if the location is not known. */
+ bfd_boolean (*warning)
+ (struct bfd_link_info *, const char *warning, const char *symbol,
+ bfd *abfd, asection *section, bfd_vma address);
+ /* A function which is called when a relocation is attempted against
+ an undefined symbol. NAME is the symbol which is undefined.
+ ABFD, SECTION and ADDRESS identify the location from which the
+ reference is made. IS_FATAL indicates whether an undefined symbol is
+ a fatal error or not. In some cases SECTION may be NULL. */
+ bfd_boolean (*undefined_symbol)
+ (struct bfd_link_info *, const char *name, bfd *abfd,
+ asection *section, bfd_vma address, bfd_boolean is_fatal);
+ /* A function which is called when a reloc overflow occurs. ENTRY is
+ the link hash table entry for the symbol the reloc is against.
+ NAME is the name of the local symbol or section the reloc is
+ against, RELOC_NAME is the name of the relocation, and ADDEND is
+ any addend that is used. ABFD, SECTION and ADDRESS identify the
+ location at which the overflow occurs; if this is the result of a
+ bfd_section_reloc_link_order or bfd_symbol_reloc_link_order, then
+ ABFD will be NULL. */
+ bfd_boolean (*reloc_overflow)
+ (struct bfd_link_info *, struct bfd_link_hash_entry *entry,
+ const char *name, const char *reloc_name, bfd_vma addend,
+ bfd *abfd, asection *section, bfd_vma address);
+ /* A function which is called when a dangerous reloc is performed.
+ MESSAGE is an appropriate message.
+ ABFD, SECTION and ADDRESS identify the location at which the
+ problem occurred; if this is the result of a
+ bfd_section_reloc_link_order or bfd_symbol_reloc_link_order, then
+ ABFD will be NULL. */
+ bfd_boolean (*reloc_dangerous)
+ (struct bfd_link_info *, const char *message,
+ bfd *abfd, asection *section, bfd_vma address);
+ /* A function which is called when a reloc is found to be attached
+ to a symbol which is not being written out. NAME is the name of
+ the symbol. ABFD, SECTION and ADDRESS identify the location of
+ the reloc; if this is the result of a
+ bfd_section_reloc_link_order or bfd_symbol_reloc_link_order, then
+ ABFD will be NULL. */
+ bfd_boolean (*unattached_reloc)
+ (struct bfd_link_info *, const char *name,
+ bfd *abfd, asection *section, bfd_vma address);
+ /* A function which is called when a symbol in notice_hash is
+ defined or referenced. NAME is the symbol. ABFD, SECTION and
+ ADDRESS are the value of the symbol. If SECTION is
+ bfd_und_section, this is a reference. */
+ bfd_boolean (*notice)
+ (struct bfd_link_info *, const char *name,
+ bfd *abfd, asection *section, bfd_vma address);
+ /* Error or warning link info message. */
+ void (*einfo)
+ (const char *fmt, ...);
+ /* General link info message. */
+ void (*info)
+ (const char *fmt, ...);
+ /* Message to be printed in linker map file. */
+ void (*minfo)
+ (const char *fmt, ...);
+ /* This callback provides a chance for users of the BFD library to
+ override its decision about whether to place two adjacent sections
+ into the same segment. */
+ bfd_boolean (*override_segment_assignment)
+ (struct bfd_link_info *, bfd * abfd,
+ asection * current_section, asection * previous_section,
+ bfd_boolean new_segment);
+};
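+
+/* Usage sketch (non-compiled): one way a hypothetical caller might supply
+   the `warning' callback above.  The name `example_warning' is invented
+   and <stdio.h> is assumed to be available; this is illustration only,
+   not part of the upstream header.  */
+#if 0
+static bfd_boolean
+example_warning (struct bfd_link_info *info, const char *warning,
+                 const char *symbol, bfd *abfd, asection *section,
+                 bfd_vma address)
+{
+  (void) info; (void) section; (void) address;
+  fprintf (stderr, "%s: warning: %s (symbol: %s)\n",
+           abfd ? bfd_get_filename (abfd) : "<unknown>",
+           warning, symbol ? symbol : "<none>");
+  return TRUE;   /* Returning FALSE would make the calling BFD routine fail.  */
+}
+#endif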
+
+/* The linker builds link_order structures which tell the code how to
+ include input data in the output file. */
+
+/* These are the types of link_order structures. */
+
+enum bfd_link_order_type
+{
+ bfd_undefined_link_order, /* Undefined. */
+ bfd_indirect_link_order, /* Built from a section. */
+ bfd_data_link_order, /* Set to explicit data. */
+ bfd_section_reloc_link_order, /* Relocate against a section. */
+ bfd_symbol_reloc_link_order /* Relocate against a symbol. */
+};
+
+/* This is the link_order structure itself. These form a chain
+ attached to the output section whose contents they are describing. */
+
+struct bfd_link_order
+{
+ /* Next link_order in chain. */
+ struct bfd_link_order *next;
+ /* Type of link_order. */
+ enum bfd_link_order_type type;
+ /* Offset within output section. */
+ bfd_vma offset;
+ /* Size within output section. */
+ bfd_size_type size;
+ /* Type specific information. */
+ union
+ {
+ struct
+ {
+ /* Section to include. If this is used, then
+ section->output_section must be the section the
+ link_order is attached to, section->output_offset must
+ equal the link_order offset field, and section->size
+ must equal the link_order size field. Maybe these
+ restrictions should be relaxed someday. */
+ asection *section;
+ } indirect;
+ struct
+ {
+ /* Size of contents, or zero when contents size == size
+ within output section.
+ A non-zero value allows filling of the output section
+ with an arbitrary repeated pattern. */
+ unsigned int size;
+ /* Data to put into file. */
+ bfd_byte *contents;
+ } data;
+ struct
+ {
+ /* Description of reloc to generate. Used for
+ bfd_section_reloc_link_order and
+ bfd_symbol_reloc_link_order. */
+ struct bfd_link_order_reloc *p;
+ } reloc;
+ } u;
+};
+
+/* A linker order of type bfd_section_reloc_link_order or
+ bfd_symbol_reloc_link_order means to create a reloc against a
+ section or symbol, respectively. This is used to implement -Ur to
+ generate relocs for the constructor tables. The
+ bfd_link_order_reloc structure describes the reloc that BFD should
+ create. It is similar to an arelent, but I didn't use arelent
+ because the linker does not know anything about most symbols, and
+ any asymbol structure it creates will be partially meaningless.
+ This information could logically be in the bfd_link_order struct,
+ but I didn't want to waste the space since these types of relocs
+ are relatively rare. */
+
+struct bfd_link_order_reloc
+{
+ /* Reloc type. */
+ bfd_reloc_code_real_type reloc;
+
+ union
+ {
+ /* For type bfd_section_reloc_link_order, this is the section
+ the reloc should be against. This must be a section in the
+ output BFD, not any of the input BFDs. */
+ asection *section;
+ /* For type bfd_symbol_reloc_link_order, this is the name of the
+ symbol the reloc should be against. */
+ const char *name;
+ } u;
+
+ /* Addend to use. The object file should contain zero. The BFD
+ backend is responsible for filling in the contents of the object
+ file correctly. For some object file formats (e.g., COFF) the
+ addend must be stored into the object file, and for some
+ (e.g., SPARC a.out) it is kept in the reloc. */
+ bfd_vma addend;
+};
+
+/* Allocate a new link_order for a section. */
+extern struct bfd_link_order *bfd_new_link_order (bfd *, asection *);
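+
+/* Usage sketch (non-compiled): filling in a data link_order, roughly as a
+   linker front end might.  `output_bfd', `sec', and `fill_pattern' are
+   hypothetical names used only for illustration.  */
+#if 0
+  struct bfd_link_order *lo = bfd_new_link_order (output_bfd, sec);
+  lo->type = bfd_data_link_order;
+  lo->offset = 0;                            /* Offset within SEC.  */
+  lo->size = 16;                             /* Bytes occupied in SEC.  */
+  lo->u.data.size = sizeof (fill_pattern);   /* Pattern repeated to fill SIZE.  */
+  lo->u.data.contents = fill_pattern;
+#endif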
+
+/* These structures are used to describe version information for the
+ ELF linker. These structures could be manipulated entirely inside
+ BFD, but it would be a pain. Instead, the regular linker sets up
+ these structures, and then passes them into BFD. */
+
+/* Glob pattern for a version. */
+
+struct bfd_elf_version_expr
+{
+ /* Next glob pattern for this version. */
+ struct bfd_elf_version_expr *next;
+ /* Glob pattern. */
+ const char *pattern;
+ /* Set if pattern is not a glob. */
+ unsigned int literal : 1;
+ /* Defined by ".symver". */
+ unsigned int symver : 1;
+ /* Defined by version script. */
+ unsigned int script : 1;
+ /* Pattern type. */
+#define BFD_ELF_VERSION_C_TYPE 1
+#define BFD_ELF_VERSION_CXX_TYPE 2
+#define BFD_ELF_VERSION_JAVA_TYPE 4
+ unsigned int mask : 3;
+};
+
+struct bfd_elf_version_expr_head
+{
+ /* List of all patterns, both wildcards and non-wildcards. */
+ struct bfd_elf_version_expr *list;
+ /* Hash table for non-wildcards. */
+ void *htab;
+ /* Remaining patterns. */
+ struct bfd_elf_version_expr *remaining;
+ /* What kind of pattern types are present in list (bitmask). */
+ unsigned int mask;
+};
+
+/* Version dependencies. */
+
+struct bfd_elf_version_deps
+{
+ /* Next dependency for this version. */
+ struct bfd_elf_version_deps *next;
+ /* The version which this version depends upon. */
+ struct bfd_elf_version_tree *version_needed;
+};
+
+/* A node in the version tree. */
+
+struct bfd_elf_version_tree
+{
+ /* Next version. */
+ struct bfd_elf_version_tree *next;
+ /* Name of this version. */
+ const char *name;
+ /* Version number. */
+ unsigned int vernum;
+ /* Regular expressions for global symbols in this version. */
+ struct bfd_elf_version_expr_head globals;
+ /* Regular expressions for local symbols in this version. */
+ struct bfd_elf_version_expr_head locals;
+ /* List of versions which this version depends upon. */
+ struct bfd_elf_version_deps *deps;
+ /* Index of the version name. This is used within BFD. */
+ unsigned int name_indx;
+ /* Whether this version tree was used. This is used within BFD. */
+ int used;
+ /* Matching hook. */
+ struct bfd_elf_version_expr *(*match)
+ (struct bfd_elf_version_expr_head *head,
+ struct bfd_elf_version_expr *prev, const char *sym);
+};
+
+struct bfd_elf_dynamic_list
+{
+ struct bfd_elf_version_expr_head head;
+ struct bfd_elf_version_expr *(*match)
+ (struct bfd_elf_version_expr_head *head,
+ struct bfd_elf_version_expr *prev, const char *sym);
+};
+
+#endif
diff --git a/tools/perf/external/usr/include/demangle.h b/tools/perf/external/usr/include/demangle.h
new file mode 100644
index 00000000..8ad073de
--- /dev/null
+++ b/tools/perf/external/usr/include/demangle.h
@@ -0,0 +1,638 @@
+/* Defs for interface to demanglers.
+ Copyright 1992, 1993, 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002,
+ 2003, 2004, 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License
+ as published by the Free Software Foundation; either version 2, or
+ (at your option) any later version.
+
+ In addition to the permissions in the GNU Library General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Library Public License restrictions do apply in other
+ respects; for example, they cover modification of the file, and
+ distribution when not linked into a combined executable.)
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+ 02110-1301, USA. */
+
+
+#if !defined (DEMANGLE_H)
+#define DEMANGLE_H
+
+#include "libiberty.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* Options passed to cplus_demangle (in 2nd parameter). */
+
+#define DMGL_NO_OPTS 0 /* For readability... */
+#define DMGL_PARAMS (1 << 0) /* Include function args */
+#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
+#define DMGL_JAVA (1 << 2) /* Demangle as Java rather than C++. */
+#define DMGL_VERBOSE (1 << 3) /* Include implementation details. */
+#define DMGL_TYPES (1 << 4) /* Also try to demangle type encodings. */
+#define DMGL_RET_POSTFIX (1 << 5) /* Print function return types (when
+ present) after function signature */
+
+#define DMGL_AUTO (1 << 8)
+#define DMGL_GNU (1 << 9)
+#define DMGL_LUCID (1 << 10)
+#define DMGL_ARM (1 << 11)
+#define DMGL_HP (1 << 12) /* For the HP aCC compiler;
+ same as ARM except for
+ template arguments, etc. */
+#define DMGL_EDG (1 << 13)
+#define DMGL_GNU_V3 (1 << 14)
+#define DMGL_GNAT (1 << 15)
+
+/* If none of these are set, use 'current_demangling_style' as the default. */
+#define DMGL_STYLE_MASK (DMGL_AUTO|DMGL_GNU|DMGL_LUCID|DMGL_ARM|DMGL_HP|DMGL_EDG|DMGL_GNU_V3|DMGL_JAVA|DMGL_GNAT)
+
+/* Enumeration of possible demangling styles.
+
+ Lucid and ARM styles are still kept logically distinct, even though
+ they now both behave identically. The resulting style is actually the
+ union of both. I.e., either style recognizes both "__pt__" and "__rf__"
+ for operator "->", even though the first is lucid style and the second
+ is ARM style. (FIXME?) */
+
+extern enum demangling_styles
+{
+ no_demangling = -1,
+ unknown_demangling = 0,
+ auto_demangling = DMGL_AUTO,
+ gnu_demangling = DMGL_GNU,
+ lucid_demangling = DMGL_LUCID,
+ arm_demangling = DMGL_ARM,
+ hp_demangling = DMGL_HP,
+ edg_demangling = DMGL_EDG,
+ gnu_v3_demangling = DMGL_GNU_V3,
+ java_demangling = DMGL_JAVA,
+ gnat_demangling = DMGL_GNAT
+} current_demangling_style;
+
+/* Define string names for the various demangling styles. */
+
+#define NO_DEMANGLING_STYLE_STRING "none"
+#define AUTO_DEMANGLING_STYLE_STRING "auto"
+#define GNU_DEMANGLING_STYLE_STRING "gnu"
+#define LUCID_DEMANGLING_STYLE_STRING "lucid"
+#define ARM_DEMANGLING_STYLE_STRING "arm"
+#define HP_DEMANGLING_STYLE_STRING "hp"
+#define EDG_DEMANGLING_STYLE_STRING "edg"
+#define GNU_V3_DEMANGLING_STYLE_STRING "gnu-v3"
+#define JAVA_DEMANGLING_STYLE_STRING "java"
+#define GNAT_DEMANGLING_STYLE_STRING "gnat"
+
+/* Some macros to test what demangling style is active. */
+
+#define CURRENT_DEMANGLING_STYLE current_demangling_style
+#define AUTO_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_AUTO)
+#define GNU_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_GNU)
+#define LUCID_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_LUCID)
+#define ARM_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_ARM)
+#define HP_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_HP)
+#define EDG_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_EDG)
+#define GNU_V3_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_GNU_V3)
+#define JAVA_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_JAVA)
+#define GNAT_DEMANGLING (((int) CURRENT_DEMANGLING_STYLE) & DMGL_GNAT)
+
+/* Provide information about the available demangle styles. This code is
+ pulled from gdb into libiberty because it is useful to binutils also. */
+
+extern const struct demangler_engine
+{
+ const char *const demangling_style_name;
+ const enum demangling_styles demangling_style;
+ const char *const demangling_style_doc;
+} libiberty_demanglers[];
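+
+/* Usage sketch (non-compiled): listing the available styles.  This assumes,
+   as in libiberty's cplus-dem.c, that the table is terminated by an entry
+   whose style name is NULL; <stdio.h> is assumed.  */
+#if 0
+  const struct demangler_engine *d;
+  for (d = libiberty_demanglers; d->demangling_style_name != NULL; d++)
+    printf ("%-8s %s\n", d->demangling_style_name, d->demangling_style_doc);
+#endif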
+
+extern char *
+cplus_demangle (const char *mangled, int options);
+
+extern int
+cplus_demangle_opname (const char *opname, char *result, int options);
+
+extern const char *
+cplus_mangle_opname (const char *opname, int options);
+
+/* Note: This sets global state. FIXME if you care about multi-threading. */
+
+extern void
+set_cplus_marker_for_demangling (int ch);
+
+extern enum demangling_styles
+cplus_demangle_set_style (enum demangling_styles style);
+
+extern enum demangling_styles
+cplus_demangle_name_to_style (const char *name);
+
+/* Callback typedef for allocation-less demangler interfaces. */
+typedef void (*demangle_callbackref) (const char *, size_t, void *);
+
+/* V3 ABI demangling entry points, defined in cp-demangle.c. Callback
+ variants return non-zero on success, zero on error. char* variants
+ return a string allocated by malloc on success, NULL on error. */
+extern int
+cplus_demangle_v3_callback (const char *mangled, int options,
+ demangle_callbackref callback, void *opaque);
+
+extern char*
+cplus_demangle_v3 (const char *mangled, int options);
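+
+/* Usage sketch (non-compiled): demangling a V3 ABI name.  The mangled
+   string is an example input; <stdio.h> and <stdlib.h> are assumed.  */
+#if 0
+  char *s = cplus_demangle_v3 ("_ZN3Foo3barEi", DMGL_PARAMS | DMGL_ANSI);
+  if (s != NULL)
+    {
+      printf ("%s\n", s);   /* Prints "Foo::bar(int)".  */
+      free (s);             /* The caller owns the malloc'd result.  */
+    }
+#endif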
+
+extern int
+java_demangle_v3_callback (const char *mangled,
+ demangle_callbackref callback, void *opaque);
+
+extern char*
+java_demangle_v3 (const char *mangled);
+
+char *
+ada_demangle (const char *mangled, int options);
+
+enum gnu_v3_ctor_kinds {
+ gnu_v3_complete_object_ctor = 1,
+ gnu_v3_base_object_ctor,
+ gnu_v3_complete_object_allocating_ctor
+};
+
+/* Return non-zero iff NAME is the mangled form of a constructor name
+ in the G++ V3 ABI demangling style. Specifically, return an `enum
+ gnu_v3_ctor_kinds' value indicating what kind of constructor
+ it is. */
+extern enum gnu_v3_ctor_kinds
+ is_gnu_v3_mangled_ctor (const char *name);
+
+
+enum gnu_v3_dtor_kinds {
+ gnu_v3_deleting_dtor = 1,
+ gnu_v3_complete_object_dtor,
+ gnu_v3_base_object_dtor
+};
+
+/* Return non-zero iff NAME is the mangled form of a destructor name
+ in the G++ V3 ABI demangling style. Specifically, return an `enum
+ gnu_v3_dtor_kinds' value, indicating what kind of destructor
+ it is. */
+extern enum gnu_v3_dtor_kinds
+ is_gnu_v3_mangled_dtor (const char *name);
+
+/* The V3 demangler works in two passes. The first pass builds a tree
+ representation of the mangled name, and the second pass turns the
+ tree representation into a demangled string. Here we define an
+ interface to permit a caller to build their own tree
+ representation, which they can pass to the demangler to get a
+ demangled string. This can be used to canonicalize user input into
+ something which the demangler might output. It could also be used
+ by other demanglers in the future. */
+
+/* These are the component types which may be found in the tree. Many
+ component types have one or two subtrees, referred to as left and
+ right (a component type with only one subtree puts it in the left
+ subtree). */
+
+enum demangle_component_type
+{
+ /* A name, with a length and a pointer to a string. */
+ DEMANGLE_COMPONENT_NAME,
+ /* A qualified name. The left subtree is a class or namespace or
+ some such thing, and the right subtree is a name qualified by
+ that class. */
+ DEMANGLE_COMPONENT_QUAL_NAME,
+ /* A local name. The left subtree describes a function, and the
+ right subtree is a name which is local to that function. */
+ DEMANGLE_COMPONENT_LOCAL_NAME,
+ /* A typed name. The left subtree is a name, and the right subtree
+ describes that name as a function. */
+ DEMANGLE_COMPONENT_TYPED_NAME,
+ /* A template. The left subtree is a template name, and the right
+ subtree is a template argument list. */
+ DEMANGLE_COMPONENT_TEMPLATE,
+ /* A template parameter. This holds a number, which is the template
+ parameter index. */
+ DEMANGLE_COMPONENT_TEMPLATE_PARAM,
+ /* A function parameter. This holds a number, which is the index. */
+ DEMANGLE_COMPONENT_FUNCTION_PARAM,
+ /* A constructor. This holds a name and the kind of
+ constructor. */
+ DEMANGLE_COMPONENT_CTOR,
+ /* A destructor. This holds a name and the kind of destructor. */
+ DEMANGLE_COMPONENT_DTOR,
+ /* A vtable. This has one subtree, the type for which this is a
+ vtable. */
+ DEMANGLE_COMPONENT_VTABLE,
+ /* A VTT structure. This has one subtree, the type for which this
+ is a VTT. */
+ DEMANGLE_COMPONENT_VTT,
+ /* A construction vtable. The left subtree is the type for which
+ this is a vtable, and the right subtree is the derived type for
+ which this vtable is built. */
+ DEMANGLE_COMPONENT_CONSTRUCTION_VTABLE,
+ /* A typeinfo structure. This has one subtree, the type for which
+ this is the typeinfo structure. */
+ DEMANGLE_COMPONENT_TYPEINFO,
+ /* A typeinfo name. This has one subtree, the type for which this
+ is the typeinfo name. */
+ DEMANGLE_COMPONENT_TYPEINFO_NAME,
+ /* A typeinfo function. This has one subtree, the type for which
+ this is the typeinfo function. */
+ DEMANGLE_COMPONENT_TYPEINFO_FN,
+ /* A thunk. This has one subtree, the name for which this is a
+ thunk. */
+ DEMANGLE_COMPONENT_THUNK,
+ /* A virtual thunk. This has one subtree, the name for which this
+ is a virtual thunk. */
+ DEMANGLE_COMPONENT_VIRTUAL_THUNK,
+ /* A covariant thunk. This has one subtree, the name for which this
+ is a covariant thunk. */
+ DEMANGLE_COMPONENT_COVARIANT_THUNK,
+ /* A Java class. This has one subtree, the type. */
+ DEMANGLE_COMPONENT_JAVA_CLASS,
+ /* A guard variable. This has one subtree, the name for which this
+ is a guard variable. */
+ DEMANGLE_COMPONENT_GUARD,
+ /* A reference temporary. This has one subtree, the name for which
+ this is a temporary. */
+ DEMANGLE_COMPONENT_REFTEMP,
+ /* A hidden alias. This has one subtree, the encoding for which it
+ is providing alternative linkage. */
+ DEMANGLE_COMPONENT_HIDDEN_ALIAS,
+ /* A standard substitution. This holds the name of the
+ substitution. */
+ DEMANGLE_COMPONENT_SUB_STD,
+ /* The restrict qualifier. The one subtree is the type which is
+ being qualified. */
+ DEMANGLE_COMPONENT_RESTRICT,
+ /* The volatile qualifier. The one subtree is the type which is
+ being qualified. */
+ DEMANGLE_COMPONENT_VOLATILE,
+ /* The const qualifier. The one subtree is the type which is being
+ qualified. */
+ DEMANGLE_COMPONENT_CONST,
+ /* The restrict qualifier modifying a member function. The one
+ subtree is the type which is being qualified. */
+ DEMANGLE_COMPONENT_RESTRICT_THIS,
+ /* The volatile qualifier modifying a member function. The one
+ subtree is the type which is being qualified. */
+ DEMANGLE_COMPONENT_VOLATILE_THIS,
+ /* The const qualifier modifying a member function. The one subtree
+ is the type which is being qualified. */
+ DEMANGLE_COMPONENT_CONST_THIS,
+ /* A vendor qualifier. The left subtree is the type which is being
+ qualified, and the right subtree is the name of the
+ qualifier. */
+ DEMANGLE_COMPONENT_VENDOR_TYPE_QUAL,
+ /* A pointer. The one subtree is the type which is being pointed
+ to. */
+ DEMANGLE_COMPONENT_POINTER,
+ /* A reference. The one subtree is the type which is being
+ referenced. */
+ DEMANGLE_COMPONENT_REFERENCE,
+ /* C++0x: An rvalue reference. The one subtree is the type which is
+ being referenced. */
+ DEMANGLE_COMPONENT_RVALUE_REFERENCE,
+ /* A complex type. The one subtree is the base type. */
+ DEMANGLE_COMPONENT_COMPLEX,
+ /* An imaginary type. The one subtree is the base type. */
+ DEMANGLE_COMPONENT_IMAGINARY,
+ /* A builtin type. This holds the builtin type information. */
+ DEMANGLE_COMPONENT_BUILTIN_TYPE,
+ /* A vendor's builtin type. This holds the name of the type. */
+ DEMANGLE_COMPONENT_VENDOR_TYPE,
+ /* A function type. The left subtree is the return type. The right
+ subtree is a list of ARGLIST nodes. Either or both may be
+ NULL. */
+ DEMANGLE_COMPONENT_FUNCTION_TYPE,
+ /* An array type. The left subtree is the dimension, which may be
+ NULL, or a string (represented as DEMANGLE_COMPONENT_NAME), or an
+ expression. The right subtree is the element type. */
+ DEMANGLE_COMPONENT_ARRAY_TYPE,
+ /* A pointer to member type. The left subtree is the class type,
+ and the right subtree is the member type. CV-qualifiers appear
+ on the latter. */
+ DEMANGLE_COMPONENT_PTRMEM_TYPE,
+ /* A fixed-point type. */
+ DEMANGLE_COMPONENT_FIXED_TYPE,
+ /* A vector type. The left subtree is the number of elements,
+ the right subtree is the element type. */
+ DEMANGLE_COMPONENT_VECTOR_TYPE,
+ /* An argument list. The left subtree is the current argument, and
+ the right subtree is either NULL or another ARGLIST node. */
+ DEMANGLE_COMPONENT_ARGLIST,
+ /* A template argument list. The left subtree is the current
+ template argument, and the right subtree is either NULL or
+ another TEMPLATE_ARGLIST node. */
+ DEMANGLE_COMPONENT_TEMPLATE_ARGLIST,
+ /* An operator. This holds information about a standard
+ operator. */
+ DEMANGLE_COMPONENT_OPERATOR,
+ /* An extended operator. This holds the number of arguments, and
+ the name of the extended operator. */
+ DEMANGLE_COMPONENT_EXTENDED_OPERATOR,
+ /* A typecast, represented as a unary operator. The one subtree is
+ the type to which the argument should be cast. */
+ DEMANGLE_COMPONENT_CAST,
+ /* A unary expression. The left subtree is the operator, and the
+ right subtree is the single argument. */
+ DEMANGLE_COMPONENT_UNARY,
+ /* A binary expression. The left subtree is the operator, and the
+ right subtree is a BINARY_ARGS. */
+ DEMANGLE_COMPONENT_BINARY,
+ /* Arguments to a binary expression. The left subtree is the first
+ argument, and the right subtree is the second argument. */
+ DEMANGLE_COMPONENT_BINARY_ARGS,
+ /* A trinary expression. The left subtree is the operator, and the
+ right subtree is a TRINARY_ARG1. */
+ DEMANGLE_COMPONENT_TRINARY,
+ /* Arguments to a trinary expression. The left subtree is the first
+ argument, and the right subtree is a TRINARY_ARG2. */
+ DEMANGLE_COMPONENT_TRINARY_ARG1,
+ /* More arguments to a trinary expression. The left subtree is the
+ second argument, and the right subtree is the third argument. */
+ DEMANGLE_COMPONENT_TRINARY_ARG2,
+ /* A literal. The left subtree is the type, and the right subtree
+ is the value, represented as a DEMANGLE_COMPONENT_NAME. */
+ DEMANGLE_COMPONENT_LITERAL,
+ /* A negative literal. Like LITERAL, but the value is negated.
+ This is a minor hack: the NAME used for LITERAL points directly
+ to the mangled string, but since negative numbers are mangled
+ using 'n' instead of '-', we want a way to indicate a negative
+ number which involves neither modifying the mangled string nor
+ allocating a new copy of the literal in memory. */
+ DEMANGLE_COMPONENT_LITERAL_NEG,
+ /* A libgcj compiled resource. The left subtree is the name of the
+ resource. */
+ DEMANGLE_COMPONENT_JAVA_RESOURCE,
+ /* A name formed by the concatenation of two parts. The left
+ subtree is the first part and the right subtree the second. */
+ DEMANGLE_COMPONENT_COMPOUND_NAME,
+ /* A name formed by a single character. */
+ DEMANGLE_COMPONENT_CHARACTER,
+ /* A number. */
+ DEMANGLE_COMPONENT_NUMBER,
+ /* A decltype type. */
+ DEMANGLE_COMPONENT_DECLTYPE,
+ /* Global constructors keyed to name. */
+ DEMANGLE_COMPONENT_GLOBAL_CONSTRUCTORS,
+ /* Global destructors keyed to name. */
+ DEMANGLE_COMPONENT_GLOBAL_DESTRUCTORS,
+ /* A lambda closure type. */
+ DEMANGLE_COMPONENT_LAMBDA,
+ /* A default argument scope. */
+ DEMANGLE_COMPONENT_DEFAULT_ARG,
+ /* An unnamed type. */
+ DEMANGLE_COMPONENT_UNNAMED_TYPE,
+ /* A pack expansion. */
+ DEMANGLE_COMPONENT_PACK_EXPANSION
+};
+
+/* Types which are only used internally. */
+
+struct demangle_operator_info;
+struct demangle_builtin_type_info;
+
+/* A node in the tree representation is an instance of a struct
+ demangle_component. Note that the field names of the struct are
+ not well protected against macros defined by the file including
+ this one. We can fix this if it ever becomes a problem. */
+
+struct demangle_component
+{
+ /* The type of this component. */
+ enum demangle_component_type type;
+
+ union
+ {
+ /* For DEMANGLE_COMPONENT_NAME. */
+ struct
+ {
+ /* A pointer to the name (which need not be NULL terminated) and
+ its length. */
+ const char *s;
+ int len;
+ } s_name;
+
+ /* For DEMANGLE_COMPONENT_OPERATOR. */
+ struct
+ {
+ /* Operator. */
+ const struct demangle_operator_info *op;
+ } s_operator;
+
+ /* For DEMANGLE_COMPONENT_EXTENDED_OPERATOR. */
+ struct
+ {
+ /* Number of arguments. */
+ int args;
+ /* Name. */
+ struct demangle_component *name;
+ } s_extended_operator;
+
+ /* For DEMANGLE_COMPONENT_FIXED_TYPE. */
+ struct
+ {
+ /* The length, indicated by a C integer type name. */
+ struct demangle_component *length;
+ /* _Accum or _Fract? */
+ short accum;
+ /* Saturating or not? */
+ short sat;
+ } s_fixed;
+
+ /* For DEMANGLE_COMPONENT_CTOR. */
+ struct
+ {
+ /* Kind of constructor. */
+ enum gnu_v3_ctor_kinds kind;
+ /* Name. */
+ struct demangle_component *name;
+ } s_ctor;
+
+ /* For DEMANGLE_COMPONENT_DTOR. */
+ struct
+ {
+ /* Kind of destructor. */
+ enum gnu_v3_dtor_kinds kind;
+ /* Name. */
+ struct demangle_component *name;
+ } s_dtor;
+
+ /* For DEMANGLE_COMPONENT_BUILTIN_TYPE. */
+ struct
+ {
+ /* Builtin type. */
+ const struct demangle_builtin_type_info *type;
+ } s_builtin;
+
+ /* For DEMANGLE_COMPONENT_SUB_STD. */
+ struct
+ {
+ /* Standard substitution string. */
+ const char* string;
+ /* Length of string. */
+ int len;
+ } s_string;
+
+ /* For DEMANGLE_COMPONENT_*_PARAM. */
+ struct
+ {
+ /* Parameter index. */
+ long number;
+ } s_number;
+
+ /* For DEMANGLE_COMPONENT_CHARACTER. */
+ struct
+ {
+ int character;
+ } s_character;
+
+ /* For other types. */
+ struct
+ {
+ /* Left (or only) subtree. */
+ struct demangle_component *left;
+ /* Right subtree. */
+ struct demangle_component *right;
+ } s_binary;
+
+ struct
+ {
+ /* subtree, same place as d_left. */
+ struct demangle_component *sub;
+ /* integer. */
+ int num;
+ } s_unary_num;
+
+ } u;
+};
+
+/* People building mangled trees are expected to allocate instances of
+ struct demangle_component themselves. They can then call one of
+ the following functions to fill them in. */
+
+/* Fill in most component types with a left subtree and a right
+ subtree. Returns non-zero on success, zero on failure, such as an
+ unrecognized or inappropriate component type. */
+
+extern int
+cplus_demangle_fill_component (struct demangle_component *fill,
+ enum demangle_component_type,
+ struct demangle_component *left,
+ struct demangle_component *right);
+
+/* Fill in a DEMANGLE_COMPONENT_NAME. Returns non-zero on success,
+ zero for bad arguments. */
+
+extern int
+cplus_demangle_fill_name (struct demangle_component *fill,
+ const char *, int);
+
+/* Fill in a DEMANGLE_COMPONENT_BUILTIN_TYPE, using the name of the
+ builtin type (e.g., "int", etc.). Returns non-zero on success,
+ zero if the type is not recognized. */
+
+extern int
+cplus_demangle_fill_builtin_type (struct demangle_component *fill,
+ const char *type_name);
+
+/* Fill in a DEMANGLE_COMPONENT_OPERATOR, using the name of the
+ operator and the number of arguments which it takes (the latter is
+ used to disambiguate operators which can be both binary and unary,
+ such as '-'). Returns non-zero on success, zero if the operator is
+ not recognized. */
+
+extern int
+cplus_demangle_fill_operator (struct demangle_component *fill,
+ const char *opname, int args);
+
+/* Fill in a DEMANGLE_COMPONENT_EXTENDED_OPERATOR, providing the
+ number of arguments and the name. Returns non-zero on success,
+ zero for bad arguments. */
+
+extern int
+cplus_demangle_fill_extended_operator (struct demangle_component *fill,
+ int numargs,
+ struct demangle_component *nm);
+
+/* Fill in a DEMANGLE_COMPONENT_CTOR. Returns non-zero on success,
+ zero for bad arguments. */
+
+extern int
+cplus_demangle_fill_ctor (struct demangle_component *fill,
+ enum gnu_v3_ctor_kinds kind,
+ struct demangle_component *name);
+
+/* Fill in a DEMANGLE_COMPONENT_DTOR. Returns non-zero on success,
+ zero for bad arguments. */
+
+extern int
+cplus_demangle_fill_dtor (struct demangle_component *fill,
+ enum gnu_v3_dtor_kinds kind,
+ struct demangle_component *name);
+
+/* This function translates a mangled name into a struct
+ demangle_component tree. The first argument is the mangled name.
+ The second argument is DMGL_* options. This returns a pointer to a
+ tree on success, or NULL on failure. On success, the third
+ argument is set to a block of memory allocated by malloc. This
+ block should be passed to free when the tree is no longer
+ needed. */
+
+extern struct demangle_component *
+cplus_demangle_v3_components (const char *mangled, int options, void **mem);
+
+/* This function takes a struct demangle_component tree and returns
+ the corresponding demangled string. The first argument is DMGL_*
+ options. The second is the tree to demangle. The third is a guess
+ at the length of the demangled string, used to initially allocate
+ the return buffer. The fourth is a pointer to a size_t. On
+ success, this function returns a buffer allocated by malloc(), and
+ sets the size_t pointed to by the fourth argument to the size of
+ the allocated buffer (not the length of the returned string). On
+ failure, this function returns NULL, and sets the size_t pointed to
+ by the fourth argument to 0 for an invalid tree, or to 1 for a
+ memory allocation error. */
+
+extern char *
+cplus_demangle_print (int options,
+ const struct demangle_component *tree,
+ int estimated_length,
+ size_t *p_allocated_size);
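+
+/* Usage sketch (non-compiled): building a tree from a mangled name and
+   printing it.  The mangled string is an example input; <stdio.h> and
+   <stdlib.h> are assumed.  */
+#if 0
+  void *mem;
+  struct demangle_component *tree
+    = cplus_demangle_v3_components ("_ZN3Foo3barEi", DMGL_PARAMS, &mem);
+  if (tree != NULL)
+    {
+      size_t alc;
+      char *s = cplus_demangle_print (DMGL_PARAMS, tree, 64, &alc);
+      if (s != NULL)
+        {
+          printf ("%s\n", s);
+          free (s);
+        }
+      free (mem);            /* Releases the tree's backing storage.  */
+    }
+#endif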
+
+/* This function takes a struct demangle_component tree and passes back
+ a demangled string in one or more calls to a callback function.
+ The first argument is DMGL_* options. The second is the tree to
+ demangle. The third is a pointer to a callback function; on each call
+ this receives an element of the demangled string, its length, and an
+ opaque value. The fourth is the opaque value passed to the callback.
+ The callback is called once or more to return the full demangled
+ string. The demangled element string is always nul-terminated, though
+ its length is also provided for convenience. In contrast to
+ cplus_demangle_print(), this function does not allocate heap memory
+ to grow output strings (except perhaps where alloca() is implemented
+ by malloc()), and so is normally safe for use where the heap has been
+ corrupted. On success, this function returns 1; on failure, 0. */
+
+extern int
+cplus_demangle_print_callback (int options,
+ const struct demangle_component *tree,
+ demangle_callbackref callback, void *opaque);
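+
+/* Usage sketch (non-compiled): the same tree emitted through the callback
+   interface.  `emit_chunk' is an invented helper; writing to a FILE via the
+   opaque pointer is just one possible use.  */
+#if 0
+static void
+emit_chunk (const char *s, size_t len, void *opaque)
+{
+  fwrite (s, 1, len, (FILE *) opaque);
+}
+
+  /* ... given a TREE from cplus_demangle_v3_components: */
+  cplus_demangle_print_callback (DMGL_PARAMS, tree, emit_chunk, stdout);
+#endif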
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* DEMANGLE_H */
diff --git a/tools/perf/external/usr/include/dis-asm.h b/tools/perf/external/usr/include/dis-asm.h
new file mode 100644
index 00000000..63366d9e
--- /dev/null
+++ b/tools/perf/external/usr/include/dis-asm.h
@@ -0,0 +1,368 @@
+/* Interface between the opcode library and its callers.
+
+ Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009, 2010
+ Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor,
+ Boston, MA 02110-1301, USA.
+
+ Written by Cygnus Support, 1993.
+
+ The opcode library (libopcodes.a) provides instruction decoders for
+ a large variety of instruction sets, callable with an identical
+ interface, for making instruction-processing programs more independent
+ of the instruction set being processed. */
+
+#ifndef DIS_ASM_H
+#define DIS_ASM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include "bfd.h"
+
+ typedef int (*fprintf_ftype) (void *, const char*, ...) /*ATTRIBUTE_FPTR_PRINTF_2*/;
+
+enum dis_insn_type
+{
+ dis_noninsn, /* Not a valid instruction. */
+ dis_nonbranch, /* Not a branch instruction. */
+ dis_branch, /* Unconditional branch. */
+ dis_condbranch, /* Conditional branch. */
+ dis_jsr, /* Jump to subroutine. */
+ dis_condjsr, /* Conditional jump to subroutine. */
+ dis_dref, /* Data reference instruction. */
+ dis_dref2 /* Two data references in instruction. */
+};
+
+/* This struct is passed into the instruction decoding routine,
+ and is passed back out into each callback. The various fields are used
+ for conveying information from your main routine into your callbacks,
+ for passing information into the instruction decoders (such as the
+ addresses of the callback functions), or for passing information
+ back from the instruction decoders to their callers.
+
+ It must be initialized before it is first passed; this can be done
+ by hand, or using one of the initialization macros below. */
+
+typedef struct disassemble_info
+{
+ fprintf_ftype fprintf_func;
+ void *stream;
+ void *application_data;
+
+ /* Target description. We could replace this with a pointer to the bfd,
+ but that would require one. There currently isn't any such requirement
+ so to avoid introducing one we record these explicitly. */
+ /* The bfd_flavour. This can be bfd_target_unknown_flavour. */
+ enum bfd_flavour flavour;
+ /* The bfd_arch value. */
+ enum bfd_architecture arch;
+ /* The bfd_mach value. */
+ unsigned long mach;
+ /* Endianness (for bi-endian cpus). Mono-endian cpus can ignore this. */
+ enum bfd_endian endian;
+ /* Endianness of code, for mixed-endian situations such as ARM BE8. */
+ enum bfd_endian endian_code;
+ /* An arch/mach-specific bitmask of selected instruction subsets, mainly
+ for processors with run-time-switchable instruction sets. The default,
+ zero, means that there is no constraint. CGEN-based opcodes ports
+ may use ISA_foo masks. */
+ void *insn_sets;
+
+ /* Some targets need information about the current section to accurately
+ display insns. If this is NULL, the target disassembler function
+ will have to make its best guess. */
+ asection *section;
+
+ /* An array of pointers to symbols either at the location being disassembled
+ or at the start of the function being disassembled. The array is sorted
+ so that the first symbol is intended to be the one used. The others are
+ present for any misc. purposes. This is not set reliably, but if it is
+ not NULL, it is correct. */
+ asymbol **symbols;
+ /* Number of symbols in array. */
+ int num_symbols;
+
+ /* Symbol table provided for targets that want to look at it. This is
+ used on Arm to find mapping symbols and determine Arm/Thumb code. */
+ asymbol **symtab;
+ int symtab_pos;
+ int symtab_size;
+
+ /* For use by the disassembler.
+ The top 16 bits are reserved for public use (and are documented here).
+ The bottom 16 bits are for the internal use of the disassembler. */
+ unsigned long flags;
+ /* Set if the disassembler has determined that there are one or more
+ relocations associated with the instruction being disassembled. */
+#define INSN_HAS_RELOC (1 << 31)
+ /* Set if the user has requested the disassembly of data as well as code. */
+#define DISASSEMBLE_DATA (1 << 30)
+ /* Set if the user has specifically set the machine type encoded in the
+ mach field of this structure. */
+#define USER_SPECIFIED_MACHINE_TYPE (1 << 29)
+
+ /* Used internally by the target-specific disassembly code. */
+ void *private_data;
+
+ /* Function used to get bytes to disassemble. MEMADDR is the
+ address of the stuff to be disassembled, MYADDR is the address to
+ put the bytes in, and LENGTH is the number of bytes to read.
+ INFO is a pointer to this struct.
+ Returns an errno value or 0 for success. */
+ int (*read_memory_func)
+ (bfd_vma memaddr, bfd_byte *myaddr, unsigned int length,
+ struct disassemble_info *dinfo);
+
+ /* Function which should be called if we get an error that we can't
+ recover from. STATUS is the errno value from read_memory_func and
+ MEMADDR is the address that we were trying to read. INFO is a
+ pointer to this struct. */
+ void (*memory_error_func)
+ (int status, bfd_vma memaddr, struct disassemble_info *dinfo);
+
+ /* Function called to print ADDR. */
+ void (*print_address_func)
+ (bfd_vma addr, struct disassemble_info *dinfo);
+
+ /* Function called to determine if there is a symbol at the given ADDR.
+ If there is, the function returns 1, otherwise it returns 0.
+ This is used by ports which support an overlay manager where
+ the overlay number is held in the top part of an address. In
+ some circumstances we want to include the overlay number in the
+ address, (normally because there is a symbol associated with
+ that address), but sometimes we want to mask out the overlay bits. */
+ int (* symbol_at_address_func)
+ (bfd_vma addr, struct disassemble_info *dinfo);
+
+ /* Function called to check whether a SYMBOL can be displayed to the user.
+ This is used by some ports that want to hide special symbols when
+ displaying debugging output. */
+ bfd_boolean (* symbol_is_valid)
+ (asymbol *, struct disassemble_info *dinfo);
+
+ /* These are for buffer_read_memory. */
+ bfd_byte *buffer;
+ bfd_vma buffer_vma;
+ unsigned int buffer_length;
+
+ /* This variable may be set by the instruction decoder. It suggests
+ the number of bytes objdump should display on a single line. If
+ the instruction decoder sets this, it should always set it to
+ the same value in order to get reasonable looking output. */
+ int bytes_per_line;
+
+ /* The next two variables control the way objdump displays the raw data. */
+ /* For example, if bytes_per_line is 8 and bytes_per_chunk is 4, the */
+ /* output will look like this:
+ 00: 00000000 00000000
+ with the chunks displayed according to "display_endian". */
+ int bytes_per_chunk;
+ enum bfd_endian display_endian;
+
+ /* Number of octets per incremented target address.
+ Normally one, but some DSPs have byte sizes of 16 or 32 bits. */
+ unsigned int octets_per_byte;
+
+ /* The number of zeroes we want to see at the end of a section before we
+ start skipping them. */
+ unsigned int skip_zeroes;
+
+ /* The number of zeroes to skip at the end of a section. If the number
+ of zeroes at the end is between SKIP_ZEROES_AT_END and SKIP_ZEROES,
+ they will be disassembled. If there are fewer than
+ SKIP_ZEROES_AT_END, they will be skipped. This is a heuristic
+ attempt to avoid disassembling zeroes inserted by section
+ alignment. */
+ unsigned int skip_zeroes_at_end;
+
+ /* Whether the disassembler always needs the relocations. */
+ bfd_boolean disassembler_needs_relocs;
+
+ /* Results from instruction decoders. Not all decoders yet support
+ this information. This info is set each time an instruction is
+ decoded, and is only valid for the last such instruction.
+
+ To determine whether this decoder supports this information, set
+ insn_info_valid to 0, decode an instruction, then check it. */
+
+ char insn_info_valid; /* Branch info has been set. */
+ char branch_delay_insns; /* How many sequential insns will run before
+ a branch takes effect. (0 = normal) */
+ char data_size; /* Size of data reference in insn, in bytes */
+ enum dis_insn_type insn_type; /* Type of instruction */
+ bfd_vma target; /* Target address of branch or dref, if known;
+ zero if unknown. */
+ bfd_vma target2; /* Second target address for dref2 */
+
+ /* Command line options specific to the target disassembler. */
+ char * disassembler_options;
+
+} disassemble_info;
+
+
+/* Standard disassemblers. Disassemble one instruction at the given
+ target address. Return number of octets processed. */
+typedef int (*disassembler_ftype) (bfd_vma, disassemble_info *);
+
+extern int print_insn_alpha (bfd_vma, disassemble_info *);
+extern int print_insn_avr (bfd_vma, disassemble_info *);
+extern int print_insn_bfin (bfd_vma, disassemble_info *);
+extern int print_insn_big_arm (bfd_vma, disassemble_info *);
+extern int print_insn_big_mips (bfd_vma, disassemble_info *);
+extern int print_insn_big_or32 (bfd_vma, disassemble_info *);
+extern int print_insn_big_powerpc (bfd_vma, disassemble_info *);
+extern int print_insn_big_score (bfd_vma, disassemble_info *);
+extern int print_insn_cr16 (bfd_vma, disassemble_info *);
+extern int print_insn_crx (bfd_vma, disassemble_info *);
+extern int print_insn_d10v (bfd_vma, disassemble_info *);
+extern int print_insn_d30v (bfd_vma, disassemble_info *);
+extern int print_insn_dlx (bfd_vma, disassemble_info *);
+extern int print_insn_fr30 (bfd_vma, disassemble_info *);
+extern int print_insn_frv (bfd_vma, disassemble_info *);
+extern int print_insn_h8300 (bfd_vma, disassemble_info *);
+extern int print_insn_h8300h (bfd_vma, disassemble_info *);
+extern int print_insn_h8300s (bfd_vma, disassemble_info *);
+extern int print_insn_h8500 (bfd_vma, disassemble_info *);
+extern int print_insn_hppa (bfd_vma, disassemble_info *);
+extern int print_insn_i370 (bfd_vma, disassemble_info *);
+extern int print_insn_i386 (bfd_vma, disassemble_info *);
+extern int print_insn_i386_att (bfd_vma, disassemble_info *);
+extern int print_insn_i386_intel (bfd_vma, disassemble_info *);
+extern int print_insn_i860 (bfd_vma, disassemble_info *);
+extern int print_insn_i960 (bfd_vma, disassemble_info *);
+extern int print_insn_ia64 (bfd_vma, disassemble_info *);
+extern int print_insn_ip2k (bfd_vma, disassemble_info *);
+extern int print_insn_iq2000 (bfd_vma, disassemble_info *);
+extern int print_insn_little_arm (bfd_vma, disassemble_info *);
+extern int print_insn_little_mips (bfd_vma, disassemble_info *);
+extern int print_insn_little_or32 (bfd_vma, disassemble_info *);
+extern int print_insn_little_powerpc (bfd_vma, disassemble_info *);
+extern int print_insn_little_score (bfd_vma, disassemble_info *);
+extern int print_insn_lm32 (bfd_vma, disassemble_info *);
+extern int print_insn_m32c (bfd_vma, disassemble_info *);
+extern int print_insn_m32r (bfd_vma, disassemble_info *);
+extern int print_insn_m68hc11 (bfd_vma, disassemble_info *);
+extern int print_insn_m68hc12 (bfd_vma, disassemble_info *);
+extern int print_insn_m68k (bfd_vma, disassemble_info *);
+extern int print_insn_m88k (bfd_vma, disassemble_info *);
+extern int print_insn_mcore (bfd_vma, disassemble_info *);
+extern int print_insn_mep (bfd_vma, disassemble_info *);
+extern int print_insn_microblaze (bfd_vma, disassemble_info *);
+extern int print_insn_mmix (bfd_vma, disassemble_info *);
+extern int print_insn_mn10200 (bfd_vma, disassemble_info *);
+extern int print_insn_mn10300 (bfd_vma, disassemble_info *);
+extern int print_insn_moxie (bfd_vma, disassemble_info *);
+extern int print_insn_msp430 (bfd_vma, disassemble_info *);
+extern int print_insn_mt (bfd_vma, disassemble_info *);
+extern int print_insn_ns32k (bfd_vma, disassemble_info *);
+extern int print_insn_openrisc (bfd_vma, disassemble_info *);
+extern int print_insn_pdp11 (bfd_vma, disassemble_info *);
+extern int print_insn_pj (bfd_vma, disassemble_info *);
+extern int print_insn_rs6000 (bfd_vma, disassemble_info *);
+extern int print_insn_s390 (bfd_vma, disassemble_info *);
+extern int print_insn_sh (bfd_vma, disassemble_info *);
+extern int print_insn_sh64 (bfd_vma, disassemble_info *);
+extern int print_insn_sh64x_media (bfd_vma, disassemble_info *);
+extern int print_insn_sparc (bfd_vma, disassemble_info *);
+extern int print_insn_spu (bfd_vma, disassemble_info *);
+extern int print_insn_tic30 (bfd_vma, disassemble_info *);
+extern int print_insn_tic4x (bfd_vma, disassemble_info *);
+extern int print_insn_tic54x (bfd_vma, disassemble_info *);
+extern int print_insn_tic6x (bfd_vma, disassemble_info *);
+extern int print_insn_tic80 (bfd_vma, disassemble_info *);
+extern int print_insn_v850 (bfd_vma, disassemble_info *);
+extern int print_insn_vax (bfd_vma, disassemble_info *);
+extern int print_insn_w65 (bfd_vma, disassemble_info *);
+extern int print_insn_xc16x (bfd_vma, disassemble_info *);
+extern int print_insn_xstormy16 (bfd_vma, disassemble_info *);
+extern int print_insn_xtensa (bfd_vma, disassemble_info *);
+extern int print_insn_z80 (bfd_vma, disassemble_info *);
+extern int print_insn_z8001 (bfd_vma, disassemble_info *);
+extern int print_insn_z8002 (bfd_vma, disassemble_info *);
+extern int print_insn_rx (bfd_vma, disassemble_info *);
+
+extern disassembler_ftype arc_get_disassembler (void *);
+extern disassembler_ftype cris_get_disassembler (bfd *);
+
+extern void print_i386_disassembler_options (FILE *);
+extern void print_mips_disassembler_options (FILE *);
+extern void print_ppc_disassembler_options (FILE *);
+extern void print_arm_disassembler_options (FILE *);
+extern void parse_arm_disassembler_option (char *);
+extern void print_s390_disassembler_options (FILE *);
+extern int get_arm_regname_num_options (void);
+extern int set_arm_regname_option (int);
+extern int get_arm_regnames (int, const char **, const char **, const char *const **);
+extern bfd_boolean arm_symbol_is_valid (asymbol *, struct disassemble_info *);
+
+/* Fetch the disassembler for a given BFD, if that support is available. */
+extern disassembler_ftype disassembler (bfd *);
+
+/* Amend the disassemble_info structure as necessary for the target architecture.
+ Should only be called after initialising the info->arch field. */
+extern void disassemble_init_for_target (struct disassemble_info * dinfo);
+
+/* Document any target specific options available from the disassembler. */
+extern void disassembler_usage (FILE *);
+
+
+/* This block of definitions is for particular callers who read instructions
+ into a buffer before calling the instruction decoder. */
+
+/* Here is a function which callers may wish to use for read_memory_func.
+ It gets bytes from a buffer. */
+extern int buffer_read_memory
+ (bfd_vma, bfd_byte *, unsigned int, struct disassemble_info *);
+
+/* This function goes with buffer_read_memory.
+ It prints a message using info->fprintf_func and info->stream. */
+extern void perror_memory (int, bfd_vma, struct disassemble_info *);
+
+
+/* Just print the address in hex. This is included for completeness even
+ though both GDB and objdump provide their own (to print symbolic
+ addresses). */
+extern void generic_print_address
+ (bfd_vma, struct disassemble_info *);
+
+/* Always true. */
+extern int generic_symbol_at_address
+ (bfd_vma, struct disassemble_info *);
+
+/* Also always true. */
+extern bfd_boolean generic_symbol_is_valid
+ (asymbol *, struct disassemble_info *);
+
+/* Method to initialize a disassemble_info struct. This should be
+ called by all applications creating such a struct. */
+extern void init_disassemble_info (struct disassemble_info *dinfo, void *stream,
+ fprintf_ftype fprintf_func);
+
+/* For compatibility with existing code. */
+#define INIT_DISASSEMBLE_INFO(INFO, STREAM, FPRINTF_FUNC) \
+ init_disassemble_info (&(INFO), (STREAM), (fprintf_ftype) (FPRINTF_FUNC))
+#define INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC) \
+ init_disassemble_info (&(INFO), (STREAM), (fprintf_ftype) (FPRINTF_FUNC))
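+
+/* Usage sketch (non-compiled): disassembling a small buffer of x86-64 code
+   to stdout.  The byte values are illustrative; bfd_arch_i386 and
+   bfd_mach_x86_64 come from "bfd.h", and <stdio.h> is assumed.  */
+#if 0
+  bfd_byte code[] = { 0x55, 0x48, 0x89, 0xe5, 0xc3 };  /* push; mov; ret */
+  disassemble_info info;
+  bfd_vma pc = 0;
+
+  INIT_DISASSEMBLE_INFO (info, stdout, fprintf);
+  info.arch = bfd_arch_i386;
+  info.mach = bfd_mach_x86_64;
+  info.buffer = code;
+  info.buffer_vma = 0;
+  info.buffer_length = sizeof (code);
+  info.read_memory_func = buffer_read_memory;
+  disassemble_init_for_target (&info);
+
+  while (pc < sizeof (code))
+    {
+      printf ("%4lx:\t", (unsigned long) pc);
+      pc += print_insn_i386 (pc, &info);
+      printf ("\n");
+    }
+#endif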
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ! defined (DIS_ASM_H) */
diff --git a/tools/perf/external/usr/include/dwarf.h b/tools/perf/external/usr/include/dwarf.h
new file mode 100644
index 00000000..dbf56e91
--- /dev/null
+++ b/tools/perf/external/usr/include/dwarf.h
@@ -0,0 +1,772 @@
+/* This file defines standard DWARF types, structures, and macros.
+ Copyright (C) 2000-2010 Red Hat, Inc.
+ This file is part of Red Hat elfutils.
+
+ Red Hat elfutils is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by the
+ Free Software Foundation; version 2 of the License.
+
+ Red Hat elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with Red Hat elfutils; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA.
+
+ In addition, as a special exception, Red Hat, Inc. gives You the
+ additional right to link the code of Red Hat elfutils with code licensed
+ under any Open Source Initiative certified open source license
+ (http://www.opensource.org/licenses/index.php) which requires the
+ distribution of source code with any binary distribution and to
+ distribute linked combinations of the two. Non-GPL Code permitted under
+ this exception must only link to the code of Red Hat elfutils through
+ those well defined interfaces identified in the file named EXCEPTION
+ found in the source code files (the "Approved Interfaces"). The files
+ of Non-GPL Code may instantiate templates or use macros or inline
+ functions from the Approved Interfaces without causing the resulting
+ work to be covered by the GNU General Public License. Only Red Hat,
+ Inc. may make changes or additions to the list of Approved Interfaces.
+ Red Hat's grant of this exception is conditioned upon your not adding
+ any new exceptions. If you wish to add a new Approved Interface or
+ exception, please contact Red Hat. You must obey the GNU General Public
+ License in all respects for all of the Red Hat elfutils code and other
+ code used in conjunction with Red Hat elfutils except the Non-GPL Code
+ covered by this exception. If you modify this file, you may extend this
+ exception to your version of the file, but you are not obligated to do
+ so. If you do not wish to provide this exception without modification,
+ you must delete this exception statement from your version and license
+ this file solely under the GPL without exception.
+
+ Red Hat elfutils is an included package of the Open Invention Network.
+ An included package of the Open Invention Network is a package for which
+ Open Invention Network licensees cross-license their patents. No patent
+ license is granted, either expressly or impliedly, by designation as an
+ included package. Should you wish to participate in the Open Invention
+ Network licensing program, please visit www.openinventionnetwork.com
+ <http://www.openinventionnetwork.com>. */
+
+#ifndef _DWARF_H
+#define _DWARF_H 1
+
+/* DWARF tags. */
+enum
+ {
+ DW_TAG_array_type = 0x01,
+ DW_TAG_class_type = 0x02,
+ DW_TAG_entry_point = 0x03,
+ DW_TAG_enumeration_type = 0x04,
+ DW_TAG_formal_parameter = 0x05,
+ DW_TAG_imported_declaration = 0x08,
+ DW_TAG_label = 0x0a,
+ DW_TAG_lexical_block = 0x0b,
+ DW_TAG_member = 0x0d,
+ DW_TAG_pointer_type = 0x0f,
+ DW_TAG_reference_type = 0x10,
+ DW_TAG_compile_unit = 0x11,
+ DW_TAG_string_type = 0x12,
+ DW_TAG_structure_type = 0x13,
+ DW_TAG_subroutine_type = 0x15,
+ DW_TAG_typedef = 0x16,
+ DW_TAG_union_type = 0x17,
+ DW_TAG_unspecified_parameters = 0x18,
+ DW_TAG_variant = 0x19,
+ DW_TAG_common_block = 0x1a,
+ DW_TAG_common_inclusion = 0x1b,
+ DW_TAG_inheritance = 0x1c,
+ DW_TAG_inlined_subroutine = 0x1d,
+ DW_TAG_module = 0x1e,
+ DW_TAG_ptr_to_member_type = 0x1f,
+ DW_TAG_set_type = 0x20,
+ DW_TAG_subrange_type = 0x21,
+ DW_TAG_with_stmt = 0x22,
+ DW_TAG_access_declaration = 0x23,
+ DW_TAG_base_type = 0x24,
+ DW_TAG_catch_block = 0x25,
+ DW_TAG_const_type = 0x26,
+ DW_TAG_constant = 0x27,
+ DW_TAG_enumerator = 0x28,
+ DW_TAG_file_type = 0x29,
+ DW_TAG_friend = 0x2a,
+ DW_TAG_namelist = 0x2b,
+ DW_TAG_namelist_item = 0x2c,
+ DW_TAG_packed_type = 0x2d,
+ DW_TAG_subprogram = 0x2e,
+ DW_TAG_template_type_parameter = 0x2f,
+ DW_TAG_template_value_parameter = 0x30,
+ DW_TAG_thrown_type = 0x31,
+ DW_TAG_try_block = 0x32,
+ DW_TAG_variant_part = 0x33,
+ DW_TAG_variable = 0x34,
+ DW_TAG_volatile_type = 0x35,
+ DW_TAG_dwarf_procedure = 0x36,
+ DW_TAG_restrict_type = 0x37,
+ DW_TAG_interface_type = 0x38,
+ DW_TAG_namespace = 0x39,
+ DW_TAG_imported_module = 0x3a,
+ DW_TAG_unspecified_type = 0x3b,
+ DW_TAG_partial_unit = 0x3c,
+ DW_TAG_imported_unit = 0x3d,
+ DW_TAG_mutable_type = 0x3e,
+ DW_TAG_condition = 0x3f,
+ DW_TAG_shared_type = 0x40,
+ DW_TAG_type_unit = 0x41,
+ DW_TAG_rvalue_reference_type = 0x42,
+ DW_TAG_template_alias = 0x43,
+
+ DW_TAG_lo_user = 0x4080,
+
+ DW_TAG_MIPS_loop = 0x4081,
+ DW_TAG_format_label = 0x4101,
+ DW_TAG_function_template = 0x4102,
+ DW_TAG_class_template = 0x4103,
+
+ DW_TAG_GNU_BINCL = 0x4104,
+ DW_TAG_GNU_EINCL = 0x4105,
+
+ DW_TAG_GNU_template_template_param = 0x4106,
+ DW_TAG_GNU_template_parameter_pack = 0x4107,
+ DW_TAG_GNU_formal_parameter_pack = 0x4108,
+
+ DW_TAG_hi_user = 0xffff
+ };
+
+
+/* Children determination encodings. */
+enum
+ {
+ DW_CHILDREN_no = 0,
+ DW_CHILDREN_yes = 1
+ };
+
+
+/* DWARF attributes encodings. */
+enum
+ {
+ DW_AT_sibling = 0x01,
+ DW_AT_location = 0x02,
+ DW_AT_name = 0x03,
+ DW_AT_ordering = 0x09,
+ DW_AT_subscr_data = 0x0a,
+ DW_AT_byte_size = 0x0b,
+ DW_AT_bit_offset = 0x0c,
+ DW_AT_bit_size = 0x0d,
+ DW_AT_element_list = 0x0f,
+ DW_AT_stmt_list = 0x10,
+ DW_AT_low_pc = 0x11,
+ DW_AT_high_pc = 0x12,
+ DW_AT_language = 0x13,
+ DW_AT_member = 0x14,
+ DW_AT_discr = 0x15,
+ DW_AT_discr_value = 0x16,
+ DW_AT_visibility = 0x17,
+ DW_AT_import = 0x18,
+ DW_AT_string_length = 0x19,
+ DW_AT_common_reference = 0x1a,
+ DW_AT_comp_dir = 0x1b,
+ DW_AT_const_value = 0x1c,
+ DW_AT_containing_type = 0x1d,
+ DW_AT_default_value = 0x1e,
+ DW_AT_inline = 0x20,
+ DW_AT_is_optional = 0x21,
+ DW_AT_lower_bound = 0x22,
+ DW_AT_producer = 0x25,
+ DW_AT_prototyped = 0x27,
+ DW_AT_return_addr = 0x2a,
+ DW_AT_start_scope = 0x2c,
+ DW_AT_bit_stride = 0x2e,
+ DW_AT_upper_bound = 0x2f,
+ DW_AT_abstract_origin = 0x31,
+ DW_AT_accessibility = 0x32,
+ DW_AT_address_class = 0x33,
+ DW_AT_artificial = 0x34,
+ DW_AT_base_types = 0x35,
+ DW_AT_calling_convention = 0x36,
+ DW_AT_count = 0x37,
+ DW_AT_data_member_location = 0x38,
+ DW_AT_decl_column = 0x39,
+ DW_AT_decl_file = 0x3a,
+ DW_AT_decl_line = 0x3b,
+ DW_AT_declaration = 0x3c,
+ DW_AT_discr_list = 0x3d,
+ DW_AT_encoding = 0x3e,
+ DW_AT_external = 0x3f,
+ DW_AT_frame_base = 0x40,
+ DW_AT_friend = 0x41,
+ DW_AT_identifier_case = 0x42,
+ DW_AT_macro_info = 0x43,
+ DW_AT_namelist_item = 0x44,
+ DW_AT_priority = 0x45,
+ DW_AT_segment = 0x46,
+ DW_AT_specification = 0x47,
+ DW_AT_static_link = 0x48,
+ DW_AT_type = 0x49,
+ DW_AT_use_location = 0x4a,
+ DW_AT_variable_parameter = 0x4b,
+ DW_AT_virtuality = 0x4c,
+ DW_AT_vtable_elem_location = 0x4d,
+ DW_AT_allocated = 0x4e,
+ DW_AT_associated = 0x4f,
+ DW_AT_data_location = 0x50,
+ DW_AT_byte_stride = 0x51,
+ DW_AT_entry_pc = 0x52,
+ DW_AT_use_UTF8 = 0x53,
+ DW_AT_extension = 0x54,
+ DW_AT_ranges = 0x55,
+ DW_AT_trampoline = 0x56,
+ DW_AT_call_column = 0x57,
+ DW_AT_call_file = 0x58,
+ DW_AT_call_line = 0x59,
+ DW_AT_description = 0x5a,
+ DW_AT_binary_scale = 0x5b,
+ DW_AT_decimal_scale = 0x5c,
+ DW_AT_small = 0x5d,
+ DW_AT_decimal_sign = 0x5e,
+ DW_AT_digit_count = 0x5f,
+ DW_AT_picture_string = 0x60,
+ DW_AT_mutable = 0x61,
+ DW_AT_threads_scaled = 0x62,
+ DW_AT_explicit = 0x63,
+ DW_AT_object_pointer = 0x64,
+ DW_AT_endianity = 0x65,
+ DW_AT_elemental = 0x66,
+ DW_AT_pure = 0x67,
+ DW_AT_recursive = 0x68,
+ DW_AT_signature = 0x69,
+ DW_AT_main_subprogram = 0x6a,
+ DW_AT_data_bit_offset = 0x6b,
+ DW_AT_const_expr = 0x6c,
+ DW_AT_enum_class = 0x6d,
+ DW_AT_linkage_name = 0x6e,
+
+ DW_AT_lo_user = 0x2000,
+
+ DW_AT_MIPS_fde = 0x2001,
+ DW_AT_MIPS_loop_begin = 0x2002,
+ DW_AT_MIPS_tail_loop_begin = 0x2003,
+ DW_AT_MIPS_epilog_begin = 0x2004,
+ DW_AT_MIPS_loop_unroll_factor = 0x2005,
+ DW_AT_MIPS_software_pipeline_depth = 0x2006,
+ DW_AT_MIPS_linkage_name = 0x2007,
+ DW_AT_MIPS_stride = 0x2008,
+ DW_AT_MIPS_abstract_name = 0x2009,
+ DW_AT_MIPS_clone_origin = 0x200a,
+ DW_AT_MIPS_has_inlines = 0x200b,
+ DW_AT_MIPS_stride_byte = 0x200c,
+ DW_AT_MIPS_stride_elem = 0x200d,
+ DW_AT_MIPS_ptr_dopetype = 0x200e,
+ DW_AT_MIPS_allocatable_dopetype = 0x200f,
+ DW_AT_MIPS_assumed_shape_dopetype = 0x2010,
+ DW_AT_MIPS_assumed_size = 0x2011,
+
+ /* GNU extensions. */
+ DW_AT_sf_names = 0x2101,
+ DW_AT_src_info = 0x2102,
+ DW_AT_mac_info = 0x2103,
+ DW_AT_src_coords = 0x2104,
+ DW_AT_body_begin = 0x2105,
+ DW_AT_body_end = 0x2106,
+ DW_AT_GNU_vector = 0x2107,
+ DW_AT_GNU_guarded_by = 0x2108,
+ DW_AT_GNU_pt_guarded_by = 0x2109,
+ DW_AT_GNU_guarded = 0x210a,
+ DW_AT_GNU_pt_guarded = 0x210b,
+ DW_AT_GNU_locks_excluded = 0x210c,
+ DW_AT_GNU_exclusive_locks_required = 0x210d,
+ DW_AT_GNU_shared_locks_required = 0x210e,
+ DW_AT_GNU_odr_signature = 0x210f,
+ DW_AT_GNU_template_name = 0x2110,
+
+ DW_AT_hi_user = 0x3fff
+ };
+
+
+/* DWARF form encodings. */
+enum
+ {
+ DW_FORM_addr = 0x01,
+ DW_FORM_block2 = 0x03,
+ DW_FORM_block4 = 0x04,
+ DW_FORM_data2 = 0x05,
+ DW_FORM_data4 = 0x06,
+ DW_FORM_data8 = 0x07,
+ DW_FORM_string = 0x08,
+ DW_FORM_block = 0x09,
+ DW_FORM_block1 = 0x0a,
+ DW_FORM_data1 = 0x0b,
+ DW_FORM_flag = 0x0c,
+ DW_FORM_sdata = 0x0d,
+ DW_FORM_strp = 0x0e,
+ DW_FORM_udata = 0x0f,
+ DW_FORM_ref_addr = 0x10,
+ DW_FORM_ref1 = 0x11,
+ DW_FORM_ref2 = 0x12,
+ DW_FORM_ref4 = 0x13,
+ DW_FORM_ref8 = 0x14,
+ DW_FORM_ref_udata = 0x15,
+ DW_FORM_indirect = 0x16,
+ DW_FORM_sec_offset = 0x17,
+ DW_FORM_exprloc = 0x18,
+ DW_FORM_flag_present = 0x19,
+ DW_FORM_ref_sig8 = 0x20
+ };
+
+
+/* DWARF location operation encodings. */
+enum
+ {
+ DW_OP_addr = 0x03, /* Constant address. */
+ DW_OP_deref = 0x06,
+ DW_OP_const1u = 0x08, /* Unsigned 1-byte constant. */
+ DW_OP_const1s = 0x09, /* Signed 1-byte constant. */
+ DW_OP_const2u = 0x0a, /* Unsigned 2-byte constant. */
+ DW_OP_const2s = 0x0b, /* Signed 2-byte constant. */
+ DW_OP_const4u = 0x0c, /* Unsigned 4-byte constant. */
+ DW_OP_const4s = 0x0d, /* Signed 4-byte constant. */
+ DW_OP_const8u = 0x0e, /* Unsigned 8-byte constant. */
+ DW_OP_const8s = 0x0f, /* Signed 8-byte constant. */
+ DW_OP_constu = 0x10, /* Unsigned LEB128 constant. */
+ DW_OP_consts = 0x11, /* Signed LEB128 constant. */
+ DW_OP_dup = 0x12,
+ DW_OP_drop = 0x13,
+ DW_OP_over = 0x14,
+ DW_OP_pick = 0x15, /* 1-byte stack index. */
+ DW_OP_swap = 0x16,
+ DW_OP_rot = 0x17,
+ DW_OP_xderef = 0x18,
+ DW_OP_abs = 0x19,
+ DW_OP_and = 0x1a,
+ DW_OP_div = 0x1b,
+ DW_OP_minus = 0x1c,
+ DW_OP_mod = 0x1d,
+ DW_OP_mul = 0x1e,
+ DW_OP_neg = 0x1f,
+ DW_OP_not = 0x20,
+ DW_OP_or = 0x21,
+ DW_OP_plus = 0x22,
+ DW_OP_plus_uconst = 0x23, /* Unsigned LEB128 addend. */
+ DW_OP_shl = 0x24,
+ DW_OP_shr = 0x25,
+ DW_OP_shra = 0x26,
+ DW_OP_xor = 0x27,
+ DW_OP_bra = 0x28, /* Signed 2-byte constant. */
+ DW_OP_eq = 0x29,
+ DW_OP_ge = 0x2a,
+ DW_OP_gt = 0x2b,
+ DW_OP_le = 0x2c,
+ DW_OP_lt = 0x2d,
+ DW_OP_ne = 0x2e,
+ DW_OP_skip = 0x2f, /* Signed 2-byte constant. */
+ DW_OP_lit0 = 0x30, /* Literal 0. */
+ DW_OP_lit1 = 0x31, /* Literal 1. */
+ DW_OP_lit2 = 0x32, /* Literal 2. */
+ DW_OP_lit3 = 0x33, /* Literal 3. */
+ DW_OP_lit4 = 0x34, /* Literal 4. */
+ DW_OP_lit5 = 0x35, /* Literal 5. */
+ DW_OP_lit6 = 0x36, /* Literal 6. */
+ DW_OP_lit7 = 0x37, /* Literal 7. */
+ DW_OP_lit8 = 0x38, /* Literal 8. */
+ DW_OP_lit9 = 0x39, /* Literal 9. */
+ DW_OP_lit10 = 0x3a, /* Literal 10. */
+ DW_OP_lit11 = 0x3b, /* Literal 11. */
+ DW_OP_lit12 = 0x3c, /* Literal 12. */
+ DW_OP_lit13 = 0x3d, /* Literal 13. */
+ DW_OP_lit14 = 0x3e, /* Literal 14. */
+ DW_OP_lit15 = 0x3f, /* Literal 15. */
+ DW_OP_lit16 = 0x40, /* Literal 16. */
+ DW_OP_lit17 = 0x41, /* Literal 17. */
+ DW_OP_lit18 = 0x42, /* Literal 18. */
+ DW_OP_lit19 = 0x43, /* Literal 19. */
+ DW_OP_lit20 = 0x44, /* Literal 20. */
+ DW_OP_lit21 = 0x45, /* Literal 21. */
+ DW_OP_lit22 = 0x46, /* Literal 22. */
+ DW_OP_lit23 = 0x47, /* Literal 23. */
+ DW_OP_lit24 = 0x48, /* Literal 24. */
+ DW_OP_lit25 = 0x49, /* Literal 25. */
+ DW_OP_lit26 = 0x4a, /* Literal 26. */
+ DW_OP_lit27 = 0x4b, /* Literal 27. */
+ DW_OP_lit28 = 0x4c, /* Literal 28. */
+ DW_OP_lit29 = 0x4d, /* Literal 29. */
+ DW_OP_lit30 = 0x4e, /* Literal 30. */
+ DW_OP_lit31 = 0x4f, /* Literal 31. */
+ DW_OP_reg0 = 0x50, /* Register 0. */
+ DW_OP_reg1 = 0x51, /* Register 1. */
+ DW_OP_reg2 = 0x52, /* Register 2. */
+ DW_OP_reg3 = 0x53, /* Register 3. */
+ DW_OP_reg4 = 0x54, /* Register 4. */
+ DW_OP_reg5 = 0x55, /* Register 5. */
+ DW_OP_reg6 = 0x56, /* Register 6. */
+ DW_OP_reg7 = 0x57, /* Register 7. */
+ DW_OP_reg8 = 0x58, /* Register 8. */
+ DW_OP_reg9 = 0x59, /* Register 9. */
+ DW_OP_reg10 = 0x5a, /* Register 10. */
+ DW_OP_reg11 = 0x5b, /* Register 11. */
+ DW_OP_reg12 = 0x5c, /* Register 12. */
+ DW_OP_reg13 = 0x5d, /* Register 13. */
+ DW_OP_reg14 = 0x5e, /* Register 14. */
+ DW_OP_reg15 = 0x5f, /* Register 15. */
+ DW_OP_reg16 = 0x60, /* Register 16. */
+ DW_OP_reg17 = 0x61, /* Register 17. */
+ DW_OP_reg18 = 0x62, /* Register 18. */
+ DW_OP_reg19 = 0x63, /* Register 19. */
+ DW_OP_reg20 = 0x64, /* Register 20. */
+ DW_OP_reg21 = 0x65, /* Register 21. */
+ DW_OP_reg22 = 0x66, /* Register 22. */
+ DW_OP_reg23 = 0x67, /* Register 23. */
+ DW_OP_reg24 = 0x68, /* Register 24. */
+ DW_OP_reg25 = 0x69, /* Register 25. */
+ DW_OP_reg26 = 0x6a, /* Register 26. */
+ DW_OP_reg27 = 0x6b, /* Register 27. */
+ DW_OP_reg28 = 0x6c, /* Register 28. */
+ DW_OP_reg29 = 0x6d, /* Register 29. */
+ DW_OP_reg30 = 0x6e, /* Register 30. */
+ DW_OP_reg31 = 0x6f, /* Register 31. */
+ DW_OP_breg0 = 0x70, /* Base register 0. */
+ DW_OP_breg1 = 0x71, /* Base register 1. */
+ DW_OP_breg2 = 0x72, /* Base register 2. */
+ DW_OP_breg3 = 0x73, /* Base register 3. */
+ DW_OP_breg4 = 0x74, /* Base register 4. */
+ DW_OP_breg5 = 0x75, /* Base register 5. */
+ DW_OP_breg6 = 0x76, /* Base register 6. */
+ DW_OP_breg7 = 0x77, /* Base register 7. */
+ DW_OP_breg8 = 0x78, /* Base register 8. */
+ DW_OP_breg9 = 0x79, /* Base register 9. */
+ DW_OP_breg10 = 0x7a, /* Base register 10. */
+ DW_OP_breg11 = 0x7b, /* Base register 11. */
+ DW_OP_breg12 = 0x7c, /* Base register 12. */
+ DW_OP_breg13 = 0x7d, /* Base register 13. */
+ DW_OP_breg14 = 0x7e, /* Base register 14. */
+ DW_OP_breg15 = 0x7f, /* Base register 15. */
+ DW_OP_breg16 = 0x80, /* Base register 16. */
+ DW_OP_breg17 = 0x81, /* Base register 17. */
+ DW_OP_breg18 = 0x82, /* Base register 18. */
+ DW_OP_breg19 = 0x83, /* Base register 19. */
+ DW_OP_breg20 = 0x84, /* Base register 20. */
+ DW_OP_breg21 = 0x85, /* Base register 21. */
+ DW_OP_breg22 = 0x86, /* Base register 22. */
+ DW_OP_breg23 = 0x87, /* Base register 23. */
+ DW_OP_breg24 = 0x88, /* Base register 24. */
+ DW_OP_breg25 = 0x89, /* Base register 25. */
+ DW_OP_breg26 = 0x8a, /* Base register 26. */
+ DW_OP_breg27 = 0x8b, /* Base register 27. */
+ DW_OP_breg28 = 0x8c, /* Base register 28. */
+ DW_OP_breg29 = 0x8d, /* Base register 29. */
+ DW_OP_breg30 = 0x8e, /* Base register 30. */
+ DW_OP_breg31 = 0x8f, /* Base register 31. */
+ DW_OP_regx = 0x90, /* Unsigned LEB128 register. */
+ DW_OP_fbreg = 0x91, /* Signed LEB128 offset. */
+ DW_OP_bregx = 0x92, /* ULEB128 register followed by SLEB128 off. */
+ DW_OP_piece = 0x93, /* ULEB128 size of piece addressed. */
+ DW_OP_deref_size = 0x94, /* 1-byte size of data retrieved. */
+ DW_OP_xderef_size = 0x95, /* 1-byte size of data retrieved. */
+ DW_OP_nop = 0x96,
+ DW_OP_push_object_address = 0x97,
+ DW_OP_call2 = 0x98,
+ DW_OP_call4 = 0x99,
+ DW_OP_call_ref = 0x9a,
+ DW_OP_form_tls_address = 0x9b,/* TLS offset to address in current thread */
+ DW_OP_call_frame_cfa = 0x9c,/* CFA as determined by CFI. */
+ DW_OP_bit_piece = 0x9d, /* ULEB128 size and ULEB128 offset in bits. */
+ DW_OP_implicit_value = 0x9e, /* DW_FORM_block follows opcode. */
+ DW_OP_stack_value = 0x9f, /* No operands, special like DW_OP_piece. */
+
+ /* GNU extensions. */
+ DW_OP_GNU_push_tls_address = 0xe0,
+ DW_OP_GNU_uninit = 0xf0,
+ DW_OP_GNU_encoded_addr = 0xf1,
+
+ DW_OP_lo_user = 0xe0, /* Implementation-defined range start. */
+ DW_OP_hi_user = 0xff /* Implementation-defined range end. */
+ };
+
+
+/* DWARF base type encodings. */
+enum
+ {
+ DW_ATE_void = 0x0,
+ DW_ATE_address = 0x1,
+ DW_ATE_boolean = 0x2,
+ DW_ATE_complex_float = 0x3,
+ DW_ATE_float = 0x4,
+ DW_ATE_signed = 0x5,
+ DW_ATE_signed_char = 0x6,
+ DW_ATE_unsigned = 0x7,
+ DW_ATE_unsigned_char = 0x8,
+ DW_ATE_imaginary_float = 0x9,
+ DW_ATE_packed_decimal = 0xa,
+ DW_ATE_numeric_string = 0xb,
+ DW_ATE_edited = 0xc,
+ DW_ATE_signed_fixed = 0xd,
+ DW_ATE_unsigned_fixed = 0xe,
+ DW_ATE_decimal_float = 0xf,
+
+ DW_ATE_lo_user = 0x80,
+ DW_ATE_hi_user = 0xff
+ };
+
+
+/* DWARF decimal sign encodings. */
+enum
+ {
+ DW_DS_unsigned = 1,
+ DW_DS_leading_overpunch = 2,
+ DW_DS_trailing_overpunch = 3,
+ DW_DS_leading_separate = 4,
+ DW_DS_trailing_separate = 5,
+ };
+
+
+/* DWARF endianity encodings. */
+enum
+ {
+ DW_END_default = 0,
+ DW_END_big = 1,
+ DW_END_little = 2,
+
+ DW_END_lo_user = 0x40,
+ DW_END_hi_user = 0xff
+ };
+
+
+/* DWARF accessibility encodings. */
+enum
+ {
+ DW_ACCESS_public = 1,
+ DW_ACCESS_protected = 2,
+ DW_ACCESS_private = 3
+ };
+
+
+/* DWARF visibility encodings. */
+enum
+ {
+ DW_VIS_local = 1,
+ DW_VIS_exported = 2,
+ DW_VIS_qualified = 3
+ };
+
+
+/* DWARF virtuality encodings. */
+enum
+ {
+ DW_VIRTUALITY_none = 0,
+ DW_VIRTUALITY_virtual = 1,
+ DW_VIRTUALITY_pure_virtual = 2
+ };
+
+
+/* DWARF language encodings. */
+enum
+ {
+ DW_LANG_C89 = 0x0001, /* ISO C:1989 */
+ DW_LANG_C = 0x0002, /* C */
+ DW_LANG_Ada83 = 0x0003, /* ISO Ada:1983 */
+ DW_LANG_C_plus_plus = 0x0004, /* ISO C++:1998 */
+ DW_LANG_Cobol74 = 0x0005, /* ISO Cobol:1974 */
+ DW_LANG_Cobol85 = 0x0006, /* ISO Cobol:1985 */
+ DW_LANG_Fortran77 = 0x0007, /* ISO FORTRAN 77 */
+ DW_LANG_Fortran90 = 0x0008, /* ISO Fortran 90 */
+ DW_LANG_Pascal83 = 0x0009, /* ISO Pascal:1983 */
+ DW_LANG_Modula2 = 0x000a, /* ISO Modula-2:1996 */
+ DW_LANG_Java = 0x000b, /* Java */
+ DW_LANG_C99 = 0x000c, /* ISO C:1999 */
+ DW_LANG_Ada95 = 0x000d, /* ISO Ada:1995 */
+ DW_LANG_Fortran95 = 0x000e, /* ISO Fortran 95 */
+ DW_LANG_PL1 = 0x000f, /* ISO PL/1:1976 */
+ DW_LANG_Objc = 0x0010, /* Objective-C */
+ DW_LANG_ObjC_plus_plus = 0x0011, /* Objective-C++ */
+ DW_LANG_UPC = 0x0012, /* Unified Parallel C */
+ DW_LANG_D = 0x0013, /* D */
+ DW_LANG_Python = 0x0014, /* Python */
+
+ DW_LANG_lo_user = 0x8000,
+ DW_LANG_Mips_Assembler = 0x8001,
+ DW_LANG_hi_user = 0xffff
+ };
+
+
+/* DWARF identifier case encodings. */
+enum
+ {
+ DW_ID_case_sensitive = 0,
+ DW_ID_up_case = 1,
+ DW_ID_down_case = 2,
+ DW_ID_case_insensitive = 3
+ };
+
+
+/* DWARF calling conventions encodings. */
+enum
+ {
+ DW_CC_normal = 0x1,
+ DW_CC_program = 0x2,
+ DW_CC_nocall = 0x3,
+ DW_CC_lo_user = 0x40,
+ DW_CC_hi_user = 0xff
+ };
+
+
+/* DWARF inline encodings. */
+enum
+ {
+ DW_INL_not_inlined = 0,
+ DW_INL_inlined = 1,
+ DW_INL_declared_not_inlined = 2,
+ DW_INL_declared_inlined = 3
+ };
+
+
+/* DWARF ordering encodings. */
+enum
+ {
+ DW_ORD_row_major = 0,
+ DW_ORD_col_major = 1
+ };
+
+
+/* DWARF discriminant descriptor encodings. */
+enum
+ {
+ DW_DSC_label = 0,
+ DW_DSC_range = 1
+ };
+
+
+/* DWARF standard opcode encodings. */
+enum
+ {
+ DW_LNS_copy = 1,
+ DW_LNS_advance_pc = 2,
+ DW_LNS_advance_line = 3,
+ DW_LNS_set_file = 4,
+ DW_LNS_set_column = 5,
+ DW_LNS_negate_stmt = 6,
+ DW_LNS_set_basic_block = 7,
+ DW_LNS_const_add_pc = 8,
+ DW_LNS_fixed_advance_pc = 9,
+ DW_LNS_set_prologue_end = 10,
+ DW_LNS_set_epilogue_begin = 11,
+ DW_LNS_set_isa = 12
+ };
+
+
+/* DWARF extended opcode encodings. */
+enum
+ {
+ DW_LNE_end_sequence = 1,
+ DW_LNE_set_address = 2,
+ DW_LNE_define_file = 3,
+ DW_LNE_set_discriminator = 4,
+
+ DW_LNE_lo_user = 128,
+ DW_LNE_hi_user = 255
+ };
+
+
+/* DWARF macinfo type encodings. */
+enum
+ {
+ DW_MACINFO_define = 1,
+ DW_MACINFO_undef = 2,
+ DW_MACINFO_start_file = 3,
+ DW_MACINFO_end_file = 4,
+ DW_MACINFO_vendor_ext = 255
+ };
+
+
+/* DWARF call frame instruction encodings. */
+enum
+ {
+ DW_CFA_advance_loc = 0x40,
+ DW_CFA_offset = 0x80,
+ DW_CFA_restore = 0xc0,
+ DW_CFA_extended = 0,
+
+ DW_CFA_nop = 0x00,
+ DW_CFA_set_loc = 0x01,
+ DW_CFA_advance_loc1 = 0x02,
+ DW_CFA_advance_loc2 = 0x03,
+ DW_CFA_advance_loc4 = 0x04,
+ DW_CFA_offset_extended = 0x05,
+ DW_CFA_restore_extended = 0x06,
+ DW_CFA_undefined = 0x07,
+ DW_CFA_same_value = 0x08,
+ DW_CFA_register = 0x09,
+ DW_CFA_remember_state = 0x0a,
+ DW_CFA_restore_state = 0x0b,
+ DW_CFA_def_cfa = 0x0c,
+ DW_CFA_def_cfa_register = 0x0d,
+ DW_CFA_def_cfa_offset = 0x0e,
+ DW_CFA_def_cfa_expression = 0x0f,
+ DW_CFA_expression = 0x10,
+ DW_CFA_offset_extended_sf = 0x11,
+ DW_CFA_def_cfa_sf = 0x12,
+ DW_CFA_def_cfa_offset_sf = 0x13,
+ DW_CFA_val_offset = 0x14,
+ DW_CFA_val_offset_sf = 0x15,
+ DW_CFA_val_expression = 0x16,
+
+ DW_CFA_low_user = 0x1c,
+ DW_CFA_MIPS_advance_loc8 = 0x1d,
+ DW_CFA_GNU_window_save = 0x2d,
+ DW_CFA_GNU_args_size = 0x2e,
+ DW_CFA_GNU_negative_offset_extended = 0x2f,
+ DW_CFA_high_user = 0x3f
+ };
+
+/* ID indicating CIE as opposed to FDE in .debug_frame. */
+enum
+ {
+ DW_CIE_ID_32 = 0xffffffffU, /* In 32-bit format CIE header. */
+ DW_CIE_ID_64 = 0xffffffffffffffffULL /* In 64-bit format CIE header. */
+ };
+
+
+/* Information for GNU unwind information. */
+enum
+ {
+ DW_EH_PE_absptr = 0x00,
+ DW_EH_PE_omit = 0xff,
+
+ /* FDE data encoding. */
+ DW_EH_PE_uleb128 = 0x01,
+ DW_EH_PE_udata2 = 0x02,
+ DW_EH_PE_udata4 = 0x03,
+ DW_EH_PE_udata8 = 0x04,
+ DW_EH_PE_sleb128 = 0x09,
+ DW_EH_PE_sdata2 = 0x0a,
+ DW_EH_PE_sdata4 = 0x0b,
+ DW_EH_PE_sdata8 = 0x0c,
+ DW_EH_PE_signed = 0x08,
+
+ /* FDE flags. */
+ DW_EH_PE_pcrel = 0x10,
+ DW_EH_PE_textrel = 0x20,
+ DW_EH_PE_datarel = 0x30,
+ DW_EH_PE_funcrel = 0x40,
+ DW_EH_PE_aligned = 0x50,
+
+ DW_EH_PE_indirect = 0x80
+ };
+
+
+/* DWARF XXX. */
+#define DW_ADDR_none 0
+
+/* Section 7.2.2 of the DWARF3 specification defines a range of escape
+ codes that can appear in the length field of certain DWARF structures.
+
+ These defines enumerate the minimum and maximum values of this range.
+ Currently only the maximum value is used (to indicate that 64-bit
+ values are going to be used in the dwarf data that accompanies the
+ structure). The other values are reserved.
+
+ Note: There is a typo in DWARF3 spec (published Dec 20, 2005). In
+ sections 7.4, 7.5.1, 7.19, 7.20 the minimum escape code is referred to
+ as 0xffffff00 whereas in fact it should be 0xfffffff0. */
+#define DWARF3_LENGTH_MIN_ESCAPE_CODE 0xfffffff0u
+#define DWARF3_LENGTH_MAX_ESCAPE_CODE 0xffffffffu
+#define DWARF3_LENGTH_64_BIT DWARF3_LENGTH_MAX_ESCAPE_CODE
+
+#endif /* dwarf.h */
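
A minimal decoding sketch, not part of the patch above, showing how the escape codes just defined select the 64-bit DWARF format. It assumes the dwarf.h added here is reachable as <dwarf.h>, a little-endian byte buffer positioned at the start of a unit header, and no error handling beyond rejecting reserved codes:

#include <dwarf.h>
#include <stdint.h>
#include <string.h>

/* Illustrative only: read a DWARF initial-length field from BUF.
   Sets *IS_64BIT and *LENGTH and returns the number of bytes consumed,
   or 0 if the value falls in the reserved escape range.  */
static size_t read_initial_length (const unsigned char *buf,
                                   int *is_64bit, uint64_t *length)
{
  uint32_t len32;
  memcpy (&len32, buf, 4);
  if (len32 == DWARF3_LENGTH_64_BIT)
    {
      uint64_t len64;
      memcpy (&len64, buf + 4, 8);   /* 64-bit length follows the marker.  */
      *is_64bit = 1;
      *length = len64;
      return 12;
    }
  if (len32 >= DWARF3_LENGTH_MIN_ESCAPE_CODE)
    return 0;                        /* Reserved escape code: reject.  */
  *is_64bit = 0;
  *length = len32;
  return 4;
}
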
diff --git a/tools/perf/external/usr/include/elfutils/elf-knowledge.h b/tools/perf/external/usr/include/elfutils/elf-knowledge.h
new file mode 100644
index 00000000..8b4c0d7b
--- /dev/null
+++ b/tools/perf/external/usr/include/elfutils/elf-knowledge.h
@@ -0,0 +1,127 @@
+/* Accumulation of various pieces of knowledge about ELF.
+ Copyright (C) 2000, 2001, 2002, 2003, 2005 Red Hat, Inc.
+ This file is part of Red Hat elfutils.
+ Written by Ulrich Drepper <drepper@redhat.com>, 2000.
+
+ Red Hat elfutils is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by the
+ Free Software Foundation; version 2 of the License.
+
+ Red Hat elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with Red Hat elfutils; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA.
+
+ In addition, as a special exception, Red Hat, Inc. gives You the
+ additional right to link the code of Red Hat elfutils with code licensed
+ under any Open Source Initiative certified open source license
+ (http://www.opensource.org/licenses/index.php) which requires the
+ distribution of source code with any binary distribution and to
+ distribute linked combinations of the two. Non-GPL Code permitted under
+ this exception must only link to the code of Red Hat elfutils through
+ those well defined interfaces identified in the file named EXCEPTION
+ found in the source code files (the "Approved Interfaces"). The files
+ of Non-GPL Code may instantiate templates or use macros or inline
+ functions from the Approved Interfaces without causing the resulting
+ work to be covered by the GNU General Public License. Only Red Hat,
+ Inc. may make changes or additions to the list of Approved Interfaces.
+ Red Hat's grant of this exception is conditioned upon your not adding
+ any new exceptions. If you wish to add a new Approved Interface or
+ exception, please contact Red Hat. You must obey the GNU General Public
+ License in all respects for all of the Red Hat elfutils code and other
+ code used in conjunction with Red Hat elfutils except the Non-GPL Code
+ covered by this exception. If you modify this file, you may extend this
+ exception to your version of the file, but you are not obligated to do
+ so. If you do not wish to provide this exception without modification,
+ you must delete this exception statement from your version and license
+ this file solely under the GPL without exception.
+
+ Red Hat elfutils is an included package of the Open Invention Network.
+ An included package of the Open Invention Network is a package for which
+ Open Invention Network licensees cross-license their patents. No patent
+ license is granted, either expressly or impliedly, by designation as an
+ included package. Should you wish to participate in the Open Invention
+ Network licensing program, please visit www.openinventionnetwork.com
+ <http://www.openinventionnetwork.com>. */
+
+#ifndef _ELF_KNOWLEDGE_H
+#define _ELF_KNOWLEDGE_H 1
+
+#include <stdbool.h>
+
+
+/* Test whether a section can be stripped or not. */
+#define SECTION_STRIP_P(shdr, name, remove_comment) \
+ /* Sections which are allocated are not removed. */ \
+ (((shdr)->sh_flags & SHF_ALLOC) == 0 \
+ /* We never remove .note sections. */ \
+ && (shdr)->sh_type != SHT_NOTE \
+ && (((shdr)->sh_type) != SHT_PROGBITS \
+ /* Never remove .gnu.warning.* sections. */ \
+ || (strncmp (name, ".gnu.warning.", sizeof ".gnu.warning." - 1) != 0 \
+ /* We remove .comment sections only if explicitly told to do so. */\
+ && (remove_comment \
+ || strcmp (name, ".comment") != 0))) \
+ /* So far we do not remove any of the non-standard sections. \
+ XXX Maybe in future. */ \
+ && (shdr)->sh_type < SHT_NUM)
+
+
+/* Test whether `sh_info' field in section header contains a section
+ index. There are two kinds of sections doing this:
+
+ - the sections containing relocation information reference in this
+ field the section to which the relocations apply;
+
+ - section with the SHF_INFO_LINK flag set to signal that `sh_info'
+ references a section. This allows correct handling of unknown
+ sections. */
+#define SH_INFO_LINK_P(Shdr) \
+ ((Shdr)->sh_type == SHT_REL || (Shdr)->sh_type == SHT_RELA \
+ || ((Shdr)->sh_flags & SHF_INFO_LINK) != 0)
+
+
+/* When combining ELF section flags we must distinguish two kinds:
+
+ - flags which cause problem if not added to the result even if not
+ present in all input sections
+
+ - flags which cause problem if added to the result if not present
+ in all input sections
+
+ The following definition is for the general case. There might be
+ machine specific extensions. */
+#define SH_FLAGS_COMBINE(Flags1, Flags2) \
+ (((Flags1 | Flags2) \
+ & (SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR | SHF_LINK_ORDER \
+ | SHF_OS_NONCONFORMING | SHF_GROUP)) \
+ | (Flags1 & Flags2 & (SHF_MERGE | SHF_STRINGS | SHF_INFO_LINK)))
+
+/* Similar macro: return the bits of the flags which necessarily must
+ match if two sections are automatically combined. Sections still
+ can be forcefully combined in which case SH_FLAGS_COMBINE can be
+ used to determine the combined flags. */
+#define SH_FLAGS_IMPORTANT(Flags) \
+ ((Flags) & ~((GElf_Xword) 0 | SHF_LINK_ORDER | SHF_OS_NONCONFORMING))
+
+
+/* Size of an entry in the hash table. The ELF specification says all
+ entries are, regardless of platform, 32 bits in size. Early 64-bit
+ ports (namely Alpha for Linux) got this wrong. The wording was not
+ clear.
+
+ Several years later the ABI for the 64-bit S390s was developed.
+ Many things were copied from the IA-64 ABI (which uses the correct
+ 32-bit entry size) but what do these people do? They use 64-bit
+ entries. It is really shocking to see what kind of morons are out
+ there. And even worse: they are allowed to design ABIs. */
+#define SH_ENTSIZE_HASH(Ehdr) \
+ ((Ehdr)->e_machine == EM_ALPHA \
+ || ((Ehdr)->e_machine == EM_S390 \
+ && (Ehdr)->e_ident[EI_CLASS] == ELFCLASS64) ? 8 : 4)
+
+#endif /* elf-knowledge.h */
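
As a usage sketch, not part of this patch, the stripping predicate above is typically applied while iterating section headers with libelf. The libelf helpers named here (elf_getshdrstrndx, elf_nextscn, gelf_getshdr, elf_strptr) are assumptions about the reader's libelf, and the loop itself is only illustrative:

#include <gelf.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include "elf-knowledge.h"

/* Illustrative only: print the sections of ELF that SECTION_STRIP_P
   considers strippable.  Error handling is minimal.  */
static void list_strippable (Elf *elf, bool remove_comment)
{
  size_t shstrndx;
  if (elf_getshdrstrndx (elf, &shstrndx) != 0)
    return;

  Elf_Scn *scn = NULL;
  while ((scn = elf_nextscn (elf, scn)) != NULL)
    {
      GElf_Shdr shdr_mem;
      GElf_Shdr *shdr = gelf_getshdr (scn, &shdr_mem);
      if (shdr == NULL)
        continue;
      const char *name = elf_strptr (elf, shstrndx, shdr->sh_name);
      if (name != NULL && SECTION_STRIP_P (shdr, name, remove_comment))
        printf ("strippable: %s\n", name);
    }
}
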
diff --git a/tools/perf/external/usr/include/elfutils/libdw.h b/tools/perf/external/usr/include/elfutils/libdw.h
new file mode 100644
index 00000000..2c1e9f47
--- /dev/null
+++ b/tools/perf/external/usr/include/elfutils/libdw.h
@@ -0,0 +1,865 @@
+/* Interfaces for libdw.
+ Copyright (C) 2002-2010 Red Hat, Inc.
+ This file is part of Red Hat elfutils.
+
+ Red Hat elfutils is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by the
+ Free Software Foundation; version 2 of the License.
+
+ Red Hat elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with Red Hat elfutils; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA.
+
+ In addition, as a special exception, Red Hat, Inc. gives You the
+ additional right to link the code of Red Hat elfutils with code licensed
+ under any Open Source Initiative certified open source license
+ (http://www.opensource.org/licenses/index.php) which requires the
+ distribution of source code with any binary distribution and to
+ distribute linked combinations of the two. Non-GPL Code permitted under
+ this exception must only link to the code of Red Hat elfutils through
+ those well defined interfaces identified in the file named EXCEPTION
+ found in the source code files (the "Approved Interfaces"). The files
+ of Non-GPL Code may instantiate templates or use macros or inline
+ functions from the Approved Interfaces without causing the resulting
+ work to be covered by the GNU General Public License. Only Red Hat,
+ Inc. may make changes or additions to the list of Approved Interfaces.
+ Red Hat's grant of this exception is conditioned upon your not adding
+ any new exceptions. If you wish to add a new Approved Interface or
+ exception, please contact Red Hat. You must obey the GNU General Public
+ License in all respects for all of the Red Hat elfutils code and other
+ code used in conjunction with Red Hat elfutils except the Non-GPL Code
+ covered by this exception. If you modify this file, you may extend this
+ exception to your version of the file, but you are not obligated to do
+ so. If you do not wish to provide this exception without modification,
+ you must delete this exception statement from your version and license
+ this file solely under the GPL without exception.
+
+ Red Hat elfutils is an included package of the Open Invention Network.
+ An included package of the Open Invention Network is a package for which
+ Open Invention Network licensees cross-license their patents. No patent
+ license is granted, either expressly or impliedly, by designation as an
+ included package. Should you wish to participate in the Open Invention
+ Network licensing program, please visit www.openinventionnetwork.com
+ <http://www.openinventionnetwork.com>. */
+
+#ifndef _LIBDW_H
+#define _LIBDW_H 1
+
+#include <gelf.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
+# define __nonnull_attribute__(...) __attribute__ ((__nonnull__ (__VA_ARGS__)))
+# define __deprecated_attribute__ __attribute__ ((__deprecated__))
+#else
+# define __nonnull_attribute__(args...)
+# define __deprecated_attribute__
+#endif
+
+
+#ifdef __GNUC_STDC_INLINE__
+# define __libdw_extern_inline extern __inline __attribute__ ((__gnu_inline__))
+#else
+# define __libdw_extern_inline extern __inline
+#endif
+
+
+/* Mode for the session. */
+typedef enum
+ {
+ DWARF_C_READ, /* Read .. */
+ DWARF_C_RDWR, /* Read and write .. */
+ DWARF_C_WRITE, /* Write .. */
+ }
+Dwarf_Cmd;
+
+
+/* Callback results. */
+enum
+{
+ DWARF_CB_OK = 0,
+ DWARF_CB_ABORT
+};
+
+
+/* Error values. */
+enum
+ {
+ DW_TAG_invalid = 0
+#define DW_TAG_invalid DW_TAG_invalid
+ };
+
+
+/* Type for offset in DWARF file. */
+typedef GElf_Off Dwarf_Off;
+
+/* Type for address in DWARF file. */
+typedef GElf_Addr Dwarf_Addr;
+
+/* Integer types. Big enough to hold any numeric value. */
+typedef GElf_Xword Dwarf_Word;
+typedef GElf_Sxword Dwarf_Sword;
+/* For the times we know we do not need that much. */
+typedef GElf_Half Dwarf_Half;
+
+
+/* DWARF abbreviation record. */
+typedef struct Dwarf_Abbrev Dwarf_Abbrev;
+
+/* Returned to show the last abbreviation has been returned. */
+#define DWARF_END_ABBREV ((Dwarf_Abbrev *) -1l)
+
+/* Source code line information for CU. */
+typedef struct Dwarf_Lines_s Dwarf_Lines;
+
+/* One source code line information. */
+typedef struct Dwarf_Line_s Dwarf_Line;
+
+/* Source file information. */
+typedef struct Dwarf_Files_s Dwarf_Files;
+
+/* One address range record. */
+typedef struct Dwarf_Arange_s Dwarf_Arange;
+
+/* Address ranges of a file. */
+typedef struct Dwarf_Aranges_s Dwarf_Aranges;
+
+/* CU representation. */
+struct Dwarf_CU;
+
+/* Macro information. */
+typedef struct Dwarf_Macro_s Dwarf_Macro;
+
+/* Attribute representation. */
+typedef struct
+{
+ unsigned int code;
+ unsigned int form;
+ unsigned char *valp;
+ struct Dwarf_CU *cu;
+} Dwarf_Attribute;
+
+
+/* Data block representation. */
+typedef struct
+{
+ Dwarf_Word length;
+ unsigned char *data;
+} Dwarf_Block;
+
+
+/* DIE information. */
+typedef struct
+{
+ /* The offset can be computed from the address. */
+ void *addr;
+ struct Dwarf_CU *cu;
+ Dwarf_Abbrev *abbrev;
+ // XXX We'll see what other information will be needed.
+ long int padding__;
+} Dwarf_Die;
+
+/* Returned to show the last DIE has been returned. */
+#define DWARF_END_DIE ((Dwarf_Die *) -1l)
+
+
+/* Global symbol information. */
+typedef struct
+{
+ Dwarf_Off cu_offset;
+ Dwarf_Off die_offset;
+ const char *name;
+} Dwarf_Global;
+
+
+/* One operation in a DWARF location expression.
+ A location expression is an array of these. */
+typedef struct
+{
+ uint8_t atom; /* Operation */
+ Dwarf_Word number; /* Operand */
+ Dwarf_Word number2; /* Possible second operand */
+ Dwarf_Word offset; /* Offset in location expression */
+} Dwarf_Op;
+
+
+/* This describes one Common Information Entry read from a CFI section.
+ Pointers here point into the DATA->d_buf block passed to dwarf_next_cfi. */
+typedef struct
+{
+ Dwarf_Off CIE_id; /* Always DW_CIE_ID_64 in Dwarf_CIE structures. */
+
+ /* Instruction stream describing initial state used by FDEs. If
+ we did not understand the whole augmentation string and it did
+ not use 'z', then there might be more augmentation data here
+ (and in FDEs) before the actual instructions. */
+ const uint8_t *initial_instructions;
+ const uint8_t *initial_instructions_end;
+
+ Dwarf_Word code_alignment_factor;
+ Dwarf_Sword data_alignment_factor;
+ Dwarf_Word return_address_register;
+
+ const char *augmentation; /* Augmentation string. */
+
+ /* Augmentation data, might be NULL. The size is correct only if
+ we understood the augmentation string sufficiently. */
+ const uint8_t *augmentation_data;
+ size_t augmentation_data_size;
+ size_t fde_augmentation_data_size;
+} Dwarf_CIE;
+
+/* This describes one Frame Description Entry read from a CFI section.
+ Pointers here point into the DATA->d_buf block passed to dwarf_next_cfi. */
+typedef struct
+{
+ /* Section offset of CIE this FDE refers to. This will never be
+ DW_CIE_ID_64 in an FDE. If this value is DW_CIE_ID_64, this is
+ actually a Dwarf_CIE structure. */
+ Dwarf_Off CIE_pointer;
+
+ /* We can't really decode anything further without looking up the CIE
+ and checking its augmentation string. Here follows the encoded
+ initial_location and address_range, then any augmentation data,
+ then the instruction stream. This FDE describes PC locations in
+ the byte range [initial_location, initial_location+address_range).
+ When the CIE augmentation string uses 'z', the augmentation data is
+ a DW_FORM_block (self-sized). Otherwise, when we understand the
+ augmentation string completely, fde_augmentation_data_size gives
+ the number of bytes of augmentation data before the instructions. */
+ const uint8_t *start;
+ const uint8_t *end;
+} Dwarf_FDE;
+
+/* Each entry in a CFI section is either a CIE described by Dwarf_CIE or
+ an FDE described by Dwarf_FDE. Check CIE_id to see which you have. */
+typedef union
+{
+ Dwarf_Off CIE_id; /* Always DW_CIE_ID_64 in Dwarf_CIE structures. */
+ Dwarf_CIE cie;
+ Dwarf_FDE fde;
+} Dwarf_CFI_Entry;
+
+#define dwarf_cfi_cie_p(entry) ((entry)->cie.CIE_id == DW_CIE_ID_64)
+
+/* Opaque type representing a frame state described by CFI. */
+typedef struct Dwarf_Frame_s Dwarf_Frame;
+
+/* Opaque type representing a CFI section found in a DWARF or ELF file. */
+typedef struct Dwarf_CFI_s Dwarf_CFI;
+
+
+/* Handle for debug sessions. */
+typedef struct Dwarf Dwarf;
+
+
+/* Out-Of-Memory handler. */
+#if __GNUC__ < 4
+typedef void (*Dwarf_OOM) (void);
+#else
+typedef void (*__attribute__ ((noreturn)) Dwarf_OOM) (void);
+#endif
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Create a handle for a new debug session. */
+extern Dwarf *dwarf_begin (int fildes, Dwarf_Cmd cmd);
+
+/* Create a handle for a new debug session for an ELF file. */
+extern Dwarf *dwarf_begin_elf (Elf *elf, Dwarf_Cmd cmd, Elf_Scn *scngrp);
+
+/* Retrieve ELF descriptor used for DWARF access. */
+extern Elf *dwarf_getelf (Dwarf *dwarf);
+
+/* Release debugging handling context. */
+extern int dwarf_end (Dwarf *dwarf);
+
+
+/* Get the data block for the .debug_info section. */
+extern Elf_Data *dwarf_getscn_info (Dwarf *dwarf);
+
+/* Read the header for the DWARF CU. */
+extern int dwarf_nextcu (Dwarf *dwarf, Dwarf_Off off, Dwarf_Off *next_off,
+ size_t *header_sizep, Dwarf_Off *abbrev_offsetp,
+ uint8_t *address_sizep, uint8_t *offset_sizep)
+ __nonnull_attribute__ (3);
+
+/* Read the header of a DWARF CU or type unit. If TYPE_SIGNATUREP is not
+ null, this reads a type unit from the .debug_types section; otherwise
+ this reads a CU from the .debug_info section. */
+extern int dwarf_next_unit (Dwarf *dwarf, Dwarf_Off off, Dwarf_Off *next_off,
+ size_t *header_sizep, Dwarf_Half *versionp,
+ Dwarf_Off *abbrev_offsetp,
+ uint8_t *address_sizep, uint8_t *offset_sizep,
+ uint64_t *type_signaturep, Dwarf_Off *type_offsetp)
+ __nonnull_attribute__ (3);
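
For orientation, a minimal loop over all CUs, not part of the patch, built only from declarations in this header (dwarf_nextcu above, plus dwarf_offdie and dwarf_diename further below):

#include <elfutils/libdw.h>
#include <stdio.h>

/* Illustrative only: print the name of every CU in .debug_info.
   DBG comes from dwarf_begin or dwarf_begin_elf.  */
static void list_cus (Dwarf *dbg)
{
  Dwarf_Off off = 0, next_off;
  size_t header_size;

  while (dwarf_nextcu (dbg, off, &next_off, &header_size,
                       NULL, NULL, NULL) == 0)
    {
      Dwarf_Die cudie_mem;
      /* The CU DIE starts immediately after the CU header.  */
      Dwarf_Die *cudie = dwarf_offdie (dbg, off + header_size, &cudie_mem);
      const char *name = cudie != NULL ? dwarf_diename (cudie) : NULL;
      printf ("CU at %#llx: %s\n", (unsigned long long) off,
              name != NULL ? name : "<unnamed>");
      off = next_off;
    }
}
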
+
+
+/* Decode one DWARF CFI entry (CIE or FDE) from the raw section data.
+ The E_IDENT from the originating ELF file indicates the address
+ size and byte order used in the CFI section contained in DATA;
+ EH_FRAME_P should be true for .eh_frame format and false for
+ .debug_frame format. OFFSET is the byte position in the section
+ to start at; on return *NEXT_OFFSET is filled in with the byte
+ position immediately after this entry.
+
+ On success, returns 0 and fills in *ENTRY; use dwarf_cfi_cie_p to
+ see whether ENTRY->cie or ENTRY->fde is valid.
+
+ On errors, returns -1. Some format errors will permit safely
+ skipping to the next CFI entry though the current one is unusable.
+ In that case, *NEXT_OFF will be updated before a -1 return.
+
+ If there are no more CFI entries left in the section,
+ returns 1 and sets *NEXT_OFFSET to (Dwarf_Off) -1. */
+extern int dwarf_next_cfi (const unsigned char e_ident[],
+ Elf_Data *data, bool eh_frame_p,
+ Dwarf_Off offset, Dwarf_Off *next_offset,
+ Dwarf_CFI_Entry *entry)
+ __nonnull_attribute__ (1, 2, 5, 6);
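
A sketch of the iteration pattern the comment above describes; the counters and the .debug_frame assumption (eh_frame_p = false) are illustrative choices, not part of the patch:

#include <elfutils/libdw.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: count CIEs and FDEs in a .debug_frame data block.
   E_IDENT is the e_ident array of the ELF file DATA came from.  */
static void count_cfi_entries (const unsigned char e_ident[], Elf_Data *data)
{
  size_t ncies = 0, nfdes = 0;
  Dwarf_Off off = 0;

  for (;;)
    {
      Dwarf_CFI_Entry entry;
      Dwarf_Off next_off;
      int res = dwarf_next_cfi (e_ident, data, false, off, &next_off, &entry);
      if (res != 0)
        break;                  /* 1: no more entries; -1: error.  */
      if (dwarf_cfi_cie_p (&entry))
        ++ncies;
      else
        ++nfdes;
      off = next_off;
    }
  printf ("%zu CIEs, %zu FDEs\n", ncies, nfdes);
}
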
+
+/* Use the CFI in the DWARF .debug_frame section.
+ Returns NULL if there is no such section (not an error).
+ The pointer returned can be used until dwarf_end is called on DWARF,
+ and must not be passed to dwarf_cfi_end.
+ Calling this more than once returns the same pointer. */
+extern Dwarf_CFI *dwarf_getcfi (Dwarf *dwarf);
+
+/* Use the CFI in the ELF file's exception-handling data.
+ Returns NULL if there is no such data.
+ The pointer returned can be used until elf_end is called on ELF,
+ and must be passed to dwarf_cfi_end before then.
+ Calling this more than once allocates independent data structures. */
+extern Dwarf_CFI *dwarf_getcfi_elf (Elf *elf);
+
+/* Release resources allocated by dwarf_getcfi_elf. */
+extern int dwarf_cfi_end (Dwarf_CFI *cache);
+
+
+/* Return DIE at given offset in .debug_info section. */
+extern Dwarf_Die *dwarf_offdie (Dwarf *dbg, Dwarf_Off offset,
+ Dwarf_Die *result) __nonnull_attribute__ (3);
+
+/* Return DIE at given offset in .debug_types section. */
+extern Dwarf_Die *dwarf_offdie_types (Dwarf *dbg, Dwarf_Off offset,
+ Dwarf_Die *result)
+ __nonnull_attribute__ (3);
+
+/* Return offset of DIE. */
+extern Dwarf_Off dwarf_dieoffset (Dwarf_Die *die);
+
+/* Return offset of DIE in CU. */
+extern Dwarf_Off dwarf_cuoffset (Dwarf_Die *die);
+
+/* Return CU DIE containing given DIE. */
+extern Dwarf_Die *dwarf_diecu (Dwarf_Die *die, Dwarf_Die *result,
+ uint8_t *address_sizep, uint8_t *offset_sizep)
+ __nonnull_attribute__ (2);
+
+/* Return CU DIE containing given address. */
+extern Dwarf_Die *dwarf_addrdie (Dwarf *dbg, Dwarf_Addr addr,
+ Dwarf_Die *result) __nonnull_attribute__ (3);
+
+/* Return child of current DIE. */
+extern int dwarf_child (Dwarf_Die *die, Dwarf_Die *result)
+ __nonnull_attribute__ (2);
+
+/* Locates the first sibling of DIE and places it in RESULT.
+ Returns 0 if a sibling was found, -1 if something went wrong.
+ Returns 1 if no sibling could be found and, if RESULT is not
+ the same as DIE, it sets RESULT->addr to the address of the
+ (non-sibling) DIE that follows this one, or NULL if this DIE
+ was the last one in the compilation unit. */
+extern int dwarf_siblingof (Dwarf_Die *die, Dwarf_Die *result)
+ __nonnull_attribute__ (2);
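
The child/sibling pair above is normally combined into a depth-first walk. A minimal sketch, not part of the patch, also using dwarf_haschildren, dwarf_tag and dwarf_diename declared further below:

#include <elfutils/libdw.h>
#include <stdio.h>

/* Illustrative only: depth-first dump of DIE, its descendants and its
   following siblings.  */
static void walk_die_tree (Dwarf_Die *die, int depth)
{
  do
    {
      const char *name = dwarf_diename (die);
      printf ("%*stag %#x %s\n", depth * 2, "",
              dwarf_tag (die), name != NULL ? name : "<unnamed>");

      Dwarf_Die child;
      if (dwarf_haschildren (die) > 0 && dwarf_child (die, &child) == 0)
        walk_die_tree (&child, depth + 1);
    }
  while (dwarf_siblingof (die, die) == 0);   /* RESULT may equal DIE.  */
}
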
+
+/* Check whether the DIE has children. */
+extern int dwarf_haschildren (Dwarf_Die *die) __nonnull_attribute__ (1);
+
+/* Walks the attributes of DIE, starting at the one OFFSET bytes in,
+ calling the CALLBACK function for each one. Stops if the callback
+ function ever returns a value other than DWARF_CB_OK and returns the
+ offset of the offending attribute. If the end of the attributes
+ is reached 1 is returned. If something goes wrong -1 is returned and
+ the dwarf error number is set. */
+extern ptrdiff_t dwarf_getattrs (Dwarf_Die *die,
+ int (*callback) (Dwarf_Attribute *, void *),
+ void *arg, ptrdiff_t offset)
+ __nonnull_attribute__ (2);
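
A minimal callback sketch for this walker, not part of the patch; the attribute and form accessors it uses are declared just below:

#include <elfutils/libdw.h>
#include <stdio.h>

/* Illustrative only: print attribute and form codes of every
   attribute of DIE.  */
static int print_attr (Dwarf_Attribute *attr, void *arg)
{
  (void) arg;                   /* Unused.  */
  printf ("  attr %#x, form %#x\n",
          dwarf_whatattr (attr), dwarf_whatform (attr));
  return DWARF_CB_OK;
}

static void dump_attrs (Dwarf_Die *die)
{
  if (dwarf_getattrs (die, print_attr, NULL, 0) != 1)
    fprintf (stderr, "dwarf_getattrs: %s\n", dwarf_errmsg (-1));
}
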
+
+/* Return tag of given DIE. */
+extern int dwarf_tag (Dwarf_Die *die) __nonnull_attribute__ (1);
+
+
+/* Return specific attribute of DIE. */
+extern Dwarf_Attribute *dwarf_attr (Dwarf_Die *die, unsigned int search_name,
+ Dwarf_Attribute *result)
+ __nonnull_attribute__ (3);
+
+/* Check whether given DIE has specific attribute. */
+extern int dwarf_hasattr (Dwarf_Die *die, unsigned int search_name);
+
+/* These are the same as dwarf_attr and dwarf_hasattr, respectively,
+ but they resolve an indirect attribute through DW_AT_abstract_origin. */
+extern Dwarf_Attribute *dwarf_attr_integrate (Dwarf_Die *die,
+ unsigned int search_name,
+ Dwarf_Attribute *result)
+ __nonnull_attribute__ (3);
+extern int dwarf_hasattr_integrate (Dwarf_Die *die, unsigned int search_name);
+
+
+
+
+/* Check whether given attribute has specific form. */
+extern int dwarf_hasform (Dwarf_Attribute *attr, unsigned int search_form);
+
+/* Return attribute code of given attribute. */
+extern unsigned int dwarf_whatattr (Dwarf_Attribute *attr);
+
+/* Return form code of given attribute. */
+extern unsigned int dwarf_whatform (Dwarf_Attribute *attr);
+
+
+/* Return string associated with given attribute. */
+extern const char *dwarf_formstring (Dwarf_Attribute *attrp);
+
+/* Return unsigned constant represented by attribute. */
+extern int dwarf_formudata (Dwarf_Attribute *attr, Dwarf_Word *return_uval)
+ __nonnull_attribute__ (2);
+
+/* Return signed constant represented by attribute. */
+extern int dwarf_formsdata (Dwarf_Attribute *attr, Dwarf_Sword *return_uval)
+ __nonnull_attribute__ (2);
+
+/* Return address represented by attribute. */
+extern int dwarf_formaddr (Dwarf_Attribute *attr, Dwarf_Addr *return_addr)
+ __nonnull_attribute__ (2);
+
+/* This function is deprecated. Always use dwarf_formref_die instead.
+ Return reference offset represented by attribute. */
+extern int dwarf_formref (Dwarf_Attribute *attr, Dwarf_Off *return_offset)
+ __nonnull_attribute__ (2) __deprecated_attribute__;
+
+/* Look up the DIE in a reference-form attribute. */
+extern Dwarf_Die *dwarf_formref_die (Dwarf_Attribute *attr, Dwarf_Die *die_mem)
+ __nonnull_attribute__ (2);
+
+/* Return block represented by attribute. */
+extern int dwarf_formblock (Dwarf_Attribute *attr, Dwarf_Block *return_block)
+ __nonnull_attribute__ (2);
+
+/* Return flag represented by attribute. */
+extern int dwarf_formflag (Dwarf_Attribute *attr, bool *return_bool)
+ __nonnull_attribute__ (2);
+
+
+/* Simplified attribute value access functions. */
+
+/* Return string in name attribute of DIE. */
+extern const char *dwarf_diename (Dwarf_Die *die);
+
+/* Return high PC attribute of DIE. */
+extern int dwarf_highpc (Dwarf_Die *die, Dwarf_Addr *return_addr)
+ __nonnull_attribute__ (2);
+
+/* Return low PC attribute of DIE. */
+extern int dwarf_lowpc (Dwarf_Die *die, Dwarf_Addr *return_addr)
+ __nonnull_attribute__ (2);
+
+/* Return entry_pc or low_pc attribute of DIE. */
+extern int dwarf_entrypc (Dwarf_Die *die, Dwarf_Addr *return_addr)
+ __nonnull_attribute__ (2);
+
+/* Return 1 if DIE's lowpc/highpc or ranges attributes match the PC address,
+ 0 if not, or -1 for errors. */
+extern int dwarf_haspc (Dwarf_Die *die, Dwarf_Addr pc);
+
+/* Enumerate the PC address ranges covered by this DIE, covering all
+ addresses where dwarf_haspc returns true. In the first call OFFSET
+ should be zero and *BASEP need not be initialized. Returns -1 for
+ errors, zero when there are no more address ranges to report, or a
+ nonzero OFFSET value to pass to the next call. Each subsequent call
+ must preserve *BASEP from the prior call. Successful calls fill in
+ *STARTP and *ENDP with a contiguous address range. */
+extern ptrdiff_t dwarf_ranges (Dwarf_Die *die,
+ ptrdiff_t offset, Dwarf_Addr *basep,
+ Dwarf_Addr *startp, Dwarf_Addr *endp);
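
The offset/base protocol described above is easiest to see in a loop; a sketch, not part of the patch:

#include <elfutils/libdw.h>
#include <stdio.h>

/* Illustrative only: print every contiguous PC range covered by DIE.  */
static void print_ranges (Dwarf_Die *die)
{
  Dwarf_Addr base, start, end;
  ptrdiff_t off = 0;

  while ((off = dwarf_ranges (die, off, &base, &start, &end)) > 0)
    printf ("  [%#llx, %#llx)\n",
            (unsigned long long) start, (unsigned long long) end);

  if (off < 0)
    fprintf (stderr, "dwarf_ranges: %s\n", dwarf_errmsg (-1));
}
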
+
+
+/* Return byte size attribute of DIE. */
+extern int dwarf_bytesize (Dwarf_Die *die);
+
+/* Return bit size attribute of DIE. */
+extern int dwarf_bitsize (Dwarf_Die *die);
+
+/* Return bit offset attribute of DIE. */
+extern int dwarf_bitoffset (Dwarf_Die *die);
+
+/* Return array order attribute of DIE. */
+extern int dwarf_arrayorder (Dwarf_Die *die);
+
+/* Return source language attribute of DIE. */
+extern int dwarf_srclang (Dwarf_Die *die);
+
+
+/* Get abbreviation at given offset for given DIE. */
+extern Dwarf_Abbrev *dwarf_getabbrev (Dwarf_Die *die, Dwarf_Off offset,
+ size_t *lengthp);
+
+/* Get abbreviation at given offset in .debug_abbrev section. */
+extern int dwarf_offabbrev (Dwarf *dbg, Dwarf_Off offset, size_t *lengthp,
+ Dwarf_Abbrev *abbrevp)
+ __nonnull_attribute__ (4);
+
+/* Get abbreviation code. */
+extern unsigned int dwarf_getabbrevcode (Dwarf_Abbrev *abbrev);
+
+/* Get abbreviation tag. */
+extern unsigned int dwarf_getabbrevtag (Dwarf_Abbrev *abbrev);
+
+/* Return true if the abbreviation's children flag is set. */
+extern int dwarf_abbrevhaschildren (Dwarf_Abbrev *abbrev);
+
+/* Get number of attributes of abbreviation. */
+extern int dwarf_getattrcnt (Dwarf_Abbrev *abbrev, size_t *attrcntp)
+ __nonnull_attribute__ (2);
+
+/* Get specific attribute of abbreviation. */
+extern int dwarf_getabbrevattr (Dwarf_Abbrev *abbrev, size_t idx,
+ unsigned int *namep, unsigned int *formp,
+ Dwarf_Off *offset);
+
+
+/* Get string from the .debug_str section. */
+extern const char *dwarf_getstring (Dwarf *dbg, Dwarf_Off offset,
+ size_t *lenp);
+
+
+/* Get public symbol information. */
+extern ptrdiff_t dwarf_getpubnames (Dwarf *dbg,
+ int (*callback) (Dwarf *, Dwarf_Global *,
+ void *),
+ void *arg, ptrdiff_t offset)
+ __nonnull_attribute__ (2);
+
+
+/* Get source file information for CU. */
+extern int dwarf_getsrclines (Dwarf_Die *cudie, Dwarf_Lines **lines,
+ size_t *nlines) __nonnull_attribute__ (2, 3);
+
+/* Return one of the source lines of the CU. */
+extern Dwarf_Line *dwarf_onesrcline (Dwarf_Lines *lines, size_t idx);
+
+/* Get the source files used in the CU. */
+extern int dwarf_getsrcfiles (Dwarf_Die *cudie, Dwarf_Files **files,
+ size_t *nfiles)
+ __nonnull_attribute__ (2);
+
+
+/* Get source for address in CU. */
+extern Dwarf_Line *dwarf_getsrc_die (Dwarf_Die *cudie, Dwarf_Addr addr);
+
+/* Get source for file and line number. */
+extern int dwarf_getsrc_file (Dwarf *dbg, const char *fname, int line, int col,
+ Dwarf_Line ***srcsp, size_t *nsrcs)
+ __nonnull_attribute__ (2, 5, 6);
+
+
+/* Return line address. */
+extern int dwarf_lineaddr (Dwarf_Line *line, Dwarf_Addr *addrp);
+
+/* Return line VLIW operation index. */
+extern int dwarf_lineop_index (Dwarf_Line *line, unsigned int *op_indexp);
+
+/* Return line number. */
+extern int dwarf_lineno (Dwarf_Line *line, int *linep)
+ __nonnull_attribute__ (2);
+
+/* Return column in line. */
+extern int dwarf_linecol (Dwarf_Line *line, int *colp)
+ __nonnull_attribute__ (2);
+
+/* Return true if record is for beginning of a statement. */
+extern int dwarf_linebeginstatement (Dwarf_Line *line, bool *flagp)
+ __nonnull_attribute__ (2);
+
+/* Return true if record is for end of sequence. */
+extern int dwarf_lineendsequence (Dwarf_Line *line, bool *flagp)
+ __nonnull_attribute__ (2);
+
+/* Return true if record is for beginning of a basic block. */
+extern int dwarf_lineblock (Dwarf_Line *line, bool *flagp)
+ __nonnull_attribute__ (2);
+
+/* Return true if record is for end of prologue. */
+extern int dwarf_lineprologueend (Dwarf_Line *line, bool *flagp)
+ __nonnull_attribute__ (2);
+
+/* Return true if record is for beginning of epilogue. */
+extern int dwarf_lineepiloguebegin (Dwarf_Line *line, bool *flagp)
+ __nonnull_attribute__ (2);
+
+/* Return instruction-set architecture in this record. */
+extern int dwarf_lineisa (Dwarf_Line *line, unsigned int *isap)
+ __nonnull_attribute__ (2);
+
+/* Return code path discriminator in this record. */
+extern int dwarf_linediscriminator (Dwarf_Line *line, unsigned int *discp)
+ __nonnull_attribute__ (2);
+
+
+/* Find line information for address. */
+extern const char *dwarf_linesrc (Dwarf_Line *line,
+ Dwarf_Word *mtime, Dwarf_Word *length);
+
+/* Return file information. */
+extern const char *dwarf_filesrc (Dwarf_Files *file, size_t idx,
+ Dwarf_Word *mtime, Dwarf_Word *length);
+
+/* Return the directory list used in the file information extracted.
+ (*RESULT)[0] is the CU's DW_AT_comp_dir value, and may be null.
+ (*RESULT)[0..*NDIRS-1] are the compile-time include directory paths
+ encoded by the compiler. */
+extern int dwarf_getsrcdirs (Dwarf_Files *files,
+ const char *const **result, size_t *ndirs)
+ __nonnull_attribute__ (2, 3);
+
+
+/* Return location expression, decoded as a list of operations. */
+extern int dwarf_getlocation (Dwarf_Attribute *attr, Dwarf_Op **expr,
+ size_t *exprlen) __nonnull_attribute__ (2, 3);
+
+/* Return location expressions. If the attribute uses a location list,
+ ADDRESS selects the relevant location expressions from the list.
+ There can be multiple matches, resulting in multiple expressions to
+ return. EXPRS and EXPRLENS are parallel arrays of NLOCS slots to
+ fill in. Returns the number of locations filled in, or -1 for
+ errors. If EXPRS is a null pointer, stores nothing and returns the
+ total number of locations. A return value of zero means that the
+ location list indicated no value is accessible. */
+extern int dwarf_getlocation_addr (Dwarf_Attribute *attr, Dwarf_Addr address,
+ Dwarf_Op **exprs, size_t *exprlens,
+ size_t nlocs);
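
An illustration of the parallel-array convention above, not part of the patch; the fixed slot count of 4 is an arbitrary choice and DW_AT_location comes from the dwarf.h added earlier in this series:

#include <elfutils/libdw.h>
#include <dwarf.h>
#include <stdio.h>

/* Illustrative only: show the location expression(s) that describe
   VAR_DIE (e.g. a DW_TAG_variable) at program counter PC.  */
static void print_location (Dwarf_Die *var_die, Dwarf_Addr pc)
{
  Dwarf_Attribute attr_mem;
  Dwarf_Attribute *loc = dwarf_attr_integrate (var_die, DW_AT_location,
                                               &attr_mem);
  if (loc == NULL)
    return;                             /* No DW_AT_location attribute.  */

  Dwarf_Op *exprs[4];
  size_t exprlens[4];
  int n = dwarf_getlocation_addr (loc, pc, exprs, exprlens, 4);
  for (int i = 0; i < n; ++i)
    printf ("  expr %d: %zu ops, first atom %#x\n",
            i, exprlens[i], exprlens[i] > 0 ? exprs[i][0].atom : 0u);
}
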
+
+/* Return the block associated with a DW_OP_implicit_value operation.
+ The OP pointer must point into an expression that dwarf_getlocation
+ or dwarf_getlocation_addr has returned given the same ATTR. */
+extern int dwarf_getlocation_implicit_value (Dwarf_Attribute *attr,
+ const Dwarf_Op *op,
+ Dwarf_Block *return_block)
+ __nonnull_attribute__ (2, 3);
+
+
+/* Compute the byte-size of a type DIE according to DWARF rules.
+ For most types, this is just DW_AT_byte_size.
+ For DW_TAG_array_type it can apply much more complex rules. */
+extern int dwarf_aggregate_size (Dwarf_Die *die, Dwarf_Word *size);
+
+
+/* Return scope DIEs containing PC address.
+ Sets *SCOPES to a malloc'd array of Dwarf_Die structures,
+ and returns the number of elements in the array.
+ (*SCOPES)[0] is the DIE for the innermost scope containing PC,
+ (*SCOPES)[1] is the DIE for the scope containing that scope, and so on.
+ Returns -1 for errors or 0 if no scopes match PC. */
+extern int dwarf_getscopes (Dwarf_Die *cudie, Dwarf_Addr pc,
+ Dwarf_Die **scopes);
+
+/* Return scope DIEs containing the given DIE.
+ Sets *SCOPES to a malloc'd array of Dwarf_Die structures,
+ and returns the number of elements in the array.
+ (*SCOPES)[0] is a copy of DIE.
+ (*SCOPES)[1] is the DIE for the scope containing that scope, and so on.
+ Returns -1 for errors or 0 if DIE is not found in any scope entry. */
+extern int dwarf_getscopes_die (Dwarf_Die *die, Dwarf_Die **scopes);
+
+
+/* Search SCOPES[0..NSCOPES-1] for a variable called NAME.
+ Ignore the first SKIP_SHADOWS scopes that match the name.
+ If MATCH_FILE is not null, accept only declaration in that source file;
+ if MATCH_LINENO or MATCH_LINECOL are also nonzero, accept only declaration
+ at that line and column.
+
+ If successful, fill in *RESULT with the DIE of the variable found,
+ and return N where SCOPES[N] is the scope defining the variable.
+ Return -1 for errors or -2 for no matching variable found. */
+extern int dwarf_getscopevar (Dwarf_Die *scopes, int nscopes,
+ const char *name, int skip_shadows,
+ const char *match_file,
+ int match_lineno, int match_linecol,
+ Dwarf_Die *result);
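
A sketch combining the two scope functions above, not part of the patch; error handling is minimal:

#include <elfutils/libdw.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: look up variable NAME visible at PC inside the
   compilation unit CUDIE.  */
static void find_var (Dwarf_Die *cudie, Dwarf_Addr pc, const char *name)
{
  Dwarf_Die *scopes = NULL;
  int nscopes = dwarf_getscopes (cudie, pc, &scopes);
  if (nscopes <= 0)
    return;                     /* Error, or no scope contains PC.  */

  Dwarf_Die var_mem;
  int idx = dwarf_getscopevar (scopes, nscopes, name, 0, NULL, 0, 0, &var_mem);
  if (idx >= 0)
    printf ("%s found, defined in scope %d\n", name, idx);

  free (scopes);                /* The scopes array is malloc'd.  */
}
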
+
+
+
+/* Return list address ranges. */
+extern int dwarf_getaranges (Dwarf *dbg, Dwarf_Aranges **aranges,
+ size_t *naranges)
+ __nonnull_attribute__ (2);
+
+/* Return one of the address range entries. */
+extern Dwarf_Arange *dwarf_onearange (Dwarf_Aranges *aranges, size_t idx);
+
+/* Return information in address range record. */
+extern int dwarf_getarangeinfo (Dwarf_Arange *arange, Dwarf_Addr *addrp,
+ Dwarf_Word *lengthp, Dwarf_Off *offsetp);
+
+/* Get address range which includes given address. */
+extern Dwarf_Arange *dwarf_getarange_addr (Dwarf_Aranges *aranges,
+ Dwarf_Addr addr);
+
+
+
+/* Get functions in CUDIE. */
+extern ptrdiff_t dwarf_getfuncs (Dwarf_Die *cudie,
+ int (*callback) (Dwarf_Die *, void *),
+ void *arg, ptrdiff_t offset);
+
+
+/* Return file name containing definition of the given declaration. */
+extern const char *dwarf_decl_file (Dwarf_Die *decl);
+
+/* Get line number of beginning of given declaration. */
+extern int dwarf_decl_line (Dwarf_Die *decl, int *linep)
+ __nonnull_attribute__ (2);
+
+/* Get column number of beginning of given declaration. */
+extern int dwarf_decl_column (Dwarf_Die *decl, int *colp)
+ __nonnull_attribute__ (2);
+
+
+/* Return nonzero if given function is an abstract inline definition. */
+extern int dwarf_func_inline (Dwarf_Die *func);
+
+/* Find each concrete inlined instance of the abstract inline definition. */
+extern int dwarf_func_inline_instances (Dwarf_Die *func,
+ int (*callback) (Dwarf_Die *, void *),
+ void *arg);
+
+
+/* Find the appropriate PC location or locations for function entry
+ breakpoints for the given DW_TAG_subprogram DIE. Returns -1 for errors.
+ On success, returns the number of breakpoint locations (never zero)
+ and sets *BKPTS to a malloc'd vector of addresses. */
+extern int dwarf_entry_breakpoints (Dwarf_Die *die, Dwarf_Addr **bkpts);
+
+
+/* Call callback function for each of the macro information entry for
+ the CU. */
+extern ptrdiff_t dwarf_getmacros (Dwarf_Die *cudie,
+ int (*callback) (Dwarf_Macro *, void *),
+ void *arg, ptrdiff_t offset)
+ __nonnull_attribute__ (2);
+
+/* Return macro opcode. */
+extern int dwarf_macro_opcode (Dwarf_Macro *macro, unsigned int *opcodep)
+ __nonnull_attribute__ (2);
+
+/* Return first macro parameter. */
+extern int dwarf_macro_param1 (Dwarf_Macro *macro, Dwarf_Word *paramp)
+ __nonnull_attribute__ (2);
+
+/* Return second macro parameter. */
+extern int dwarf_macro_param2 (Dwarf_Macro *macro, Dwarf_Word *paramp,
+ const char **strp);
+
+
+/* Compute what's known about a call frame when the PC is at ADDRESS.
+ Returns 0 for success or -1 for errors.
+ On success, *FRAME is a malloc'd pointer. */
+extern int dwarf_cfi_addrframe (Dwarf_CFI *cache,
+ Dwarf_Addr address, Dwarf_Frame **frame)
+ __nonnull_attribute__ (3);
+
+/* Return the DWARF register number used in FRAME to denote
+ the return address in FRAME's caller frame. The remaining
+ arguments can be non-null to fill in more information.
+
+ Fill [*START, *END) with the PC range to which FRAME's information applies.
+ Fill in *SIGNALP to indicate whether this is a signal-handling frame.
+ If true, this is the implicit call frame that calls a signal handler.
+ This frame's "caller" is actually the interrupted state, not a call;
+ its return address is an exact PC, not a PC after a call instruction. */
+extern int dwarf_frame_info (Dwarf_Frame *frame,
+ Dwarf_Addr *start, Dwarf_Addr *end, bool *signalp);
+
+/* Return a DWARF expression that yields the Canonical Frame Address at
+ this frame state. Returns -1 for errors, or zero for success, with
+ *NOPS set to the number of operations stored at *OPS. That pointer
+ can be used only as long as FRAME is alive and unchanged. *NOPS is
+ zero if the CFA cannot be determined here. Note that if nonempty,
+ *OPS is a DWARF expression, not a location description--append
+ DW_OP_stack_value to get a location description for the CFA. */
+extern int dwarf_frame_cfa (Dwarf_Frame *frame, Dwarf_Op **ops, size_t *nops)
+ __nonnull_attribute__ (2);
+
+/* Deliver a DWARF location description that yields the location or
+ value of DWARF register number REGNO in the state described by FRAME.
+
+ Returns -1 for errors or zero for success, setting *NOPS to the
+ number of operations in the array stored at *OPS. Note the last
+ operation is DW_OP_stack_value if there is no mutable location but
+ only a computable value.
+
+ *NOPS zero with *OPS set to OPS_MEM means CFI says the caller's
+ REGNO is "undefined", i.e. it's call-clobbered and cannot be recovered.
+
+ *NOPS zero with *OPS set to a null pointer means CFI says the
+ caller's REGNO is "same_value", i.e. this frame did not change it;
+ ask the caller frame where to find it.
+
+ For common simple expressions *OPS is OPS_MEM. For arbitrary DWARF
+ expressions in the CFI, *OPS is an internal pointer that can be used as
+ long as the Dwarf_CFI used to create FRAME remains alive. */
+extern int dwarf_frame_register (Dwarf_Frame *frame, int regno,
+ Dwarf_Op ops_mem[3],
+ Dwarf_Op **ops, size_t *nops)
+ __nonnull_attribute__ (3, 4, 5);
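
Putting dwarf_cfi_addrframe, dwarf_frame_cfa and dwarf_frame_register together, a sketch that is not part of the patch; REGNO is a DWARF register number, not a machine encoding:

#include <elfutils/libdw.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: report the CFA and one register rule at PC.
   CFI comes from dwarf_getcfi or dwarf_getcfi_elf.  */
static void show_frame (Dwarf_CFI *cfi, Dwarf_Addr pc, int regno)
{
  Dwarf_Frame *frame;
  if (dwarf_cfi_addrframe (cfi, pc, &frame) != 0)
    return;

  Dwarf_Op *cfa_ops;
  size_t cfa_nops;
  if (dwarf_frame_cfa (frame, &cfa_ops, &cfa_nops) == 0)
    printf ("CFA expression: %zu ops\n", cfa_nops);

  Dwarf_Op ops_mem[3], *ops;
  size_t nops;
  if (dwarf_frame_register (frame, regno, ops_mem, &ops, &nops) == 0)
    {
      if (nops == 0 && ops == ops_mem)
        printf ("reg %d: undefined in the caller\n", regno);
      else if (nops == 0 && ops == NULL)
        printf ("reg %d: same value as in the caller\n", regno);
      else
        printf ("reg %d: %zu-op location description\n", regno, nops);
    }

  free (frame);                 /* *FRAME is malloc'd on success.  */
}
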
+
+
+/* Return error code of last failing function call. This value is kept
+ separately for each thread. */
+extern int dwarf_errno (void);
+
+/* Return error string for ERROR. If ERROR is zero, return error string
+ for most recent error or NULL if none occurred. If ERROR is -1 the
+ behaviour is similar to the last case except that a legal string is
+ always returned, never NULL. */
+extern const char *dwarf_errmsg (int err);
+
+
+/* Register new Out-Of-Memory handler. The old handler is returned. */
+extern Dwarf_OOM dwarf_new_oom_handler (Dwarf *dbg, Dwarf_OOM handler);
+
+
+/* Inline optimizations. */
+#if defined __OPTIMIZE__ && !(__GNUC__ == 4 && __GNUC_MINOR__ == 2)
+/* Return attribute code of given attribute. */
+__libdw_extern_inline unsigned int
+dwarf_whatattr (Dwarf_Attribute *attr)
+{
+ return attr == NULL ? 0 : attr->code;
+}
+
+/* Return form code of given attribute. */
+__libdw_extern_inline unsigned int
+dwarf_whatform (Dwarf_Attribute *attr)
+{
+ return attr == NULL ? 0 : attr->form;
+}
+#endif /* Optimize. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* libdw.h */
diff --git a/tools/perf/external/usr/include/elfutils/libdwfl.h b/tools/perf/external/usr/include/elfutils/libdwfl.h
new file mode 100644
index 00000000..51e98187
--- /dev/null
+++ b/tools/perf/external/usr/include/elfutils/libdwfl.h
@@ -0,0 +1,582 @@
+/* Interfaces for libdwfl.
+ Copyright (C) 2005-2010 Red Hat, Inc.
+ This file is part of Red Hat elfutils.
+
+ Red Hat elfutils is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by the
+ Free Software Foundation; version 2 of the License.
+
+ Red Hat elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with Red Hat elfutils; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA.
+
+ In addition, as a special exception, Red Hat, Inc. gives You the
+ additional right to link the code of Red Hat elfutils with code licensed
+ under any Open Source Initiative certified open source license
+ (http://www.opensource.org/licenses/index.php) which requires the
+ distribution of source code with any binary distribution and to
+ distribute linked combinations of the two. Non-GPL Code permitted under
+ this exception must only link to the code of Red Hat elfutils through
+ those well defined interfaces identified in the file named EXCEPTION
+ found in the source code files (the "Approved Interfaces"). The files
+ of Non-GPL Code may instantiate templates or use macros or inline
+ functions from the Approved Interfaces without causing the resulting
+ work to be covered by the GNU General Public License. Only Red Hat,
+ Inc. may make changes or additions to the list of Approved Interfaces.
+ Red Hat's grant of this exception is conditioned upon your not adding
+ any new exceptions. If you wish to add a new Approved Interface or
+ exception, please contact Red Hat. You must obey the GNU General Public
+ License in all respects for all of the Red Hat elfutils code and other
+ code used in conjunction with Red Hat elfutils except the Non-GPL Code
+ covered by this exception. If you modify this file, you may extend this
+ exception to your version of the file, but you are not obligated to do
+ so. If you do not wish to provide this exception without modification,
+ you must delete this exception statement from your version and license
+ this file solely under the GPL without exception.
+
+ Red Hat elfutils is an included package of the Open Invention Network.
+ An included package of the Open Invention Network is a package for which
+ Open Invention Network licensees cross-license their patents. No patent
+ license is granted, either expressly or impliedly, by designation as an
+ included package. Should you wish to participate in the Open Invention
+ Network licensing program, please visit www.openinventionnetwork.com
+ <http://www.openinventionnetwork.com>. */
+
+#ifndef _LIBDWFL_H
+#define _LIBDWFL_H 1
+
+#include "libdw.h"
+#include <stdio.h>
+
+/* Handle for a session using the library. */
+typedef struct Dwfl Dwfl;
+
+/* Handle for a module. */
+typedef struct Dwfl_Module Dwfl_Module;
+
+/* Handle describing a line record. */
+typedef struct Dwfl_Line Dwfl_Line;
+
+/* Callbacks. */
+typedef struct
+{
+ int (*find_elf) (Dwfl_Module *mod, void **userdata,
+ const char *modname, Dwarf_Addr base,
+ char **file_name, Elf **elfp);
+
+ int (*find_debuginfo) (Dwfl_Module *mod, void **userdata,
+ const char *modname, Dwarf_Addr base,
+ const char *file_name,
+ const char *debuglink_file, GElf_Word debuglink_crc,
+ char **debuginfo_file_name);
+
+ /* Fill *ADDR with the loaded address of the section called SECNAME in
+ the given module. Use (Dwarf_Addr) -1 if this section is omitted from
+ accessible memory. This is called exactly once for each SHF_ALLOC
+ section that relocations affecting DWARF data refer to, so it can
+ easily be used to collect state about the sections referenced. */
+ int (*section_address) (Dwfl_Module *mod, void **userdata,
+ const char *modname, Dwarf_Addr base,
+ const char *secname,
+ GElf_Word shndx, const GElf_Shdr *shdr,
+ Dwarf_Addr *addr);
+
+ char **debuginfo_path; /* See dwfl_standard_find_debuginfo. */
+} Dwfl_Callbacks;
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Start a new session with the library. */
+extern Dwfl *dwfl_begin (const Dwfl_Callbacks *callbacks)
+ __nonnull_attribute__ (1);
+
+
+/* End a session. */
+extern void dwfl_end (Dwfl *);
+
+/* Return implementation's version string suitable for printing. */
+extern const char *dwfl_version (Dwfl *);
+
+/* Return error code of last failing function call. This value is kept
+ separately for each thread. */
+extern int dwfl_errno (void);
+
+/* Return error string for ERROR. If ERROR is zero, return error string
+ for most recent error or NULL if none occurred. If ERROR is -1 the
+ behaviour is similar to the last case except that a legal string rather
+ than NULL is returned. */
+extern const char *dwfl_errmsg (int err);
+
+
+/* Start reporting the current set of segments and modules to the library.
+ All existing segments are wiped. Existing modules are marked to be
+ deleted, and will not be found via dwfl_addrmodule et al if they are not
+ re-reported before dwfl_report_end is called. */
+extern void dwfl_report_begin (Dwfl *dwfl);
+
+/* Report that segment NDX begins at PHDR->p_vaddr + BIAS.
+ If NDX is < 0, the value succeeding the last call's NDX
+ is used instead (zero on the first call).
+
+ If nonzero, the smallest PHDR->p_align value seen sets the
+ effective page size for the address space DWFL describes.
+ This is the granularity at which reported module boundary
+ addresses will be considered to fall in or out of a segment.
+
+ Returns -1 for errors, or NDX (or its assigned replacement) on success.
+
+ When NDX is the value succeeding the last call's NDX (or is implicitly
+ so as above), IDENT is nonnull and matches the value in the last call,
+ and the PHDR and BIAS values reflect a segment that would be contiguous,
+ in both memory and file, with the last segment reported, then this
+ segment may be coalesced internally with preceding segments. When given
+ an address inside this segment, dwfl_addrsegment may return the NDX of a
+ preceding contiguous segment. To prevent coalesced segments, always
+ pass a null pointer for IDENT.
+
+ The values passed are not stored (except to track coalescence).
+ The only information that can be extracted from DWFL later is the
+ mapping of an address to a segment index that starts at or below
+ it. Reporting segments at all is optional. Its only benefit to
+ the caller is to offer this quick lookup via dwfl_addrsegment,
+ or use other segment-based calls. */
+extern int dwfl_report_segment (Dwfl *dwfl, int ndx,
+ const GElf_Phdr *phdr, GElf_Addr bias,
+ const void *ident);
+
+/* Report that a module called NAME spans addresses [START, END).
+ Returns the module handle, either existing or newly allocated,
+ or returns a null pointer for an allocation error. */
+extern Dwfl_Module *dwfl_report_module (Dwfl *dwfl, const char *name,
+ Dwarf_Addr start, Dwarf_Addr end);
+
+/* Report a module with start and end addresses computed from the ELF
+ program headers in the given file, plus BASE. For an ET_REL file,
+ does a simple absolute section layout starting at BASE.
+ FD may be -1 to open FILE_NAME. On success, FD is consumed by the
+ library, and the `find_elf' callback will not be used for this module. */
+extern Dwfl_Module *dwfl_report_elf (Dwfl *dwfl, const char *name,
+ const char *file_name, int fd,
+ GElf_Addr base);
+
+/* Similar, but report the module for offline use. All ET_EXEC files
+ being reported must be reported before any relocatable objects.
+ If this is used, dwfl_report_module and dwfl_report_elf may not be
+ used in the same reporting session. */
+extern Dwfl_Module *dwfl_report_offline (Dwfl *dwfl, const char *name,
+ const char *file_name, int fd);
+
+
+/* Finish reporting the current set of modules to the library.
+ If REMOVED is not null, it's called for each module that
+ existed before but was not included in the current report.
+ Returns a nonzero return value from the callback.
+ The callback may call dwfl_report_module; doing so with the
+ details of the module being removed prevents its removal.
+ DWFL cannot be used until this function has returned zero. */
+extern int dwfl_report_end (Dwfl *dwfl,
+ int (*removed) (Dwfl_Module *, void *,
+ const char *, Dwarf_Addr,
+ void *arg),
+ void *arg);
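+
+/* Usage sketch of the reporting cycle declared above; the module name,
+ "a.out", and the callback set are placeholder values and error handling
+ is minimal:
+
+     Dwfl *dwfl = dwfl_begin (&callbacks);
+     dwfl_report_begin (dwfl);
+     Dwfl_Module *mod = dwfl_report_elf (dwfl, "main", "a.out", -1, 0);
+     if (mod == NULL)
+       fprintf (stderr, "%s\n", dwfl_errmsg (-1));
+     dwfl_report_end (dwfl, NULL, NULL);
+ */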
+
+/* Start reporting additional modules to the library. No calls but
+ dwfl_report_* can be made on DWFL until dwfl_report_end is called.
+ This is like dwfl_report_begin, but all the old modules are kept on.
+ More dwfl_report_* calls can follow to add more modules.
+ When dwfl_report_end is called, no old modules will be removed. */
+extern void dwfl_report_begin_add (Dwfl *dwfl);
+
+
+/* Return the name of the module, and for each non-null argument store
+ interesting details: *USERDATA is a location for storing your own
+ pointer, **USERDATA is initially null; *START and *END give the address
+ range covered by the module; *DWBIAS is the address bias for debugging
+ information, and *SYMBIAS for symbol table entries (either is -1 if not
+ yet accessed); *MAINFILE is the name of the ELF file, and *DEBUGFILE the
+ name of the debuginfo file (might be equal to *MAINFILE; either is null
+ if not yet accessed). */
+extern const char *dwfl_module_info (Dwfl_Module *mod, void ***userdata,
+ Dwarf_Addr *start, Dwarf_Addr *end,
+ Dwarf_Addr *dwbias, Dwarf_Addr *symbias,
+ const char **mainfile,
+ const char **debugfile);
+
+/* Iterate through the modules, starting the walk with OFFSET == 0.
+ Calls *CALLBACK for each module as long as it returns DWARF_CB_OK.
+ When *CALLBACK returns another value, the walk stops and the
+ return value can be passed as OFFSET to resume it. Returns 0 when
+ there are no more modules, or -1 for errors. */
+extern ptrdiff_t dwfl_getmodules (Dwfl *dwfl,
+ int (*callback) (Dwfl_Module *, void **,
+ const char *, Dwarf_Addr,
+ void *arg),
+ void *arg,
+ ptrdiff_t offset);
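+
+/* Usage sketch: walk all modules with a callback that never aborts, so a
+ single call visits everything (lower-case names are placeholders):
+
+     static int print_module (Dwfl_Module *mod, void **userdata,
+                              const char *name, Dwarf_Addr start, void *arg)
+     {
+       printf ("%s starts at %#llx\n", name, (unsigned long long) start);
+       return DWARF_CB_OK;
+     }
+
+     dwfl_getmodules (dwfl, print_module, NULL, 0);
+ */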
+
+/* Find the module containing the given address. */
+extern Dwfl_Module *dwfl_addrmodule (Dwfl *dwfl, Dwarf_Addr address);
+
+/* Find the segment, if any, and module, if any, containing ADDRESS.
+ Returns a segment index returned by dwfl_report_segment, or -1
+ if no segment matches the address. Regardless of the return value,
+ *MOD is always set to the module containing ADDRESS, or to null. */
+extern int dwfl_addrsegment (Dwfl *dwfl, Dwarf_Addr address, Dwfl_Module **mod);
+
+
+
+/* Report the known build ID bits associated with a module.
+ If VADDR is nonzero, it gives the absolute address where those
+ bits are found within the module. This can be called at any
+ time, but is usually used immediately after dwfl_report_module.
+ Once the module's main ELF file is opened, the ID note found
+ there takes precedence and cannot be changed. */
+extern int dwfl_module_report_build_id (Dwfl_Module *mod,
+ const unsigned char *bits, size_t len,
+ GElf_Addr vaddr)
+ __nonnull_attribute__ (2);
+
+/* Extract the build ID bits associated with a module.
+ Returns -1 for errors, 0 if no ID is known, or the number of ID bytes.
+ When an ID is found, *BITS points to it; *VADDR is the absolute address
+ at which the ID bits are found within the module, or 0 if unknown.
+
+ This returns 0 when the module's main ELF file has not yet been loaded
+ and its build ID bits were not reported. To ensure the ID is always
+ returned when determinable, call dwfl_module_getelf first. */
+extern int dwfl_module_build_id (Dwfl_Module *mod,
+ const unsigned char **bits, GElf_Addr *vaddr)
+ __nonnull_attribute__ (2, 3);
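+
+/* Usage sketch: print a module's build ID as lower-case hex, if known:
+
+     const unsigned char *bits;
+     GElf_Addr vaddr;
+     int len = dwfl_module_build_id (mod, &bits, &vaddr);
+     for (int i = 0; i < len; ++i)
+       printf ("%02x", bits[i]);
+ */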
+
+
+/*** Standard callbacks ***/
+
+/* These standard find_elf and find_debuginfo callbacks are
+ controlled by a string specifying directories to look in.
+ If `debuginfo_path' is set in the Dwfl_Callbacks structure
+ and the char * it points to is not null, that supplies the
+ string. Otherwise a default path is used.
+
+ If the first character of the string is + or - that enables or
+ disables CRC32 checksum validation when it's necessary. The
+ remainder of the string is composed of elements separated by
+ colons. Each element can start with + or - to override the
+ global checksum behavior. This flag is never relevant when
+ working with build IDs, but it's always parsed in the path
+ string. The remainder of the element indicates a directory.
+
+ Searches by build ID consult only the elements naming absolute
+ directory paths. They look under those directories for a link
+ named ".build-id/xx/yy" or ".build-id/xx/yy.debug", where "xxyy"
+ is the lower-case hexadecimal representation of the ID bytes.
+
+ In searches for debuginfo by name, if the remainder of the
+ element is empty, the directory containing the main file is
+ tried; if it's an absolute path name, the absolute directory path
+ containing the main file is taken as a subdirectory of this path;
+ a relative path name is taken as a subdirectory of the directory
+ containing the main file. Hence for /bin/ls, the default string
+ ":.debug:/usr/lib/debug" says to look in /bin, then /bin/.debug,
+ then /usr/lib/debug/bin, for the file name in the .gnu_debuglink
+ section (or "ls.debug" if none was found). */
+
+/* Standard find_elf callback function working solely on build ID.
+ This can be tried first by any find_elf callback, to use the
+ bits passed to dwfl_module_report_build_id, if any. */
+extern int dwfl_build_id_find_elf (Dwfl_Module *, void **,
+ const char *, Dwarf_Addr,
+ char **, Elf **);
+
+/* Standard find_debuginfo callback function working solely on build ID.
+ This can be tried first by any find_debuginfo callback,
+ to use the build ID bits from the main file when present. */
+extern int dwfl_build_id_find_debuginfo (Dwfl_Module *, void **,
+ const char *, Dwarf_Addr,
+ const char *, const char *,
+ GElf_Word, char **);
+
+/* Standard find_debuginfo callback function.
+ If a build ID is available, this tries first to use that.
+ If there is no build ID or no valid debuginfo found by ID,
+ it searches the debuginfo path by name, as described above.
+ Any file found in the path is validated by build ID if possible,
+ or else by CRC32 checksum if enabled, and skipped if it does not match. */
+extern int dwfl_standard_find_debuginfo (Dwfl_Module *, void **,
+ const char *, Dwarf_Addr,
+ const char *, const char *,
+ GElf_Word, char **);
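+
+/* Usage sketch: a callback set wired to standard callbacks from this
+ header; the search path string is only an example value:
+
+     static char *debuginfo_path = ".debug:/usr/lib/debug";
+     static const Dwfl_Callbacks callbacks =
+       {
+         .find_elf = dwfl_build_id_find_elf,
+         .find_debuginfo = dwfl_standard_find_debuginfo,
+         .section_address = dwfl_offline_section_address,
+         .debuginfo_path = &debuginfo_path,
+       };
+     Dwfl *dwfl = dwfl_begin (&callbacks);
+ */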
+
+
+/* This callback must be used when using dwfl_offline_* to report modules,
+ if ET_REL is to be supported. */
+extern int dwfl_offline_section_address (Dwfl_Module *, void **,
+ const char *, Dwarf_Addr,
+ const char *, GElf_Word,
+ const GElf_Shdr *,
+ Dwarf_Addr *addr);
+
+
+/* Callbacks for working with kernel modules in the running Linux kernel. */
+extern int dwfl_linux_kernel_find_elf (Dwfl_Module *, void **,
+ const char *, Dwarf_Addr,
+ char **, Elf **);
+extern int dwfl_linux_kernel_module_section_address (Dwfl_Module *, void **,
+ const char *, Dwarf_Addr,
+ const char *, GElf_Word,
+ const GElf_Shdr *,
+ Dwarf_Addr *addr);
+
+/* Call dwfl_report_elf for the running Linux kernel.
+ Returns zero on success, -1 if dwfl_report_module failed,
+ or an errno code if opening the kernel binary failed. */
+extern int dwfl_linux_kernel_report_kernel (Dwfl *dwfl);
+
+/* Call dwfl_report_module for each kernel module in the running Linux kernel.
+ Returns zero on success, -1 if dwfl_report_module failed,
+ or an errno code if reading the list of modules failed. */
+extern int dwfl_linux_kernel_report_modules (Dwfl *dwfl);
+
+/* Report a kernel and its modules found on disk, for offline use.
+ If RELEASE starts with '/', it names a directory to look in;
+ if not, it names a directory to find under /lib/modules/;
+ if null, /lib/modules/`uname -r` is used.
+ Returns zero on success, -1 if dwfl_report_module failed,
+ or an errno code if finding the files on disk failed.
+
+ If PREDICATE is not null, it is called with each module to be reported;
+ its arguments are the module name, and the ELF file name or null if unknown,
+ and its return value should be zero to skip the module, one to report it,
+ or -1 to cause the call to fail and return errno. */
+extern int dwfl_linux_kernel_report_offline (Dwfl *dwfl, const char *release,
+ int (*predicate) (const char *,
+ const char *));
+
+/* Examine an ET_CORE file and report modules based on its contents.
+ This can follow a dwfl_report_offline call to bootstrap the
+ DT_DEBUG method of following the dynamic linker link_map chain, in
+ case the core file does not contain enough of the executable's text
+ segment to locate its PT_DYNAMIC in the dump. This might call
+ dwfl_report_elf on file names found in the dump if reading some
+ link_map files is the only way to ascertain those modules' addresses.
+ Returns the number of modules reported, or -1 for errors. */
+extern int dwfl_core_file_report (Dwfl *dwfl, Elf *elf);
+
+/* Call dwfl_report_module for each file mapped into the address space of PID.
+ Returns zero on success, -1 if dwfl_report_module failed,
+ or an errno code if opening the proc files failed. */
+extern int dwfl_linux_proc_report (Dwfl *dwfl, pid_t pid);
+
+/* Similar, but reads an input stream in the format of Linux /proc/PID/maps
+ files giving module layout, not the file for a live process. */
+extern int dwfl_linux_proc_maps_report (Dwfl *dwfl, FILE *);
+
+/* Trivial find_elf callback for use with dwfl_linux_proc_report.
+ This uses the module name as a file name directly and tries to open it
+ if it begins with a slash, or handles the magic string "[vdso]". */
+extern int dwfl_linux_proc_find_elf (Dwfl_Module *mod, void **userdata,
+ const char *module_name, Dwarf_Addr base,
+ char **file_name, Elf **);
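+
+/* Usage sketch: report the modules of a live process (PID is a placeholder),
+ following the report_begin/report_end cycle described above:
+
+     dwfl_report_begin (dwfl);
+     if (dwfl_linux_proc_report (dwfl, pid) != 0
+         || dwfl_report_end (dwfl, NULL, NULL) != 0)
+       fprintf (stderr, "%s\n", dwfl_errmsg (-1));
+ */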
+
+/* Standard argument parsing for using a standard callback set. */
+struct argp;
+extern const struct argp *dwfl_standard_argp (void) __attribute__ ((const));
+
+
+/*** Relocation of addresses from Dwfl ***/
+
+/* Return the number of relocatable bases associated with the module,
+ which is zero for ET_EXEC and one for ET_DYN. Returns -1 for errors. */
+extern int dwfl_module_relocations (Dwfl_Module *mod);
+
+/* Return the relocation base index associated with the *ADDRESS location,
+ and adjust *ADDRESS to be an offset relative to that base.
+ Returns -1 for errors. */
+extern int dwfl_module_relocate_address (Dwfl_Module *mod,
+ Dwarf_Addr *address);
+
+/* Return the ELF section name for the given relocation base index;
+ if SHNDXP is not null, set *SHNDXP to the ELF section index.
+ For ET_DYN, returns "" and sets *SHNDXP to SHN_ABS; the relocation
+ base is the runtime start address reported for the module.
+ Returns null for errors. */
+extern const char *dwfl_module_relocation_info (Dwfl_Module *mod,
+ unsigned int idx,
+ GElf_Word *shndxp);
+
+/* Validate that ADDRESS and ADDRESS+OFFSET lie in a known module
+ and both within the same contiguous region for relocation purposes.
+ Returns zero for success and -1 for errors. */
+extern int dwfl_validate_address (Dwfl *dwfl,
+ Dwarf_Addr address, Dwarf_Sword offset);
+
+
+/*** ELF access functions ***/
+
+/* Fetch the module main ELF file (where the allocated sections
+ are found) for use with libelf. If successful, fills in *BIAS
+ with the difference between addresses within the loaded module
+ and those in symbol tables or Dwarf information referring to it. */
+extern Elf *dwfl_module_getelf (Dwfl_Module *, GElf_Addr *bias);
+
+/* Return the number of symbols in the module's symbol table,
+ or -1 for errors. */
+extern int dwfl_module_getsymtab (Dwfl_Module *mod);
+
+/* Fetch one entry from the module's symbol table. On errors, returns
+ NULL. If successful, fills in *SYM and returns the string for st_name.
+ This works like gelf_getsym except that st_value is always adjusted to
+ an absolute value based on the module's location, when the symbol is in
+ an SHF_ALLOC section. If SHNDXP is non-null, it's set with the section
+ index (whether from st_shndx or extended index table); in case of a
+ symbol in a non-allocated section, *SHNDXP is instead set to -1. */
+extern const char *dwfl_module_getsym (Dwfl_Module *mod, int ndx,
+ GElf_Sym *sym, GElf_Word *shndxp)
+ __nonnull_attribute__ (3);
+
+/* Find the symbol that ADDRESS lies inside, and return its name. */
+extern const char *dwfl_module_addrname (Dwfl_Module *mod, GElf_Addr address);
+
+/* Find the symbol that ADDRESS lies inside, and return detailed
+ information as for dwfl_module_getsym (above). */
+extern const char *dwfl_module_addrsym (Dwfl_Module *mod, GElf_Addr address,
+ GElf_Sym *sym, GElf_Word *shndxp)
+ __nonnull_attribute__ (3);
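+
+/* Usage sketch: map an address to "symbol+offset"; ADDR is a GElf_Addr
+ inside the module and PRIx64 assumes <inttypes.h>:
+
+     GElf_Sym sym;
+     const char *name = dwfl_module_addrsym (mod, addr, &sym, NULL);
+     if (name != NULL)
+       printf ("%s+%#" PRIx64 "\n", name, (uint64_t) (addr - sym.st_value));
+ */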
+
+/* Find the ELF section that *ADDRESS lies inside and return it.
+ On success, adjusts *ADDRESS to be relative to the section,
+ and sets *BIAS to the difference between addresses used in
+ the returned section's headers and run-time addresses. */
+extern Elf_Scn *dwfl_module_address_section (Dwfl_Module *mod,
+ Dwarf_Addr *address,
+ Dwarf_Addr *bias)
+ __nonnull_attribute__ (2, 3);
+
+
+/*** Dwarf access functions ***/
+
+/* Fetch the module's debug information for use with libdw.
+ If successful, fills in *BIAS with the difference between
+ addresses within the loaded module and those to use with libdw. */
+extern Dwarf *dwfl_module_getdwarf (Dwfl_Module *, Dwarf_Addr *bias)
+ __nonnull_attribute__ (2);
+
+/* Get the libdw handle for each module. */
+extern ptrdiff_t dwfl_getdwarf (Dwfl *,
+ int (*callback) (Dwfl_Module *, void **,
+ const char *, Dwarf_Addr,
+ Dwarf *, Dwarf_Addr, void *),
+ void *arg, ptrdiff_t offset);
+
+/* Look up the module containing ADDR and return its debugging information,
+ loading it if necessary. */
+extern Dwarf *dwfl_addrdwarf (Dwfl *dwfl, Dwarf_Addr addr, Dwarf_Addr *bias)
+ __nonnull_attribute__ (3);
+
+
+/* Find the CU containing ADDR and return its DIE. */
+extern Dwarf_Die *dwfl_addrdie (Dwfl *dwfl, Dwarf_Addr addr, Dwarf_Addr *bias)
+ __nonnull_attribute__ (3);
+extern Dwarf_Die *dwfl_module_addrdie (Dwfl_Module *mod,
+ Dwarf_Addr addr, Dwarf_Addr *bias)
+ __nonnull_attribute__ (3);
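+
+/* Usage sketch: find the CU covering an address and print its name
+ (dwarf_diename is declared in libdw.h):
+
+     Dwarf_Addr bias;
+     Dwarf_Die *cudie = dwfl_addrdie (dwfl, addr, &bias);
+     if (cudie != NULL)
+       printf ("%s\n", dwarf_diename (cudie));
+ */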
+
+/* Iterate through the CUs, start with null for LASTCU. */
+extern Dwarf_Die *dwfl_nextcu (Dwfl *dwfl, Dwarf_Die *lastcu, Dwarf_Addr *bias)
+ __nonnull_attribute__ (3);
+extern Dwarf_Die *dwfl_module_nextcu (Dwfl_Module *mod,
+ Dwarf_Die *lastcu, Dwarf_Addr *bias)
+ __nonnull_attribute__ (3);
+
+/* Return the module containing the CU DIE. */
+extern Dwfl_Module *dwfl_cumodule (Dwarf_Die *cudie);
+
+
+/* Cache the source line information of the CU and return the
+ number of Dwfl_Line entries it has. */
+extern int dwfl_getsrclines (Dwarf_Die *cudie, size_t *nlines);
+
+/* Access one line number entry within the CU. */
+extern Dwfl_Line *dwfl_onesrcline (Dwarf_Die *cudie, size_t idx);
+
+/* Get source for address. */
+extern Dwfl_Line *dwfl_module_getsrc (Dwfl_Module *mod, Dwarf_Addr addr);
+extern Dwfl_Line *dwfl_getsrc (Dwfl *dwfl, Dwarf_Addr addr);
+
+/* Get address for source. */
+extern int dwfl_module_getsrc_file (Dwfl_Module *mod,
+ const char *fname, int lineno, int column,
+ Dwfl_Line ***srcsp, size_t *nsrcs);
+
+/* Return the module containing this line record. */
+extern Dwfl_Module *dwfl_linemodule (Dwfl_Line *line);
+
+/* Return the CU containing this line record. */
+extern Dwarf_Die *dwfl_linecu (Dwfl_Line *line);
+
+/* Return the source file name and fill in other information.
+ Arguments may be null for unneeded fields. */
+extern const char *dwfl_lineinfo (Dwfl_Line *line, Dwarf_Addr *addr,
+ int *linep, int *colp,
+ Dwarf_Word *mtime, Dwarf_Word *length);
+
+/* Return the compilation directory (AT_comp_dir) from this line's CU. */
+extern const char *dwfl_line_comp_dir (Dwfl_Line *line);
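+
+/* Usage sketch: map an address to file:line:
+
+     Dwfl_Line *line = dwfl_getsrc (dwfl, addr);
+     if (line != NULL)
+       {
+         int lineno = 0;
+         const char *src = dwfl_lineinfo (line, NULL, &lineno, NULL, NULL, NULL);
+         if (src != NULL)
+           printf ("%s:%d\n", src, lineno);
+       }
+ */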
+
+
+/*** Machine backend access functions ***/
+
+/* Return location expression to find return value given a
+ DW_TAG_subprogram, DW_TAG_subroutine_type, or similar DIE describing
+ the function itself (whose DW_AT_type attribute describes its return type).
+ The given DIE must come from the given module. Returns -1 for errors.
+ Returns zero if the function has no return value (e.g. "void" in C).
+ Otherwise, *LOCOPS gets a location expression to find the return value,
+ and returns the number of operations in the expression. The pointer is
+ permanently allocated at least as long as the module is live. */
+extern int dwfl_module_return_value_location (Dwfl_Module *mod,
+ Dwarf_Die *functypedie,
+ const Dwarf_Op **locops);
+
+/* Enumerate the DWARF register numbers and their names.
+ For each register, CALLBACK gets its DWARF number, a string describing
+ the register set (such as "integer" or "FPU"), a prefix used in
+ assembler syntax (such as "%" or "$", may be ""), and the name for the
+ register (contains identifier characters only, possibly all digits).
+ The REGNAME string is valid only during the callback. */
+extern int dwfl_module_register_names (Dwfl_Module *mod,
+ int (*callback) (void *arg,
+ int regno,
+ const char *setname,
+ const char *prefix,
+ const char *regname,
+ int bits, int type),
+ void *arg);
+
+
+/* Find the CFI for this module. Returns NULL if there is no CFI.
+ On success, fills in *BIAS with the difference between addresses
+ within the loaded module and those in the CFI referring to it.
+ The pointer returned can be used until the module is cleaned up.
+ Calling these more than once returns the same pointers.
+
+ dwfl_module_dwarf_cfi gets the '.debug_frame' information found with the
+ rest of the DWARF information. dwfl_module_eh_cfi gets the '.eh_frame'
+ information found linked into the text. A module might have either or
+ both. */
+extern Dwarf_CFI *dwfl_module_dwarf_cfi (Dwfl_Module *mod, Dwarf_Addr *bias);
+extern Dwarf_CFI *dwfl_module_eh_cfi (Dwfl_Module *mod, Dwarf_Addr *bias);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* libdwfl.h */
diff --git a/tools/perf/external/usr/include/elfutils/version.h b/tools/perf/external/usr/include/elfutils/version.h
new file mode 100644
index 00000000..ec565d5b
--- /dev/null
+++ b/tools/perf/external/usr/include/elfutils/version.h
@@ -0,0 +1,58 @@
+/* Version information about elfutils development libraries.
+ Copyright (C) 2008 Red Hat, Inc.
+ This file is part of Red Hat elfutils.
+
+ Red Hat elfutils is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by the
+ Free Software Foundation; version 2 of the License.
+
+ Red Hat elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with Red Hat elfutils; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA.
+
+ In addition, as a special exception, Red Hat, Inc. gives You the
+ additional right to link the code of Red Hat elfutils with code licensed
+ under any Open Source Initiative certified open source license
+ (http://www.opensource.org/licenses/index.php) which requires the
+ distribution of source code with any binary distribution and to
+ distribute linked combinations of the two. Non-GPL Code permitted under
+ this exception must only link to the code of Red Hat elfutils through
+ those well defined interfaces identified in the file named EXCEPTION
+ found in the source code files (the "Approved Interfaces"). The files
+ of Non-GPL Code may instantiate templates or use macros or inline
+ functions from the Approved Interfaces without causing the resulting
+ work to be covered by the GNU General Public License. Only Red Hat,
+ Inc. may make changes or additions to the list of Approved Interfaces.
+ Red Hat's grant of this exception is conditioned upon your not adding
+ any new exceptions. If you wish to add a new Approved Interface or
+ exception, please contact Red Hat. You must obey the GNU General Public
+ License in all respects for all of the Red Hat elfutils code and other
+ code used in conjunction with Red Hat elfutils except the Non-GPL Code
+ covered by this exception. If you modify this file, you may extend this
+ exception to your version of the file, but you are not obligated to do
+ so. If you do not wish to provide this exception without modification,
+ you must delete this exception statement from your version and license
+ this file solely under the GPL without exception.
+
+ Red Hat elfutils is an included package of the Open Invention Network.
+ An included package of the Open Invention Network is a package for which
+ Open Invention Network licensees cross-license their patents. No patent
+ license is granted, either expressly or impliedly, by designation as an
+ included package. Should you wish to participate in the Open Invention
+ Network licensing program, please visit www.openinventionnetwork.com
+ <http://www.openinventionnetwork.com>. */
+
+#ifndef _ELFUTILS_VERSION_H
+#define _ELFUTILS_VERSION_H 1
+
+#define _ELFUTILS_VERSION 148
+
+#define _ELFUTILS_PREREQ(major, minor) \
+ (_ELFUTILS_VERSION >= ((major) * 1000 + (minor)))
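+
+/* Usage sketch: compile-time check for a minimum elfutils version:
+
+     #if _ELFUTILS_PREREQ (0, 148)
+     // interfaces new in 0.148 may be used here
+     #endif
+ */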
+
+#endif /* elfutils/version.h */
diff --git a/tools/perf/external/usr/include/gelf.h b/tools/perf/external/usr/include/gelf.h
new file mode 100644
index 00000000..533e15a9
--- /dev/null
+++ b/tools/perf/external/usr/include/gelf.h
@@ -0,0 +1,353 @@
+/* This file defines generic ELF types, structures, and macros.
+ Copyright (C) 1999, 2000, 2001, 2002, 2004, 2005, 2007 Red Hat, Inc.
+ This file is part of Red Hat elfutils.
+
+ Red Hat elfutils is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by the
+ Free Software Foundation; version 2 of the License.
+
+ Red Hat elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with Red Hat elfutils; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA.
+
+ In addition, as a special exception, Red Hat, Inc. gives You the
+ additional right to link the code of Red Hat elfutils with code licensed
+ under any Open Source Initiative certified open source license
+ (http://www.opensource.org/licenses/index.php) which requires the
+ distribution of source code with any binary distribution and to
+ distribute linked combinations of the two. Non-GPL Code permitted under
+ this exception must only link to the code of Red Hat elfutils through
+ those well defined interfaces identified in the file named EXCEPTION
+ found in the source code files (the "Approved Interfaces"). The files
+ of Non-GPL Code may instantiate templates or use macros or inline
+ functions from the Approved Interfaces without causing the resulting
+ work to be covered by the GNU General Public License. Only Red Hat,
+ Inc. may make changes or additions to the list of Approved Interfaces.
+ Red Hat's grant of this exception is conditioned upon your not adding
+ any new exceptions. If you wish to add a new Approved Interface or
+ exception, please contact Red Hat. You must obey the GNU General Public
+ License in all respects for all of the Red Hat elfutils code and other
+ code used in conjunction with Red Hat elfutils except the Non-GPL Code
+ covered by this exception. If you modify this file, you may extend this
+ exception to your version of the file, but you are not obligated to do
+ so. If you do not wish to provide this exception without modification,
+ you must delete this exception statement from your version and license
+ this file solely under the GPL without exception.
+
+ Red Hat elfutils is an included package of the Open Invention Network.
+ An included package of the Open Invention Network is a package for which
+ Open Invention Network licensees cross-license their patents. No patent
+ license is granted, either expressly or impliedly, by designation as an
+ included package. Should you wish to participate in the Open Invention
+ Network licensing program, please visit www.openinventionnetwork.com
+ <http://www.openinventionnetwork.com>. */
+
+#ifndef _GELF_H
+#define _GELF_H 1
+
+#include <libelf.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Class independent type definitions. Strictly speaking this is not
+ true. We assume that 64-bit binaries are the largest class and
+ therefore all other classes can be represented without loss. */
+
+/* Type for a 16-bit quantity. */
+typedef Elf64_Half GElf_Half;
+
+/* Types for signed and unsigned 32-bit quantities. */
+typedef Elf64_Word GElf_Word;
+typedef Elf64_Sword GElf_Sword;
+
+/* Types for signed and unsigned 64-bit quantities. */
+typedef Elf64_Xword GElf_Xword;
+typedef Elf64_Sxword GElf_Sxword;
+
+/* Type of addresses. */
+typedef Elf64_Addr GElf_Addr;
+
+/* Type of file offsets. */
+typedef Elf64_Off GElf_Off;
+
+
+/* The ELF file header. This appears at the start of every ELF file. */
+typedef Elf64_Ehdr GElf_Ehdr;
+
+/* Section header. */
+typedef Elf64_Shdr GElf_Shdr;
+
+/* Section index. */
+/* XXX This should probably be a larger type in preparation of times when
+ regular section indices can be larger. */
+typedef Elf64_Section GElf_Section;
+
+/* Symbol table entry. */
+typedef Elf64_Sym GElf_Sym;
+
+/* The syminfo section if available contains additional information about
+ every dynamic symbol. */
+typedef Elf64_Syminfo GElf_Syminfo;
+
+/* Relocation table entry without addend (in section of type SHT_REL). */
+typedef Elf64_Rel GElf_Rel;
+
+/* Relocation table entry with addend (in section of type SHT_RELA). */
+typedef Elf64_Rela GElf_Rela;
+
+/* Program segment header. */
+typedef Elf64_Phdr GElf_Phdr;
+
+/* Dynamic section entry. */
+typedef Elf64_Dyn GElf_Dyn;
+
+
+/* Version definition sections. */
+typedef Elf64_Verdef GElf_Verdef;
+
+/* Auxiliary version information. */
+typedef Elf64_Verdaux GElf_Verdaux;
+
+/* Version dependency section. */
+typedef Elf64_Verneed GElf_Verneed;
+
+/* Auxiliary needed version information. */
+typedef Elf64_Vernaux GElf_Vernaux;
+
+
+/* Type for version symbol information. */
+typedef Elf64_Versym GElf_Versym;
+
+
+/* Auxiliary vector. */
+typedef Elf64_auxv_t GElf_auxv_t;
+
+
+/* Note section contents. */
+typedef Elf64_Nhdr GElf_Nhdr;
+
+
+/* Move structure. */
+typedef Elf64_Move GElf_Move;
+
+
+/* Library list structure. */
+typedef Elf64_Lib GElf_Lib;
+
+
+/* How to extract and insert information held in the st_info field. */
+
+#define GELF_ST_BIND(val) ELF64_ST_BIND (val)
+#define GELF_ST_TYPE(val) ELF64_ST_TYPE (val)
+#define GELF_ST_INFO(bind, type) ELF64_ST_INFO (bind, type)
+
+/* How to extract information held in the st_other field. */
+
+#define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val)
+
+
+/* How to extract and insert information held in the r_info field. */
+
+#define GELF_R_SYM(info) ELF64_R_SYM (info)
+#define GELF_R_TYPE(info) ELF64_R_TYPE (info)
+#define GELF_R_INFO(sym, type) ELF64_R_INFO (sym, type)
+
+
+/* How to extract and insert information held in the m_info field. */
+#define GELF_M_SYM(info) ELF64_M_SYM (info)
+#define GELF_M_SIZE(info) ELF64_M_SIZE (info)
+#define GELF_M_INFO(sym, size) ELF64_M_INFO (sym, size)
+
+
+/* Get class of the file associated with ELF. */
+extern int gelf_getclass (Elf *__elf);
+
+
+/* Return size of array of COUNT elements of the type denoted by TYPE
+ in the external representation. The binary class is taken from ELF.
+ The result is based on version VERSION of the ELF standard. */
+extern size_t gelf_fsize (Elf *__elf, Elf_Type __type, size_t __count,
+ unsigned int __version);
+
+/* Retrieve object file header. */
+extern GElf_Ehdr *gelf_getehdr (Elf *__elf, GElf_Ehdr *__dest);
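+
+/* Usage sketch: read the file header in a class-independent way (ELF is an
+ open descriptor obtained from elf_begin):
+
+     GElf_Ehdr ehdr_mem;
+     GElf_Ehdr *ehdr = gelf_getehdr (elf, &ehdr_mem);
+     if (ehdr != NULL)
+       printf ("class %d, machine %d\n", gelf_getclass (elf), ehdr->e_machine);
+ */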
+
+/* Update the ELF header. */
+extern int gelf_update_ehdr (Elf *__elf, GElf_Ehdr *__src);
+
+/* Create new ELF header if none exists. */
+extern unsigned long int gelf_newehdr (Elf *__elf, int __class);
+
+/* Get section at OFFSET. */
+extern Elf_Scn *gelf_offscn (Elf *__elf, GElf_Off __offset);
+
+/* Retrieve section header. */
+extern GElf_Shdr *gelf_getshdr (Elf_Scn *__scn, GElf_Shdr *__dst);
+
+/* Update section header. */
+extern int gelf_update_shdr (Elf_Scn *__scn, GElf_Shdr *__src);
+
+/* Retrieve program header table entry. */
+extern GElf_Phdr *gelf_getphdr (Elf *__elf, int __ndx, GElf_Phdr *__dst);
+
+/* Update the program header. */
+extern int gelf_update_phdr (Elf *__elf, int __ndx, GElf_Phdr *__src);
+
+/* Create new program header with PHNUM entries. */
+extern unsigned long int gelf_newphdr (Elf *__elf, size_t __phnum);
+
+
+/* Convert data structures from their representation in the file described
+ by ELF to their memory representation. */
+extern Elf_Data *gelf_xlatetom (Elf *__elf, Elf_Data *__dest,
+ const Elf_Data *__src, unsigned int __encode);
+
+/* Convert data structures from their memory representation to the file
+ representation described by ELF. */
+extern Elf_Data *gelf_xlatetof (Elf *__elf, Elf_Data *__dest,
+ const Elf_Data *__src, unsigned int __encode);
+
+
+/* Retrieve REL relocation info at the given index. */
+extern GElf_Rel *gelf_getrel (Elf_Data *__data, int __ndx, GElf_Rel *__dst);
+
+/* Retrieve RELA relocation info at the given index. */
+extern GElf_Rela *gelf_getrela (Elf_Data *__data, int __ndx, GElf_Rela *__dst);
+
+/* Update REL relocation information at given index. */
+extern int gelf_update_rel (Elf_Data *__dst, int __ndx, GElf_Rel *__src);
+
+/* Update RELA relocation information at given index. */
+extern int gelf_update_rela (Elf_Data *__dst, int __ndx, GElf_Rela *__src);
+
+
+/* Retrieve symbol information from the symbol table at the given index. */
+extern GElf_Sym *gelf_getsym (Elf_Data *__data, int __ndx, GElf_Sym *__dst);
+
+/* Update symbol information in the symbol table at the given index. */
+extern int gelf_update_sym (Elf_Data *__data, int __ndx, GElf_Sym *__src);
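+
+/* Usage sketch: walk a symbol table, where SHDR is the GElf_Shdr of the
+ symbol table section and DATA its Elf_Data (elf_strptr is declared in
+ libelf.h):
+
+     int nsyms = shdr.sh_size / shdr.sh_entsize;
+     for (int ndx = 0; ndx < nsyms; ++ndx)
+       {
+         GElf_Sym sym;
+         if (gelf_getsym (data, ndx, &sym) != NULL)
+           printf ("%s\n", elf_strptr (elf, shdr.sh_link, sym.st_name));
+       }
+ */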
+
+
+/* Retrieve symbol information and separate section index from the
+ symbol table at the given index. */
+extern GElf_Sym *gelf_getsymshndx (Elf_Data *__symdata, Elf_Data *__shndxdata,
+ int __ndx, GElf_Sym *__sym,
+ Elf32_Word *__xshndx);
+
+/* Update symbol information and separate section index in the symbol
+ table at the given index. */
+extern int gelf_update_symshndx (Elf_Data *__symdata, Elf_Data *__shndxdata,
+ int __ndx, GElf_Sym *__sym,
+ Elf32_Word __xshndx);
+
+
+/* Retrieve additional symbol information from the symbol table at the
+ given index. */
+extern GElf_Syminfo *gelf_getsyminfo (Elf_Data *__data, int __ndx,
+ GElf_Syminfo *__dst);
+
+/* Update additional symbol information in the symbol table at the
+ given index. */
+extern int gelf_update_syminfo (Elf_Data *__data, int __ndx,
+ GElf_Syminfo *__src);
+
+
+/* Get information from dynamic table at the given index. */
+extern GElf_Dyn *gelf_getdyn (Elf_Data *__data, int __ndx, GElf_Dyn *__dst);
+
+/* Update information in dynamic table at the given index. */
+extern int gelf_update_dyn (Elf_Data *__dst, int __ndx, GElf_Dyn *__src);
+
+
+/* Get move structure at the given index. */
+extern GElf_Move *gelf_getmove (Elf_Data *__data, int __ndx, GElf_Move *__dst);
+
+/* Update move structure at the given index. */
+extern int gelf_update_move (Elf_Data *__data, int __ndx,
+ GElf_Move *__src);
+
+
+/* Get library from table at the given index. */
+extern GElf_Lib *gelf_getlib (Elf_Data *__data, int __ndx, GElf_Lib *__dst);
+
+/* Update library in table at the given index. */
+extern int gelf_update_lib (Elf_Data *__data, int __ndx, GElf_Lib *__src);
+
+
+
+/* Retrieve symbol version information at given index. */
+extern GElf_Versym *gelf_getversym (Elf_Data *__data, int __ndx,
+ GElf_Versym *__dst);
+
+/* Update symbol version information. */
+extern int gelf_update_versym (Elf_Data *__data, int __ndx,
+ GElf_Versym *__src);
+
+
+/* Retrieve required symbol version information at given offset. */
+extern GElf_Verneed *gelf_getverneed (Elf_Data *__data, int __offset,
+ GElf_Verneed *__dst);
+
+/* Update required symbol version information. */
+extern int gelf_update_verneed (Elf_Data *__data, int __offset,
+ GElf_Verneed *__src);
+
+/* Retrieve additional required symbol version information at given offset. */
+extern GElf_Vernaux *gelf_getvernaux (Elf_Data *__data, int __offset,
+ GElf_Vernaux *__dst);
+
+/* Update additional required symbol version information. */
+extern int gelf_update_vernaux (Elf_Data *__data, int __offset,
+ GElf_Vernaux *__src);
+
+
+/* Retrieve symbol version definition information at given offset. */
+extern GElf_Verdef *gelf_getverdef (Elf_Data *__data, int __offset,
+ GElf_Verdef *__dst);
+
+/* Update symbol version definition information. */
+extern int gelf_update_verdef (Elf_Data *__data, int __offset,
+ GElf_Verdef *__src);
+
+/* Retrieve additional symbol version definition information at given
+ offset. */
+extern GElf_Verdaux *gelf_getverdaux (Elf_Data *__data, int __offset,
+ GElf_Verdaux *__dst);
+
+/* Update additional symbol version definition information. */
+extern int gelf_update_verdaux (Elf_Data *__data, int __offset,
+ GElf_Verdaux *__src);
+
+
+/* Get auxv entry at the given index. */
+extern GElf_auxv_t *gelf_getauxv (Elf_Data *__data, int __ndx,
+ GElf_auxv_t *__dst);
+
+/* Update auxv entry at the given index. */
+extern int gelf_update_auxv (Elf_Data *__data, int __ndx, GElf_auxv_t *__src);
+
+
+/* Get note header at the given offset into the data, and the offsets of
+ the note's name and descriptor data. Returns the offset of the next
+ note header, or 0 for an invalid offset or corrupt note header. */
+extern size_t gelf_getnote (Elf_Data *__data, size_t __offset,
+ GElf_Nhdr *__result,
+ size_t *__name_offset, size_t *__desc_offset);
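+
+/* Usage sketch: walk the contents of a note section, where DATA is the
+ Elf_Data of an SHT_NOTE section:
+
+     GElf_Nhdr nhdr;
+     size_t off = 0, name_off, desc_off;
+     while ((off = gelf_getnote (data, off, &nhdr, &name_off, &desc_off)) > 0)
+       printf ("owner %s, type %u\n",
+               (const char *) data->d_buf + name_off, nhdr.n_type);
+ */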
+
+
+/* Compute simple checksum from permanent parts of the ELF file. */
+extern long int gelf_checksum (Elf *__elf);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* gelf.h */
diff --git a/tools/perf/external/usr/include/libelf.h b/tools/perf/external/usr/include/libelf.h
new file mode 100644
index 00000000..b0b3a8d7
--- /dev/null
+++ b/tools/perf/external/usr/include/libelf.h
@@ -0,0 +1,417 @@
+/* Interface for libelf.
+ Copyright (C) 1998-2010 Red Hat, Inc.
+ This file is part of Red Hat elfutils.
+
+ Red Hat elfutils is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by the
+ Free Software Foundation; version 2 of the License.
+
+ Red Hat elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with Red Hat elfutils; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA.
+
+ In addition, as a special exception, Red Hat, Inc. gives You the
+ additional right to link the code of Red Hat elfutils with code licensed
+ under any Open Source Initiative certified open source license
+ (http://www.opensource.org/licenses/index.php) which requires the
+ distribution of source code with any binary distribution and to
+ distribute linked combinations of the two. Non-GPL Code permitted under
+ this exception must only link to the code of Red Hat elfutils through
+ those well defined interfaces identified in the file named EXCEPTION
+ found in the source code files (the "Approved Interfaces"). The files
+ of Non-GPL Code may instantiate templates or use macros or inline
+ functions from the Approved Interfaces without causing the resulting
+ work to be covered by the GNU General Public License. Only Red Hat,
+ Inc. may make changes or additions to the list of Approved Interfaces.
+ Red Hat's grant of this exception is conditioned upon your not adding
+ any new exceptions. If you wish to add a new Approved Interface or
+ exception, please contact Red Hat. You must obey the GNU General Public
+ License in all respects for all of the Red Hat elfutils code and other
+ code used in conjunction with Red Hat elfutils except the Non-GPL Code
+ covered by this exception. If you modify this file, you may extend this
+ exception to your version of the file, but you are not obligated to do
+ so. If you do not wish to provide this exception without modification,
+ you must delete this exception statement from your version and license
+ this file solely under the GPL without exception.
+
+ Red Hat elfutils is an included package of the Open Invention Network.
+ An included package of the Open Invention Network is a package for which
+ Open Invention Network licensees cross-license their patents. No patent
+ license is granted, either expressly or impliedly, by designation as an
+ included package. Should you wish to participate in the Open Invention
+ Network licensing program, please visit www.openinventionnetwork.com
+ <http://www.openinventionnetwork.com>. */
+
+#ifndef _LIBELF_H
+#define _LIBELF_H 1
+
+#include <sys/types.h>
+
+/* Get the ELF types. */
+#include <elf.h>
+
+
+/* Known translation types. */
+typedef enum
+{
+ ELF_T_BYTE, /* unsigned char */
+ ELF_T_ADDR, /* Elf32_Addr, Elf64_Addr, ... */
+ ELF_T_DYN, /* Dynamic section record. */
+ ELF_T_EHDR, /* ELF header. */
+ ELF_T_HALF, /* Elf32_Half, Elf64_Half, ... */
+ ELF_T_OFF, /* Elf32_Off, Elf64_Off, ... */
+ ELF_T_PHDR, /* Program header. */
+ ELF_T_RELA, /* Relocation entry with addend. */
+ ELF_T_REL, /* Relocation entry. */
+ ELF_T_SHDR, /* Section header. */
+ ELF_T_SWORD, /* Elf32_Sword, Elf64_Sword, ... */
+ ELF_T_SYM, /* Symbol record. */
+ ELF_T_WORD, /* Elf32_Word, Elf64_Word, ... */
+ ELF_T_XWORD, /* Elf32_Xword, Elf64_Xword, ... */
+ ELF_T_SXWORD, /* Elf32_Sxword, Elf64_Sxword, ... */
+ ELF_T_VDEF, /* Elf32_Verdef, Elf64_Verdef, ... */
+ ELF_T_VDAUX, /* Elf32_Verdaux, Elf64_Verdaux, ... */
+ ELF_T_VNEED, /* Elf32_Verneed, Elf64_Verneed, ... */
+ ELF_T_VNAUX, /* Elf32_Vernaux, Elf64_Vernaux, ... */
+ ELF_T_NHDR, /* Elf32_Nhdr, Elf64_Nhdr, ... */
+ ELF_T_SYMINFO, /* Elf32_Syminfo, Elf64_Syminfo, ... */
+ ELF_T_MOVE, /* Elf32_Move, Elf64_Move, ... */
+ ELF_T_LIB, /* Elf32_Lib, Elf64_Lib, ... */
+ ELF_T_GNUHASH, /* GNU-style hash section. */
+ ELF_T_AUXV, /* Elf32_auxv_t, Elf64_auxv_t, ... */
+ /* Keep this the last entry. */
+ ELF_T_NUM
+} Elf_Type;
+
+/* Descriptor for data to be converted to or from memory format. */
+typedef struct
+{
+ void *d_buf; /* Pointer to the actual data. */
+ Elf_Type d_type; /* Type of this piece of data. */
+ unsigned int d_version; /* ELF version. */
+ size_t d_size; /* Size in bytes. */
+ loff_t d_off; /* Offset into section. */
+ size_t d_align; /* Alignment in section. */
+} Elf_Data;
+
+
+/* Commands for `...'. */
+typedef enum
+{
+ ELF_C_NULL, /* Nothing, terminate, or compute only. */
+ ELF_C_READ, /* Read .. */
+ ELF_C_RDWR, /* Read and write .. */
+ ELF_C_WRITE, /* Write .. */
+ ELF_C_CLR, /* Clear flag. */
+ ELF_C_SET, /* Set flag. */
+ ELF_C_FDDONE, /* Signal that file descriptor will not be
+ used anymore. */
+ ELF_C_FDREAD, /* Read rest of data so that file descriptor
+ is not used anymore. */
+ /* The following are extensions. */
+ ELF_C_READ_MMAP, /* Read, but mmap the file if possible. */
+ ELF_C_RDWR_MMAP, /* Read and write, with mmap. */
+ ELF_C_WRITE_MMAP, /* Write, with mmap. */
+ ELF_C_READ_MMAP_PRIVATE, /* Read, but memory is writable, results are
+ not written to the file. */
+ ELF_C_EMPTY, /* Copy basic file data but not the content. */
+ /* Keep this the last entry. */
+ ELF_C_NUM
+} Elf_Cmd;
+
+
+/* Flags for the ELF structures. */
+enum
+{
+ ELF_F_DIRTY = 0x1,
+#define ELF_F_DIRTY ELF_F_DIRTY
+ ELF_F_LAYOUT = 0x4,
+#define ELF_F_LAYOUT ELF_F_LAYOUT
+ ELF_F_PERMISSIVE = 0x8
+#define ELF_F_PERMISSIVE ELF_F_PERMISSIVE
+};
+
+
+/* Identification values for recognized object files. */
+typedef enum
+{
+ ELF_K_NONE, /* Unknown. */
+ ELF_K_AR, /* Archive. */
+ ELF_K_COFF, /* Stupid old COFF. */
+ ELF_K_ELF, /* ELF file. */
+ /* Keep this the last entry. */
+ ELF_K_NUM
+} Elf_Kind;
+
+
+/* Archive member header. */
+typedef struct
+{
+ char *ar_name; /* Name of archive member. */
+ time_t ar_date; /* File date. */
+ uid_t ar_uid; /* User ID. */
+ gid_t ar_gid; /* Group ID. */
+ mode_t ar_mode; /* File mode. */
+ loff_t ar_size; /* File size. */
+ char *ar_rawname; /* Original name of archive member. */
+} Elf_Arhdr;
+
+
+/* Archive symbol table entry. */
+typedef struct
+{
+ char *as_name; /* Symbol name. */
+ size_t as_off; /* Offset for this file in the archive. */
+ unsigned long int as_hash; /* Hash value of the name. */
+} Elf_Arsym;
+
+
+/* Descriptor for the ELF file. */
+typedef struct Elf Elf;
+
+/* Descriptor for ELF file section. */
+typedef struct Elf_Scn Elf_Scn;
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Return descriptor for ELF file to work according to CMD. */
+extern Elf *elf_begin (int __fildes, Elf_Cmd __cmd, Elf *__ref);
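+
+/* Usage sketch: open a descriptor for reading (FD is an already-open file
+ descriptor; EV_CURRENT comes from <elf.h>; elf_version, elf_errmsg and
+ elf_end are declared below):
+
+     if (elf_version (EV_CURRENT) == EV_NONE)
+       return;                       // library and application disagree
+     Elf *elf = elf_begin (fd, ELF_C_READ, NULL);
+     if (elf == NULL)
+       fprintf (stderr, "%s\n", elf_errmsg (-1));
+     ...
+     elf_end (elf);
+ */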
+
+/* Create a clone of an existing ELF descriptor. */
+ extern Elf *elf_clone (Elf *__elf, Elf_Cmd __cmd);
+
+/* Create descriptor for memory region. */
+extern Elf *elf_memory (char *__image, size_t __size);
+
+/* Advance archive descriptor to next element. */
+extern Elf_Cmd elf_next (Elf *__elf);
+
+/* Free resources allocated for ELF. */
+extern int elf_end (Elf *__elf);
+
+/* Update ELF descriptor and write file to disk. */
+extern loff_t elf_update (Elf *__elf, Elf_Cmd __cmd);
+
+/* Determine what kind of file is associated with ELF. */
+extern Elf_Kind elf_kind (Elf *__elf) __attribute__ ((__pure__));
+
+/* Get the base offset for an object file. */
+extern loff_t elf_getbase (Elf *__elf);
+
+
+/* Retrieve file identification data. */
+extern char *elf_getident (Elf *__elf, size_t *__nbytes);
+
+/* Retrieve class-dependent object file header. */
+extern Elf32_Ehdr *elf32_getehdr (Elf *__elf);
+/* Similar but this time the binary class is ELFCLASS64. */
+extern Elf64_Ehdr *elf64_getehdr (Elf *__elf);
+
+/* Create ELF header if none exists. */
+extern Elf32_Ehdr *elf32_newehdr (Elf *__elf);
+/* Similar but this time the binary class is ELFCLASS64. */
+extern Elf64_Ehdr *elf64_newehdr (Elf *__elf);
+
+/* Get the number of program headers in the ELF file. If the file uses
+ more headers than can be represented in the e_phnum field of the ELF
+ header the information from the sh_info field in the zeroth section
+ header is used. */
+extern int elf_getphdrnum (Elf *__elf, size_t *__dst);
+
+/* Retrieve class-dependent program header table. */
+extern Elf32_Phdr *elf32_getphdr (Elf *__elf);
+/* Similar but this time the binary class is ELFCLASS64. */
+extern Elf64_Phdr *elf64_getphdr (Elf *__elf);
+
+/* Create ELF program header. */
+extern Elf32_Phdr *elf32_newphdr (Elf *__elf, size_t __cnt);
+/* Similar but this time the binary class is ELFCLASS64. */
+extern Elf64_Phdr *elf64_newphdr (Elf *__elf, size_t __cnt);
+
+
+/* Get section at INDEX. */
+extern Elf_Scn *elf_getscn (Elf *__elf, size_t __index);
+
+/* Get section at OFFSET. */
+extern Elf_Scn *elf32_offscn (Elf *__elf, Elf32_Off __offset);
+/* Similar but this time the binary class is ELFCLASS64. */
+extern Elf_Scn *elf64_offscn (Elf *__elf, Elf64_Off __offset);
+
+/* Get index of section. */
+extern size_t elf_ndxscn (Elf_Scn *__scn);
+
+/* Get section with next section index. */
+extern Elf_Scn *elf_nextscn (Elf *__elf, Elf_Scn *__scn);
+
+/* Create a new section and append it at the end of the table. */
+extern Elf_Scn *elf_newscn (Elf *__elf);
+
+/* Get the section index of the extended section index table for the
+ given symbol table. */
+extern int elf_scnshndx (Elf_Scn *__scn);
+
+/* Get the number of sections in the ELF file. If the file uses more
+ sections than can be represented in the e_shnum field of the ELF
+ header the information from the sh_size field in the zeroth section
+ header is used. */
+extern int elf_getshdrnum (Elf *__elf, size_t *__dst);
+/* Sun messed up the implementation of 'elf_getshnum' in their implementation.
+ It was agreed to make the same functionality available under a different
+ name and obsolete the old name. */
+extern int elf_getshnum (Elf *__elf, size_t *__dst)
+ __attribute__ ((__deprecated__));
+
+
+/* Get the section index of the section header string table in the ELF
+ file. If the index cannot be represented in the e_shnum field of
+ the ELF header the information from the sh_link field in the zeroth
+ section header is used. */
+extern int elf_getshdrstrndx (Elf *__elf, size_t *__dst);
+/* Sun messed up the implementation of 'elf_getshstrndx' in their implementation.
+ It was agreed to make the same functionality available under a different
+ name and obsolete the old name. */
+extern int elf_getshstrndx (Elf *__elf, size_t *__dst)
+ __attribute__ ((__deprecated__));
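+
+/* Usage sketch: list all section names (GElf_Shdr and gelf_getshdr come
+ from <gelf.h>; elf_strptr is declared further below):
+
+     size_t shstrndx;
+     if (elf_getshdrstrndx (elf, &shstrndx) == 0)
+       {
+         Elf_Scn *scn = NULL;
+         while ((scn = elf_nextscn (elf, scn)) != NULL)
+           {
+             GElf_Shdr shdr;
+             if (gelf_getshdr (scn, &shdr) != NULL)
+               printf ("%s\n", elf_strptr (elf, shstrndx, shdr.sh_name));
+           }
+       }
+ */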
+
+
+/* Retrieve section header of ELFCLASS32 binary. */
+extern Elf32_Shdr *elf32_getshdr (Elf_Scn *__scn);
+/* Similar for ELFCLASS64. */
+extern Elf64_Shdr *elf64_getshdr (Elf_Scn *__scn);
+
+
+/* Set or clear flags for ELF file. */
+extern unsigned int elf_flagelf (Elf *__elf, Elf_Cmd __cmd,
+ unsigned int __flags);
+/* Similarly for the ELF header. */
+extern unsigned int elf_flagehdr (Elf *__elf, Elf_Cmd __cmd,
+ unsigned int __flags);
+/* Similarly for the ELF program header. */
+extern unsigned int elf_flagphdr (Elf *__elf, Elf_Cmd __cmd,
+ unsigned int __flags);
+/* Similarly for the given ELF section. */
+extern unsigned int elf_flagscn (Elf_Scn *__scn, Elf_Cmd __cmd,
+ unsigned int __flags);
+/* Similarly for the given ELF data. */
+extern unsigned int elf_flagdata (Elf_Data *__data, Elf_Cmd __cmd,
+ unsigned int __flags);
+/* Similarly for the given ELF section header. */
+extern unsigned int elf_flagshdr (Elf_Scn *__scn, Elf_Cmd __cmd,
+ unsigned int __flags);
+
+
+/* Get data from section while translating from file representation
+ to memory representation. */
+extern Elf_Data *elf_getdata (Elf_Scn *__scn, Elf_Data *__data);
+
+/* Get uninterpreted section content. */
+extern Elf_Data *elf_rawdata (Elf_Scn *__scn, Elf_Data *__data);
+
+/* Create new data descriptor for section SCN. */
+extern Elf_Data *elf_newdata (Elf_Scn *__scn);
+
+/* Get data translated from a chunk of the file contents as section data
+ would be for TYPE. The resulting Elf_Data pointer is valid until
+ elf_end (ELF) is called. */
+extern Elf_Data *elf_getdata_rawchunk (Elf *__elf,
+ loff_t __offset, size_t __size,
+ Elf_Type __type);
+
+
+/* Return pointer to string at OFFSET in section INDEX. */
+extern char *elf_strptr (Elf *__elf, size_t __index, size_t __offset);
+
+
+/* Return header of archive. */
+extern Elf_Arhdr *elf_getarhdr (Elf *__elf);
+
+/* Return offset in archive for current file ELF. */
+extern loff_t elf_getaroff (Elf *__elf);
+
+/* Select archive element at OFFSET. */
+extern size_t elf_rand (Elf *__elf, size_t __offset);
+
+/* Get symbol table of archive. */
+extern Elf_Arsym *elf_getarsym (Elf *__elf, size_t *__narsyms);
+
+
+/* Control ELF descriptor. */
+extern int elf_cntl (Elf *__elf, Elf_Cmd __cmd);
+
+/* Retrieve uninterpreted file contents. */
+extern char *elf_rawfile (Elf *__elf, size_t *__nbytes);
+
+
+/* Return size of array of COUNT elements of the type denoted by TYPE
+ in the external representation. The binary class is taken from ELF.
+ The result is based on version VERSION of the ELF standard. */
+extern size_t elf32_fsize (Elf_Type __type, size_t __count,
+ unsigned int __version)
+ __attribute__ ((__const__));
+/* Similar but this time the binary class is ELFCLASS64. */
+extern size_t elf64_fsize (Elf_Type __type, size_t __count,
+ unsigned int __version)
+ __attribute__ ((__const__));
+
+
+/* Convert data structures from their representation in the file described
+ by ELF to their memory representation. */
+extern Elf_Data *elf32_xlatetom (Elf_Data *__dest, const Elf_Data *__src,
+ unsigned int __encode);
+/* Same for 64 bit class. */
+extern Elf_Data *elf64_xlatetom (Elf_Data *__dest, const Elf_Data *__src,
+ unsigned int __encode);
+
+/* Convert data structures from their memory representation to the file
+ representation described by ELF. */
+extern Elf_Data *elf32_xlatetof (Elf_Data *__dest, const Elf_Data *__src,
+ unsigned int __encode);
+/* Same for 64 bit class. */
+extern Elf_Data *elf64_xlatetof (Elf_Data *__dest, const Elf_Data *__src,
+ unsigned int __encode);
+
+
+/* Return error code of last failing function call. This value is kept
+ separately for each thread. */
+extern int elf_errno (void);
+
+/* Return error string for ERROR. If ERROR is zero, return error string
+ for most recent error or NULL if none occurred. If ERROR is -1 the
+ behaviour is similar to the last case except that a legal string rather
+ than NULL is returned. */
+extern const char *elf_errmsg (int __error);
+
+
+/* Coordinate ELF library and application versions. */
+extern unsigned int elf_version (unsigned int __version);
+
+/* Set fill bytes used to fill holes in data structures. */
+extern void elf_fill (int __fill);
+
+/* Compute hash value. */
+extern unsigned long int elf_hash (const char *__string)
+ __attribute__ ((__pure__));
+
+/* Compute hash value using the GNU-specific hash function. */
+extern unsigned long int elf_gnu_hash (const char *__string)
+ __attribute__ ((__pure__));
+
+
+/* Compute simple checksum from permanent parts of the ELF file. */
+extern long int elf32_checksum (Elf *__elf);
+/* Similar but this time the binary class is ELFCLASS64. */
+extern long int elf64_checksum (Elf *__elf);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* libelf.h */
diff --git a/tools/perf/external/usr/include/libiberty.h b/tools/perf/external/usr/include/libiberty.h
new file mode 100644
index 00000000..f54ca18c
--- /dev/null
+++ b/tools/perf/external/usr/include/libiberty.h
@@ -0,0 +1,679 @@
+/* Function declarations for libiberty.
+
+ Copyright 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
+ 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+
+ Note - certain prototypes declared in this header file are for
+ functions whose implementation copyright does not belong to the
+ FSF. Those prototypes are present in this file for reference
+ purposes only and their presence in this file should not be construed
+ as an indication of ownership by the FSF of the implementation of
+ those functions in any way or form whatsoever.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor,
+ Boston, MA 02110-1301, USA.
+
+ Written by Cygnus Support, 1994.
+
+ The libiberty library provides a number of functions which are
+ missing on some operating systems. We do not declare those here,
+ to avoid conflicts with the system header files on operating
+ systems that do support those functions. In this file we only
+ declare those functions which are specific to libiberty. */
+
+#ifndef LIBIBERTY_H
+#define LIBIBERTY_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "ansidecl.h"
+
+/* Get a definition for size_t. */
+#include <stddef.h>
+/* Get a definition for va_list. */
+#include <stdarg.h>
+
+#include <stdio.h>
+
+/* If the OS supports it, ensure that the supplied stream is set up to
+ avoid any multi-threaded locking. Otherwise leave the FILE pointer
+ unchanged. If the stream is NULL do nothing. */
+
+extern void unlock_stream (FILE *);
+
+/* If the OS supports it, ensure that the standard I/O streams, stdin,
+ stdout and stderr are set up to avoid any multi-threaded locking.
+ Otherwise do nothing. */
+
+extern void unlock_std_streams (void);
+
+/* Open and return a FILE pointer. If the OS supports it, ensure that
+ the stream is set up to avoid any multi-threaded locking. Otherwise
+ return the FILE pointer unchanged. */
+
+extern FILE *fopen_unlocked (const char *, const char *);
+extern FILE *fdopen_unlocked (int, const char *);
+extern FILE *freopen_unlocked (const char *, const char *, FILE *);
+
+/* Build an argument vector from a string. Allocates memory using
+ malloc. Use freeargv to free the vector. */
+
+extern char **buildargv (const char *) ATTRIBUTE_MALLOC;
+
+/* Free a vector returned by buildargv. */
+
+extern void freeargv (char **);
+
+/* Duplicate an argument vector. Allocates memory using malloc. Use
+ freeargv to free the vector. */
+
+extern char **dupargv (char **) ATTRIBUTE_MALLOC;
+
+/* Expand "@file" arguments in argv. */
+
+extern void expandargv PARAMS ((int *, char ***));
+
+/* Write argv to an @-file, inserting necessary quoting. */
+
+extern int writeargv PARAMS ((char **, FILE *));
+
+/* Return the last component of a path name. Note that we can't use a
+ prototype here because the parameter is declared inconsistently
+ across different systems, sometimes as "char *" and sometimes as
+ "const char *" */
+
+/* HAVE_DECL_* is a three-state macro: undefined, 0 or 1. If it is
+ undefined, we haven't run the autoconf check so provide the
+ declaration without arguments. If it is 0, we checked and failed
+ to find the declaration so provide a fully prototyped one. If it
+ is 1, we found it so don't provide any declaration at all. */
+#if !HAVE_DECL_BASENAME
+#if defined (__GNU_LIBRARY__ ) || defined (__linux__) || defined (__FreeBSD__) || defined (__OpenBSD__) || defined(__NetBSD__) || defined (__CYGWIN__) || defined (__CYGWIN32__) || defined (__MINGW32__) || defined (HAVE_DECL_BASENAME)
+extern char *basename (const char *);
+#else
+/* Do not allow basename to be used if there is no prototype seen. We
+ either need to use the above prototype or have one from
+ autoconf which would result in HAVE_DECL_BASENAME being set. */
+#define basename basename_cannot_be_used_without_a_prototype
+#endif
+#endif
+
+/* A well-defined basename () that is always compiled in. */
+
+extern const char *lbasename (const char *);
+
+/* Same, but assumes DOS semantics (drive name, backslash is also a
+ dir separator) regardless of host. */
+
+extern const char *dos_lbasename (const char *);
+
+/* Same, but assumes Unix semantics (absolute paths always start with
+ a slash, only forward slash is accepted as dir separator)
+ regardless of host. */
+
+extern const char *unix_lbasename (const char *);
+
+/* A well-defined realpath () that is always compiled in. */
+
+extern char *lrealpath (const char *);
+
+/* Concatenate an arbitrary number of strings. You must pass NULL as
+ the last argument of this function, to terminate the list of
+ strings. Allocates memory using xmalloc. */
+
+extern char *concat (const char *, ...) ATTRIBUTE_MALLOC ATTRIBUTE_SENTINEL;
+
+/* Concatenate an arbitrary number of strings. You must pass NULL as
+ the last argument of this function, to terminate the list of
+ strings. Allocates memory using xmalloc. The first argument is
+ not one of the strings to be concatenated, but if not NULL is a
+ pointer to be freed after the new string is created, similar to the
+ way xrealloc works. */
+
+extern char *reconcat (char *, const char *, ...) ATTRIBUTE_MALLOC ATTRIBUTE_SENTINEL;
+
+/* Determine the length of concatenating an arbitrary number of
+ strings. You must pass NULL as the last argument of this function,
+ to terminate the list of strings. */
+
+extern unsigned long concat_length (const char *, ...) ATTRIBUTE_SENTINEL;
+
+/* Concatenate an arbitrary number of strings into a SUPPLIED area of
+ memory. You must pass NULL as the last argument of this function,
+ to terminate the list of strings. The supplied memory is assumed
+ to be large enough. */
+
+extern char *concat_copy (char *, const char *, ...) ATTRIBUTE_SENTINEL;
+
+/* Concatenate an arbitrary number of strings into a GLOBAL area of
+ memory. You must pass NULL as the last argument of this function,
+ to terminate the list of strings. The supplied memory is assumed
+ to be large enough. */
+
+extern char *concat_copy2 (const char *, ...) ATTRIBUTE_SENTINEL;
+
+/* This is the global area used by concat_copy2. */
+
+extern char *libiberty_concat_ptr;
+
+/* Concatenate an arbitrary number of strings. You must pass NULL as
+ the last argument of this function, to terminate the list of
+ strings. Allocates memory using alloca. The arguments are
+ evaluated twice! */
+#define ACONCAT(ACONCAT_PARAMS) \
+ (libiberty_concat_ptr = (char *) alloca (concat_length ACONCAT_PARAMS + 1), \
+ concat_copy2 ACONCAT_PARAMS)
+
+/* Check whether two file descriptors refer to the same file. */
+
+extern int fdmatch (int fd1, int fd2);
+
+/* Return the position of the first bit set in the argument. */
+/* Prototypes vary from system to system, so we only provide a
+ prototype on systems where we know that we need it. */
+#if defined (HAVE_DECL_FFS) && !HAVE_DECL_FFS
+extern int ffs(int);
+#endif
+
+/* Get the working directory. The result is cached, so don't call
+ chdir() between calls to getpwd(). */
+
+extern char * getpwd (void);
+
+/* Get the current time. */
+/* Prototypes vary from system to system, so we only provide a
+ prototype on systems where we know that we need it. */
+#ifdef __MINGW32__
+/* Forward declaration to avoid #include <sys/time.h>. */
+struct timeval;
+extern int gettimeofday (struct timeval *, void *);
+#endif
+
+/* Get the amount of time the process has run, in microseconds. */
+
+extern long get_run_time (void);
+
+/* Generate a relocated path to some installation directory. Allocates
+ return value using malloc. */
+
+extern char *make_relative_prefix (const char *, const char *,
+ const char *) ATTRIBUTE_MALLOC;
+
+/* Generate a relocated path to some installation directory without
+ attempting to follow any soft links. Allocates
+ return value using malloc. */
+
+extern char *make_relative_prefix_ignore_links (const char *, const char *,
+ const char *) ATTRIBUTE_MALLOC;
+
+/* Choose a temporary directory to use for scratch files. */
+
+extern char *choose_temp_base (void) ATTRIBUTE_MALLOC;
+
+/* Return a temporary file name or NULL if unable to create one. */
+
+extern char *make_temp_file (const char *) ATTRIBUTE_MALLOC;
+
+/* Remove a link to a file unless it is special. */
+
+extern int unlink_if_ordinary (const char *);
+
+/* Allocate memory filled with spaces. Allocates using malloc. */
+
+extern const char *spaces (int count);
+
+/* Return the maximum error number for which strerror will return a
+ string. */
+
+extern int errno_max (void);
+
+/* Return the name of an errno value (e.g., strerrno (EINVAL) returns
+ "EINVAL"). */
+
+extern const char *strerrno (int);
+
+/* Given the name of an errno value, return the value. */
+
+extern int strtoerrno (const char *);
+
+/* ANSI's strerror(), but more robust. */
+
+extern char *xstrerror (int);
+
+/* Return the maximum signal number for which strsignal will return a
+ string. */
+
+extern int signo_max (void);
+
+/* Return a signal message string for a signal number
+ (e.g., strsignal (SIGHUP) returns something like "Hangup"). */
+/* This is commented out as it can conflict with one in system headers.
+ We still document its existence though. */
+
+/*extern const char *strsignal (int);*/
+
+/* Return the name of a signal number (e.g., strsigno (SIGHUP) returns
+ "SIGHUP"). */
+
+extern const char *strsigno (int);
+
+/* Given the name of a signal, return its number. */
+
+extern int strtosigno (const char *);
+
+/* Register a function to be run by xexit. Returns 0 on success. */
+
+extern int xatexit (void (*fn) (void));
+
+/* Exit, calling all the functions registered with xatexit. */
+
+extern void xexit (int status) ATTRIBUTE_NORETURN;
+
+/* Set the program name used by xmalloc. */
+
+extern void xmalloc_set_program_name (const char *);
+
+/* Report an allocation failure. */
+extern void xmalloc_failed (size_t) ATTRIBUTE_NORETURN;
+
+/* Allocate memory without fail. If malloc fails, this will print a
+ message to stderr (using the name set by xmalloc_set_program_name,
+ if any) and then call xexit. */
+
+extern void *xmalloc (size_t) ATTRIBUTE_MALLOC;
+
+/* Reallocate memory without fail. This works like xmalloc. Note,
+ realloc type functions are not suitable for attribute malloc since
+ they may return the same address across multiple calls. */
+
+extern void *xrealloc (void *, size_t);
+
+/* Allocate memory without fail and set it to zero. This works like
+ xmalloc. */
+
+extern void *xcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
+
+/* Copy a string into a memory buffer without fail. */
+
+extern char *xstrdup (const char *) ATTRIBUTE_MALLOC;
+
+/* Copy at most N characters from string into a buffer without fail. */
+
+extern char *xstrndup (const char *, size_t) ATTRIBUTE_MALLOC;
+
+/* Copy an existing memory buffer to a new memory buffer without fail. */
+
+extern void *xmemdup (const void *, size_t, size_t) ATTRIBUTE_MALLOC;
+
+/* Physical memory routines. Return values are in BYTES. */
+extern double physmem_total (void);
+extern double physmem_available (void);
+
+/* Compute the 32-bit CRC of a block of memory. */
+extern unsigned int xcrc32 (const unsigned char *, int, unsigned int);
+
+/* These macros provide a K&R/C89/C++-friendly way of allocating structures
+ with nice encapsulation. The XDELETE*() macros are technically
+ superfluous, but provided here for symmetry. Using them consistently
+ makes it easier to update client code to use different allocators such
+ as new/delete and new[]/delete[]. */
+
+/* Scalar allocators. */
+
+#define XALLOCA(T) ((T *) alloca (sizeof (T)))
+#define XNEW(T) ((T *) xmalloc (sizeof (T)))
+#define XCNEW(T) ((T *) xcalloc (1, sizeof (T)))
+#define XDUP(T, P) ((T *) xmemdup ((P), sizeof (T), sizeof (T)))
+#define XDELETE(P) free ((void*) (P))
+
+/* Array allocators. */
+
+#define XALLOCAVEC(T, N) ((T *) alloca (sizeof (T) * (N)))
+#define XNEWVEC(T, N) ((T *) xmalloc (sizeof (T) * (N)))
+#define XCNEWVEC(T, N) ((T *) xcalloc ((N), sizeof (T)))
+#define XDUPVEC(T, P, N) ((T *) xmemdup ((P), sizeof (T) * (N), sizeof (T) * (N)))
+#define XRESIZEVEC(T, P, N) ((T *) xrealloc ((void *) (P), sizeof (T) * (N)))
+#define XDELETEVEC(P) free ((void*) (P))
+
+/* Allocators for variable-sized structures and raw buffers. */
+
+#define XALLOCAVAR(T, S) ((T *) alloca ((S)))
+#define XNEWVAR(T, S) ((T *) xmalloc ((S)))
+#define XCNEWVAR(T, S) ((T *) xcalloc (1, (S)))
+#define XDUPVAR(T, P, S1, S2) ((T *) xmemdup ((P), (S1), (S2)))
+#define XRESIZEVAR(T, P, S) ((T *) xrealloc ((P), (S)))
+
+/* Type-safe obstack allocator. */
+
+#define XOBNEW(O, T) ((T *) obstack_alloc ((O), sizeof (T)))
+#define XOBNEWVEC(O, T, N) ((T *) obstack_alloc ((O), sizeof (T) * (N)))
+#define XOBNEWVAR(O, T, S) ((T *) obstack_alloc ((O), (S)))
+#define XOBFINISH(O, T) ((T) obstack_finish ((O)))
+
+/* hex character manipulation routines */
+
+#define _hex_array_size 256
+#define _hex_bad 99
+extern const unsigned char _hex_value[_hex_array_size];
+extern void hex_init (void);
+#define hex_p(c) (hex_value (c) != _hex_bad)
+/* If you change this, note well: Some code relies on side effects in
+ the argument being performed exactly once. */
+#define hex_value(c) ((unsigned int) _hex_value[(unsigned char) (c)])
+
+/* Flags for pex_init. These are bits to be or'ed together. */
+
+/* Record subprocess times, if possible. */
+#define PEX_RECORD_TIMES 0x1
+
+/* Use pipes for communication between processes, if possible. */
+#define PEX_USE_PIPES 0x2
+
+/* Save files used for communication between processes. */
+#define PEX_SAVE_TEMPS 0x4
+
+/* Prepare to execute one or more programs, with standard output of
+ each program fed to standard input of the next.
+ FLAGS As above.
+ PNAME The name of the program to report in error messages.
+ TEMPBASE A base name to use for temporary files; may be NULL to
+ use a random name.
+ Returns NULL on error. */
+
+extern struct pex_obj *pex_init (int flags, const char *pname,
+ const char *tempbase);
+
+/* Flags for pex_run. These are bits to be or'ed together. */
+
+/* Last program in pipeline. Standard output of program goes to
+ OUTNAME, or, if OUTNAME is NULL, to standard output of caller. Do
+ not set this if you want to call pex_read_output. After this is
+ set, pex_run may no longer be called with the same struct
+ pex_obj. */
+#define PEX_LAST 0x1
+
+/* Search for program in executable search path. */
+#define PEX_SEARCH 0x2
+
+/* OUTNAME is a suffix. */
+#define PEX_SUFFIX 0x4
+
+/* Send program's standard error to standard output. */
+#define PEX_STDERR_TO_STDOUT 0x8
+
+/* Input file should be opened in binary mode. This flag is ignored
+ on Unix. */
+#define PEX_BINARY_INPUT 0x10
+
+/* Output file should be opened in binary mode. This flag is ignored
+ on Unix. For proper behaviour PEX_BINARY_INPUT and
+ PEX_BINARY_OUTPUT have to match appropriately--i.e., a call using
+ PEX_BINARY_OUTPUT should be followed by a call using
+ PEX_BINARY_INPUT. */
+#define PEX_BINARY_OUTPUT 0x20
+
+/* Capture stderr to a pipe. The output can be read by
+ calling pex_read_err and reading from the returned
+ FILE object. This flag may be specified only for
+ the last program in a pipeline.
+
+ This flag is supported only on Unix and Windows. */
+#define PEX_STDERR_TO_PIPE 0x40
+
+/* Capture stderr in binary mode. This flag is ignored
+ on Unix. */
+#define PEX_BINARY_ERROR 0x80
+
+
+/* Execute one program. Returns NULL on success. On error returns an
+ error string (typically just the name of a system call); the error
+ string is statically allocated.
+
+ OBJ Returned by pex_init.
+
+ FLAGS As above.
+
+ EXECUTABLE The program to execute.
+
+ ARGV NULL terminated array of arguments to pass to the program.
+
+ OUTNAME Sets the output file name as follows:
+
+ PEX_SUFFIX set (OUTNAME may not be NULL):
+ TEMPBASE parameter to pex_init not NULL:
+ Output file name is the concatenation of TEMPBASE
+ and OUTNAME.
+ TEMPBASE is NULL:
+ Output file name is a random file name ending in
+ OUTNAME.
+ PEX_SUFFIX not set:
+ OUTNAME not NULL:
+ Output file name is OUTNAME.
+ OUTNAME NULL, TEMPBASE not NULL:
+ Output file name is randomly chosen using
+ TEMPBASE.
+ OUTNAME NULL, TEMPBASE NULL:
+ Output file name is randomly chosen.
+
+ If PEX_LAST is not set, the output file name is the
+ name to use for a temporary file holding stdout, if
+ any (there will not be a file if PEX_USE_PIPES is set
+ and the system supports pipes). If a file is used, it
+ will be removed when no longer needed unless
+ PEX_SAVE_TEMPS is set.
+
+ If PEX_LAST is set, and OUTNAME is not NULL, standard
+ output is written to the output file name. The file
+ will not be removed. If PEX_LAST and PEX_SUFFIX are
+ both set, TEMPBASE may not be NULL.
+
+ ERRNAME If not NULL, this is the name of a file to which
+ standard error is written. If NULL, standard error of
+ the program is standard error of the caller.
+
+ ERR On an error return, *ERR is set to an errno value, or
+ to 0 if there is no relevant errno.
+*/
+
+extern const char *pex_run (struct pex_obj *obj, int flags,
+ const char *executable, char * const *argv,
+ const char *outname, const char *errname,
+ int *err);
+
+/* As for pex_run (), but takes an extra parameter to enable the
+ environment for the child process to be specified.
+
+ ENV The environment for the child process, specified as
+ an array of character pointers. Each element of the
+ array should point to a string of the form VAR=VALUE,
+ with the exception of the last element which must be
+ a null pointer.
+*/
+
+extern const char *pex_run_in_environment (struct pex_obj *obj, int flags,
+ const char *executable,
+ char * const *argv,
+ char * const *env,
+ const char *outname,
+ const char *errname, int *err);
+
+/* Return a stream for a temporary file to pass to the first program
+ in the pipeline as input. The file name is chosen as for pex_run.
+ pex_run closes the file automatically; don't close it yourself. */
+
+extern FILE *pex_input_file (struct pex_obj *obj, int flags,
+ const char *in_name);
+
+/* Return a stream for a pipe connected to the standard input of the
+ first program in the pipeline. You must have passed
+ `PEX_USE_PIPES' to `pex_init'. Close the returned stream
+ yourself. */
+
+extern FILE *pex_input_pipe (struct pex_obj *obj, int binary);
+
+/* Read the standard output of the last program to be executed.
+ pex_run can not be called after this. BINARY should be non-zero if
+ the file should be opened in binary mode; this is ignored on Unix.
+ Returns NULL on error. Don't call fclose on the returned FILE; it
+ will be closed by pex_free. */
+
+extern FILE *pex_read_output (struct pex_obj *, int binary);
+
+/* Read the standard error of the last program to be executed.
+ pex_run can not be called after this. BINARY should be non-zero if
+ the file should be opened in binary mode; this is ignored on Unix.
+ Returns NULL on error. Don't call fclose on the returned FILE; it
+ will be closed by pex_free. */
+
+extern FILE *pex_read_err (struct pex_obj *, int binary);
+
+/* Return exit status of all programs in VECTOR. COUNT indicates the
+ size of VECTOR. The status codes in the vector are in the order of
+ the calls to pex_run. Returns 0 on error, 1 on success. */
+
+extern int pex_get_status (struct pex_obj *, int count, int *vector);
+
+/* Return times of all programs in VECTOR. COUNT indicates the size
+ of VECTOR. struct pex_time is really just struct timeval, but that
+ is not portable to all systems. Returns 0 on error, 1 on
+ success. */
+
+struct pex_time
+{
+ unsigned long user_seconds;
+ unsigned long user_microseconds;
+ unsigned long system_seconds;
+ unsigned long system_microseconds;
+};
+
+extern int pex_get_times (struct pex_obj *, int count,
+ struct pex_time *vector);
+
+/* Clean up a pex_obj. If you have not called pex_get_times or
+ pex_get_status, this will try to kill the subprocesses. */
+
+extern void pex_free (struct pex_obj *);
+
+/* Just execute one program. Return value is as for pex_run.
+ FLAGS Combination of PEX_SEARCH and PEX_STDERR_TO_STDOUT.
+ EXECUTABLE As for pex_run.
+ ARGV As for pex_run.
+ PNAME As for pex_init.
+ OUTNAME As for pex_run when PEX_LAST is set.
+ ERRNAME As for pex_run.
+ STATUS Set to exit status on success.
+ ERR As for pex_run.
+*/
+
+extern const char *pex_one (int flags, const char *executable,
+ char * const *argv, const char *pname,
+ const char *outname, const char *errname,
+ int *status, int *err);
+
+/* pexecute and pwait are the old pexecute interface, still here for
+ backward compatibility. Don't use these for new code. Instead,
+ use pex_init/pex_run/pex_get_status/pex_free, or pex_one. */
+
+/* Definitions used by the pexecute routine. */
+
+#define PEXECUTE_FIRST 1
+#define PEXECUTE_LAST 2
+#define PEXECUTE_ONE (PEXECUTE_FIRST + PEXECUTE_LAST)
+#define PEXECUTE_SEARCH 4
+#define PEXECUTE_VERBOSE 8
+
+/* Execute a program. */
+
+extern int pexecute (const char *, char * const *, const char *,
+ const char *, char **, char **, int);
+
+/* Wait for pexecute to finish. */
+
+extern int pwait (int, int *, int);
+
+#if !HAVE_DECL_ASPRINTF
+/* Like sprintf but provides a pointer to malloc'd storage, which must
+ be freed by the caller. */
+
+extern int asprintf (char **, const char *, ...) ATTRIBUTE_PRINTF_2;
+#endif
+
+#if !HAVE_DECL_VASPRINTF
+/* Like vsprintf but provides a pointer to malloc'd storage, which
+ must be freed by the caller. */
+
+extern int vasprintf (char **, const char *, va_list) ATTRIBUTE_PRINTF(2,0);
+#endif
+
+#if defined(HAVE_DECL_SNPRINTF) && !HAVE_DECL_SNPRINTF
+/* Like sprintf but prints at most N characters. */
+extern int snprintf (char *, size_t, const char *, ...) ATTRIBUTE_PRINTF_3;
+#endif
+
+#if defined(HAVE_DECL_VSNPRINTF) && !HAVE_DECL_VSNPRINTF
+/* Like vsprintf but prints at most N characters. */
+extern int vsnprintf (char *, size_t, const char *, va_list) ATTRIBUTE_PRINTF(3,0);
+#endif
+
+#if defined(HAVE_DECL_STRVERSCMP) && !HAVE_DECL_STRVERSCMP
+/* Compare version strings. */
+extern int strverscmp (const char *, const char *);
+#endif
+
+/* Set the title of a process */
+extern void setproctitle (const char *name, ...);
+
+#define ARRAY_SIZE(a) (sizeof (a) / sizeof ((a)[0]))
+
+/* Drastically simplified alloca configurator. If we're using GCC,
+ we use __builtin_alloca; otherwise we use the C alloca. The C
+ alloca is always available. You can override GCC by defining
+ USE_C_ALLOCA yourself. The canonical autoconf macro C_ALLOCA is
+ also set/unset as it is often used to indicate whether code needs
+ to call alloca(0). */
+extern void *C_alloca (size_t) ATTRIBUTE_MALLOC;
+#undef alloca
+#if GCC_VERSION >= 2000 && !defined USE_C_ALLOCA
+# define alloca(x) __builtin_alloca(x)
+# undef C_ALLOCA
+# define ASTRDUP(X) \
+ (__extension__ ({ const char *const libiberty_optr = (X); \
+ const unsigned long libiberty_len = strlen (libiberty_optr) + 1; \
+ char *const libiberty_nptr = (char *const) alloca (libiberty_len); \
+ (char *) memcpy (libiberty_nptr, libiberty_optr, libiberty_len); }))
+#else
+# define alloca(x) C_alloca(x)
+# undef USE_C_ALLOCA
+# define USE_C_ALLOCA 1
+# undef C_ALLOCA
+# define C_ALLOCA 1
+extern const char *libiberty_optr;
+extern char *libiberty_nptr;
+extern unsigned long libiberty_len;
+# define ASTRDUP(X) \
+ (libiberty_optr = (X), \
+ libiberty_len = strlen (libiberty_optr) + 1, \
+ libiberty_nptr = (char *) alloca (libiberty_len), \
+ (char *) memcpy (libiberty_nptr, libiberty_optr, libiberty_len))
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* ! defined (LIBIBERTY_H) */
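The pex_* declarations above describe a small protocol: pex_init, one or more pex_run calls, pex_get_status, then pex_free. A minimal single-command sketch, illustrative only (the program name "true" is an arbitrary placeholder, not something this patch executes):

#include "libiberty.h"
#include <stdio.h>

int main(void)
{
	struct pex_obj *obj = pex_init(0, "pex-demo", NULL);
	char *argv[] = { (char *) "true", NULL };
	const char *errmsg;
	int err = 0, status = 0;

	/* PEX_LAST: no further pex_run calls; PEX_SEARCH: find "true" in PATH. */
	errmsg = pex_run(obj, PEX_LAST | PEX_SEARCH, "true", argv,
			 NULL, NULL, &err);
	if (errmsg) {
		fprintf(stderr, "pex_run failed in %s (err=%d)\n", errmsg, err);
		pex_free(obj);
		return 1;
	}
	if (pex_get_status(obj, 1, &status))
		printf("child exit status: %d\n", status);
	pex_free(obj);
	return 0;
}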
diff --git a/tools/perf/external/usr/include/newt.h b/tools/perf/external/usr/include/newt.h
new file mode 100644
index 00000000..3111a21a
--- /dev/null
+++ b/tools/perf/external/usr/include/newt.h
@@ -0,0 +1,387 @@
+#ifndef H_NEWT
+#define H_NEWT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdarg.h>
+
+#define NEWT_COLORSET_ROOT 2
+#define NEWT_COLORSET_BORDER 3
+#define NEWT_COLORSET_WINDOW 4
+#define NEWT_COLORSET_SHADOW 5
+#define NEWT_COLORSET_TITLE 6
+#define NEWT_COLORSET_BUTTON 7
+#define NEWT_COLORSET_ACTBUTTON 8
+#define NEWT_COLORSET_CHECKBOX 9
+#define NEWT_COLORSET_ACTCHECKBOX 10
+#define NEWT_COLORSET_ENTRY 11
+#define NEWT_COLORSET_LABEL 12
+#define NEWT_COLORSET_LISTBOX 13
+#define NEWT_COLORSET_ACTLISTBOX 14
+#define NEWT_COLORSET_TEXTBOX 15
+#define NEWT_COLORSET_ACTTEXTBOX 16
+#define NEWT_COLORSET_HELPLINE 17
+#define NEWT_COLORSET_ROOTTEXT 18
+#define NEWT_COLORSET_EMPTYSCALE 19
+#define NEWT_COLORSET_FULLSCALE 20
+#define NEWT_COLORSET_DISENTRY 21
+#define NEWT_COLORSET_COMPACTBUTTON 22
+#define NEWT_COLORSET_ACTSELLISTBOX 23
+#define NEWT_COLORSET_SELLISTBOX 24
+
+#define NEWT_ARG_LAST -100000
+#define NEWT_ARG_APPEND -1
+
+struct newtColors {
+ char * rootFg, * rootBg;
+ char * borderFg, * borderBg;
+ char * windowFg, * windowBg;
+ char * shadowFg, * shadowBg;
+ char * titleFg, * titleBg;
+ char * buttonFg, * buttonBg;
+ char * actButtonFg, * actButtonBg;
+ char * checkboxFg, * checkboxBg;
+ char * actCheckboxFg, * actCheckboxBg;
+ char * entryFg, * entryBg;
+ char * labelFg, * labelBg;
+ char * listboxFg, * listboxBg;
+ char * actListboxFg, * actListboxBg;
+ char * textboxFg, * textboxBg;
+ char * actTextboxFg, * actTextboxBg;
+ char * helpLineFg, * helpLineBg;
+ char * rootTextFg, * rootTextBg;
+ char * emptyScale, * fullScale;
+ char * disabledEntryFg, * disabledEntryBg;
+ char * compactButtonFg, * compactButtonBg;
+ char * actSelListboxFg, * actSelListboxBg;
+ char * selListboxFg, * selListboxBg;
+};
+
+enum newtFlagsSense { NEWT_FLAGS_SET, NEWT_FLAGS_RESET, NEWT_FLAGS_TOGGLE };
+
+#define NEWT_FLAG_RETURNEXIT (1 << 0)
+#define NEWT_FLAG_HIDDEN (1 << 1)
+#define NEWT_FLAG_SCROLL (1 << 2)
+#define NEWT_FLAG_DISABLED (1 << 3)
+/* OBSOLETE #define NEWT_FLAG_NOSCROLL (1 << 4) for listboxes */
+#define NEWT_FLAG_BORDER (1 << 5)
+#define NEWT_FLAG_WRAP (1 << 6)
+#define NEWT_FLAG_NOF12 (1 << 7)
+#define NEWT_FLAG_MULTIPLE (1 << 8)
+#define NEWT_FLAG_SELECTED (1 << 9)
+#define NEWT_FLAG_CHECKBOX (1 << 10)
+#define NEWT_FLAG_PASSWORD (1 << 11) /* draw '*' instead of chars in entrybox */
+#define NEWT_FLAG_SHOWCURSOR (1 << 12) /* Only applies to listbox for now */
+#define NEWT_FD_READ (1 << 0)
+#define NEWT_FD_WRITE (1 << 1)
+#define NEWT_FD_EXCEPT (1 << 2)
+
+#define NEWT_CHECKBOXTREE_UNSELECTABLE (1 << 12)
+#define NEWT_CHECKBOXTREE_HIDE_BOX (1 << 13)
+
+#define NEWT_CHECKBOXTREE_COLLAPSED '\0'
+#define NEWT_CHECKBOXTREE_EXPANDED '\1'
+#define NEWT_CHECKBOXTREE_UNSELECTED ' '
+#define NEWT_CHECKBOXTREE_SELECTED '*'
+
+/* Backwards compatibility */
+#define NEWT_LISTBOX_RETURNEXIT NEWT_FLAG_RETURNEXIT
+#define NEWT_ENTRY_SCROLL NEWT_FLAG_SCROLL
+#define NEWT_ENTRY_HIDDEN NEWT_FLAG_HIDDEN
+#define NEWT_ENTRY_RETURNEXIT NEWT_FLAG_RETURNEXIT
+#define NEWT_ENTRY_DISABLED NEWT_FLAG_DISABLED
+
+#define NEWT_TEXTBOX_WRAP NEWT_FLAG_WRAP
+#define NEWT_TEXTBOX_SCROLL NEWT_FLAG_SCROLL
+#define NEWT_FORM_NOF12 NEWT_FLAG_NOF12
+
+#define newtListboxAddEntry newtListboxAppendEntry
+
+
+typedef struct newtComponent_struct * newtComponent;
+
+extern const struct newtColors newtDefaultColorPalette;
+
+typedef void (*newtCallback)(newtComponent, void *);
+typedef void (*newtSuspendCallback)(void * data);
+
+int newtInit(void);
+int newtFinished(void);
+void newtCls(void);
+void newtResizeScreen(int redraw);
+void newtWaitForKey(void);
+void newtClearKeyBuffer(void);
+void newtDelay(unsigned int usecs);
+/* top, left are *not* counting the border */
+int newtOpenWindow(int left,int top,
+ unsigned int width,unsigned int height,
+ const char * title);
+int newtCenteredWindow(unsigned int width,unsigned int height, const char * title);
+void newtPopWindow(void);
+void newtPopWindowNoRefresh(void);
+void newtSetColors(struct newtColors colors);
+void newtSetColor(int colorset, char *fg, char *bg);
+void newtRefresh(void);
+void newtSuspend(void);
+void newtSetSuspendCallback(newtSuspendCallback cb, void * data);
+void newtSetHelpCallback(newtCallback cb);
+int newtResume(void);
+void newtPushHelpLine(const char * text);
+void newtRedrawHelpLine(void);
+void newtPopHelpLine(void);
+void newtDrawRootText(int col, int row, const char * text);
+void newtBell(void);
+void newtCursorOff(void);
+void newtCursorOn(void);
+
+/* Components */
+
+newtComponent newtCompactButton(int left, int top, const char * text);
+newtComponent newtButton(int left, int top, const char * text);
+newtComponent newtCheckbox(int left, int top, const char * text, char defValue,
+ const char * seq, char * result);
+char newtCheckboxGetValue(newtComponent co);
+void newtCheckboxSetValue(newtComponent co, char value);
+void newtCheckboxSetFlags(newtComponent co, int flags, enum newtFlagsSense sense);
+
+
+newtComponent newtRadiobutton(int left, int top, const char * text, int isDefault,
+ newtComponent prevButton);
+newtComponent newtRadioGetCurrent(newtComponent setMember);
+void newtRadioSetCurrent(newtComponent setMember);
+newtComponent newtListitem(int left, int top, const char * text, int isDefault,
+ newtComponent prevItem, const void * data, int flags);
+void newtListitemSet(newtComponent co, const char * text);
+void * newtListitemGetData(newtComponent co);
+void newtGetScreenSize(int * cols, int * rows);
+
+newtComponent newtLabel(int left, int top, const char * text);
+void newtLabelSetText(newtComponent co, const char * text);
+newtComponent newtVerticalScrollbar(int left, int top, int height,
+ int normalColorset, int thumbColorset);
+void newtScrollbarSet(newtComponent co, int where, int total);
+
+newtComponent newtListbox(int left, int top, int height, int flags);
+void * newtListboxGetCurrent(newtComponent co);
+void newtListboxSetCurrent(newtComponent co, int num);
+void newtListboxSetCurrentByKey(newtComponent co, void * key);
+void newtListboxSetEntry(newtComponent co, int num, const char * text);
+void newtListboxSetWidth(newtComponent co, int width);
+void newtListboxSetData(newtComponent co, int num, void * data);
+int newtListboxAppendEntry(newtComponent co, const char * text,
+ const void * data);
+/* Send the key to insert after, or NULL to insert at the top */
+int newtListboxInsertEntry(newtComponent co, const char * text, const void * data, void * key);
+int newtListboxDeleteEntry(newtComponent co, void * data);
+void newtListboxClear(newtComponent co); /* removes all entries from listbox */
+void newtListboxGetEntry(newtComponent co, int num, char **text, void **data);
+/* Returns an array of data pointers from items, last element is NULL */
+void **newtListboxGetSelection(newtComponent co, int *numitems);
+void newtListboxClearSelection(newtComponent co);
+void newtListboxSelectItem(newtComponent co, const void * key,
+ enum newtFlagsSense sense);
+/* Returns number of items currently in listbox. */
+int newtListboxItemCount(newtComponent co);
+
+newtComponent newtCheckboxTree(int left, int top, int height, int flags);
+newtComponent newtCheckboxTreeMulti(int left, int top, int height, char *seq, int flags);
+const void ** newtCheckboxTreeGetSelection(newtComponent co, int *numitems);
+const void * newtCheckboxTreeGetCurrent(newtComponent co);
+void newtCheckboxTreeSetCurrent(newtComponent co, void * item);
+const void ** newtCheckboxTreeGetMultiSelection(newtComponent co, int *numitems, char seqnum);
+/* last item is NEWT_ARG_LAST for all of these */
+int newtCheckboxTreeAddItem(newtComponent co,
+ const char * text, const void * data,
+ int flags, int index, ...);
+int newtCheckboxTreeAddArray(newtComponent co,
+ const char * text, const void * data,
+ int flags, int * indexes);
+int * newtCheckboxTreeFindItem(newtComponent co, void * data);
+void newtCheckboxTreeSetEntry(newtComponent co, const void * data,
+ const char * text);
+void newtCheckboxTreeSetWidth(newtComponent co, int width);
+char newtCheckboxTreeGetEntryValue(newtComponent co, const void * data);
+void newtCheckboxTreeSetEntryValue(newtComponent co, const void * data,
+ char value);
+
+newtComponent newtTextboxReflowed(int left, int top, char * text, int width,
+ int flexDown, int flexUp, int flags);
+newtComponent newtTextbox(int left, int top, int width, int height, int flags);
+void newtTextboxSetText(newtComponent co, const char * text);
+void newtTextboxSetHeight(newtComponent co, int height);
+int newtTextboxGetNumLines(newtComponent co);
+char * newtReflowText(char * text, int width, int flexDown, int flexUp,
+ int * actualWidth, int * actualHeight);
+
+struct newtExitStruct {
+ enum { NEWT_EXIT_HOTKEY, NEWT_EXIT_COMPONENT, NEWT_EXIT_FDREADY,
+ NEWT_EXIT_TIMER } reason;
+ union {
+ int watch;
+ int key;
+ newtComponent co;
+ } u;
+} ;
+
+newtComponent newtForm(newtComponent vertBar, void * helpTag, int flags);
+void newtFormSetTimer(newtComponent form, int millisecs);
+void newtFormWatchFd(newtComponent form, int fd, int fdFlags);
+void newtFormSetSize(newtComponent co);
+newtComponent newtFormGetCurrent(newtComponent co);
+void newtFormSetBackground(newtComponent co, int color);
+void newtFormSetCurrent(newtComponent co, newtComponent subco);
+void newtFormAddComponent(newtComponent form, newtComponent co);
+void newtFormAddComponents(newtComponent form, ...);
+void newtFormSetHeight(newtComponent co, int height);
+void newtFormSetWidth(newtComponent co, int width);
+newtComponent newtRunForm(newtComponent form); /* obsolete */
+void newtFormRun(newtComponent co, struct newtExitStruct * es);
+void newtDrawForm(newtComponent form);
+void newtFormAddHotKey(newtComponent co, int key);
+
+typedef int (*newtEntryFilter)(newtComponent entry, void * data, int ch,
+ int cursor);
+newtComponent newtEntry(int left, int top, const char * initialValue, int width,
+ const char ** resultPtr, int flags);
+void newtEntrySet(newtComponent co, const char * value, int cursorAtEnd);
+void newtEntrySetFilter(newtComponent co, newtEntryFilter filter, void * data);
+char * newtEntryGetValue(newtComponent co);
+void newtEntrySetFlags(newtComponent co, int flags, enum newtFlagsSense sense);
+
+newtComponent newtScale(int left, int top, int width, long long fullValue);
+void newtScaleSet(newtComponent co, unsigned long long amount);
+
+void newtComponentAddCallback(newtComponent co, newtCallback f, void * data);
+void newtComponentTakesFocus(newtComponent co, int val);
+
+/* This callback is called when a component is destroyed. */
+void newtComponentAddDestroyCallback(newtComponent co,
+ newtCallback f, void * data);
+
+/* this also destroys all of the components (including other forms) on the
+ form */
+void newtFormDestroy(newtComponent form);
+
+/* NB: You SHOULD NOT call this for components which have been added
+ * to a form (ie. almost all components). They are destroyed along
+ * with the form when you call newtFormDestroy.
+ */
+void newtComponentDestroy(newtComponent co);
+
+/* Key codes */
+
+#define NEWT_KEY_TAB '\t'
+#define NEWT_KEY_ENTER '\r'
+#define NEWT_KEY_SUSPEND '\032' /* ctrl - z*/
+#define NEWT_KEY_ESCAPE '\033'
+#define NEWT_KEY_RETURN NEWT_KEY_ENTER
+
+#define NEWT_KEY_EXTRA_BASE 0x8000
+#define NEWT_KEY_UP NEWT_KEY_EXTRA_BASE + 1
+#define NEWT_KEY_DOWN NEWT_KEY_EXTRA_BASE + 2
+#define NEWT_KEY_LEFT NEWT_KEY_EXTRA_BASE + 4
+#define NEWT_KEY_RIGHT NEWT_KEY_EXTRA_BASE + 5
+#define NEWT_KEY_BKSPC NEWT_KEY_EXTRA_BASE + 6
+#define NEWT_KEY_DELETE NEWT_KEY_EXTRA_BASE + 7
+#define NEWT_KEY_HOME NEWT_KEY_EXTRA_BASE + 8
+#define NEWT_KEY_END NEWT_KEY_EXTRA_BASE + 9
+#define NEWT_KEY_UNTAB NEWT_KEY_EXTRA_BASE + 10
+#define NEWT_KEY_PGUP NEWT_KEY_EXTRA_BASE + 11
+#define NEWT_KEY_PGDN NEWT_KEY_EXTRA_BASE + 12
+#define NEWT_KEY_INSERT NEWT_KEY_EXTRA_BASE + 13
+
+#define NEWT_KEY_F1 NEWT_KEY_EXTRA_BASE + 101
+#define NEWT_KEY_F2 NEWT_KEY_EXTRA_BASE + 102
+#define NEWT_KEY_F3 NEWT_KEY_EXTRA_BASE + 103
+#define NEWT_KEY_F4 NEWT_KEY_EXTRA_BASE + 104
+#define NEWT_KEY_F5 NEWT_KEY_EXTRA_BASE + 105
+#define NEWT_KEY_F6 NEWT_KEY_EXTRA_BASE + 106
+#define NEWT_KEY_F7 NEWT_KEY_EXTRA_BASE + 107
+#define NEWT_KEY_F8 NEWT_KEY_EXTRA_BASE + 108
+#define NEWT_KEY_F9 NEWT_KEY_EXTRA_BASE + 109
+#define NEWT_KEY_F10 NEWT_KEY_EXTRA_BASE + 110
+#define NEWT_KEY_F11 NEWT_KEY_EXTRA_BASE + 111
+#define NEWT_KEY_F12 NEWT_KEY_EXTRA_BASE + 112
+
+/* not really a key, but newtGetKey returns it */
+#define NEWT_KEY_RESIZE NEWT_KEY_EXTRA_BASE + 113
+
+#define NEWT_ANCHOR_LEFT (1 << 0)
+#define NEWT_ANCHOR_RIGHT (1 << 1)
+#define NEWT_ANCHOR_TOP (1 << 2)
+#define NEWT_ANCHOR_BOTTOM (1 << 3)
+
+#define NEWT_GRID_FLAG_GROWX (1 << 0)
+#define NEWT_GRID_FLAG_GROWY (1 << 1)
+
+typedef struct grid_s * newtGrid;
+enum newtGridElement { NEWT_GRID_EMPTY = 0,
+ NEWT_GRID_COMPONENT, NEWT_GRID_SUBGRID };
+
+newtGrid newtCreateGrid(int cols, int rows);
+/* TYPE, what, TYPE, what, ..., NULL */
+newtGrid newtGridVStacked(enum newtGridElement type, void * what, ...);
+newtGrid newtGridVCloseStacked(enum newtGridElement type, void * what, ...);
+newtGrid newtGridHStacked(enum newtGridElement type1, void * what1, ...);
+newtGrid newtGridHCloseStacked(enum newtGridElement type1, void * what1, ...);
+newtGrid newtGridBasicWindow(newtComponent text, newtGrid middle,
+ newtGrid buttons);
+newtGrid newtGridSimpleWindow(newtComponent text, newtComponent middle,
+ newtGrid buttons);
+void newtGridSetField(newtGrid grid, int col, int row,
+ enum newtGridElement type, void * val, int padLeft,
+ int padTop, int padRight, int padBottom, int anchor,
+ int flags);
+void newtGridPlace(newtGrid grid, int left, int top);
+#define newtGridDestroy newtGridFree
+void newtGridFree(newtGrid grid, int recurse);
+void newtGridGetSize(newtGrid grid, int * width, int * height);
+void newtGridWrappedWindow(newtGrid grid, char * title);
+void newtGridWrappedWindowAt(newtGrid grid, char * title, int left, int top);
+void newtGridAddComponentsToForm(newtGrid grid, newtComponent form,
+ int recurse);
+
+/* convenience */
+newtGrid newtButtonBarv(char * button1, newtComponent * b1comp, va_list args);
+newtGrid newtButtonBar(char * button1, newtComponent * b1comp, ...);
+
+/* automatically centered and shrink wrapped */
+void newtWinMessage(char * title, char * buttonText, char * text, ...);
+void newtWinMessagev(char * title, char * buttonText, char * text,
+ va_list argv);
+
+/* having separate calls for these two seems silly, but having two separate
+ variable length-arg lists seems like a bad idea as well */
+
+/* Returns 0 if F12 was pressed, 1 for button1, 2 for button2 */
+int newtWinChoice(char * title, char * button1, char * button2,
+ char * text, ...);
+/* Returns 0 if F12 was pressed, 1 for button1, 2 for button2,
+ 3 for button3 */
+int newtWinTernary(char * title, char * button1, char * button2,
+ char * button3, char * message, ...);
+
+/* Returns the button number pressed, 0 on F12 */
+int newtWinMenu(char * title, char * text, int suggestedWidth, int flexDown,
+ int flexUp, int maxListHeight, char ** items, int * listItem,
+ char * button1, ...);
+
+struct newtWinEntry {
+ char * text;
+ char ** value; /* may be initialized to set default */
+ int flags;
+};
+
+/* Returns the button number pressed, 0 on F12. The final values are
+ dynamically allocated, and need to be freed. */
+int newtWinEntries(char * title, char * text, int suggestedWidth, int flexDown,
+ int flexUp, int dataWidth,
+ struct newtWinEntry * items, char * button1, ...);
+
+#ifdef __cplusplus
+} /* End of extern "C" { */
+#endif
+
+#endif /* H_NEWT */
diff --git a/tools/perf/external/usr/include/nlist.h b/tools/perf/external/usr/include/nlist.h
new file mode 100644
index 00000000..ce6569ef
--- /dev/null
+++ b/tools/perf/external/usr/include/nlist.h
@@ -0,0 +1,77 @@
+/* Interface for nlist.
+ Copyright (C) 1998, 1999, 2000, 2002 Red Hat, Inc.
+ This file is part of Red Hat elfutils.
+
+ Red Hat elfutils is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by the
+ Free Software Foundation; version 2 of the License.
+
+ Red Hat elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with Red Hat elfutils; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA.
+
+ In addition, as a special exception, Red Hat, Inc. gives You the
+ additional right to link the code of Red Hat elfutils with code licensed
+ under any Open Source Initiative certified open source license
+ (http://www.opensource.org/licenses/index.php) which requires the
+ distribution of source code with any binary distribution and to
+ distribute linked combinations of the two. Non-GPL Code permitted under
+ this exception must only link to the code of Red Hat elfutils through
+ those well defined interfaces identified in the file named EXCEPTION
+ found in the source code files (the "Approved Interfaces"). The files
+ of Non-GPL Code may instantiate templates or use macros or inline
+ functions from the Approved Interfaces without causing the resulting
+ work to be covered by the GNU General Public License. Only Red Hat,
+ Inc. may make changes or additions to the list of Approved Interfaces.
+ Red Hat's grant of this exception is conditioned upon your not adding
+ any new exceptions. If you wish to add a new Approved Interface or
+ exception, please contact Red Hat. You must obey the GNU General Public
+ License in all respects for all of the Red Hat elfutils code and other
+ code used in conjunction with Red Hat elfutils except the Non-GPL Code
+ covered by this exception. If you modify this file, you may extend this
+ exception to your version of the file, but you are not obligated to do
+ so. If you do not wish to provide this exception without modification,
+ you must delete this exception statement from your version and license
+ this file solely under the GPL without exception.
+
+ Red Hat elfutils is an included package of the Open Invention Network.
+ An included package of the Open Invention Network is a package for which
+ Open Invention Network licensees cross-license their patents. No patent
+ license is granted, either expressly or impliedly, by designation as an
+ included package. Should you wish to participate in the Open Invention
+ Network licensing program, please visit www.openinventionnetwork.com
+ <http://www.openinventionnetwork.com>. */
+
+#ifndef _NLIST_H
+#define _NLIST_H 1
+
+
+/* Symbol list type. */
+struct nlist
+{
+ char *n_name; /* Symbol name. */
+ long int n_value; /* Value of symbol. */
+ short int n_scnum; /* Section number found in. */
+ unsigned short int n_type; /* Type of symbol. */
+ char n_sclass; /* Storage class. */
+ char n_numaux; /* Number of auxiliary entries. */
+};
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Get specified entries from file. */
+extern int nlist (__const char *__filename, struct nlist *__nl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* nlist.h */
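nlist() takes an array of struct nlist entries with only n_name filled in and fills in the remaining fields from the named file. A small sketch, assuming the classic convention that the array is terminated by a null n_name and that 0 means success; the path "./a.out" is a placeholder:

#include <nlist.h>
#include <stdio.h>

int main(void)
{
	struct nlist nl[] = {
		{ .n_name = (char *) "main" },
		{ .n_name = NULL },	/* terminator */
	};

	if (nlist("./a.out", nl) != 0) {
		fprintf(stderr, "nlist failed\n");
		return 1;
	}
	printf("main = 0x%lx\n", (unsigned long) nl[0].n_value);
	return 0;
}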
diff --git a/tools/perf/external/usr/include/plugin-api.h b/tools/perf/external/usr/include/plugin-api.h
new file mode 100644
index 00000000..9d58fef4
--- /dev/null
+++ b/tools/perf/external/usr/include/plugin-api.h
@@ -0,0 +1,308 @@
+/* plugin-api.h -- External linker plugin API. */
+
+/* Copyright 2009 Free Software Foundation, Inc.
+ Written by Cary Coutant <ccoutant@google.com>.
+
+ This file is part of binutils.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
+ MA 02110-1301, USA. */
+
+/* This file defines the interface for writing a linker plugin, which is
+ described at < http://gcc.gnu.org/wiki/whopr/driver >. */
+
+#ifndef PLUGIN_API_H
+#define PLUGIN_API_H
+
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#elif defined(HAVE_INTTYPES_H)
+#include <inttypes.h>
+#endif
+#include <sys/types.h>
+#if !defined(HAVE_STDINT_H) && !defined(HAVE_INTTYPES_H) && \
+ !defined(UINT64_MAX) && !defined(uint64_t)
+#error can not find uint64_t type
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Status code returned by most API routines. */
+
+enum ld_plugin_status
+{
+ LDPS_OK = 0,
+ LDPS_NO_SYMS, /* Attempt to get symbols that haven't been added. */
+ LDPS_BAD_HANDLE, /* No claimed object associated with given handle. */
+ LDPS_ERR
+ /* Additional Error codes TBD. */
+};
+
+/* The version of the API specification. */
+
+enum ld_plugin_api_version
+{
+ LD_PLUGIN_API_VERSION = 1
+};
+
+/* The type of output file being generated by the linker. */
+
+enum ld_plugin_output_file_type
+{
+ LDPO_REL,
+ LDPO_EXEC,
+ LDPO_DYN
+};
+
+/* An input file managed by the plugin library. */
+
+struct ld_plugin_input_file
+{
+ const char *name;
+ int fd;
+ off_t offset;
+ off_t filesize;
+ void *handle;
+};
+
+/* A symbol belonging to an input file managed by the plugin library. */
+
+struct ld_plugin_symbol
+{
+ char *name;
+ char *version;
+ int def;
+ int visibility;
+ uint64_t size;
+ char *comdat_key;
+ int resolution;
+};
+
+/* Whether the symbol is a definition, reference, or common, and whether it is weak. */
+
+enum ld_plugin_symbol_kind
+{
+ LDPK_DEF,
+ LDPK_WEAKDEF,
+ LDPK_UNDEF,
+ LDPK_WEAKUNDEF,
+ LDPK_COMMON
+};
+
+/* The visibility of the symbol. */
+
+enum ld_plugin_symbol_visibility
+{
+ LDPV_DEFAULT,
+ LDPV_PROTECTED,
+ LDPV_INTERNAL,
+ LDPV_HIDDEN
+};
+
+/* How a symbol is resolved. */
+
+enum ld_plugin_symbol_resolution
+{
+ LDPR_UNKNOWN = 0,
+
+ /* Symbol is still undefined at this point. */
+ LDPR_UNDEF,
+
+ /* This is the prevailing definition of the symbol, with references from
+ regular object code. */
+ LDPR_PREVAILING_DEF,
+
+ /* This is the prevailing definition of the symbol, with no
+ references from regular objects. It is only referenced from IR
+ code. */
+ LDPR_PREVAILING_DEF_IRONLY,
+
+ /* This definition was pre-empted by a definition in a regular
+ object file. */
+ LDPR_PREEMPTED_REG,
+
+ /* This definition was pre-empted by a definition in another IR file. */
+ LDPR_PREEMPTED_IR,
+
+ /* This symbol was resolved by a definition in another IR file. */
+ LDPR_RESOLVED_IR,
+
+ /* This symbol was resolved by a definition in a regular object
+ linked into the main executable. */
+ LDPR_RESOLVED_EXEC,
+
+ /* This symbol was resolved by a definition in a shared object. */
+ LDPR_RESOLVED_DYN
+};
+
+/* The plugin library's "claim file" handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_claim_file_handler) (
+ const struct ld_plugin_input_file *file, int *claimed);
+
+/* The plugin library's "all symbols read" handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_all_symbols_read_handler) (void);
+
+/* The plugin library's cleanup handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_cleanup_handler) (void);
+
+/* The linker's interface for registering the "claim file" handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_register_claim_file) (ld_plugin_claim_file_handler handler);
+
+/* The linker's interface for registering the "all symbols read" handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_register_all_symbols_read) (
+ ld_plugin_all_symbols_read_handler handler);
+
+/* The linker's interface for registering the cleanup handler. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_register_cleanup) (ld_plugin_cleanup_handler handler);
+
+/* The linker's interface for adding symbols from a claimed input file. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_add_symbols) (void *handle, int nsyms,
+ const struct ld_plugin_symbol *syms);
+
+/* The linker's interface for getting the input file information with
+ an open (possibly re-opened) file descriptor. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_input_file) (const void *handle,
+ struct ld_plugin_input_file *file);
+
+/* The linker's interface for releasing the input file. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_release_input_file) (const void *handle);
+
+/* The linker's interface for retrieving symbol resolution information. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_get_symbols) (const void *handle, int nsyms,
+ struct ld_plugin_symbol *syms);
+
+/* The linker's interface for adding a compiled input file. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_add_input_file) (const char *pathname);
+
+/* The linker's interface for adding a library that should be searched. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_add_input_library) (const char *libname);
+
+/* The linker's interface for adding a library path that should be searched. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_set_extra_library_path) (const char *path);
+
+/* The linker's interface for issuing a warning or error message. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_message) (int level, const char *format, ...);
+
+enum ld_plugin_level
+{
+ LDPL_INFO,
+ LDPL_WARNING,
+ LDPL_ERROR,
+ LDPL_FATAL
+};
+
+/* Values for the tv_tag field of the transfer vector. */
+
+enum ld_plugin_tag
+{
+ LDPT_NULL = 0,
+ LDPT_API_VERSION,
+ LDPT_GOLD_VERSION,
+ LDPT_LINKER_OUTPUT,
+ LDPT_OPTION,
+ LDPT_REGISTER_CLAIM_FILE_HOOK,
+ LDPT_REGISTER_ALL_SYMBOLS_READ_HOOK,
+ LDPT_REGISTER_CLEANUP_HOOK,
+ LDPT_ADD_SYMBOLS,
+ LDPT_GET_SYMBOLS,
+ LDPT_ADD_INPUT_FILE,
+ LDPT_MESSAGE,
+ LDPT_GET_INPUT_FILE,
+ LDPT_RELEASE_INPUT_FILE,
+ LDPT_ADD_INPUT_LIBRARY,
+ LDPT_OUTPUT_NAME,
+ LDPT_SET_EXTRA_LIBRARY_PATH,
+ LDPT_GNU_LD_VERSION
+};
+
+/* The plugin transfer vector. */
+
+struct ld_plugin_tv
+{
+ enum ld_plugin_tag tv_tag;
+ union
+ {
+ int tv_val;
+ const char *tv_string;
+ ld_plugin_register_claim_file tv_register_claim_file;
+ ld_plugin_register_all_symbols_read tv_register_all_symbols_read;
+ ld_plugin_register_cleanup tv_register_cleanup;
+ ld_plugin_add_symbols tv_add_symbols;
+ ld_plugin_get_symbols tv_get_symbols;
+ ld_plugin_add_input_file tv_add_input_file;
+ ld_plugin_message tv_message;
+ ld_plugin_get_input_file tv_get_input_file;
+ ld_plugin_release_input_file tv_release_input_file;
+ ld_plugin_add_input_library tv_add_input_library;
+ ld_plugin_set_extra_library_path tv_set_extra_library_path;
+ } tv_u;
+};
+
+/* The plugin library's "onload" entry point. */
+
+typedef
+enum ld_plugin_status
+(*ld_plugin_onload) (struct ld_plugin_tv *tv);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(PLUGIN_API_H) */
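A plugin built against this header exports a single "onload" entry point and walks the transfer vector to find the linker callbacks it needs. A minimal, illustrative sketch (not part of this patch) that registers a claim-file handler which declines every file; it assumes the conventional LDPT_NULL-terminated transfer vector and compilation with -DHAVE_STDINT_H so the header can find uint64_t:

#include "plugin-api.h"

static enum ld_plugin_status
claim_file(const struct ld_plugin_input_file *file, int *claimed)
{
	(void) file;
	*claimed = 0;	/* never claim anything in this sketch */
	return LDPS_OK;
}

enum ld_plugin_status
onload(struct ld_plugin_tv *tv)
{
	for (; tv->tv_tag != LDPT_NULL; tv++)
		if (tv->tv_tag == LDPT_REGISTER_CLAIM_FILE_HOOK)
			return tv->tv_u.tv_register_claim_file(claim_file);
	return LDPS_OK;
}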
diff --git a/tools/perf/external/usr/include/symcat.h b/tools/perf/external/usr/include/symcat.h
new file mode 100644
index 00000000..b4612879
--- /dev/null
+++ b/tools/perf/external/usr/include/symcat.h
@@ -0,0 +1,55 @@
+/* Symbol concatenation utilities.
+
+ Copyright (C) 1998, 2000, 2010 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef SYM_CAT_H
+#define SYM_CAT_H
+
+#if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE)
+#define CONCAT2(a,b) a##b
+#define CONCAT3(a,b,c) a##b##c
+#define CONCAT4(a,b,c,d) a##b##c##d
+#define CONCAT5(a,b,c,d,e) a##b##c##d##e
+#define CONCAT6(a,b,c,d,e,f) a##b##c##d##e##f
+#define STRINGX(s) #s
+#else
+/* Note one should never pass extra whitespace to the CONCATn macros,
+ e.g. CONCAT2(foo, bar) because traditional C will keep the space between
+ the two labels instead of concatenating them. Instead, make sure to
+ write CONCAT2(foo,bar). */
+#define CONCAT2(a,b) a/**/b
+#define CONCAT3(a,b,c) a/**/b/**/c
+#define CONCAT4(a,b,c,d) a/**/b/**/c/**/d
+#define CONCAT5(a,b,c,d,e) a/**/b/**/c/**/d/**/e
+#define CONCAT6(a,b,c,d,e,f) a/**/b/**/c/**/d/**/e/**/f
+#define STRINGX(s) "s"
+#endif
+
+#define XCONCAT2(a,b) CONCAT2(a,b)
+#define XCONCAT3(a,b,c) CONCAT3(a,b,c)
+#define XCONCAT4(a,b,c,d) CONCAT4(a,b,c,d)
+#define XCONCAT5(a,b,c,d,e) CONCAT5(a,b,c,d,e)
+#define XCONCAT6(a,b,c,d,e,f) CONCAT6(a,b,c,d,e,f)
+
+/* Note the layer of indirection here is typically used to allow
+ stringification of the expansion of macros. I.e. "#define foo
+ bar", "XSTRING(foo)", to yield "bar". Be aware that this only
+ works for __STDC__, not for traditional C which will still resolve
+ to "foo". */
+#define XSTRING(s) STRINGX(s)
+
+#endif /* SYM_CAT_H */
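The XCONCAT*/XSTRING wrappers add the layer of indirection noted above so that macro arguments are expanded before being pasted or stringified. A tiny illustration:

#include "symcat.h"
#include <stdio.h>

#define PREFIX my

int XCONCAT2 (PREFIX,counter);	/* declares: int mycounter; */

int main(void)
{
	XCONCAT2 (PREFIX,counter) = 42;
	/* XSTRING expands its argument first, so this prints "mycounter = 42";
	   STRINGX (PREFIX) would have produced the literal string "PREFIX". */
	printf("%s = %d\n", XSTRING (XCONCAT2 (PREFIX,counter)),
	       XCONCAT2 (PREFIX,counter));
	return 0;
}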
diff --git a/tools/perf/feature-tests.mak b/tools/perf/feature-tests.mak
new file mode 100644
index 00000000..a2febabf
--- /dev/null
+++ b/tools/perf/feature-tests.mak
@@ -0,0 +1,119 @@
+define SOURCE_HELLO
+#include <stdio.h>
+int main(void)
+{
+ return puts(\"hi\");
+}
+endef
+
+ifndef NO_DWARF
+define SOURCE_DWARF
+#include <dwarf.h>
+#include <elfutils/libdw.h>
+#include <elfutils/version.h>
+#ifndef _ELFUTILS_PREREQ
+#error
+#endif
+
+int main(void)
+{
+ Dwarf *dbg = dwarf_begin(0, DWARF_C_READ);
+ return (long)dbg;
+}
+endef
+endif
+
+define SOURCE_LIBELF
+#include <libelf.h>
+
+int main(void)
+{
+ Elf *elf = elf_begin(0, ELF_C_READ, 0);
+ return (long)elf;
+}
+endef
+
+define SOURCE_GLIBC
+#include <gnu/libc-version.h>
+
+int main(void)
+{
+ const char *version = gnu_get_libc_version();
+ return (long)version;
+}
+endef
+
+define SOURCE_ELF_MMAP
+#include <libelf.h>
+int main(void)
+{
+ Elf *elf = elf_begin(0, ELF_C_READ_MMAP, 0);
+ return (long)elf;
+}
+endef
+
+ifndef NO_NEWT
+define SOURCE_NEWT
+#include <newt.h>
+
+int main(void)
+{
+ newtInit();
+ newtCls();
+ return newtFinished();
+}
+endef
+endif
+
+ifndef NO_LIBPERL
+define SOURCE_PERL_EMBED
+#include <EXTERN.h>
+#include <perl.h>
+
+int main(void)
+{
+perl_alloc();
+return 0;
+}
+endef
+endif
+
+ifndef NO_LIBPYTHON
+define SOURCE_PYTHON_EMBED
+#include <Python.h>
+
+int main(void)
+{
+ Py_Initialize();
+ return 0;
+}
+endef
+endif
+
+define SOURCE_BFD
+#include <bfd.h>
+
+int main(void)
+{
+ bfd_demangle(0, 0, 0);
+ return 0;
+}
+endef
+
+define SOURCE_CPLUS_DEMANGLE
+extern char *cplus_demangle(const char *, int);
+
+int main(void)
+{
+ cplus_demangle(0, 0);
+ return 0;
+}
+endef
+
+# try-cc
+# Usage: option = $(call try-cc, source-to-build, cc-options)
+try-cc = $(shell sh -c \
+ 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \
+ echo "$(1)" | \
+ $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \
+ rm -f "$$TMP"')
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh
new file mode 100644
index 00000000..677e59d6
--- /dev/null
+++ b/tools/perf/perf-archive.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# perf archive
+# Arnaldo Carvalho de Melo <acme@redhat.com>
+
+PERF_DATA=perf.data
+if [ $# -ne 0 ] ; then
+ PERF_DATA=$1
+fi
+
+#
+# PERF_BUILDID_DIR environment variable set by perf
+# path to buildid directory, defaults to $HOME/.debug
+#
+if [ -z "$PERF_BUILDID_DIR" ]; then
+ PERF_BUILDID_DIR=~/.debug/
+else
+ # append / to make substitutions work
+ PERF_BUILDID_DIR=$PERF_BUILDID_DIR/
+fi
+
+BUILDIDS=$(mktemp /tmp/perf-archive-buildids.XXXXXX)
+NOBUILDID=0000000000000000000000000000000000000000
+
+perf buildid-list -i $PERF_DATA --with-hits | grep -v "^$NOBUILDID " > $BUILDIDS
+if [ ! -s $BUILDIDS ] ; then
+ echo "perf archive: no build-ids found"
+ rm -f $BUILDIDS
+ exit 1
+fi
+
+MANIFEST=$(mktemp /tmp/perf-archive-manifest.XXXXXX)
+
+cut -d ' ' -f 1 $BUILDIDS | \
+while read build_id ; do
+ linkname=$PERF_BUILDID_DIR.build-id/${build_id:0:2}/${build_id:2}
+ filename=$(readlink -f $linkname)
+ echo ${linkname#$PERF_BUILDID_DIR} >> $MANIFEST
+ echo ${filename#$PERF_BUILDID_DIR} >> $MANIFEST
+done
+
+tar cfj $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST
+rm -f $MANIFEST $BUILDIDS
+echo -e "Now please run:\n"
+echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n"
+echo "wherever you need to run 'perf report' on."
+exit 0
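For context, the intended round trip looks roughly like this; the host name and the record workload are placeholders, and only the tar line comes from the script's own instructions:

perf record -a sleep 10                  # write perf.data, including build-ids
perf archive perf.data                   # run this script, producing perf.data.tar.bz2
scp perf.data perf.data.tar.bz2 analysis-box:
ssh analysis-box 'tar xvf perf.data.tar.bz2 -C ~/.debug && perf report -i perf.data'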
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
new file mode 100644
index 00000000..cdd6c03f
--- /dev/null
+++ b/tools/perf/perf.c
@@ -0,0 +1,508 @@
+/*
+ * perf.c
+ *
+ * Performance analysis utility.
+ *
+ * This is the main hub from which the sub-commands (perf stat,
+ * perf top, perf record, perf report, etc.) are started.
+ */
+#include "builtin.h"
+
+#include "util/exec_cmd.h"
+#include "util/cache.h"
+#include "util/quote.h"
+#include "util/run-command.h"
+#include "util/parse-events.h"
+#include "util/debugfs.h"
+
+const char perf_usage_string[] =
+ "perf [--version] [--help] COMMAND [ARGS]";
+
+const char perf_more_info_string[] =
+ "See 'perf help COMMAND' for more information on a specific command.";
+
+int use_browser = -1;
+static int use_pager = -1;
+
+struct pager_config {
+ const char *cmd;
+ int val;
+};
+
+static char debugfs_mntpt[MAXPATHLEN];
+
+static int pager_command_config(const char *var, const char *value, void *data)
+{
+ struct pager_config *c = data;
+ if (!prefixcmp(var, "pager.") && !strcmp(var + 6, c->cmd))
+ c->val = perf_config_bool(var, value);
+ return 0;
+}
+
+/* returns 0 for "no pager", 1 for "use pager", and -1 for "not specified" */
+int check_pager_config(const char *cmd)
+{
+ struct pager_config c;
+ c.cmd = cmd;
+ c.val = -1;
+ perf_config(pager_command_config, &c);
+ return c.val;
+}
+
+static int tui_command_config(const char *var, const char *value, void *data)
+{
+ struct pager_config *c = data;
+ if (!prefixcmp(var, "tui.") && !strcmp(var + 4, c->cmd))
+ c->val = perf_config_bool(var, value);
+ return 0;
+}
+
+/* returns 0 for "no tui", 1 for "use tui", and -1 for "not specified" */
+static int check_tui_config(const char *cmd)
+{
+ struct pager_config c;
+ c.cmd = cmd;
+ c.val = -1;
+ perf_config(tui_command_config, &c);
+ return c.val;
+}
+
+static void commit_pager_choice(void)
+{
+ switch (use_pager) {
+ case 0:
+ setenv("PERF_PAGER", "cat", 1);
+ break;
+ case 1:
+ /* setup_pager(); */
+ break;
+ default:
+ break;
+ }
+}
+
+static void set_debugfs_path(void)
+{
+ char *path;
+
+ path = getenv(PERF_DEBUGFS_ENVIRONMENT);
+ snprintf(debugfs_path, MAXPATHLEN, "%s/%s", path ?: debugfs_mntpt,
+ "tracing/events");
+}
+
+static int handle_options(const char ***argv, int *argc, int *envchanged)
+{
+ int handled = 0;
+
+ while (*argc > 0) {
+ const char *cmd = (*argv)[0];
+ if (cmd[0] != '-')
+ break;
+
+ /*
+ * For legacy reasons, the "version" and "help"
+ * commands can be written with "--" prepended
+ * to make them look like flags.
+ */
+ if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version"))
+ break;
+
+ /*
+ * Check remaining flags.
+ */
+ if (!prefixcmp(cmd, CMD_EXEC_PATH)) {
+ cmd += strlen(CMD_EXEC_PATH);
+ if (*cmd == '=')
+ perf_set_argv_exec_path(cmd + 1);
+ else {
+ puts(perf_exec_path());
+ exit(0);
+ }
+ } else if (!strcmp(cmd, "--html-path")) {
+ puts(system_path(PERF_HTML_PATH));
+ exit(0);
+ } else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) {
+ use_pager = 1;
+ } else if (!strcmp(cmd, "--no-pager")) {
+ use_pager = 0;
+ if (envchanged)
+ *envchanged = 1;
+ } else if (!strcmp(cmd, "--perf-dir")) {
+ if (*argc < 2) {
+ fprintf(stderr, "No directory given for --perf-dir.\n");
+ usage(perf_usage_string);
+ }
+ setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1);
+ if (envchanged)
+ *envchanged = 1;
+ (*argv)++;
+ (*argc)--;
+ handled++;
+ } else if (!prefixcmp(cmd, CMD_PERF_DIR)) {
+ setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1);
+ if (envchanged)
+ *envchanged = 1;
+ } else if (!strcmp(cmd, "--work-tree")) {
+ if (*argc < 2) {
+ fprintf(stderr, "No directory given for --work-tree.\n");
+ usage(perf_usage_string);
+ }
+ setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
+ if (envchanged)
+ *envchanged = 1;
+ (*argv)++;
+ (*argc)--;
+ } else if (!prefixcmp(cmd, CMD_WORK_TREE)) {
+ setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1);
+ if (envchanged)
+ *envchanged = 1;
+ } else if (!strcmp(cmd, "--debugfs-dir")) {
+ if (*argc < 2) {
+ fprintf(stderr, "No directory given for --debugfs-dir.\n");
+ usage(perf_usage_string);
+ }
+ strncpy(debugfs_mntpt, (*argv)[1], MAXPATHLEN);
+ debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+ if (envchanged)
+ *envchanged = 1;
+ (*argv)++;
+ (*argc)--;
+ } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
+ strncpy(debugfs_mntpt, cmd + strlen(CMD_DEBUGFS_DIR), MAXPATHLEN);
+ debugfs_mntpt[MAXPATHLEN - 1] = '\0';
+ if (envchanged)
+ *envchanged = 1;
+ } else {
+ fprintf(stderr, "Unknown option: %s\n", cmd);
+ usage(perf_usage_string);
+ }
+
+ (*argv)++;
+ (*argc)--;
+ handled++;
+ }
+ return handled;
+}
+
+static int handle_alias(int *argcp, const char ***argv)
+{
+ int envchanged = 0, ret = 0, saved_errno = errno;
+ int count, option_count;
+ const char **new_argv;
+ const char *alias_command;
+ char *alias_string;
+
+ alias_command = (*argv)[0];
+ alias_string = alias_lookup(alias_command);
+ if (alias_string) {
+ if (alias_string[0] == '!') {
+ if (*argcp > 1) {
+ struct strbuf buf;
+
+ strbuf_init(&buf, PATH_MAX);
+ strbuf_addstr(&buf, alias_string);
+ sq_quote_argv(&buf, (*argv) + 1, PATH_MAX);
+ free(alias_string);
+ alias_string = buf.buf;
+ }
+ ret = system(alias_string + 1);
+ if (ret >= 0 && WIFEXITED(ret) &&
+ WEXITSTATUS(ret) != 127)
+ exit(WEXITSTATUS(ret));
+ die("Failed to run '%s' when expanding alias '%s'",
+ alias_string + 1, alias_command);
+ }
+ count = split_cmdline(alias_string, &new_argv);
+ if (count < 0)
+ die("Bad alias.%s string", alias_command);
+ option_count = handle_options(&new_argv, &count, &envchanged);
+ if (envchanged)
+ die("alias '%s' changes environment variables\n"
+ "You can use '!perf' in the alias to do this.",
+ alias_command);
+ memmove(new_argv - option_count, new_argv,
+ count * sizeof(char *));
+ new_argv -= option_count;
+
+ if (count < 1)
+ die("empty alias for %s", alias_command);
+
+ if (!strcmp(alias_command, new_argv[0]))
+ die("recursive alias: %s", alias_command);
+
+ new_argv = realloc(new_argv, sizeof(char *) *
+ (count + *argcp + 1));
+ /* insert after command name */
+ memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
+ new_argv[count + *argcp] = NULL;
+
+ *argv = new_argv;
+ *argcp += count - 1;
+
+ ret = 1;
+ }
+
+ errno = saved_errno;
+
+ return ret;
+}
+
+const char perf_version_string[] = PERF_VERSION;
+
+#define RUN_SETUP (1<<0)
+#define USE_PAGER (1<<1)
+/*
+ * require working tree to be present -- anything that uses this needs
+ * RUN_SETUP for reading from the configuration file.
+ */
+#define NEED_WORK_TREE (1<<2)
+
+struct cmd_struct {
+ const char *cmd;
+ int (*fn)(int, const char **, const char *);
+ int option;
+};
+
+static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
+{
+ int status;
+ struct stat st;
+ const char *prefix;
+
+ prefix = NULL;
+ if (p->option & RUN_SETUP)
+ prefix = NULL; /* setup_perf_directory(); */
+
+ if (use_browser == -1)
+ use_browser = check_tui_config(p->cmd);
+
+ if (use_pager == -1 && p->option & RUN_SETUP)
+ use_pager = check_pager_config(p->cmd);
+ if (use_pager == -1 && p->option & USE_PAGER)
+ use_pager = 1;
+ commit_pager_choice();
+ set_debugfs_path();
+
+ status = p->fn(argc, argv, prefix);
+ exit_browser(status);
+
+ if (status)
+ return status & 0xff;
+
+ /* Somebody closed stdout? */
+ if (fstat(fileno(stdout), &st))
+ return 0;
+ /* Ignore write errors for pipes and sockets.. */
+ if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode))
+ return 0;
+
+ /* Check for ENOSPC and EIO errors.. */
+ if (fflush(stdout))
+ die("write failure on standard output: %s", strerror(errno));
+ if (ferror(stdout))
+ die("unknown write failure on standard output");
+ if (fclose(stdout))
+ die("close failed on standard output: %s", strerror(errno));
+ return 0;
+}
+
+static void handle_internal_command(int argc, const char **argv)
+{
+ const char *cmd = argv[0];
+ static struct cmd_struct commands[] = {
+ { "buildid-cache", cmd_buildid_cache, 0 },
+ { "buildid-list", cmd_buildid_list, 0 },
+ { "diff", cmd_diff, 0 },
+ { "help", cmd_help, 0 },
+ { "list", cmd_list, 0 },
+ { "record", cmd_record, 0 },
+ { "report", cmd_report, 0 },
+ { "bench", cmd_bench, 0 },
+ { "stat", cmd_stat, 0 },
+ { "timechart", cmd_timechart, 0 },
+ { "top", cmd_top, 0 },
+ { "annotate", cmd_annotate, 0 },
+ { "version", cmd_version, 0 },
+ { "trace", cmd_trace, 0 },
+ { "sched", cmd_sched, 0 },
+ { "probe", cmd_probe, 0 },
+ { "kmem", cmd_kmem, 0 },
+ { "lock", cmd_lock, 0 },
+ { "kvm", cmd_kvm, 0 },
+ { "test", cmd_test, 0 },
+ { "inject", cmd_inject, 0 },
+ };
+ unsigned int i;
+ static const char ext[] = STRIP_EXTENSION;
+
+ if (sizeof(ext) > 1) {
+ i = strlen(argv[0]) - strlen(ext);
+ if (i > 0 && !strcmp(argv[0] + i, ext)) {
+ char *argv0 = strdup(argv[0]);
+ argv[0] = cmd = argv0;
+ argv0[i] = '\0';
+ }
+ }
+
+ /* Turn "perf cmd --help" into "perf help cmd" */
+ if (argc > 1 && !strcmp(argv[1], "--help")) {
+ argv[1] = argv[0];
+ argv[0] = cmd = "help";
+ }
+
+ for (i = 0; i < ARRAY_SIZE(commands); i++) {
+ struct cmd_struct *p = commands+i;
+ if (strcmp(p->cmd, cmd))
+ continue;
+ exit(run_builtin(p, argc, argv));
+ }
+}
+
+static void execv_dashed_external(const char **argv)
+{
+ struct strbuf cmd = STRBUF_INIT;
+ const char *tmp;
+ int status;
+
+ strbuf_addf(&cmd, "perf-%s", argv[0]);
+
+ /*
+ * argv[0] must be the perf command, but the argv array
+ * belongs to the caller, and may be reused in
+ * subsequent loop iterations. Save argv[0] and
+ * restore it on error.
+ */
+ tmp = argv[0];
+ argv[0] = cmd.buf;
+
+ /*
+ * if we fail because the command is not found, it is
+ * OK to return. Otherwise, we just pass along the status code.
+ */
+ status = run_command_v_opt(argv, 0);
+ if (status != -ERR_RUN_COMMAND_EXEC) {
+ if (IS_RUN_COMMAND_ERR(status))
+ die("unable to run '%s'", argv[0]);
+ exit(-status);
+ }
+ errno = ENOENT; /* as if we called execvp */
+
+ argv[0] = tmp;
+
+ strbuf_release(&cmd);
+}
+
+static int run_argv(int *argcp, const char ***argv)
+{
+ int done_alias = 0;
+
+ while (1) {
+ /* See if it's an internal command */
+ handle_internal_command(*argcp, *argv);
+
+ /* .. then try the external ones */
+ execv_dashed_external(*argv);
+
+ /* It could be an alias -- this works around the insanity
+ * of overriding "perf log" with "perf show" by having
+ * alias.log = show
+ */
+ if (done_alias || !handle_alias(argcp, argv))
+ break;
+ done_alias = 1;
+ }
+
+ return done_alias;
+}
+
+/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */
+static void get_debugfs_mntpt(void)
+{
+ const char *path = debugfs_mount(NULL);
+
+ if (path)
+ strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt));
+ else
+ debugfs_mntpt[0] = '\0';
+}
+
+int main(int argc, const char **argv)
+{
+ const char *cmd;
+
+ cmd = perf_extract_argv0_path(argv[0]);
+ if (!cmd)
+ cmd = "perf-help";
+ /* get debugfs mount point from /proc/mounts */
+ get_debugfs_mntpt();
+ /*
+ * "perf-xxxx" is the same as "perf xxxx", but we obviously:
+ *
+ * - cannot take flags in between the "perf" and the "xxxx".
+ * - cannot execute it externally (since it would just do
+ * the same thing over again)
+ *
+ * So we just directly call the internal command handler, and
+ * die if that one cannot handle it.
+ */
+ if (!prefixcmp(cmd, "perf-")) {
+ cmd += 5;
+ argv[0] = cmd;
+ handle_internal_command(argc, argv);
+ die("cannot handle %s internally", cmd);
+ }
+
+ /* Look for flags.. */
+ argv++;
+ argc--;
+ handle_options(&argv, &argc, NULL);
+ commit_pager_choice();
+ set_debugfs_path();
+ set_buildid_dir();
+
+ if (argc > 0) {
+ if (!prefixcmp(argv[0], "--"))
+ argv[0] += 2;
+ } else {
+ /* The user didn't specify a command; give them help */
+ printf("\n usage: %s\n\n", perf_usage_string);
+ list_common_cmds_help();
+ printf("\n %s\n\n", perf_more_info_string);
+ exit(1);
+ }
+ cmd = argv[0];
+
+ /*
+ * We use PATH to find perf commands, but we prepend some higher
+ * precedence paths: the "--exec-path" option, the PERF_EXEC_PATH
+ * environment, and the $(perfexecdir) from the Makefile at build
+ * time.
+ */
+ setup_path();
+
+ while (1) {
+ static int done_help;
+ static int was_alias;
+
+ was_alias = run_argv(&argc, &argv);
+ if (errno != ENOENT)
+ break;
+
+ if (was_alias) {
+ fprintf(stderr, "Expansion of alias '%s' failed; "
+ "'%s' is not a perf-command\n",
+ cmd, argv[0]);
+ exit(1);
+ }
+ if (!done_help) {
+ cmd = argv[0] = help_unknown_cmd(cmd);
+ done_help = 1;
+ } else
+ break;
+ }
+
+ fprintf(stderr, "Failed to run command '%s': %s\n",
+ cmd, strerror(errno));
+
+ return 1;
+}
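The commands[] array in handle_internal_command() is the single registration point for builtins. A minimal sketch of what a new entry could look like (cmd_hello is an invented name; real builtins live in their own builtin-*.c file and are declared in builtin.h, and printf comes from the headers perf.c already includes):

  /* illustrative only: a builtin is just a function with this signature */
  static int cmd_hello(int argc, const char **argv, const char *prefix)
  {
          (void)argv;
          (void)prefix;
          printf("perf hello: called with %d argument(s)\n", argc - 1);
          return 0;
  }

  /* ...plus one more line in the commands[] table: { "hello", cmd_hello, 0 }, */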
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
new file mode 100644
index 00000000..95aaf565
--- /dev/null
+++ b/tools/perf/perf.h
@@ -0,0 +1,149 @@
+#ifndef _PERF_PERF_H
+#define _PERF_PERF_H
+
+struct winsize;
+
+void get_term_dimensions(struct winsize *ws);
+
+#if defined(__i386__)
+#include "../../arch/x86/include/asm/unistd.h"
+#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define cpu_relax() asm volatile("rep; nop" ::: "memory");
+#endif
+
+#if defined(__x86_64__)
+#include "../../arch/x86/include/asm/unistd.h"
+#define rmb() asm volatile("lfence" ::: "memory")
+#define cpu_relax() asm volatile("rep; nop" ::: "memory");
+#endif
+
+#ifdef __powerpc__
+#include "../../arch/powerpc/include/asm/unistd.h"
+#define rmb() asm volatile ("sync" ::: "memory")
+#define cpu_relax()	asm volatile ("" ::: "memory")
+#endif
+
+#ifdef __s390__
+#include "../../arch/s390/include/asm/unistd.h"
+#define rmb() asm volatile("bcr 15,0" ::: "memory")
+#define cpu_relax() asm volatile("" ::: "memory");
+#endif
+
+#ifdef __sh__
+#include "../../arch/sh/include/asm/unistd.h"
+#if defined(__SH4A__) || defined(__SH5__)
+# define rmb() asm volatile("synco" ::: "memory")
+#else
+# define rmb() asm volatile("" ::: "memory")
+#endif
+#define cpu_relax() asm volatile("" ::: "memory")
+#endif
+
+#ifdef __hppa__
+#include "../../arch/parisc/include/asm/unistd.h"
+#define rmb() asm volatile("" ::: "memory")
+#define cpu_relax() asm volatile("" ::: "memory");
+#endif
+
+#ifdef __sparc__
+#include "../../arch/sparc/include/asm/unistd.h"
+#define rmb() asm volatile("":::"memory")
+#define cpu_relax() asm volatile("":::"memory")
+#endif
+
+#ifdef __alpha__
+#include "../../arch/alpha/include/asm/unistd.h"
+#define rmb() asm volatile("mb" ::: "memory")
+#define cpu_relax() asm volatile("" ::: "memory")
+#endif
+
+#ifdef __ia64__
+#include "../../arch/ia64/include/asm/unistd.h"
+#define rmb() asm volatile ("mf" ::: "memory")
+#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
+#endif
+
+#ifdef __arm__
+#include "../../arch/arm/include/asm/unistd.h"
+/*
+ * Use the __kuser_memory_barrier helper in the CPU helper page. See
+ * arch/arm/kernel/entry-armv.S in the kernel source for details.
+ */
+#define rmb() ((void(*)(void))0xffff0fa0)()
+#define cpu_relax() asm volatile("":::"memory")
+#endif
+
+#ifdef __mips__
+#include "../../arch/mips/include/asm/unistd.h"
+#define rmb() asm volatile( \
+ ".set mips2\n\t" \
+ "sync\n\t" \
+ ".set mips0" \
+ : /* no output */ \
+ : /* no input */ \
+ : "memory")
+#define cpu_relax() asm volatile("" ::: "memory")
+#endif
+
+#include <time.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+
+#include "../../include/linux/perf_event.h"
+#include "util/types.h"
+#include <stdbool.h>
+
+/*
+ * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
+ * counters in the current task.
+ */
+#define PR_TASK_PERF_EVENTS_DISABLE 31
+#define PR_TASK_PERF_EVENTS_ENABLE 32
+
+#ifndef NSEC_PER_SEC
+# define NSEC_PER_SEC 1000000000ULL
+#endif
+
+static inline unsigned long long rdclock(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+}
+
+/*
+ * Pick up some kernel type conventions:
+ */
+#define __user
+#define asmlinkage
+
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#define min(x, y) ({ \
+ typeof(x) _min1 = (x); \
+ typeof(y) _min2 = (y); \
+ (void) (&_min1 == &_min2); \
+ _min1 < _min2 ? _min1 : _min2; })
+
+static inline int
+sys_perf_event_open(struct perf_event_attr *attr,
+ pid_t pid, int cpu, int group_fd,
+ unsigned long flags)
+{
+ attr->size = sizeof(*attr);
+ return syscall(__NR_perf_event_open, attr, pid, cpu,
+ group_fd, flags);
+}
+
+#define MAX_COUNTERS 256
+#define MAX_NR_CPUS 256
+
+struct ip_callchain {
+ u64 nr;
+ u64 ips[0];
+};
+
+extern bool perf_host, perf_guest;
+
+#endif
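A minimal sketch of how the helpers above combine (illustrative only; it additionally needs <sys/ioctl.h> and <stdio.h>): open a hardware counter for the calling thread with sys_perf_event_open(), bracket a region of code with the enable/disable ioctls, and time it with rdclock():

  static void measure_region(void (*fn)(void))
  {
          struct perf_event_attr attr = {
                  .type           = PERF_TYPE_HARDWARE,
                  .config         = PERF_COUNT_HW_INSTRUCTIONS,
                  .disabled       = 1,            /* enabled explicitly below */
                  .exclude_kernel = 1,
          };
          u64 count = 0, start, end;
          /* pid 0 = calling thread, cpu -1 = any cpu, no group, no flags */
          int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

          if (fd < 0)
                  return;
          start = rdclock();
          ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
          fn();
          ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
          end = rdclock();
          if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
                  printf("%llu instructions in %llu ns\n",
                         (unsigned long long)count,
                         (unsigned long long)(end - start));
          close(fd);
  }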
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
new file mode 100644
index 00000000..01a64ad6
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
@@ -0,0 +1,135 @@
+/*
+ * This file was generated automatically by ExtUtils::ParseXS version 2.18_02 from the
+ * contents of Context.xs. Do not edit this file, edit Context.xs instead.
+ *
+ * ANY CHANGES MADE HERE WILL BE LOST!
+ *
+ */
+
+#line 1 "Context.xs"
+/*
+ * Context.xs. XS interfaces for perf trace.
+ *
+ * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+#include "../../../perf.h"
+#include "../../../util/trace-event.h"
+
+#ifndef PERL_UNUSED_VAR
+# define PERL_UNUSED_VAR(var) if (0) var = var
+#endif
+
+#line 42 "Context.c"
+
+XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */
+XS(XS_Perf__Trace__Context_common_pc)
+{
+#ifdef dVAR
+ dVAR; dXSARGS;
+#else
+ dXSARGS;
+#endif
+ if (items != 1)
+ Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_pc", "context");
+ PERL_UNUSED_VAR(cv); /* -W */
+ {
+ struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
+ int RETVAL;
+ dXSTARG;
+
+ RETVAL = common_pc(context);
+ XSprePUSH; PUSHi((IV)RETVAL);
+ }
+ XSRETURN(1);
+}
+
+
+XS(XS_Perf__Trace__Context_common_flags); /* prototype to pass -Wmissing-prototypes */
+XS(XS_Perf__Trace__Context_common_flags)
+{
+#ifdef dVAR
+ dVAR; dXSARGS;
+#else
+ dXSARGS;
+#endif
+ if (items != 1)
+ Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_flags", "context");
+ PERL_UNUSED_VAR(cv); /* -W */
+ {
+ struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
+ int RETVAL;
+ dXSTARG;
+
+ RETVAL = common_flags(context);
+ XSprePUSH; PUSHi((IV)RETVAL);
+ }
+ XSRETURN(1);
+}
+
+
+XS(XS_Perf__Trace__Context_common_lock_depth); /* prototype to pass -Wmissing-prototypes */
+XS(XS_Perf__Trace__Context_common_lock_depth)
+{
+#ifdef dVAR
+ dVAR; dXSARGS;
+#else
+ dXSARGS;
+#endif
+ if (items != 1)
+ Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_lock_depth", "context");
+ PERL_UNUSED_VAR(cv); /* -W */
+ {
+ struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0)));
+ int RETVAL;
+ dXSTARG;
+
+ RETVAL = common_lock_depth(context);
+ XSprePUSH; PUSHi((IV)RETVAL);
+ }
+ XSRETURN(1);
+}
+
+#ifdef __cplusplus
+extern "C"
+#endif
+XS(boot_Perf__Trace__Context); /* prototype to pass -Wmissing-prototypes */
+XS(boot_Perf__Trace__Context)
+{
+#ifdef dVAR
+ dVAR; dXSARGS;
+#else
+ dXSARGS;
+#endif
+ const char* file = __FILE__;
+
+ PERL_UNUSED_VAR(cv); /* -W */
+ PERL_UNUSED_VAR(items); /* -W */
+ XS_VERSION_BOOTCHECK ;
+
+ newXSproto("Perf::Trace::Context::common_pc", XS_Perf__Trace__Context_common_pc, file, "$");
+ newXSproto("Perf::Trace::Context::common_flags", XS_Perf__Trace__Context_common_flags, file, "$");
+ newXSproto("Perf::Trace::Context::common_lock_depth", XS_Perf__Trace__Context_common_lock_depth, file, "$");
+ if (PL_unitcheckav)
+ call_list(PL_scopestack_ix, PL_unitcheckav);
+ XSRETURN_YES;
+}
+
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
new file mode 100644
index 00000000..549cf046
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.xs
@@ -0,0 +1,42 @@
+/*
+ * Context.xs. XS interfaces for perf trace.
+ *
+ * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+#include "../../../perf.h"
+#include "../../../util/trace-event.h"
+
+MODULE = Perf::Trace::Context PACKAGE = Perf::Trace::Context
+PROTOTYPES: ENABLE
+
+int
+common_pc(context)
+ struct scripting_context * context
+
+int
+common_flags(context)
+ struct scripting_context * context
+
+int
+common_lock_depth(context)
+ struct scripting_context * context
+
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL b/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL
new file mode 100644
index 00000000..decdeb0f
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Makefile.PL
@@ -0,0 +1,17 @@
+use 5.010000;
+use ExtUtils::MakeMaker;
+# See lib/ExtUtils/MakeMaker.pm for details of how to influence
+# the contents of the Makefile that is written.
+WriteMakefile(
+ NAME => 'Perf::Trace::Context',
+ VERSION_FROM => 'lib/Perf/Trace/Context.pm', # finds $VERSION
+ PREREQ_PM => {}, # e.g., Module::Name => 1.1
+ ($] >= 5.005 ? ## Add these new keywords supported since 5.005
+ (ABSTRACT_FROM => 'lib/Perf/Trace/Context.pm', # retrieve abstract from module
+ AUTHOR => 'Tom Zanussi <tzanussi@gmail.com>') : ()),
+ LIBS => [''], # e.g., '-lm'
+ DEFINE => '-I ../..', # e.g., '-DHAVE_SOMETHING'
+ INC => '-I.', # e.g., '-I. -I/usr/include/other'
+ # Un-comment this if you add C files to link with later:
+ OBJECT => 'Context.o', # link all the C files too
+);
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/README b/tools/perf/scripts/perl/Perf-Trace-Util/README
new file mode 100644
index 00000000..9a970763
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/README
@@ -0,0 +1,59 @@
+Perf-Trace-Util version 0.01
+============================
+
+This module contains utility functions for use with perf trace.
+
+Core.pm and Util.pm are pure Perl modules; Core.pm contains routines
+that the core perf support for Perl calls on and should always be
+'used', while Util.pm contains useful but optional utility functions
+that scripts may want to use. Context.pm contains the Perl->C
+interface that allows scripts to access data in the embedding perf
+executable; scripts wishing to do that should 'use Context.pm'.
+
+The Perl->C perf interface is completely driven by Context.xs. If you
+want to add new Perl functions that end up accessing C data in the
+perf executable, you add descriptions of the new functions here.
+scripting_context is a pointer to the perf data in the perf executable
+that you want to access - it's passed as the second parameter,
+$context, to all handler functions.
+
+After you do that:
+
+ perl Makefile.PL # to create a Makefile for the next step
+ make # to create Context.c
+
+ edit Context.c to add const to the char* file = __FILE__ line in
+ XS(boot_Perf__Trace__Context) to silence a warning/error.
+
+ You can delete the Makefile, the object files and anything else that
+ was generated (e.g. blib and the shared library), except, of course,
+ Context.c.
+
+ You should then be able to run the normal perf make as usual.
+
+INSTALLATION
+
+Building perf with perf trace Perl scripting should install this
+module in the right place.
+
+You should make sure libperl and ExtUtils/Embed.pm are installed first
+e.g. apt-get install libperl-dev or yum install perl-ExtUtils-Embed.
+
+DEPENDENCIES
+
+This module requires these other modules and libraries:
+
+ None
+
+COPYRIGHT AND LICENCE
+
+Copyright (C) 2009 by Tom Zanussi <tzanussi@gmail.com>
+
+This library is free software; you can redistribute it and/or modify
+it under the same terms as Perl itself, either Perl version 5.10.0 or,
+at your option, any later version of Perl 5 you may have available.
+
+Alternatively, this software may be distributed under the terms of the
+GNU General Public License ("GPL") version 2 as published by the Free
+Software Foundation.
+
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm
new file mode 100644
index 00000000..6c7f3659
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Context.pm
@@ -0,0 +1,55 @@
+package Perf::Trace::Context;
+
+use 5.010000;
+use strict;
+use warnings;
+
+require Exporter;
+
+our @ISA = qw(Exporter);
+
+our %EXPORT_TAGS = ( 'all' => [ qw(
+) ] );
+
+our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
+
+our @EXPORT = qw(
+ common_pc common_flags common_lock_depth
+);
+
+our $VERSION = '0.01';
+
+require XSLoader;
+XSLoader::load('Perf::Trace::Context', $VERSION);
+
+1;
+__END__
+=head1 NAME
+
+Perf::Trace::Context - Perl extension for accessing functions in perf.
+
+=head1 SYNOPSIS
+
+ use Perf::Trace::Context;
+
+=head1 SEE ALSO
+
+Perf (trace) documentation
+
+=head1 AUTHOR
+
+Tom Zanussi, E<lt>tzanussi@gmail.com<gt>
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2009 by Tom Zanussi
+
+This library is free software; you can redistribute it and/or modify
+it under the same terms as Perl itself, either Perl version 5.10.0 or,
+at your option, any later version of Perl 5 you may have available.
+
+Alternatively, this software may be distributed under the terms of the
+GNU General Public License ("GPL") version 2 as published by the Free
+Software Foundation.
+
+=cut
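A minimal sketch of how a trace script uses this module: every event handler receives the scripting context as its second argument, which it can hand to the functions exported above (the handler chosen here is only an example):

  use Perf::Trace::Context;

  sub sched::sched_switch
  {
      my ($event_name, $context) = @_;

      printf("preempt count %d, flags 0x%x, lock depth %d\n",
             common_pc($context), common_flags($context),
             common_lock_depth($context));
  }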
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm
new file mode 100644
index 00000000..9df376a9
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Core.pm
@@ -0,0 +1,192 @@
+package Perf::Trace::Core;
+
+use 5.010000;
+use strict;
+use warnings;
+
+require Exporter;
+
+our @ISA = qw(Exporter);
+
+our %EXPORT_TAGS = ( 'all' => [ qw(
+) ] );
+
+our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
+
+our @EXPORT = qw(
+define_flag_field define_flag_value flag_str dump_flag_fields
+define_symbolic_field define_symbolic_value symbol_str dump_symbolic_fields
+trace_flag_str
+);
+
+our $VERSION = '0.01';
+
+my %trace_flags = (0x00 => "NONE",
+ 0x01 => "IRQS_OFF",
+ 0x02 => "IRQS_NOSUPPORT",
+ 0x04 => "NEED_RESCHED",
+ 0x08 => "HARDIRQ",
+ 0x10 => "SOFTIRQ");
+
+sub trace_flag_str
+{
+ my ($value) = @_;
+
+ my $string;
+
+ my $print_delim = 0;
+
+ foreach my $idx (sort {$a <=> $b} keys %trace_flags) {
+ if (!$value && !$idx) {
+ $string .= "NONE";
+ last;
+ }
+
+ if ($idx && ($value & $idx) == $idx) {
+ if ($print_delim) {
+ $string .= " | ";
+ }
+ $string .= "$trace_flags{$idx}";
+ $print_delim = 1;
+ $value &= ~$idx;
+ }
+ }
+
+ return $string;
+}
+
+my %flag_fields;
+my %symbolic_fields;
+
+sub flag_str
+{
+ my ($event_name, $field_name, $value) = @_;
+
+ my $string;
+
+ if ($flag_fields{$event_name}{$field_name}) {
+ my $print_delim = 0;
+ foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event_name}{$field_name}{"values"}}) {
+ if (!$value && !$idx) {
+ $string .= "$flag_fields{$event_name}{$field_name}{'values'}{$idx}";
+ last;
+ }
+ if ($idx && ($value & $idx) == $idx) {
+ if ($print_delim && $flag_fields{$event_name}{$field_name}{'delim'}) {
+ $string .= " $flag_fields{$event_name}{$field_name}{'delim'} ";
+ }
+ $string .= "$flag_fields{$event_name}{$field_name}{'values'}{$idx}";
+ $print_delim = 1;
+ $value &= ~$idx;
+ }
+ }
+ }
+
+ return $string;
+}
+
+sub define_flag_field
+{
+ my ($event_name, $field_name, $delim) = @_;
+
+ $flag_fields{$event_name}{$field_name}{"delim"} = $delim;
+}
+
+sub define_flag_value
+{
+ my ($event_name, $field_name, $value, $field_str) = @_;
+
+ $flag_fields{$event_name}{$field_name}{"values"}{$value} = $field_str;
+}
+
+sub dump_flag_fields
+{
+ for my $event (keys %flag_fields) {
+ print "event $event:\n";
+ for my $field (keys %{$flag_fields{$event}}) {
+ print " field: $field:\n";
+ print " delim: $flag_fields{$event}{$field}{'delim'}\n";
+ foreach my $idx (sort {$a <=> $b} keys %{$flag_fields{$event}{$field}{"values"}}) {
+ print " value $idx: $flag_fields{$event}{$field}{'values'}{$idx}\n";
+ }
+ }
+ }
+}
+
+sub symbol_str
+{
+ my ($event_name, $field_name, $value) = @_;
+
+ if ($symbolic_fields{$event_name}{$field_name}) {
+ foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event_name}{$field_name}{"values"}}) {
+ if (!$value && !$idx) {
+ return "$symbolic_fields{$event_name}{$field_name}{'values'}{$idx}";
+ last;
+ }
+ if ($value == $idx) {
+ return "$symbolic_fields{$event_name}{$field_name}{'values'}{$idx}";
+ }
+ }
+ }
+
+ return undef;
+}
+
+sub define_symbolic_field
+{
+ my ($event_name, $field_name) = @_;
+
+ # nothing to do, really
+}
+
+sub define_symbolic_value
+{
+ my ($event_name, $field_name, $value, $field_str) = @_;
+
+ $symbolic_fields{$event_name}{$field_name}{"values"}{$value} = $field_str;
+}
+
+sub dump_symbolic_fields
+{
+ for my $event (keys %symbolic_fields) {
+ print "event $event:\n";
+ for my $field (keys %{$symbolic_fields{$event}}) {
+ print " field: $field:\n";
+ foreach my $idx (sort {$a <=> $b} keys %{$symbolic_fields{$event}{$field}{"values"}}) {
+ print " value $idx: $symbolic_fields{$event}{$field}{'values'}{$idx}\n";
+ }
+ }
+ }
+}
+
+1;
+__END__
+=head1 NAME
+
+Perf::Trace::Core - Perl extension for perf trace
+
+=head1 SYNOPSIS
+
+ use Perf::Trace::Core
+
+=head1 SEE ALSO
+
+Perf (trace) documentation
+
+=head1 AUTHOR
+
+Tom Zanussi, E<lt>tzanussi@gmail.com<gt>
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2009 by Tom Zanussi
+
+This library is free software; you can redistribute it and/or modify
+it under the same terms as Perl itself, either Perl version 5.10.0 or,
+at your option, any later version of Perl 5 you may have available.
+
+Alternatively, this software may be distributed under the terms of the
+GNU General Public License ("GPL") version 2 as published by the Free
+Software Foundation.
+
+=cut
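A minimal sketch of the flag-field helpers above in use: register the field and its bit values once, then format raw values with flag_str() (the event, field and value names are made up for illustration):

  use Perf::Trace::Core;

  define_flag_field("irq::softirq_entry", "flags", "|");
  define_flag_value("irq::softirq_entry", "flags", 0x1, "PENDING");
  define_flag_value("irq::softirq_entry", "flags", 0x2, "RUNNING");

  print flag_str("irq::softirq_entry", "flags", 0x3), "\n";  # prints "PENDING | RUNNING"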
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
new file mode 100644
index 00000000..d94b40c8
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/Util.pm
@@ -0,0 +1,94 @@
+package Perf::Trace::Util;
+
+use 5.010000;
+use strict;
+use warnings;
+
+require Exporter;
+
+our @ISA = qw(Exporter);
+
+our %EXPORT_TAGS = ( 'all' => [ qw(
+) ] );
+
+our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
+
+our @EXPORT = qw(
+avg nsecs nsecs_secs nsecs_nsecs nsecs_str
+clear_term
+);
+
+our $VERSION = '0.01';
+
+sub avg
+{
+ my ($total, $n) = @_;
+
+ return $total / $n;
+}
+
+my $NSECS_PER_SEC = 1000000000;
+
+sub nsecs
+{
+ my ($secs, $nsecs) = @_;
+
+ return $secs * $NSECS_PER_SEC + $nsecs;
+}
+
+sub nsecs_secs {
+ my ($nsecs) = @_;
+
+ return $nsecs / $NSECS_PER_SEC;
+}
+
+sub nsecs_nsecs {
+ my ($nsecs) = @_;
+
+ return $nsecs % $NSECS_PER_SEC;
+}
+
+sub nsecs_str {
+ my ($nsecs) = @_;
+
+ my $str = sprintf("%5u.%09u", nsecs_secs($nsecs), nsecs_nsecs($nsecs));
+
+ return $str;
+}
+
+sub clear_term
+{
+ print "\x1b[H\x1b[2J";
+}
+
+1;
+__END__
+=head1 NAME
+
+Perf::Trace::Util - Perl extension for perf trace
+
+=head1 SYNOPSIS
+
+ use Perf::Trace::Util;
+
+=head1 SEE ALSO
+
+Perf (trace) documentation
+
+=head1 AUTHOR
+
+Tom Zanussi, E<lt>tzanussi@gmail.com<gt>
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2009 by Tom Zanussi
+
+This library is free software; you can redistribute it and/or modify
+it under the same terms as Perl itself, either Perl version 5.10.0 or,
+at your option, any later version of Perl 5 you may have available.
+
+Alternatively, this software may be distributed under the terms of the
+GNU General Public License ("GPL") version 2 as published by the Free
+Software Foundation.
+
+=cut
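A minimal sketch of the time helpers above in use: handlers receive seconds and nanoseconds separately, and nsecs()/nsecs_str() turn them into a single value and a printable timestamp (the numbers are arbitrary):

  use Perf::Trace::Util;

  my $t0 = nsecs(1234, 500_000_000);
  my $t1 = nsecs(1236, 250_000_000);

  printf("elapsed %s s, avg per event %u ns\n",
         nsecs_str($t1 - $t0), avg($t1 - $t0, 2));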
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/typemap b/tools/perf/scripts/perl/Perf-Trace-Util/typemap
new file mode 100644
index 00000000..84083680
--- /dev/null
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/typemap
@@ -0,0 +1 @@
+struct scripting_context * T_PTR
diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-record b/tools/perf/scripts/perl/bin/check-perf-trace-record
new file mode 100644
index 00000000..423ad6ae
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/check-perf-trace-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e kmem:kmalloc -e irq:softirq_entry -e kmem:kfree
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record
new file mode 100644
index 00000000..eb5846bc
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-report b/tools/perf/scripts/perl/bin/failed-syscalls-report
new file mode 100644
index 00000000..e3a5e55d
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: system-wide failed syscalls
+# args: [comm]
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/failed-syscalls.pl $comm
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-record b/tools/perf/scripts/perl/bin/rw-by-file-record
new file mode 100644
index 00000000..5bfaae5a
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rw-by-file-record
@@ -0,0 +1,3 @@
+#!/bin/bash
+perf record -a -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
+
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-report b/tools/perf/scripts/perl/bin/rw-by-file-report
new file mode 100644
index 00000000..d83070b7
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rw-by-file-report
@@ -0,0 +1,13 @@
+#!/bin/bash
+# description: r/w activity for a program, by file
+# args: <comm>
+if [ $# -lt 1 ] ; then
+ echo "usage: rw-by-file <comm>"
+ exit
+fi
+comm=$1
+shift
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $comm
+
+
+
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-record b/tools/perf/scripts/perl/bin/rw-by-pid-record
new file mode 100644
index 00000000..6e0b2f77
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-report b/tools/perf/scripts/perl/bin/rw-by-pid-report
new file mode 100644
index 00000000..7ef46983
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-report
@@ -0,0 +1,6 @@
+#!/bin/bash
+# description: system-wide r/w activity
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl
+
+
+
diff --git a/tools/perf/scripts/perl/bin/rwtop-record b/tools/perf/scripts/perl/bin/rwtop-record
new file mode 100644
index 00000000..6e0b2f77
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rwtop-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/rwtop-report b/tools/perf/scripts/perl/bin/rwtop-report
new file mode 100644
index 00000000..93e698cd
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/rwtop-report
@@ -0,0 +1,23 @@
+#!/bin/bash
+# description: system-wide r/w top
+# args: [interval]
+n_args=0
+for i in "$@"
+do
+ if expr match "$i" "-" > /dev/null ; then
+ break
+ fi
+ n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 1 ] ; then
+ echo "usage: rwtop-report [interval]"
+ exit
+fi
+if [ "$n_args" -gt 0 ] ; then
+ interval=$1
+ shift
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/rwtop.pl $interval
+
+
+
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-record b/tools/perf/scripts/perl/bin/wakeup-latency-record
new file mode 100644
index 00000000..9f2acaaa
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-record
@@ -0,0 +1,6 @@
+#!/bin/bash
+perf record -a -e sched:sched_switch -e sched:sched_wakeup $@
+
+
+
+
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-report b/tools/perf/scripts/perl/bin/wakeup-latency-report
new file mode 100644
index 00000000..a0d898f9
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-report
@@ -0,0 +1,6 @@
+#!/bin/bash
+# description: system-wide min/max/avg wakeup latency
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl
+
+
+
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record
new file mode 100644
index 00000000..85301f24
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/workqueue-stats-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-report b/tools/perf/scripts/perl/bin/workqueue-stats-report
new file mode 100644
index 00000000..35081132
--- /dev/null
+++ b/tools/perf/scripts/perl/bin/workqueue-stats-report
@@ -0,0 +1,7 @@
+#!/bin/bash
+# description: workqueue stats (ins/exe/create/destroy)
+perf trace $@ -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl
+
+
+
+
diff --git a/tools/perf/scripts/perl/check-perf-trace.pl b/tools/perf/scripts/perl/check-perf-trace.pl
new file mode 100644
index 00000000..4e7dc0a4
--- /dev/null
+++ b/tools/perf/scripts/perl/check-perf-trace.pl
@@ -0,0 +1,106 @@
+# perf trace event handlers, generated by perf trace -g perl
+# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+
+# This script tests basic functionality such as flag and symbol
+# strings, common_xxx() calls back into perf, begin, end, unhandled
+# events, etc. Basically, if this script runs successfully and
+# displays expected results, perl scripting support should be ok.
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Context;
+use Perf::Trace::Util;
+
+sub trace_begin
+{
+ print "trace_begin\n";
+}
+
+sub trace_end
+{
+ print "trace_end\n";
+
+ print_unhandled();
+}
+
+sub irq::softirq_entry
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $vec) = @_;
+
+ print_header($event_name, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm);
+
+ print_uncommon($context);
+
+ printf("vec=%s\n",
+ symbol_str("irq::softirq_entry", "vec", $vec));
+}
+
+sub kmem::kmalloc
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $call_site, $ptr, $bytes_req, $bytes_alloc,
+ $gfp_flags) = @_;
+
+ print_header($event_name, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm);
+
+ print_uncommon($context);
+
+ printf("call_site=%p, ptr=%p, bytes_req=%u, bytes_alloc=%u, ".
+ "gfp_flags=%s\n",
+ $call_site, $ptr, $bytes_req, $bytes_alloc,
+ flag_str("kmem::kmalloc", "gfp_flags", $gfp_flags));
+}
+
+# print trace fields not included in handler args
+sub print_uncommon
+{
+ my ($context) = @_;
+
+ printf("common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, ",
+ common_pc($context), trace_flag_str(common_flags($context)),
+ common_lock_depth($context));
+
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm) = @_;
+
+ $unhandled{$event_name}++;
+}
+
+sub print_header
+{
+ my ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;
+
+ printf("%-20s %5u %05u.%09u %8u %-20s ",
+ $event_name, $cpu, $secs, $nsecs, $pid, $comm);
+}
diff --git a/tools/perf/scripts/perl/failed-syscalls.pl b/tools/perf/scripts/perl/failed-syscalls.pl
new file mode 100644
index 00000000..94bc25a3
--- /dev/null
+++ b/tools/perf/scripts/perl/failed-syscalls.pl
@@ -0,0 +1,42 @@
+# failed system call counts
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide failed system call totals
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Context;
+use Perf::Trace::Util;
+
+my $for_comm = shift;
+
+my %failed_syscalls;
+
+sub raw_syscalls::sys_exit
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $id, $ret) = @_;
+
+ if ($ret < 0) {
+ $failed_syscalls{$common_comm}++;
+ }
+}
+
+sub trace_end
+{
+ printf("\nfailed syscalls by comm:\n\n");
+
+ printf("%-20s %10s\n", "comm", "# errors");
+ printf("%-20s %6s %10s\n", "--------------------", "----------");
+
+ foreach my $comm (sort {$failed_syscalls{$b} <=> $failed_syscalls{$a}}
+ keys %failed_syscalls) {
+ next if ($for_comm && $comm ne $for_comm);
+
+ printf("%-20s %10s\n", $comm, $failed_syscalls{$comm});
+ }
+}
diff --git a/tools/perf/scripts/perl/rw-by-file.pl b/tools/perf/scripts/perl/rw-by-file.pl
new file mode 100644
index 00000000..2a390976
--- /dev/null
+++ b/tools/perf/scripts/perl/rw-by-file.pl
@@ -0,0 +1,106 @@
+#!/usr/bin/perl -w
+# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+
+# Display r/w activity for files read/written to for a given program
+
+# The common_* event handler fields are the most useful fields common to
+# all events. They don't necessarily correspond to the 'common_*' fields
+# in the status files. Those fields not available as handler params can
+# be retrieved via script functions of the form get_common_*().
+
+use 5.010000;
+use strict;
+use warnings;
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Util;
+
+my $usage = "perf trace -s rw-by-file.pl <comm>\n";
+
+my $for_comm = shift or die $usage;
+
+my %reads;
+my %writes;
+
+sub syscalls::sys_enter_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $nr, $fd, $buf, $count) = @_;
+
+ if ($common_comm eq $for_comm) {
+ $reads{$fd}{bytes_requested} += $count;
+ $reads{$fd}{total_reads}++;
+ }
+}
+
+sub syscalls::sys_enter_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm, $nr, $fd, $buf, $count) = @_;
+
+ if ($common_comm eq $for_comm) {
+ $writes{$fd}{bytes_written} += $count;
+ $writes{$fd}{total_writes}++;
+ }
+}
+
+sub trace_end
+{
+ printf("file read counts for $for_comm:\n\n");
+
+ printf("%6s %10s %10s\n", "fd", "# reads", "bytes_requested");
+ printf("%6s %10s %10s\n", "------", "----------", "-----------");
+
+ foreach my $fd (sort {$reads{$b}{bytes_requested} <=>
+ $reads{$a}{bytes_requested}} keys %reads) {
+ my $total_reads = $reads{$fd}{total_reads};
+ my $bytes_requested = $reads{$fd}{bytes_requested};
+ printf("%6u %10u %10u\n", $fd, $total_reads, $bytes_requested);
+ }
+
+ printf("\nfile write counts for $for_comm:\n\n");
+
+ printf("%6s %10s %10s\n", "fd", "# writes", "bytes_written");
+ printf("%6s %10s %10s\n", "------", "----------", "-----------");
+
+ foreach my $fd (sort {$writes{$b}{bytes_written} <=>
+ $writes{$a}{bytes_written}} keys %writes) {
+ my $total_writes = $writes{$fd}{total_writes};
+ my $bytes_written = $writes{$fd}{bytes_written};
+ printf("%6u %10u %10u\n", $fd, $total_writes, $bytes_written);
+ }
+
+ print_unhandled();
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm) = @_;
+
+ $unhandled{$event_name}++;
+}
+
+
diff --git a/tools/perf/scripts/perl/rw-by-pid.pl b/tools/perf/scripts/perl/rw-by-pid.pl
new file mode 100644
index 00000000..9db23c9d
--- /dev/null
+++ b/tools/perf/scripts/perl/rw-by-pid.pl
@@ -0,0 +1,184 @@
+#!/usr/bin/perl -w
+# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+
+# Display r/w activity for all processes
+
+# The common_* event handler fields are the most useful fields common to
+# all events. They don't necessarily correspond to the 'common_*' fields
+# in the status files. Those fields not available as handler params can
+# be retrieved via script functions of the form get_common_*().
+
+use 5.010000;
+use strict;
+use warnings;
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Util;
+
+my %reads;
+my %writes;
+
+sub syscalls::sys_exit_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $ret) = @_;
+
+ if ($ret > 0) {
+ $reads{$common_pid}{bytes_read} += $ret;
+ } else {
+ if (!defined ($reads{$common_pid}{bytes_read})) {
+ $reads{$common_pid}{bytes_read} = 0;
+ }
+ $reads{$common_pid}{errors}{$ret}++;
+ }
+}
+
+sub syscalls::sys_enter_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $fd, $buf, $count) = @_;
+
+ $reads{$common_pid}{bytes_requested} += $count;
+ $reads{$common_pid}{total_reads}++;
+ $reads{$common_pid}{comm} = $common_comm;
+}
+
+sub syscalls::sys_exit_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $ret) = @_;
+
+ if ($ret <= 0) {
+ $writes{$common_pid}{errors}{$ret}++;
+ }
+}
+
+sub syscalls::sys_enter_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $fd, $buf, $count) = @_;
+
+ $writes{$common_pid}{bytes_written} += $count;
+ $writes{$common_pid}{total_writes}++;
+ $writes{$common_pid}{comm} = $common_comm;
+}
+
+sub trace_end
+{
+ printf("read counts by pid:\n\n");
+
+ printf("%6s %20s %10s %10s %10s\n", "pid", "comm",
+ "# reads", "bytes_requested", "bytes_read");
+ printf("%6s %-20s %10s %10s %10s\n", "------", "--------------------",
+ "-----------", "----------", "----------");
+
+ foreach my $pid (sort { ($reads{$b}{bytes_read} || 0) <=>
+ ($reads{$a}{bytes_read} || 0) } keys %reads) {
+ my $comm = $reads{$pid}{comm} || "";
+ my $total_reads = $reads{$pid}{total_reads} || 0;
+ my $bytes_requested = $reads{$pid}{bytes_requested} || 0;
+ my $bytes_read = $reads{$pid}{bytes_read} || 0;
+
+ printf("%6s %-20s %10s %10s %10s\n", $pid, $comm,
+ $total_reads, $bytes_requested, $bytes_read);
+ }
+
+ printf("\nfailed reads by pid:\n\n");
+
+ printf("%6s %20s %6s %10s\n", "pid", "comm", "error #", "# errors");
+ printf("%6s %20s %6s %10s\n", "------", "--------------------",
+ "------", "----------");
+
+ my @errcounts = ();
+
+ foreach my $pid (keys %reads) {
+ foreach my $error (keys %{$reads{$pid}{errors}}) {
+ my $comm = $reads{$pid}{comm} || "";
+ my $errcount = $reads{$pid}{errors}{$error} || 0;
+ push @errcounts, [$pid, $comm, $error, $errcount];
+ }
+ }
+
+ @errcounts = sort { $b->[3] <=> $a->[3] } @errcounts;
+
+ for my $i (0 .. $#errcounts) {
+ printf("%6d %-20s %6d %10s\n", $errcounts[$i][0],
+ $errcounts[$i][1], $errcounts[$i][2], $errcounts[$i][3]);
+ }
+
+ printf("\nwrite counts by pid:\n\n");
+
+ printf("%6s %20s %10s %10s\n", "pid", "comm",
+ "# writes", "bytes_written");
+ printf("%6s %-20s %10s %10s\n", "------", "--------------------",
+ "-----------", "----------");
+
+ foreach my $pid (sort { ($writes{$b}{bytes_written} || 0) <=>
+ ($writes{$a}{bytes_written} || 0)} keys %writes) {
+ my $comm = $writes{$pid}{comm} || "";
+ my $total_writes = $writes{$pid}{total_writes} || 0;
+ my $bytes_written = $writes{$pid}{bytes_written} || 0;
+
+ printf("%6s %-20s %10s %10s\n", $pid, $comm,
+ $total_writes, $bytes_written);
+ }
+
+ printf("\nfailed writes by pid:\n\n");
+
+ printf("%6s %20s %6s %10s\n", "pid", "comm", "error #", "# errors");
+ printf("%6s %20s %6s %10s\n", "------", "--------------------",
+ "------", "----------");
+
+ @errcounts = ();
+
+ foreach my $pid (keys %writes) {
+ foreach my $error (keys %{$writes{$pid}{errors}}) {
+ my $comm = $writes{$pid}{comm} || "";
+ my $errcount = $writes{$pid}{errors}{$error} || 0;
+ push @errcounts, [$pid, $comm, $error, $errcount];
+ }
+ }
+
+ @errcounts = sort { $b->[3] <=> $a->[3] } @errcounts;
+
+ for my $i (0 .. $#errcounts) {
+ printf("%6d %-20s %6d %10s\n", $errcounts[$i][0],
+ $errcounts[$i][1], $errcounts[$i][2], $errcounts[$i][3]);
+ }
+
+ print_unhandled();
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm) = @_;
+
+ $unhandled{$event_name}++;
+}
diff --git a/tools/perf/scripts/perl/rwtop.pl b/tools/perf/scripts/perl/rwtop.pl
new file mode 100644
index 00000000..4bb3ecd3
--- /dev/null
+++ b/tools/perf/scripts/perl/rwtop.pl
@@ -0,0 +1,199 @@
+#!/usr/bin/perl -w
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+
+# read/write top
+#
+# Periodically displays system-wide r/w call activity, broken down by
+# pid. If an [interval] arg is specified, the display will be
+# refreshed every [interval] seconds. The default interval is 3
+# seconds.
+
+use 5.010000;
+use strict;
+use warnings;
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Util;
+
+my $default_interval = 3;
+my $nlines = 20;
+my $print_thread;
+my $print_pending = 0;
+
+my %reads;
+my %writes;
+
+my $interval = shift;
+if (!$interval) {
+ $interval = $default_interval;
+}
+
+sub syscalls::sys_exit_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $ret) = @_;
+
+ print_check();
+
+ if ($ret > 0) {
+ $reads{$common_pid}{bytes_read} += $ret;
+ } else {
+ if (!defined ($reads{$common_pid}{bytes_read})) {
+ $reads{$common_pid}{bytes_read} = 0;
+ }
+ $reads{$common_pid}{errors}{$ret}++;
+ }
+}
+
+sub syscalls::sys_enter_read
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $fd, $buf, $count) = @_;
+
+ print_check();
+
+ $reads{$common_pid}{bytes_requested} += $count;
+ $reads{$common_pid}{total_reads}++;
+ $reads{$common_pid}{comm} = $common_comm;
+}
+
+sub syscalls::sys_exit_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $ret) = @_;
+
+ print_check();
+
+ if ($ret <= 0) {
+ $writes{$common_pid}{errors}{$ret}++;
+ }
+}
+
+sub syscalls::sys_enter_write
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $nr, $fd, $buf, $count) = @_;
+
+ print_check();
+
+ $writes{$common_pid}{bytes_written} += $count;
+ $writes{$common_pid}{total_writes}++;
+ $writes{$common_pid}{comm} = $common_comm;
+}
+
+sub trace_begin
+{
+ $SIG{ALRM} = \&set_print_pending;
+ alarm 1;
+}
+
+sub trace_end
+{
+ print_unhandled();
+ print_totals();
+}
+
+sub print_check()
+{
+ if ($print_pending == 1) {
+ $print_pending = 0;
+ print_totals();
+ }
+}
+
+sub set_print_pending()
+{
+ $print_pending = 1;
+ alarm $interval;
+}
+
+sub print_totals
+{
+ my $count;
+
+ $count = 0;
+
+ clear_term();
+
+ printf("\nread counts by pid:\n\n");
+
+ printf("%6s %20s %10s %10s %10s\n", "pid", "comm",
+ "# reads", "bytes_req", "bytes_read");
+ printf("%6s %-20s %10s %10s %10s\n", "------", "--------------------",
+ "----------", "----------", "----------");
+
+ foreach my $pid (sort { ($reads{$b}{bytes_read} || 0) <=>
+ ($reads{$a}{bytes_read} || 0) } keys %reads) {
+ my $comm = $reads{$pid}{comm} || "";
+ my $total_reads = $reads{$pid}{total_reads} || 0;
+ my $bytes_requested = $reads{$pid}{bytes_requested} || 0;
+ my $bytes_read = $reads{$pid}{bytes_read} || 0;
+
+ printf("%6s %-20s %10s %10s %10s\n", $pid, $comm,
+ $total_reads, $bytes_requested, $bytes_read);
+
+ if (++$count == $nlines) {
+ last;
+ }
+ }
+
+ $count = 0;
+
+ printf("\nwrite counts by pid:\n\n");
+
+ printf("%6s %20s %10s %13s\n", "pid", "comm",
+ "# writes", "bytes_written");
+ printf("%6s %-20s %10s %13s\n", "------", "--------------------",
+ "----------", "-------------");
+
+ foreach my $pid (sort { ($writes{$b}{bytes_written} || 0) <=>
+ ($writes{$a}{bytes_written} || 0)} keys %writes) {
+ my $comm = $writes{$pid}{comm} || "";
+ my $total_writes = $writes{$pid}{total_writes} || 0;
+ my $bytes_written = $writes{$pid}{bytes_written} || 0;
+
+ printf("%6s %-20s %10s %13s\n", $pid, $comm,
+ $total_writes, $bytes_written);
+
+ if (++$count == $nlines) {
+ last;
+ }
+ }
+
+ %reads = ();
+ %writes = ();
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm) = @_;
+
+ $unhandled{$event_name}++;
+}
diff --git a/tools/perf/scripts/perl/wakeup-latency.pl b/tools/perf/scripts/perl/wakeup-latency.pl
new file mode 100644
index 00000000..d9143dce
--- /dev/null
+++ b/tools/perf/scripts/perl/wakeup-latency.pl
@@ -0,0 +1,107 @@
+#!/usr/bin/perl -w
+# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+
+# Display avg/min/max wakeup latency
+
+# The common_* event handler fields are the most useful fields common to
+# all events. They don't necessarily correspond to the 'common_*' fields
+# in the status files. Those fields not available as handler params can
+# be retrieved via script functions of the form get_common_*().
+
+use 5.010000;
+use strict;
+use warnings;
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Util;
+
+my %last_wakeup;
+
+my $max_wakeup_latency;
+my $min_wakeup_latency;
+my $total_wakeup_latency = 0;
+my $total_wakeups = 0;
+
+sub sched::sched_switch
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $prev_comm, $prev_pid, $prev_prio, $prev_state, $next_comm, $next_pid,
+ $next_prio) = @_;
+
+ my $wakeup_ts = $last_wakeup{$common_cpu}{ts};
+ if ($wakeup_ts) {
+ my $switch_ts = nsecs($common_secs, $common_nsecs);
+ my $wakeup_latency = $switch_ts - $wakeup_ts;
+ if ($wakeup_latency > $max_wakeup_latency) {
+ $max_wakeup_latency = $wakeup_latency;
+ }
+ if ($wakeup_latency < $min_wakeup_latency) {
+ $min_wakeup_latency = $wakeup_latency;
+ }
+ $total_wakeup_latency += $wakeup_latency;
+ $total_wakeups++;
+ }
+ $last_wakeup{$common_cpu}{ts} = 0;
+}
+
+sub sched::sched_wakeup
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $comm, $pid, $prio, $success, $target_cpu) = @_;
+
+ $last_wakeup{$target_cpu}{ts} = nsecs($common_secs, $common_nsecs);
+}
+
+sub trace_begin
+{
+ $min_wakeup_latency = 1000000000;
+ $max_wakeup_latency = 0;
+}
+
+sub trace_end
+{
+ printf("wakeup_latency stats:\n\n");
+ print "total_wakeups: $total_wakeups\n";
+ if ($total_wakeups) {
+ printf("avg_wakeup_latency (ns): %u\n",
+ avg($total_wakeup_latency, $total_wakeups));
+ } else {
+ printf("avg_wakeup_latency (ns): N/A\n");
+ }
+ printf("min_wakeup_latency (ns): %u\n", $min_wakeup_latency);
+ printf("max_wakeup_latency (ns): %u\n", $max_wakeup_latency);
+
+ print_unhandled();
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm) = @_;
+
+ $unhandled{$event_name}++;
+}
diff --git a/tools/perf/scripts/perl/workqueue-stats.pl b/tools/perf/scripts/perl/workqueue-stats.pl
new file mode 100644
index 00000000..b84b1269
--- /dev/null
+++ b/tools/perf/scripts/perl/workqueue-stats.pl
@@ -0,0 +1,129 @@
+#!/usr/bin/perl -w
+# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+
+# Displays workqueue stats
+#
+# Usage:
+#
+# perf record -c 1 -f -a -R -e workqueue:workqueue_creation -e
+# workqueue:workqueue_destruction -e workqueue:workqueue_execution
+# -e workqueue:workqueue_insertion
+#
+# perf trace -p -s tools/perf/scripts/perl/workqueue-stats.pl
+
+use 5.010000;
+use strict;
+use warnings;
+
+use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
+use lib "./Perf-Trace-Util/lib";
+use Perf::Trace::Core;
+use Perf::Trace::Util;
+
+my @cpus;
+
+sub workqueue::workqueue_destruction
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $thread_comm, $thread_pid) = @_;
+
+ $cpus[$common_cpu]{$thread_pid}{destroyed}++;
+ $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
+}
+
+sub workqueue::workqueue_creation
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $thread_comm, $thread_pid, $cpu) = @_;
+
+ $cpus[$common_cpu]{$thread_pid}{created}++;
+ $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
+}
+
+sub workqueue::workqueue_execution
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $thread_comm, $thread_pid, $func) = @_;
+
+ $cpus[$common_cpu]{$thread_pid}{executed}++;
+ $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
+}
+
+sub workqueue::workqueue_insertion
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm,
+ $thread_comm, $thread_pid, $func) = @_;
+
+ $cpus[$common_cpu]{$thread_pid}{inserted}++;
+ $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
+}
+
+sub trace_end
+{
+ print "workqueue work stats:\n\n";
+ my $cpu = 0;
+ printf("%3s %6s %6s\t%-20s\n", "cpu", "ins", "exec", "name");
+ printf("%3s %6s %6s\t%-20s\n", "---", "---", "----", "----");
+ foreach my $pidhash (@cpus) {
+ while ((my $pid, my $wqhash) = each %$pidhash) {
+ my $ins = $$wqhash{'inserted'} || 0;
+ my $exe = $$wqhash{'executed'} || 0;
+ my $comm = $$wqhash{'comm'} || "";
+ if ($ins || $exe) {
+ printf("%3u %6u %6u\t%-20s\n", $cpu, $ins, $exe, $comm);
+ }
+ }
+ $cpu++;
+ }
+
+ $cpu = 0;
+ print "\nworkqueue lifecycle stats:\n\n";
+ printf("%3s %6s %6s\t%-20s\n", "cpu", "created", "destroyed", "name");
+ printf("%3s %6s %6s\t%-20s\n", "---", "-------", "---------", "----");
+ foreach my $pidhash (@cpus) {
+ while ((my $pid, my $wqhash) = each %$pidhash) {
+ my $created = $$wqhash{'created'} || 0;
+ my $destroyed = $$wqhash{'destroyed'} || 0;
+ my $comm = $$wqhash{'comm'} || "";
+ if ($created || $destroyed) {
+ printf("%3u %6u %6u\t%-20s\n", $cpu, $created, $destroyed,
+ $comm);
+ }
+ }
+ $cpu++;
+ }
+
+ print_unhandled();
+}
+
+my %unhandled;
+
+sub print_unhandled
+{
+ if ((scalar keys %unhandled) == 0) {
+ return;
+ }
+
+ print "\nunhandled events:\n\n";
+
+ printf("%-40s %10s\n", "event", "count");
+ printf("%-40s %10s\n", "----------------------------------------",
+ "-----------");
+
+ foreach my $event_name (keys %unhandled) {
+ printf("%-40s %10d\n", $event_name, $unhandled{$event_name});
+ }
+}
+
+sub trace_unhandled
+{
+ my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
+ $common_pid, $common_comm) = @_;
+
+ $unhandled{$event_name}++;
+}
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
new file mode 100644
index 00000000..957085dd
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -0,0 +1,88 @@
+/*
+ * Context.c. Python interfaces for perf trace.
+ *
+ * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <Python.h>
+#include "../../../perf.h"
+#include "../../../util/trace-event.h"
+
+PyMODINIT_FUNC initperf_trace_context(void);
+
+static PyObject *perf_trace_context_common_pc(PyObject *self, PyObject *args)
+{
+ static struct scripting_context *scripting_context;
+ PyObject *context;
+ int retval;
+
+ if (!PyArg_ParseTuple(args, "O", &context))
+ return NULL;
+
+ scripting_context = PyCObject_AsVoidPtr(context);
+ retval = common_pc(scripting_context);
+
+ return Py_BuildValue("i", retval);
+}
+
+static PyObject *perf_trace_context_common_flags(PyObject *self,
+ PyObject *args)
+{
+ static struct scripting_context *scripting_context;
+ PyObject *context;
+ int retval;
+
+ if (!PyArg_ParseTuple(args, "O", &context))
+ return NULL;
+
+ scripting_context = PyCObject_AsVoidPtr(context);
+ retval = common_flags(scripting_context);
+
+ return Py_BuildValue("i", retval);
+}
+
+static PyObject *perf_trace_context_common_lock_depth(PyObject *self,
+ PyObject *args)
+{
+ static struct scripting_context *scripting_context;
+ PyObject *context;
+ int retval;
+
+ if (!PyArg_ParseTuple(args, "O", &context))
+ return NULL;
+
+ scripting_context = PyCObject_AsVoidPtr(context);
+ retval = common_lock_depth(scripting_context);
+
+ return Py_BuildValue("i", retval);
+}
+
+static PyMethodDef ContextMethods[] = {
+ { "common_pc", perf_trace_context_common_pc, METH_VARARGS,
+ "Get the common preempt count event field value."},
+ { "common_flags", perf_trace_context_common_flags, METH_VARARGS,
+ "Get the common flags event field value."},
+ { "common_lock_depth", perf_trace_context_common_lock_depth,
+ METH_VARARGS, "Get the common lock depth event field value."},
+ { NULL, NULL, 0, NULL}
+};
+
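+/*
+ * Called from the handler scripts, e.g. (illustrative, using the
+ * 'context' parameter every handler receives):
+ *
+ *	from perf_trace_context import *
+ *	...
+ *	pc = common_pc(context)
+ *	flags = common_flags(context)
+ *	depth = common_lock_depth(context)
+ */
+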
+PyMODINIT_FUNC initperf_trace_context(void)
+{
+ (void) Py_InitModule("perf_trace_context", ContextMethods);
+}
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
new file mode 100644
index 00000000..aad7525b
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
@@ -0,0 +1,121 @@
+# Core.py - Python extension for perf trace, core functions
+#
+# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
+#
+# This software may be distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
+from collections import defaultdict
+
+def autodict():
+ return defaultdict(autodict)
+
+flag_fields = autodict()
+symbolic_fields = autodict()
+
+def define_flag_field(event_name, field_name, delim):
+ flag_fields[event_name][field_name]['delim'] = delim
+
+def define_flag_value(event_name, field_name, value, field_str):
+ flag_fields[event_name][field_name]['values'][value] = field_str
+
+def define_symbolic_field(event_name, field_name):
+ # nothing to do, really
+ pass
+
+def define_symbolic_value(event_name, field_name, value, field_str):
+ symbolic_fields[event_name][field_name]['values'][value] = field_str
+
+def flag_str(event_name, field_name, value):
+ string = ""
+
+ if flag_fields[event_name][field_name]:
+ print_delim = 0
+ keys = flag_fields[event_name][field_name]['values'].keys()
+ keys.sort()
+ for idx in keys:
+ if not value and not idx:
+ string += flag_fields[event_name][field_name]['values'][idx]
+ break
+ if idx and (value & idx) == idx:
+ if print_delim and flag_fields[event_name][field_name]['delim']:
+ string += " " + flag_fields[event_name][field_name]['delim'] + " "
+ string += flag_fields[event_name][field_name]['values'][idx]
+ print_delim = 1
+ value &= ~idx
+
+ return string
+
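+# Example (hypothetical event/field/values; the delimiter is "|"):
+#
+#   define_flag_field("mymod__myevent", "flags", "|")
+#   define_flag_value("mymod__myevent", "flags", 1, "READ")
+#   define_flag_value("mymod__myevent", "flags", 2, "SYNC")
+#
+#   flag_str("mymod__myevent", "flags", 3)   # -> "READ | SYNC"
+#   flag_str("mymod__myevent", "flags", 0)   # -> ""
+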
+def symbol_str(event_name, field_name, value):
+ string = ""
+
+ if symbolic_fields[event_name][field_name]:
+ keys = symbolic_fields[event_name][field_name]['values'].keys()
+ keys.sort()
+ for idx in keys:
+ if not value and not idx:
+ string = symbolic_fields[event_name][field_name]['values'][idx]
+ break
+ if (value == idx):
+ string = symbolic_fields[event_name][field_name]['values'][idx]
+ break
+
+ return string
+
+trace_flags = { 0x00: "NONE", \
+ 0x01: "IRQS_OFF", \
+ 0x02: "IRQS_NOSUPPORT", \
+ 0x04: "NEED_RESCHED", \
+ 0x08: "HARDIRQ", \
+ 0x10: "SOFTIRQ" }
+
+def trace_flag_str(value):
+ string = ""
+ print_delim = 0
+
+ keys = trace_flags.keys()
+
+ for idx in keys:
+ if not value and not idx:
+ string += "NONE"
+ break
+
+ if idx and (value & idx) == idx:
+ if print_delim:
+ string += " | ";
+ string += trace_flags[idx]
+ print_delim = 1
+ value &= ~idx
+
+ return string
+
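+# e.g. trace_flag_str(0) returns "NONE", trace_flag_str(0x01) returns
+# "IRQS_OFF", and multiple set bits are joined with " | ".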
+
+def taskState(state):
+ states = {
+ 0 : "R",
+ 1 : "S",
+ 2 : "D",
+ 64: "DEAD"
+ }
+
+ if state not in states:
+ return "Unknown"
+
+ return states[state]
+
+
+class EventHeaders:
+ def __init__(self, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm):
+ self.cpu = common_cpu
+ self.secs = common_secs
+ self.nsecs = common_nsecs
+ self.pid = common_pid
+ self.comm = common_comm
+
+ def ts(self):
+ return (self.secs * (10 ** 9)) + self.nsecs
+
+ def ts_format(self):
+ return "%d.%d" % (self.secs, int(self.nsecs / 1000))
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
new file mode 100644
index 00000000..ae9a56e4
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
@@ -0,0 +1,184 @@
+# SchedGui.py - Python extension for perf trace, basic GUI code for
+# traces drawing and overview.
+#
+# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
+#
+# This software is distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
+
+try:
+ import wx
+except ImportError:
+ raise ImportError, "You need to install the wxpython lib for this script"
+
+
+class RootFrame(wx.Frame):
+ Y_OFFSET = 100
+ RECT_HEIGHT = 100
+ RECT_SPACE = 50
+ EVENT_MARKING_WIDTH = 5
+
+ def __init__(self, sched_tracer, title, parent = None, id = -1):
+ wx.Frame.__init__(self, parent, id, title)
+
+ (self.screen_width, self.screen_height) = wx.GetDisplaySize()
+ self.screen_width -= 10
+ self.screen_height -= 10
+ self.zoom = 0.5
+ self.scroll_scale = 20
+ self.sched_tracer = sched_tracer
+ self.sched_tracer.set_root_win(self)
+ (self.ts_start, self.ts_end) = sched_tracer.interval()
+ self.update_width_virtual()
+ self.nr_rects = sched_tracer.nr_rectangles() + 1
+ self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
+
+ # whole window panel
+ self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
+
+ # scrollable container
+ self.scroll = wx.ScrolledWindow(self.panel)
+ self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
+ self.scroll.EnableScrolling(True, True)
+ self.scroll.SetFocus()
+
+ # scrollable drawing area
+ self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
+ self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
+ self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
+ self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
+ self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
+ self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
+ self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
+
+ self.scroll.Fit()
+ self.Fit()
+
+ self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
+
+ self.txt = None
+
+ self.Show(True)
+
+ def us_to_px(self, val):
+ return val / (10 ** 3) * self.zoom
+
+ def px_to_us(self, val):
+ return (val / self.zoom) * (10 ** 3)
+
+ def scroll_start(self):
+ (x, y) = self.scroll.GetViewStart()
+ return (x * self.scroll_scale, y * self.scroll_scale)
+
+ def scroll_start_us(self):
+ (x, y) = self.scroll_start()
+ return self.px_to_us(x)
+
+ def paint_rectangle_zone(self, nr, color, top_color, start, end):
+ offset_px = self.us_to_px(start - self.ts_start)
+ width_px = self.us_to_px(end - self.ts_start)
+
+ offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
+ width_py = RootFrame.RECT_HEIGHT
+
+ dc = self.dc
+
+ if top_color is not None:
+ (r, g, b) = top_color
+ top_color = wx.Colour(r, g, b)
+ brush = wx.Brush(top_color, wx.SOLID)
+ dc.SetBrush(brush)
+ dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
+ width_py -= RootFrame.EVENT_MARKING_WIDTH
+ offset_py += RootFrame.EVENT_MARKING_WIDTH
+
+ (r ,g, b) = color
+ color = wx.Colour(r, g, b)
+ brush = wx.Brush(color, wx.SOLID)
+ dc.SetBrush(brush)
+ dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
+
+ def update_rectangles(self, dc, start, end):
+ start += self.ts_start
+ end += self.ts_start
+ self.sched_tracer.fill_zone(start, end)
+
+ def on_paint(self, event):
+ dc = wx.PaintDC(self.scroll_panel)
+ self.dc = dc
+
+ width = min(self.width_virtual, self.screen_width)
+ (x, y) = self.scroll_start()
+ start = self.px_to_us(x)
+ end = self.px_to_us(x + width)
+ self.update_rectangles(dc, start, end)
+
+ def rect_from_ypixel(self, y):
+ y -= RootFrame.Y_OFFSET
+ rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
+ height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
+
+ if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
+ return -1
+
+ return rect
+
+ def update_summary(self, txt):
+ if self.txt:
+ self.txt.Destroy()
+ self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
+
+
+ def on_mouse_down(self, event):
+ (x, y) = event.GetPositionTuple()
+ rect = self.rect_from_ypixel(y)
+ if rect == -1:
+ return
+
+ t = self.px_to_us(x) + self.ts_start
+
+ self.sched_tracer.mouse_down(rect, t)
+
+
+ def update_width_virtual(self):
+ self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
+
+ def __zoom(self, x):
+ self.update_width_virtual()
+ (xpos, ypos) = self.scroll.GetViewStart()
+ xpos = self.us_to_px(x) / self.scroll_scale
+ self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
+ self.Refresh()
+
+ def zoom_in(self):
+ x = self.scroll_start_us()
+ self.zoom *= 2
+ self.__zoom(x)
+
+ def zoom_out(self):
+ x = self.scroll_start_us()
+ self.zoom /= 2
+ self.__zoom(x)
+
+
+ def on_key_press(self, event):
+ key = event.GetRawKeyCode()
+ if key == ord("+"):
+ self.zoom_in()
+ return
+ if key == ord("-"):
+ self.zoom_out()
+ return
+
+ key = event.GetKeyCode()
+ (x, y) = self.scroll.GetViewStart()
+ if key == wx.WXK_RIGHT:
+ self.scroll.Scroll(x + 1, y)
+ elif key == wx.WXK_LEFT:
+ self.scroll.Scroll(x - 1, y)
+ elif key == wx.WXK_DOWN:
+ self.scroll.Scroll(x, y + 1)
+ elif key == wx.WXK_UP:
+ self.scroll.Scroll(x, y - 1)
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
new file mode 100644
index 00000000..9689bc0a
--- /dev/null
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -0,0 +1,28 @@
+# Util.py - Python extension for perf trace, miscellaneous utility code
+#
+# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
+#
+# This software may be distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
+NSECS_PER_SEC = 1000000000
+
+def avg(total, n):
+ return total / n
+
+def nsecs(secs, nsecs):
+ return secs * NSECS_PER_SEC + nsecs
+
+def nsecs_secs(nsecs):
+ return nsecs / NSECS_PER_SEC
+
+def nsecs_nsecs(nsecs):
+ return nsecs % NSECS_PER_SEC
+
+def nsecs_str(nsecs):
+    str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
+ return str
+
+def clear_term():
+ print("\x1b[H\x1b[2J")
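+
+# For reference (illustrative values):
+#
+#   nsecs(1, 500)           == 1000000500
+#   nsecs_secs(1000000500)  == 1
+#   nsecs_nsecs(1000000500) == 500
+#   nsecs_str(1000000500)   == "    1.000000500"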
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
new file mode 100644
index 00000000..eb5846bc
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
new file mode 100644
index 00000000..30293545
--- /dev/null
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: system-wide failed syscalls, by pid
+# args: [comm]
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/python/failed-syscalls-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/sched-migration-record b/tools/perf/scripts/python/bin/sched-migration-record
new file mode 100644
index 00000000..17a3e9bd
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sched-migration-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -m 16384 -a -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
diff --git a/tools/perf/scripts/python/bin/sched-migration-report b/tools/perf/scripts/python/bin/sched-migration-report
new file mode 100644
index 00000000..61d05f72
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sched-migration-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: sched migration overview
+perf trace $@ -s ~/libexec/perf-core/scripts/python/sched-migration.py
diff --git a/tools/perf/scripts/python/bin/sctop-record b/tools/perf/scripts/python/bin/sctop-record
new file mode 100644
index 00000000..1fc5998b
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sctop-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/sctop-report b/tools/perf/scripts/python/bin/sctop-report
new file mode 100644
index 00000000..b01c842a
--- /dev/null
+++ b/tools/perf/scripts/python/bin/sctop-report
@@ -0,0 +1,24 @@
+#!/bin/bash
+# description: syscall top
+# args: [comm] [interval]
+n_args=0
+for i in "$@"
+do
+ if expr match "$i" "-" > /dev/null ; then
+ break
+ fi
+ n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 2 ] ; then
+ echo "usage: sctop-report [comm] [interval]"
+ exit
+fi
+if [ "$n_args" -gt 1 ] ; then
+ comm=$1
+ interval=$2
+ shift 2
+elif [ "$n_args" -gt 0 ] ; then
+ interval=$1
+ shift
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/python/sctop.py $comm $interval
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
new file mode 100644
index 00000000..1fc5998b
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-report b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
new file mode 100644
index 00000000..9e9d8ddd
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: system-wide syscall counts, by pid
+# args: [comm]
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts-by-pid.py $comm
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
new file mode 100644
index 00000000..1fc5998b
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -a -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-report b/tools/perf/scripts/python/bin/syscall-counts-report
new file mode 100644
index 00000000..dc076b61
--- /dev/null
+++ b/tools/perf/scripts/python/bin/syscall-counts-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# description: system-wide syscall counts
+# args: [comm]
+if [ $# -gt 0 ] ; then
+ if ! expr match "$1" "-" > /dev/null ; then
+ comm=$1
+ shift
+ fi
+fi
+perf trace $@ -s ~/libexec/perf-core/scripts/python/syscall-counts.py $comm
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
new file mode 100644
index 00000000..d9f7893e
--- /dev/null
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -0,0 +1,82 @@
+# perf trace event handlers, generated by perf trace -g python
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# This script tests basic functionality such as flag and symbol
+# strings, common_xxx() calls back into perf, begin, end, unhandled
+# events, etc. Basically, if this script runs successfully and
+# displays expected results, Python scripting support should be ok.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from Core import *
+from perf_trace_context import *
+
+unhandled = autodict()
+
+def trace_begin():
+ print "trace_begin"
+ pass
+
+def trace_end():
+ print_unhandled()
+
+def irq__softirq_entry(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ vec):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+
+ print_uncommon(context)
+
+ print "vec=%s\n" % \
+ (symbol_str("irq__softirq_entry", "vec", vec)),
+
+def kmem__kmalloc(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ call_site, ptr, bytes_req, bytes_alloc,
+ gfp_flags):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+
+ print_uncommon(context)
+
+ print "call_site=%u, ptr=%u, bytes_req=%u, " \
+ "bytes_alloc=%u, gfp_flags=%s\n" % \
+ (call_site, ptr, bytes_req, bytes_alloc,
+
+ flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
+
+def trace_unhandled(event_name, context, event_fields_dict):
+ try:
+ unhandled[event_name] += 1
+ except TypeError:
+ unhandled[event_name] = 1
+
+def print_header(event_name, cpu, secs, nsecs, pid, comm):
+ print "%-20s %5u %05u.%09u %8u %-20s " % \
+ (event_name, cpu, secs, nsecs, pid, comm),
+
+# print trace fields not included in handler args
+def print_uncommon(context):
+ print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
+ % (common_pc(context), trace_flag_str(common_flags(context)), \
+ common_lock_depth(context))
+
+def print_unhandled():
+ keys = unhandled.keys()
+ if not keys:
+ return
+
+ print "\nunhandled events:\n\n",
+
+ print "%-40s %10s\n" % ("event", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "-----------"),
+
+ for event_name in keys:
+ print "%-40s %10d\n" % (event_name, unhandled[event_name])
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
new file mode 100644
index 00000000..0ca02278
--- /dev/null
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -0,0 +1,68 @@
+# failed system call counts, by pid
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide failed system call totals, broken down by pid.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
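+#
+# Typical use (see the failed-syscalls-by-pid-record/-report wrappers):
+#
+#   perf record -a -e raw_syscalls:sys_exit
+#   perf trace -s failed-syscalls-by-pid.py [comm]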
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
+
+for_comm = None
+
+if len(sys.argv) > 2:
+ sys.exit(usage)
+
+if len(sys.argv) > 1:
+ for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+ pass
+
+def trace_end():
+ print_error_totals()
+
+def raw_syscalls__sys_exit(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, ret):
+ if for_comm is not None:
+ if common_comm != for_comm:
+ return
+
+ if ret < 0:
+ try:
+ syscalls[common_comm][common_pid][id][ret] += 1
+ except TypeError:
+ syscalls[common_comm][common_pid][id][ret] = 1
+
+def print_error_totals():
+ if for_comm is not None:
+ print "\nsyscall errors for %s:\n\n" % (for_comm),
+ else:
+ print "\nsyscall errors:\n\n",
+
+ print "%-30s %10s\n" % ("comm [pid]", "count"),
+ print "%-30s %10s\n" % ("------------------------------", \
+ "----------"),
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print "\n%s [%d]\n" % (comm, pid),
+ id_keys = syscalls[comm][pid].keys()
+ for id in id_keys:
+ print " syscall: %-16d\n" % (id),
+ ret_keys = syscalls[comm][pid][id].keys()
+ for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
+ print " err = %-20d %10d\n" % (ret, val),
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
new file mode 100644
index 00000000..b934383c
--- /dev/null
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -0,0 +1,461 @@
+#!/usr/bin/python
+#
+# Cpu task migration overview toy
+#
+# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
+#
+# perf trace event handlers have been generated by perf trace -g python
+#
+# This software is distributed under the terms of the GNU General
+# Public License ("GPL") version 2 as published by the Free Software
+# Foundation.
+
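+# Typical use (matching the sched-migration-record/-report wrappers):
+#
+#   perf record -m 16384 -a -e sched:sched_wakeup -e sched:sched_wakeup_new \
+#       -e sched:sched_switch -e sched:sched_migrate_task
+#   perf trace -s sched-migration.py   # needs wxPython (see SchedGui.py)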
+
+import os
+import sys
+
+from collections import defaultdict
+from UserList import UserList
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from SchedGui import *
+
+
+threads = { 0 : "idle"}
+
+def thread_name(pid):
+ return "%s:%d" % (threads[pid], pid)
+
+class RunqueueEventUnknown:
+ @staticmethod
+ def color():
+ return None
+
+ def __repr__(self):
+ return "unknown"
+
+class RunqueueEventSleep:
+ @staticmethod
+ def color():
+ return (0, 0, 0xff)
+
+ def __init__(self, sleeper):
+ self.sleeper = sleeper
+
+ def __repr__(self):
+ return "%s gone to sleep" % thread_name(self.sleeper)
+
+class RunqueueEventWakeup:
+ @staticmethod
+ def color():
+ return (0xff, 0xff, 0)
+
+ def __init__(self, wakee):
+ self.wakee = wakee
+
+ def __repr__(self):
+ return "%s woke up" % thread_name(self.wakee)
+
+class RunqueueEventFork:
+ @staticmethod
+ def color():
+ return (0, 0xff, 0)
+
+ def __init__(self, child):
+ self.child = child
+
+ def __repr__(self):
+ return "new forked task %s" % thread_name(self.child)
+
+class RunqueueMigrateIn:
+ @staticmethod
+ def color():
+ return (0, 0xf0, 0xff)
+
+ def __init__(self, new):
+ self.new = new
+
+ def __repr__(self):
+ return "task migrated in %s" % thread_name(self.new)
+
+class RunqueueMigrateOut:
+ @staticmethod
+ def color():
+ return (0xff, 0, 0xff)
+
+ def __init__(self, old):
+ self.old = old
+
+ def __repr__(self):
+ return "task migrated out %s" % thread_name(self.old)
+
+class RunqueueSnapshot:
+ def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
+ self.tasks = tuple(tasks)
+ self.event = event
+
+ def sched_switch(self, prev, prev_state, next):
+ event = RunqueueEventUnknown()
+
+ if taskState(prev_state) == "R" and next in self.tasks \
+ and prev in self.tasks:
+ return self
+
+ if taskState(prev_state) != "R":
+ event = RunqueueEventSleep(prev)
+
+ next_tasks = list(self.tasks[:])
+ if prev in self.tasks:
+ if taskState(prev_state) != "R":
+ next_tasks.remove(prev)
+ elif taskState(prev_state) == "R":
+ next_tasks.append(prev)
+
+ if next not in next_tasks:
+ next_tasks.append(next)
+
+ return RunqueueSnapshot(next_tasks, event)
+
+ def migrate_out(self, old):
+ if old not in self.tasks:
+ return self
+ next_tasks = [task for task in self.tasks if task != old]
+
+ return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
+
+ def __migrate_in(self, new, event):
+ if new in self.tasks:
+ self.event = event
+ return self
+ next_tasks = self.tasks[:] + tuple([new])
+
+ return RunqueueSnapshot(next_tasks, event)
+
+ def migrate_in(self, new):
+ return self.__migrate_in(new, RunqueueMigrateIn(new))
+
+ def wake_up(self, new):
+ return self.__migrate_in(new, RunqueueEventWakeup(new))
+
+ def wake_up_new(self, new):
+ return self.__migrate_in(new, RunqueueEventFork(new))
+
+ def load(self):
+ """ Provide the number of tasks on the runqueue.
+ Don't count idle"""
+ return len(self.tasks) - 1
+
+ def __repr__(self):
+ ret = self.tasks.__repr__()
+ ret += self.origin_tostring()
+
+ return ret
+
+class TimeSlice:
+ def __init__(self, start, prev):
+ self.start = start
+ self.prev = prev
+ self.end = start
+ # cpus that triggered the event
+ self.event_cpus = []
+ if prev is not None:
+ self.total_load = prev.total_load
+ self.rqs = prev.rqs.copy()
+ else:
+ self.rqs = defaultdict(RunqueueSnapshot)
+ self.total_load = 0
+
+ def __update_total_load(self, old_rq, new_rq):
+ diff = new_rq.load() - old_rq.load()
+ self.total_load += diff
+
+ def sched_switch(self, ts_list, prev, prev_state, next, cpu):
+ old_rq = self.prev.rqs[cpu]
+ new_rq = old_rq.sched_switch(prev, prev_state, next)
+
+ if old_rq is new_rq:
+ return
+
+ self.rqs[cpu] = new_rq
+ self.__update_total_load(old_rq, new_rq)
+ ts_list.append(self)
+ self.event_cpus = [cpu]
+
+ def migrate(self, ts_list, new, old_cpu, new_cpu):
+ if old_cpu == new_cpu:
+ return
+ old_rq = self.prev.rqs[old_cpu]
+ out_rq = old_rq.migrate_out(new)
+ self.rqs[old_cpu] = out_rq
+ self.__update_total_load(old_rq, out_rq)
+
+ new_rq = self.prev.rqs[new_cpu]
+ in_rq = new_rq.migrate_in(new)
+ self.rqs[new_cpu] = in_rq
+ self.__update_total_load(new_rq, in_rq)
+
+ ts_list.append(self)
+
+ if old_rq is not out_rq:
+ self.event_cpus.append(old_cpu)
+ self.event_cpus.append(new_cpu)
+
+ def wake_up(self, ts_list, pid, cpu, fork):
+ old_rq = self.prev.rqs[cpu]
+ if fork:
+ new_rq = old_rq.wake_up_new(pid)
+ else:
+ new_rq = old_rq.wake_up(pid)
+
+ if new_rq is old_rq:
+ return
+ self.rqs[cpu] = new_rq
+ self.__update_total_load(old_rq, new_rq)
+ ts_list.append(self)
+ self.event_cpus = [cpu]
+
+ def next(self, t):
+ self.end = t
+ return TimeSlice(t, self)
+
+class TimeSliceList(UserList):
+ def __init__(self, arg = []):
+ self.data = arg
+
+ def get_time_slice(self, ts):
+ if len(self.data) == 0:
+ slice = TimeSlice(ts, TimeSlice(-1, None))
+ else:
+ slice = self.data[-1].next(ts)
+ return slice
+
+ def find_time_slice(self, ts):
+ start = 0
+ end = len(self.data)
+ found = -1
+ searching = True
+ while searching:
+ if start == end or start == end - 1:
+ searching = False
+
+ i = (end + start) / 2
+ if self.data[i].start <= ts and self.data[i].end >= ts:
+ found = i
+ end = i
+ continue
+
+ if self.data[i].end < ts:
+ start = i
+
+ elif self.data[i].start > ts:
+ end = i
+
+ return found
+
+ def set_root_win(self, win):
+ self.root_win = win
+
+ def mouse_down(self, cpu, t):
+ idx = self.find_time_slice(t)
+ if idx == -1:
+ return
+
+ ts = self[idx]
+ rq = ts.rqs[cpu]
+ raw = "CPU: %d\n" % cpu
+ raw += "Last event : %s\n" % rq.event.__repr__()
+ raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
+ raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
+ raw += "Load = %d\n" % rq.load()
+ for t in rq.tasks:
+ raw += "%s \n" % thread_name(t)
+
+ self.root_win.update_summary(raw)
+
+ def update_rectangle_cpu(self, slice, cpu):
+ rq = slice.rqs[cpu]
+
+ if slice.total_load != 0:
+ load_rate = rq.load() / float(slice.total_load)
+ else:
+ load_rate = 0
+
+ red_power = int(0xff - (0xff * load_rate))
+ color = (0xff, red_power, red_power)
+
+ top_color = None
+
+ if cpu in slice.event_cpus:
+ top_color = rq.event.color()
+
+ self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
+
+ def fill_zone(self, start, end):
+ i = self.find_time_slice(start)
+ if i == -1:
+ return
+
+ for i in xrange(i, len(self.data)):
+ timeslice = self.data[i]
+ if timeslice.start > end:
+ return
+
+ for cpu in timeslice.rqs:
+ self.update_rectangle_cpu(timeslice, cpu)
+
+ def interval(self):
+ if len(self.data) == 0:
+ return (0, 0)
+
+ return (self.data[0].start, self.data[-1].end)
+
+ def nr_rectangles(self):
+ last_ts = self.data[-1]
+ max_cpu = 0
+ for cpu in last_ts.rqs:
+ if cpu > max_cpu:
+ max_cpu = cpu
+ return max_cpu
+
+
+class SchedEventProxy:
+ def __init__(self):
+ self.current_tsk = defaultdict(lambda : -1)
+ self.timeslices = TimeSliceList()
+
+ def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
+ next_comm, next_pid, next_prio):
+		""" Ensure the task we sched out of this cpu is really the one
+		we logged. Otherwise we may have missed traces """
+
+ on_cpu_task = self.current_tsk[headers.cpu]
+
+ if on_cpu_task != -1 and on_cpu_task != prev_pid:
+ print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
+ (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
+
+ threads[prev_pid] = prev_comm
+ threads[next_pid] = next_comm
+ self.current_tsk[headers.cpu] = next_pid
+
+ ts = self.timeslices.get_time_slice(headers.ts())
+ ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
+
+ def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
+ ts = self.timeslices.get_time_slice(headers.ts())
+ ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
+
+ def wake_up(self, headers, comm, pid, success, target_cpu, fork):
+ if success == 0:
+ return
+ ts = self.timeslices.get_time_slice(headers.ts())
+ ts.wake_up(self.timeslices, pid, target_cpu, fork)
+
+
+def trace_begin():
+ global parser
+ parser = SchedEventProxy()
+
+def trace_end():
+ app = wx.App(False)
+ timeslices = parser.timeslices
+ frame = RootFrame(timeslices, "Migration")
+ app.MainLoop()
+
+def sched__sched_stat_runtime(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid, runtime, vruntime):
+ pass
+
+def sched__sched_stat_iowait(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid, delay):
+ pass
+
+def sched__sched_stat_sleep(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid, delay):
+ pass
+
+def sched__sched_stat_wait(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid, delay):
+ pass
+
+def sched__sched_process_fork(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ parent_comm, parent_pid, child_comm, child_pid):
+ pass
+
+def sched__sched_process_wait(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid, prio):
+ pass
+
+def sched__sched_process_exit(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid, prio):
+ pass
+
+def sched__sched_process_free(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid, prio):
+ pass
+
+def sched__sched_migrate_task(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid, prio, orig_cpu,
+ dest_cpu):
+ headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+ parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
+
+def sched__sched_switch(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ prev_comm, prev_pid, prev_prio, prev_state,
+ next_comm, next_pid, next_prio):
+
+ headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+ parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
+ next_comm, next_pid, next_prio)
+
+def sched__sched_wakeup_new(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid, prio, success,
+ target_cpu):
+ headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+ parser.wake_up(headers, comm, pid, success, target_cpu, 1)
+
+def sched__sched_wakeup(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid, prio, success,
+ target_cpu):
+ headers = EventHeaders(common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
+ parser.wake_up(headers, comm, pid, success, target_cpu, 0)
+
+def sched__sched_wait_task(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid, prio):
+ pass
+
+def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ ret):
+ pass
+
+def sched__sched_kthread_stop(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ comm, pid):
+ pass
+
+def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm):
+ pass
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
new file mode 100644
index 00000000..6cafad40
--- /dev/null
+++ b/tools/perf/scripts/python/sctop.py
@@ -0,0 +1,78 @@
+# system call top
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Periodically displays system-wide system call totals, broken down by
+# syscall. If a [comm] arg is specified, only syscalls called by
+# [comm] are displayed. If an [interval] arg is specified, the display
+# will be refreshed every [interval] seconds. The default interval is
+# 3 seconds.
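+#
+# For example (events recorded as in sctop-record):
+#
+#   perf record -a -e raw_syscalls:sys_enter
+#   perf trace -s sctop.py 1          # refresh every second
+#   perf trace -s sctop.py httpd 5    # only httpd's syscalls, every 5s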
+
+import thread
+import time
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from Util import *
+
+usage = "perf trace -s syscall-counts.py [comm] [interval]\n";
+
+for_comm = None
+default_interval = 3
+interval = default_interval
+
+if len(sys.argv) > 3:
+ sys.exit(usage)
+
+if len(sys.argv) > 2:
+ for_comm = sys.argv[1]
+ interval = int(sys.argv[2])
+elif len(sys.argv) > 1:
+ try:
+ interval = int(sys.argv[1])
+ except ValueError:
+ for_comm = sys.argv[1]
+ interval = default_interval
+
+syscalls = autodict()
+
+def trace_begin():
+ thread.start_new_thread(print_syscall_totals, (interval,))
+ pass
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ if for_comm is not None:
+ if common_comm != for_comm:
+ return
+ try:
+ syscalls[id] += 1
+ except TypeError:
+ syscalls[id] = 1
+
+def print_syscall_totals(interval):
+ while 1:
+ clear_term()
+ if for_comm is not None:
+ print "\nsyscall events for %s:\n\n" % (for_comm),
+ else:
+ print "\nsyscall events:\n\n",
+
+ print "%-40s %10s\n" % ("event", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "----------"),
+
+ for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
+ reverse = True):
+ try:
+ print "%-40d %10d\n" % (id, val),
+ except TypeError:
+ pass
+ syscalls.clear()
+ time.sleep(interval)
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
new file mode 100644
index 00000000..af722d6a
--- /dev/null
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -0,0 +1,64 @@
+# system call counts, by pid
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide system call totals, broken down by syscall.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
+
+for_comm = None
+
+if len(sys.argv) > 2:
+ sys.exit(usage)
+
+if len(sys.argv) > 1:
+ for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+ pass
+
+def trace_end():
+ print_syscall_totals()
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ if for_comm is not None:
+ if common_comm != for_comm:
+ return
+ try:
+ syscalls[common_comm][common_pid][id] += 1
+ except TypeError:
+ syscalls[common_comm][common_pid][id] = 1
+
+def print_syscall_totals():
+ if for_comm is not None:
+ print "\nsyscall events for %s:\n\n" % (for_comm),
+ else:
+ print "\nsyscall events by comm/pid:\n\n",
+
+ print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "----------"),
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print "\n%s [%d]\n" % (comm, pid),
+ id_keys = syscalls[comm][pid].keys()
+ for id, val in sorted(syscalls[comm][pid].iteritems(), \
+ key = lambda(k, v): (v, k), reverse = True):
+ print " %-38d %10d\n" % (id, val),
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
new file mode 100644
index 00000000..f977e85f
--- /dev/null
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -0,0 +1,58 @@
+# system call counts
+# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
+# Licensed under the terms of the GNU GPL License version 2
+#
+# Displays system-wide system call totals, broken down by syscall.
+# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+
+import os
+import sys
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+usage = "perf trace -s syscall-counts.py [comm]\n";
+
+for_comm = None
+
+if len(sys.argv) > 2:
+ sys.exit(usage)
+
+if len(sys.argv) > 1:
+ for_comm = sys.argv[1]
+
+syscalls = autodict()
+
+def trace_begin():
+ pass
+
+def trace_end():
+ print_syscall_totals()
+
+def raw_syscalls__sys_enter(event_name, context, common_cpu,
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
+ if for_comm is not None:
+ if common_comm != for_comm:
+ return
+ try:
+ syscalls[id] += 1
+ except TypeError:
+ syscalls[id] = 1
+
+def print_syscall_totals():
+ if for_comm is not None:
+ print "\nsyscall events for %s:\n\n" % (for_comm),
+ else:
+ print "\nsyscall events:\n\n",
+
+ print "%-40s %10s\n" % ("event", "count"),
+ print "%-40s %10s\n" % ("----------------------------------------", \
+ "-----------"),
+
+ for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
+ reverse = True):
+ print "%-40d %10d\n" % (id, val),
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
new file mode 100755
index 00000000..97d76562
--- /dev/null
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -0,0 +1,47 @@
+#!/bin/sh
+
+if [ $# -eq 1 ] ; then
+ OUTPUT=$1
+fi
+
+GVF=${OUTPUT}PERF-VERSION-FILE
+
+LF='
+'
+
+# First check if there is a .git to get the version from git describe
+# otherwise try to get the version from the kernel makefile
+if test -d ../../.git -o -f ../../.git &&
+ VN=$(git describe --abbrev=4 HEAD 2>/dev/null) &&
+ case "$VN" in
+ *$LF*) (exit 1) ;;
+ v[0-9]*)
+ git update-index -q --refresh
+ test -z "$(git diff-index --name-only HEAD --)" ||
+ VN="$VN-dirty" ;;
+ esac
+then
+ VN=$(echo "$VN" | sed -e 's/-/./g');
+else
+ eval `grep '^VERSION\s*=' ../../Makefile|tr -d ' '`
+ eval `grep '^PATCHLEVEL\s*=' ../../Makefile|tr -d ' '`
+ eval `grep '^SUBLEVEL\s*=' ../../Makefile|tr -d ' '`
+ eval `grep '^EXTRAVERSION\s*=' ../../Makefile|tr -d ' '`
+
+ VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}"
+fi
+
+VN=$(expr "$VN" : v*'\(.*\)')
+
+if test -r $GVF
+then
+ VC=$(sed -e 's/^PERF_VERSION = //' <$GVF)
+else
+ VC=unset
+fi
+test "$VN" = "$VC" || {
+ echo >&2 "PERF_VERSION = $VN"
+ echo "PERF_VERSION = $VN" >$GVF
+}
+
+
diff --git a/tools/perf/util/abspath.c b/tools/perf/util/abspath.c
new file mode 100644
index 00000000..0e76affe
--- /dev/null
+++ b/tools/perf/util/abspath.c
@@ -0,0 +1,37 @@
+#include "cache.h"
+
+static const char *get_pwd_cwd(void)
+{
+ static char cwd[PATH_MAX + 1];
+ char *pwd;
+ struct stat cwd_stat, pwd_stat;
+ if (getcwd(cwd, PATH_MAX) == NULL)
+ return NULL;
+ pwd = getenv("PWD");
+ if (pwd && strcmp(pwd, cwd)) {
+ stat(cwd, &cwd_stat);
+ if (!stat(pwd, &pwd_stat) &&
+ pwd_stat.st_dev == cwd_stat.st_dev &&
+ pwd_stat.st_ino == cwd_stat.st_ino) {
+ strlcpy(cwd, pwd, PATH_MAX);
+ }
+ }
+ return cwd;
+}
+
+const char *make_nonrelative_path(const char *path)
+{
+ static char buf[PATH_MAX + 1];
+
+ if (is_absolute_path(path)) {
+ if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX)
+ die("Too long path: %.*s", 60, path);
+ } else {
+ const char *cwd = get_pwd_cwd();
+ if (!cwd)
+ die("Cannot determine the current working directory");
+ if (snprintf(buf, PATH_MAX, "%s/%s", cwd, path) >= PATH_MAX)
+ die("Too long path: %.*s", 60, path);
+ }
+ return buf;
+}
diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c
new file mode 100644
index 00000000..b8144e80
--- /dev/null
+++ b/tools/perf/util/alias.c
@@ -0,0 +1,77 @@
+#include "cache.h"
+
+static const char *alias_key;
+static char *alias_val;
+
+static int alias_lookup_cb(const char *k, const char *v, void *cb __used)
+{
+ if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) {
+ if (!v)
+ return config_error_nonbool(k);
+ alias_val = strdup(v);
+ return 0;
+ }
+ return 0;
+}
+
+char *alias_lookup(const char *alias)
+{
+ alias_key = alias;
+ alias_val = NULL;
+ perf_config(alias_lookup_cb, NULL);
+ return alias_val;
+}
+
+int split_cmdline(char *cmdline, const char ***argv)
+{
+ int src, dst, count = 0, size = 16;
+ char quoted = 0;
+
+ *argv = malloc(sizeof(char*) * size);
+
+ /* split alias_string */
+ (*argv)[count++] = cmdline;
+ for (src = dst = 0; cmdline[src];) {
+ char c = cmdline[src];
+ if (!quoted && isspace(c)) {
+ cmdline[dst++] = 0;
+ while (cmdline[++src]
+ && isspace(cmdline[src]))
+ ; /* skip */
+ if (count >= size) {
+ size += 16;
+ *argv = realloc(*argv, sizeof(char*) * size);
+ }
+ (*argv)[count++] = cmdline + dst;
+ } else if (!quoted && (c == '\'' || c == '"')) {
+ quoted = c;
+ src++;
+ } else if (c == quoted) {
+ quoted = 0;
+ src++;
+ } else {
+ if (c == '\\' && quoted != '\'') {
+ src++;
+ c = cmdline[src];
+ if (!c) {
+ free(*argv);
+ *argv = NULL;
+ return error("cmdline ends with \\");
+ }
+ }
+ cmdline[dst++] = c;
+ src++;
+ }
+ }
+
+ cmdline[dst] = 0;
+
+ if (quoted) {
+ free(*argv);
+ *argv = NULL;
+ return error("unclosed quote");
+ }
+
+ return count;
+}
+
diff --git a/tools/perf/util/bitmap.c b/tools/perf/util/bitmap.c
new file mode 100644
index 00000000..5e230aca
--- /dev/null
+++ b/tools/perf/util/bitmap.c
@@ -0,0 +1,21 @@
+/*
+ * From lib/bitmap.c
+ * Helper functions for bitmap.h.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <linux/bitmap.h>
+
+int __bitmap_weight(const unsigned long *bitmap, int bits)
+{
+ int k, w = 0, lim = bits/BITS_PER_LONG;
+
+ for (k = 0; k < lim; k++)
+ w += hweight_long(bitmap[k]);
+
+ if (bits % BITS_PER_LONG)
+ w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+
+ return w;
+}
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
new file mode 100644
index 00000000..e437edb7
--- /dev/null
+++ b/tools/perf/util/build-id.c
@@ -0,0 +1,77 @@
+/*
+ * build-id.c
+ *
+ * build-id support
+ *
+ * Copyright (C) 2009, 2010 Red Hat Inc.
+ * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+#include "util.h"
+#include <stdio.h>
+#include "build-id.h"
+#include "event.h"
+#include "symbol.h"
+#include <linux/kernel.h>
+#include "debug.h"
+
+static int build_id__mark_dso_hit(event_t *event, struct perf_session *session)
+{
+ struct addr_location al;
+ u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct thread *thread = perf_session__findnew(session, event->ip.pid);
+
+ if (thread == NULL) {
+ pr_err("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
+ event->ip.pid, event->ip.ip, &al);
+
+ if (al.map != NULL)
+ al.map->dso->hit = 1;
+
+ return 0;
+}
+
+static int event__exit_del_thread(event_t *self, struct perf_session *session)
+{
+ struct thread *thread = perf_session__findnew(session, self->fork.tid);
+
+ dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
+ self->fork.ppid, self->fork.ptid);
+
+ if (thread) {
+ rb_erase(&thread->rb_node, &session->threads);
+ session->last_match = NULL;
+ thread__delete(thread);
+ }
+
+ return 0;
+}
+
+struct perf_event_ops build_id__mark_dso_hit_ops = {
+ .sample = build_id__mark_dso_hit,
+ .mmap = event__process_mmap,
+ .fork = event__process_task,
+ .exit = event__exit_del_thread,
+};
+
+char *dso__build_id_filename(struct dso *self, char *bf, size_t size)
+{
+ char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+
+ if (!self->has_build_id)
+ return NULL;
+
+ build_id__sprintf(self->build_id, sizeof(self->build_id), build_id_hex);
+ if (bf == NULL) {
+ if (asprintf(&bf, "%s/.build-id/%.2s/%s", buildid_dir,
+ build_id_hex, build_id_hex + 2) < 0)
+ return NULL;
+ } else
+ snprintf(bf, size, "%s/.build-id/%.2s/%s", buildid_dir,
+ build_id_hex, build_id_hex + 2);
+ return bf;
+}
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
new file mode 100644
index 00000000..5dafb00e
--- /dev/null
+++ b/tools/perf/util/build-id.h
@@ -0,0 +1,10 @@
+#ifndef PERF_BUILD_ID_H_
+#define PERF_BUILD_ID_H_ 1
+
+#include "session.h"
+
+extern struct perf_event_ops build_id__mark_dso_hit_ops;
+
+char *dso__build_id_filename(struct dso *self, char *bf, size_t size);
+
+#endif
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
new file mode 100644
index 00000000..27e9ebe4
--- /dev/null
+++ b/tools/perf/util/cache.h
@@ -0,0 +1,87 @@
+#ifndef __PERF_CACHE_H
+#define __PERF_CACHE_H
+
+#include <stdbool.h>
+#include "util.h"
+#include "strbuf.h"
+#include "../perf.h"
+
+#define CMD_EXEC_PATH "--exec-path"
+#define CMD_PERF_DIR "--perf-dir="
+#define CMD_WORK_TREE "--work-tree="
+#define CMD_DEBUGFS_DIR "--debugfs-dir="
+
+#define PERF_DIR_ENVIRONMENT "PERF_DIR"
+#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE"
+#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH"
+#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf"
+#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
+
+typedef int (*config_fn_t)(const char *, const char *, void *);
+extern int perf_default_config(const char *, const char *, void *);
+extern int perf_config(config_fn_t fn, void *);
+extern int perf_config_int(const char *, const char *);
+extern int perf_config_bool(const char *, const char *);
+extern int config_error_nonbool(const char *);
+extern const char *perf_config_dirname(const char *, const char *);
+
+/* pager.c */
+extern void setup_pager(void);
+extern const char *pager_program;
+extern int pager_in_use(void);
+extern int pager_use_color;
+
+extern int use_browser;
+
+#ifdef NO_NEWT_SUPPORT
+static inline void setup_browser(void)
+{
+ setup_pager();
+}
+static inline void exit_browser(bool wait_for_ok __used) {}
+#else
+void setup_browser(void);
+void exit_browser(bool wait_for_ok);
+#endif
+
+char *alias_lookup(const char *alias);
+int split_cmdline(char *cmdline, const char ***argv);
+
+#define alloc_nr(x) (((x)+16)*3/2)
+
+/*
+ * Realloc the buffer pointed at by variable 'x' so that it can hold
+ * at least 'nr' entries; the number of entries currently allocated
+ * is 'alloc', using the standard growing factor alloc_nr() macro.
+ *
+ * DO NOT USE any expression with side-effect for 'x' or 'alloc'.
+ */
+#define ALLOC_GROW(x, nr, alloc) \
+ do { \
+ if ((nr) > alloc) { \
+ if (alloc_nr(alloc) < (nr)) \
+ alloc = (nr); \
+ else \
+ alloc = alloc_nr(alloc); \
+ x = xrealloc((x), alloc * sizeof(*(x))); \
+ } \
+ } while(0)
+
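+/*
+ * Illustrative caller (hypothetical 'items' array):
+ *
+ *	struct item *items = NULL;
+ *	int nr = 0, alloc = 0;
+ *	...
+ *	ALLOC_GROW(items, nr + 1, alloc);
+ *	items[nr++] = new_item;
+ */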
+
+static inline int is_absolute_path(const char *path)
+{
+ return path[0] == '/';
+}
+
+const char *make_nonrelative_path(const char *path);
+char *strip_path_suffix(const char *path, const char *suffix);
+
+extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
+extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
+
+extern char *perf_pathdup(const char *fmt, ...)
+ __attribute__((format (printf, 1, 2)));
+
+extern size_t strlcpy(char *dest, const char *src, size_t size);
+
+#endif /* __PERF_CACHE_H */
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
new file mode 100644
index 00000000..f231f434
--- /dev/null
+++ b/tools/perf/util/callchain.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2009-2010, Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ * Handle the callchains from the stream in an ad-hoc radix tree and then
+ * sort them in an rbtree.
+ *
+ * Using a radix tree for the code paths provides fast retrieval and
+ * factorizes memory use. It also lets us use the paths in a hierarchical
+ * graph view.
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <math.h>
+
+#include "util.h"
+#include "callchain.h"
+
+bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
+{
+ unsigned int chain_size = event->header.size;
+ chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
+ return chain->nr * sizeof(u64) <= chain_size;
+}
+
+#define chain_for_each_child(child, parent) \
+ list_for_each_entry(child, &parent->children, brothers)
+
+static void
+rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
+ enum chain_mode mode)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct callchain_node *rnode;
+ u64 chain_cumul = cumul_hits(chain);
+
+ while (*p) {
+ u64 rnode_cumul;
+
+ parent = *p;
+ rnode = rb_entry(parent, struct callchain_node, rb_node);
+ rnode_cumul = cumul_hits(rnode);
+
+ switch (mode) {
+ case CHAIN_FLAT:
+ if (rnode->hit < chain->hit)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ break;
+ case CHAIN_GRAPH_ABS: /* Falldown */
+ case CHAIN_GRAPH_REL:
+ if (rnode_cumul < chain_cumul)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ break;
+ case CHAIN_NONE:
+ default:
+ break;
+ }
+ }
+
+ rb_link_node(&chain->rb_node, parent, p);
+ rb_insert_color(&chain->rb_node, root);
+}
+
+static void
+__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
+ u64 min_hit)
+{
+ struct callchain_node *child;
+
+ chain_for_each_child(child, node)
+ __sort_chain_flat(rb_root, child, min_hit);
+
+ if (node->hit && node->hit >= min_hit)
+ rb_insert_callchain(rb_root, node, CHAIN_FLAT);
+}
+
+/*
+ * Once we get all the callchains from the stream, we can now
+ * sort them by hit
+ */
+static void
+sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
+ u64 min_hit, struct callchain_param *param __used)
+{
+ __sort_chain_flat(rb_root, node, min_hit);
+}
+
+static void __sort_chain_graph_abs(struct callchain_node *node,
+ u64 min_hit)
+{
+ struct callchain_node *child;
+
+ node->rb_root = RB_ROOT;
+
+ chain_for_each_child(child, node) {
+ __sort_chain_graph_abs(child, min_hit);
+ if (cumul_hits(child) >= min_hit)
+ rb_insert_callchain(&node->rb_root, child,
+ CHAIN_GRAPH_ABS);
+ }
+}
+
+static void
+sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_node *chain_root,
+ u64 min_hit, struct callchain_param *param __used)
+{
+ __sort_chain_graph_abs(chain_root, min_hit);
+ rb_root->rb_node = chain_root->rb_root.rb_node;
+}
+
+static void __sort_chain_graph_rel(struct callchain_node *node,
+ double min_percent)
+{
+ struct callchain_node *child;
+ u64 min_hit;
+
+ node->rb_root = RB_ROOT;
+ min_hit = ceil(node->children_hit * min_percent);
+
+ chain_for_each_child(child, node) {
+ __sort_chain_graph_rel(child, min_percent);
+ if (cumul_hits(child) >= min_hit)
+ rb_insert_callchain(&node->rb_root, child,
+ CHAIN_GRAPH_REL);
+ }
+}
+
+static void
+sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_node *chain_root,
+ u64 min_hit __used, struct callchain_param *param)
+{
+ __sort_chain_graph_rel(chain_root, param->min_percent / 100.0);
+ rb_root->rb_node = chain_root->rb_root.rb_node;
+}
+
+int register_callchain_param(struct callchain_param *param)
+{
+ switch (param->mode) {
+ case CHAIN_GRAPH_ABS:
+ param->sort = sort_chain_graph_abs;
+ break;
+ case CHAIN_GRAPH_REL:
+ param->sort = sort_chain_graph_rel;
+ break;
+ case CHAIN_FLAT:
+ param->sort = sort_chain_flat;
+ break;
+ case CHAIN_NONE:
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Create a child for a parent. If inherit_children is set, the new child
+ * becomes the new parent of its parent's children.
+ */
+static struct callchain_node *
+create_child(struct callchain_node *parent, bool inherit_children)
+{
+ struct callchain_node *new;
+
+ new = zalloc(sizeof(*new));
+ if (!new) {
+ perror("not enough memory to create child for code path tree");
+ return NULL;
+ }
+ new->parent = parent;
+ INIT_LIST_HEAD(&new->children);
+ INIT_LIST_HEAD(&new->val);
+
+ if (inherit_children) {
+ struct callchain_node *next;
+
+ list_splice(&parent->children, &new->children);
+ INIT_LIST_HEAD(&parent->children);
+
+ chain_for_each_child(next, new)
+ next->parent = new;
+ }
+ list_add_tail(&new->brothers, &parent->children);
+
+ return new;
+}
+
+
+struct resolved_ip {
+ u64 ip;
+ struct map_symbol ms;
+};
+
+struct resolved_chain {
+ u64 nr;
+ struct resolved_ip ips[0];
+};
+
+
+/*
+ * Fill the node with callchain values
+ */
+static void
+fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
+{
+ unsigned int i;
+
+ for (i = start; i < chain->nr; i++) {
+ struct callchain_list *call;
+
+ call = zalloc(sizeof(*call));
+ if (!call) {
+ perror("not enough memory for the code path tree");
+ return;
+ }
+ call->ip = chain->ips[i].ip;
+ call->ms = chain->ips[i].ms;
+ list_add_tail(&call->list, &node->val);
+ }
+ node->val_nr = chain->nr - start;
+ if (!node->val_nr)
+ pr_warning("Warning: empty node in callchain tree\n");
+}
+
+static void
+add_child(struct callchain_node *parent, struct resolved_chain *chain,
+ int start, u64 period)
+{
+ struct callchain_node *new;
+
+ new = create_child(parent, false);
+ fill_node(new, chain, start);
+
+ new->children_hit = 0;
+ new->hit = period;
+}
+
+/*
+ * Split the parent in two parts (a new child is created) and
+ * give part of its callchain to the created child.
+ * Then create another child to host the given callchain of the new branch.
+ */
+static void
+split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
+ struct callchain_list *to_split, int idx_parents, int idx_local,
+ u64 period)
+{
+ struct callchain_node *new;
+ struct list_head *old_tail;
+ unsigned int idx_total = idx_parents + idx_local;
+
+ /* split */
+ new = create_child(parent, true);
+
+ /* split the callchain and move a part to the new child */
+ old_tail = parent->val.prev;
+ list_del_range(&to_split->list, old_tail);
+ new->val.next = &to_split->list;
+ new->val.prev = old_tail;
+ to_split->list.prev = &new->val;
+ old_tail->next = &new->val;
+
+ /* split the hits */
+ new->hit = parent->hit;
+ new->children_hit = parent->children_hit;
+ parent->children_hit = cumul_hits(new);
+ new->val_nr = parent->val_nr - idx_local;
+ parent->val_nr = idx_local;
+
+ /* create a new child for the new branch if any */
+ if (idx_total < chain->nr) {
+ parent->hit = 0;
+ add_child(parent, chain, idx_total, period);
+ parent->children_hit += period;
+ } else {
+ parent->hit = period;
+ }
+}
+
+static int
+__append_chain(struct callchain_node *root, struct resolved_chain *chain,
+ unsigned int start, u64 period);
+
+static void
+__append_chain_children(struct callchain_node *root,
+ struct resolved_chain *chain,
+ unsigned int start, u64 period)
+{
+ struct callchain_node *rnode;
+
+ /* look up in the children */
+ chain_for_each_child(rnode, root) {
+ unsigned int ret = __append_chain(rnode, chain, start, period);
+
+ if (!ret)
+ goto inc_children_hit;
+ }
+ /* nothing in children, add to the current node */
+ add_child(root, chain, start, period);
+
+inc_children_hit:
+ root->children_hit += period;
+}
+
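+/*
+ * Try to append the chain, starting at index "start", under this node.
+ * Returns 0 if it was absorbed here or below (possibly splitting the
+ * node), -1 if this node does not match at all so the caller tries a
+ * sibling or creates a new child.
+ */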
+static int
+__append_chain(struct callchain_node *root, struct resolved_chain *chain,
+ unsigned int start, u64 period)
+{
+ struct callchain_list *cnode;
+ unsigned int i = start;
+ bool found = false;
+
+ /*
+ * Look up in the current node.
+ * If we have a symbol, compare against its start address so that
+ * a hit anywhere inside the same function matches.
+ */
+ list_for_each_entry(cnode, &root->val, list) {
+ struct symbol *sym;
+
+ if (i == chain->nr)
+ break;
+
+ sym = chain->ips[i].ms.sym;
+
+ if (cnode->ms.sym && sym) {
+ if (cnode->ms.sym->start != sym->start)
+ break;
+ } else if (cnode->ip != chain->ips[i].ip)
+ break;
+
+ if (!found)
+ found = true;
+ i++;
+ }
+
+ /* nothing matched: let the caller (our parent) handle it */
+ if (!found)
+ return -1;
+
+ /* we match only a part of the node. Split it and add the new chain */
+ if (i - start < root->val_nr) {
+ split_add_child(root, chain, cnode, start, i - start, period);
+ return 0;
+ }
+
+ /* we match 100% of the path, increment the hit */
+ if (i - start == root->val_nr && i == chain->nr) {
+ root->hit += period;
+ return 0;
+ }
+
+ /* We match the node and still have a part remaining */
+ __append_chain_children(root, chain, i, period);
+
+ return 0;
+}
+
+static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
+ struct map_symbol *syms)
+{
+ int i, j = 0;
+
+ for (i = 0; i < (int)old->nr; i++) {
+ if (old->ips[i] >= PERF_CONTEXT_MAX)
+ continue;
+
+ new->ips[j].ip = old->ips[i];
+ new->ips[j].ms = syms[i];
+ j++;
+ }
+
+ new->nr = j;
+}
+
+
+int append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ struct map_symbol *syms, u64 period)
+{
+ struct resolved_chain *filtered;
+
+ if (!chain->nr)
+ return 0;
+
+ filtered = zalloc(sizeof(*filtered) +
+ chain->nr * sizeof(struct resolved_ip));
+ if (!filtered)
+ return -ENOMEM;
+
+ filter_context(chain, filtered, syms);
+
+ if (!filtered->nr)
+ goto end;
+
+ __append_chain_children(root, filtered, 0, period);
+end:
+ free(filtered);
+
+ return 0;
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
new file mode 100644
index 00000000..6de43139
--- /dev/null
+++ b/tools/perf/util/callchain.h
@@ -0,0 +1,68 @@
+#ifndef __PERF_CALLCHAIN_H
+#define __PERF_CALLCHAIN_H
+
+#include "../perf.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include "event.h"
+#include "symbol.h"
+
+enum chain_mode {
+ CHAIN_NONE,
+ CHAIN_FLAT,
+ CHAIN_GRAPH_ABS,
+ CHAIN_GRAPH_REL
+};
+
+struct callchain_node {
+ struct callchain_node *parent;
+ struct list_head brothers;
+ struct list_head children;
+ struct list_head val;
+ struct rb_node rb_node; /* to sort nodes in an rbtree */
+ struct rb_root rb_root; /* sorted tree of children */
+ unsigned int val_nr;
+ u64 hit;
+ u64 children_hit;
+};
+
+struct callchain_param;
+
+typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_node *,
+ u64, struct callchain_param *);
+
+struct callchain_param {
+ enum chain_mode mode;
+ u32 print_limit;
+ double min_percent;
+ sort_chain_func_t sort;
+};
+
+struct callchain_list {
+ u64 ip;
+ struct map_symbol ms;
+ struct list_head list;
+};
+
+static inline void callchain_init(struct callchain_node *node)
+{
+ INIT_LIST_HEAD(&node->brothers);
+ INIT_LIST_HEAD(&node->children);
+ INIT_LIST_HEAD(&node->val);
+
+ node->children_hit = 0;
+ node->parent = NULL;
+ node->hit = 0;
+}
+
+static inline u64 cumul_hits(struct callchain_node *node)
+{
+ return node->hit + node->children_hit;
+}
+
+int register_callchain_param(struct callchain_param *param);
+int append_chain(struct callchain_node *root, struct ip_callchain *chain,
+ struct map_symbol *syms, u64 period);
+
+bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event);
+#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
new file mode 100644
index 00000000..e191eb9a
--- /dev/null
+++ b/tools/perf/util/color.c
@@ -0,0 +1,324 @@
+#include "cache.h"
+#include "color.h"
+
+int perf_use_color_default = -1;
+
+static int parse_color(const char *name, int len)
+{
+ static const char * const color_names[] = {
+ "normal", "black", "red", "green", "yellow",
+ "blue", "magenta", "cyan", "white"
+ };
+ char *end;
+ int i;
+
+ for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) {
+ const char *str = color_names[i];
+ if (!strncasecmp(name, str, len) && !str[len])
+ return i - 1;
+ }
+ i = strtol(name, &end, 10);
+ if (end - name == len && i >= -1 && i <= 255)
+ return i;
+ return -2;
+}
+
+static int parse_attr(const char *name, int len)
+{
+ static const int attr_values[] = { 1, 2, 4, 5, 7 };
+ static const char * const attr_names[] = {
+ "bold", "dim", "ul", "blink", "reverse"
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
+ const char *str = attr_names[i];
+ if (!strncasecmp(name, str, len) && !str[len])
+ return attr_values[i];
+ }
+ return -1;
+}
+
+void color_parse(const char *value, const char *var, char *dst)
+{
+ color_parse_mem(value, strlen(value), var, dst);
+}
+
+void color_parse_mem(const char *value, int value_len, const char *var,
+ char *dst)
+{
+ const char *ptr = value;
+ int len = value_len;
+ int attr = -1;
+ int fg = -2;
+ int bg = -2;
+
+ if (!strncasecmp(value, "reset", len)) {
+ strcpy(dst, PERF_COLOR_RESET);
+ return;
+ }
+
+ /* [fg [bg]] [attr] */
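+ /* e.g. "bold red blue" (illustrative) yields "\033[1;31;44m" */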
+ while (len > 0) {
+ const char *word = ptr;
+ int val, wordlen = 0;
+
+ while (len > 0 && !isspace(word[wordlen])) {
+ wordlen++;
+ len--;
+ }
+
+ ptr = word + wordlen;
+ while (len > 0 && isspace(*ptr)) {
+ ptr++;
+ len--;
+ }
+
+ val = parse_color(word, wordlen);
+ if (val >= -1) {
+ if (fg == -2) {
+ fg = val;
+ continue;
+ }
+ if (bg == -2) {
+ bg = val;
+ continue;
+ }
+ goto bad;
+ }
+ val = parse_attr(word, wordlen);
+ if (val < 0 || attr != -1)
+ goto bad;
+ attr = val;
+ }
+
+ if (attr >= 0 || fg >= 0 || bg >= 0) {
+ int sep = 0;
+
+ *dst++ = '\033';
+ *dst++ = '[';
+ if (attr >= 0) {
+ *dst++ = '0' + attr;
+ sep++;
+ }
+ if (fg >= 0) {
+ if (sep++)
+ *dst++ = ';';
+ if (fg < 8) {
+ *dst++ = '3';
+ *dst++ = '0' + fg;
+ } else {
+ dst += sprintf(dst, "38;5;%d", fg);
+ }
+ }
+ if (bg >= 0) {
+ if (sep++)
+ *dst++ = ';';
+ if (bg < 8) {
+ *dst++ = '4';
+ *dst++ = '0' + bg;
+ } else {
+ dst += sprintf(dst, "48;5;%d", bg);
+ }
+ }
+ *dst++ = 'm';
+ }
+ *dst = 0;
+ return;
+bad:
+ die("bad color value '%.*s' for variable '%s'", value_len, value, var);
+}
+
+int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
+{
+ if (value) {
+ if (!strcasecmp(value, "never"))
+ return 0;
+ if (!strcasecmp(value, "always"))
+ return 1;
+ if (!strcasecmp(value, "auto"))
+ goto auto_color;
+ }
+
+ /* Missing or explicit false to turn off colorization */
+ if (!perf_config_bool(var, value))
+ return 0;
+
+ /* any normal truth value defaults to 'auto' */
+ auto_color:
+ if (stdout_is_tty < 0)
+ stdout_is_tty = isatty(1);
+ if (stdout_is_tty || (pager_in_use() && pager_use_color)) {
+ char *term = getenv("TERM");
+ if (term && strcmp(term, "dumb"))
+ return 1;
+ }
+ return 0;
+}
+
+int perf_color_default_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "color.ui")) {
+ perf_use_color_default = perf_config_colorbool(var, value, -1);
+ return 0;
+ }
+
+ return perf_default_config(var, value, cb);
+}
+
+static int __color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args, const char *trail)
+{
+ int r = 0;
+
+ /*
+ * Auto-detect:
+ */
+ if (perf_use_color_default < 0) {
+ if (isatty(1) || pager_in_use())
+ perf_use_color_default = 1;
+ else
+ perf_use_color_default = 0;
+ }
+
+ if (perf_use_color_default && *color)
+ r += snprintf(bf, size, "%s", color);
+ r += vsnprintf(bf + r, size - r, fmt, args);
+ if (perf_use_color_default && *color)
+ r += snprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
+ if (trail)
+ r += snprintf(bf + r, size - r, "%s", trail);
+ return r;
+}
+
+static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
+ va_list args, const char *trail)
+{
+ int r = 0;
+
+ /*
+ * Auto-detect:
+ */
+ if (perf_use_color_default < 0) {
+ if (isatty(1) || pager_in_use())
+ perf_use_color_default = 1;
+ else
+ perf_use_color_default = 0;
+ }
+
+ if (perf_use_color_default && *color)
+ r += fprintf(fp, "%s", color);
+ r += vfprintf(fp, fmt, args);
+ if (perf_use_color_default && *color)
+ r += fprintf(fp, "%s", PERF_COLOR_RESET);
+ if (trail)
+ r += fprintf(fp, "%s", trail);
+ return r;
+}
+
+int color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args)
+{
+ return __color_vsnprintf(bf, size, color, fmt, args, NULL);
+}
+
+int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args)
+{
+ return __color_vfprintf(fp, color, fmt, args, NULL);
+}
+
+int color_snprintf(char *bf, size_t size, const char *color,
+ const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+ r = color_vsnprintf(bf, size, color, fmt, args);
+ va_end(args);
+ return r;
+}
+
+int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+ r = color_vfprintf(fp, color, fmt, args);
+ va_end(args);
+ return r;
+}
+
+int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...)
+{
+ va_list args;
+ int r;
+ va_start(args, fmt);
+ r = __color_vfprintf(fp, color, fmt, args, "\n");
+ va_end(args);
+ return r;
+}
+
+/*
+ * This function splits the buffer by newlines and colors the lines individually.
+ *
+ * Returns 0 on success.
+ */
+int color_fwrite_lines(FILE *fp, const char *color,
+ size_t count, const char *buf)
+{
+ if (!*color)
+ return fwrite(buf, count, 1, fp) != 1;
+
+ while (count) {
+ char *p = memchr(buf, '\n', count);
+
+ if (p != buf && (fputs(color, fp) < 0 ||
+ fwrite(buf, p ? (size_t)(p - buf) : count, 1, fp) != 1 ||
+ fputs(PERF_COLOR_RESET, fp) < 0))
+ return -1;
+ if (!p)
+ return 0;
+ if (fputc('\n', fp) < 0)
+ return -1;
+ count -= p + 1 - buf;
+ buf = p + 1;
+ }
+ return 0;
+}
+
+const char *get_percent_color(double percent)
+{
+ const char *color = PERF_COLOR_NORMAL;
+
+ /*
+ * We color high-overhead entries (>= MIN_RED, i.e. 5.0%) in red,
+ * mid-overhead entries (> MIN_GREEN, i.e. 0.5%) in green, and keep
+ * low-overhead entries normal:
+ */
+ if (percent >= MIN_RED)
+ color = PERF_COLOR_RED;
+ else {
+ if (percent > MIN_GREEN)
+ color = PERF_COLOR_GREEN;
+ }
+ return color;
+}
+
+int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
+{
+ int r;
+ const char *color;
+
+ color = get_percent_color(percent);
+ r = color_fprintf(fp, color, fmt, percent);
+
+ return r;
+}
+
+int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent)
+{
+ const char *color = get_percent_color(percent);
+ return color_snprintf(bf, size, color, fmt, percent);
+}
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
new file mode 100644
index 00000000..dea082b7
--- /dev/null
+++ b/tools/perf/util/color.h
@@ -0,0 +1,46 @@
+#ifndef __PERF_COLOR_H
+#define __PERF_COLOR_H
+
+/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */
+#define COLOR_MAXLEN 24
+
+#define PERF_COLOR_NORMAL ""
+#define PERF_COLOR_RESET "\033[m"
+#define PERF_COLOR_BOLD "\033[1m"
+#define PERF_COLOR_RED "\033[31m"
+#define PERF_COLOR_GREEN "\033[32m"
+#define PERF_COLOR_YELLOW "\033[33m"
+#define PERF_COLOR_BLUE "\033[34m"
+#define PERF_COLOR_MAGENTA "\033[35m"
+#define PERF_COLOR_CYAN "\033[36m"
+#define PERF_COLOR_BG_RED "\033[41m"
+
+#define MIN_GREEN 0.5
+#define MIN_RED 5.0
+
+/*
+ * This variable stores the value of color.ui
+ */
+extern int perf_use_color_default;
+
+
+/*
+ * Use this instead of perf_default_config if you need the value of color.ui.
+ */
+int perf_color_default_config(const char *var, const char *value, void *cb);
+
+int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
+void color_parse(const char *value, const char *var, char *dst);
+void color_parse_mem(const char *value, int len, const char *var, char *dst);
+int color_vsnprintf(char *bf, size_t size, const char *color,
+ const char *fmt, va_list args);
+int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
+int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
+int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...);
+int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
+int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
+int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent);
+int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
+const char *get_percent_color(double percent);
+
+#endif /* __PERF_COLOR_H */
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
new file mode 100644
index 00000000..e02d78ca
--- /dev/null
+++ b/tools/perf/util/config.c
@@ -0,0 +1,492 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ * Copyright (C) Johannes Schindelin, 2005
+ *
+ */
+#include "util.h"
+#include "cache.h"
+#include "exec_cmd.h"
+
+#define MAXNAME (256)
+
+#define DEBUG_CACHE_DIR ".debug"
+
+
+char buildid_dir[MAXPATHLEN]; /* root dir for buildid, binary cache */
+
+static FILE *config_file;
+static const char *config_file_name;
+static int config_linenr;
+static int config_file_eof;
+
+static const char *config_exclusive_filename;
+
+static int get_next_char(void)
+{
+ int c;
+ FILE *f;
+
+ c = '\n';
+ if ((f = config_file) != NULL) {
+ c = fgetc(f);
+ if (c == '\r') {
+ /* DOS like systems */
+ c = fgetc(f);
+ if (c != '\n') {
+ ungetc(c, f);
+ c = '\r';
+ }
+ }
+ if (c == '\n')
+ config_linenr++;
+ if (c == EOF) {
+ config_file_eof = 1;
+ c = '\n';
+ }
+ }
+ return c;
+}
+
+static char *parse_value(void)
+{
+ static char value[1024];
+ int quote = 0, comment = 0, space = 0;
+ size_t len = 0;
+
+ for (;;) {
+ int c = get_next_char();
+
+ if (len >= sizeof(value) - 1)
+ return NULL;
+ if (c == '\n') {
+ if (quote)
+ return NULL;
+ value[len] = 0;
+ return value;
+ }
+ if (comment)
+ continue;
+ if (isspace(c) && !quote) {
+ space = 1;
+ continue;
+ }
+ if (!quote) {
+ if (c == ';' || c == '#') {
+ comment = 1;
+ continue;
+ }
+ }
+ if (space) {
+ if (len)
+ value[len++] = ' ';
+ space = 0;
+ }
+ if (c == '\\') {
+ c = get_next_char();
+ switch (c) {
+ case '\n':
+ continue;
+ case 't':
+ c = '\t';
+ break;
+ case 'b':
+ c = '\b';
+ break;
+ case 'n':
+ c = '\n';
+ break;
+ /* Some characters escape as themselves */
+ case '\\': case '"':
+ break;
+ /* Reject unknown escape sequences */
+ default:
+ return NULL;
+ }
+ value[len++] = c;
+ continue;
+ }
+ if (c == '"') {
+ quote = 1-quote;
+ continue;
+ }
+ value[len++] = c;
+ }
+}
+
+static inline int iskeychar(int c)
+{
+ return isalnum(c) || c == '-';
+}
+
+static int get_value(config_fn_t fn, void *data, char *name, unsigned int len)
+{
+ int c;
+ char *value;
+
+ /* Get the full name */
+ for (;;) {
+ c = get_next_char();
+ if (config_file_eof)
+ break;
+ if (!iskeychar(c))
+ break;
+ name[len++] = c;
+ if (len >= MAXNAME)
+ return -1;
+ }
+ name[len] = 0;
+ while (c == ' ' || c == '\t')
+ c = get_next_char();
+
+ value = NULL;
+ if (c != '\n') {
+ if (c != '=')
+ return -1;
+ value = parse_value();
+ if (!value)
+ return -1;
+ }
+ return fn(name, value, data);
+}
+
+static int get_extended_base_var(char *name, int baselen, int c)
+{
+ do {
+ if (c == '\n')
+ return -1;
+ c = get_next_char();
+ } while (isspace(c));
+
+ /* We require the format to be '[base "extension"]' */
+ if (c != '"')
+ return -1;
+ name[baselen++] = '.';
+
+ for (;;) {
+ int ch = get_next_char();
+
+ if (ch == '\n')
+ return -1;
+ if (ch == '"')
+ break;
+ if (ch == '\\') {
+ ch = get_next_char();
+ if (ch == '\n')
+ return -1;
+ }
+ name[baselen++] = ch;
+ if (baselen > MAXNAME / 2)
+ return -1;
+ }
+
+ /* Final ']' */
+ if (get_next_char() != ']')
+ return -1;
+ return baselen;
+}
+
+static int get_base_var(char *name)
+{
+ int baselen = 0;
+
+ for (;;) {
+ int c = get_next_char();
+ if (config_file_eof)
+ return -1;
+ if (c == ']')
+ return baselen;
+ if (isspace(c))
+ return get_extended_base_var(name, baselen, c);
+ if (!iskeychar(c) && c != '.')
+ return -1;
+ if (baselen > MAXNAME / 2)
+ return -1;
+ name[baselen++] = tolower(c);
+ }
+}
+
+static int perf_parse_file(config_fn_t fn, void *data)
+{
+ int comment = 0;
+ int baselen = 0;
+ static char var[MAXNAME];
+
+ /* U+FEFF Byte Order Mark in UTF8 */
+ static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf";
+ const unsigned char *bomptr = utf8_bom;
+
+ for (;;) {
+ int c = get_next_char();
+ if (bomptr && *bomptr) {
+ /* We are at the file beginning; skip UTF8-encoded BOM
+ * if present. Sane editors won't put this in on their
+ * own, but e.g. Windows Notepad will do it happily. */
+ if ((unsigned char) c == *bomptr) {
+ bomptr++;
+ continue;
+ } else {
+ /* Do not tolerate partial BOM. */
+ if (bomptr != utf8_bom)
+ break;
+ /* No BOM at file beginning. Cool. */
+ bomptr = NULL;
+ }
+ }
+ if (c == '\n') {
+ if (config_file_eof)
+ return 0;
+ comment = 0;
+ continue;
+ }
+ if (comment || isspace(c))
+ continue;
+ if (c == '#' || c == ';') {
+ comment = 1;
+ continue;
+ }
+ if (c == '[') {
+ baselen = get_base_var(var);
+ if (baselen <= 0)
+ break;
+ var[baselen++] = '.';
+ var[baselen] = 0;
+ continue;
+ }
+ if (!isalpha(c))
+ break;
+ var[baselen] = tolower(c);
+ if (get_value(fn, data, var, baselen+1) < 0)
+ break;
+ }
+ die("bad config file line %d in %s", config_linenr, config_file_name);
+}
+
+static int parse_unit_factor(const char *end, unsigned long *val)
+{
+ if (!*end)
+ return 1;
+ else if (!strcasecmp(end, "k")) {
+ *val *= 1024;
+ return 1;
+ }
+ else if (!strcasecmp(end, "m")) {
+ *val *= 1024 * 1024;
+ return 1;
+ }
+ else if (!strcasecmp(end, "g")) {
+ *val *= 1024 * 1024 * 1024;
+ return 1;
+ }
+ return 0;
+}
+
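+/*
+ * Parse an integer config value with an optional unit suffix,
+ * e.g. "16k" -> 16384, "1M" -> 1048576, "1G" -> 1073741824.
+ */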
+static int perf_parse_long(const char *value, long *ret)
+{
+ if (value && *value) {
+ char *end;
+ long val = strtol(value, &end, 0);
+ unsigned long factor = 1;
+ if (!parse_unit_factor(end, &factor))
+ return 0;
+ *ret = val * factor;
+ return 1;
+ }
+ return 0;
+}
+
+static void die_bad_config(const char *name)
+{
+ if (config_file_name)
+ die("bad config value for '%s' in %s", name, config_file_name);
+ die("bad config value for '%s'", name);
+}
+
+int perf_config_int(const char *name, const char *value)
+{
+ long ret = 0;
+ if (!perf_parse_long(value, &ret))
+ die_bad_config(name);
+ return ret;
+}
+
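+/*
+ * A key present with no value at all counts as true and an empty value
+ * as false; "true"/"yes"/"on" and "false"/"no"/"off" map to 1/0, and
+ * anything else is parsed as an integer with *is_bool cleared.
+ */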
+static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
+{
+ *is_bool = 1;
+ if (!value)
+ return 1;
+ if (!*value)
+ return 0;
+ if (!strcasecmp(value, "true") || !strcasecmp(value, "yes") || !strcasecmp(value, "on"))
+ return 1;
+ if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off"))
+ return 0;
+ *is_bool = 0;
+ return perf_config_int(name, value);
+}
+
+int perf_config_bool(const char *name, const char *value)
+{
+ int discard;
+ return !!perf_config_bool_or_int(name, value, &discard);
+}
+
+const char *perf_config_dirname(const char *name, const char *value)
+{
+ if (!name)
+ return NULL;
+ return value;
+}
+
+static int perf_default_core_config(const char *var __used, const char *value __used)
+{
+ /* Add other config variables here and to Documentation/config.txt. */
+ return 0;
+}
+
+int perf_default_config(const char *var, const char *value, void *dummy __used)
+{
+ if (!prefixcmp(var, "core."))
+ return perf_default_core_config(var, value);
+
+ /* Add other config variables here and to Documentation/config.txt. */
+ return 0;
+}
+
+static int perf_config_from_file(config_fn_t fn, const char *filename, void *data)
+{
+ int ret;
+ FILE *f = fopen(filename, "r");
+
+ ret = -1;
+ if (f) {
+ config_file = f;
+ config_file_name = filename;
+ config_linenr = 1;
+ config_file_eof = 0;
+ ret = perf_parse_file(fn, data);
+ fclose(f);
+ config_file_name = NULL;
+ }
+ return ret;
+}
+
+static const char *perf_etc_perfconfig(void)
+{
+ static const char *system_wide;
+ if (!system_wide)
+ system_wide = system_path(ETC_PERFCONFIG);
+ return system_wide;
+}
+
+static int perf_env_bool(const char *k, int def)
+{
+ const char *v = getenv(k);
+ return v ? perf_config_bool(k, v) : def;
+}
+
+static int perf_config_system(void)
+{
+ return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0);
+}
+
+static int perf_config_global(void)
+{
+ return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0);
+}
+
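+/*
+ * Run the callback over every readable config file: $PERF_CONFIG
+ * (exclusive, skips everything else), then the system ETC_PERFCONFIG
+ * file, then $HOME/.perfconfig, then the perf_pathdup("config") file.
+ * Later files effectively override earlier ones since the callback
+ * sees their values last.
+ */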
+int perf_config(config_fn_t fn, void *data)
+{
+ int ret = 0, found = 0;
+ char *repo_config = NULL;
+ const char *home = NULL;
+
+ /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
+ if (config_exclusive_filename)
+ return perf_config_from_file(fn, config_exclusive_filename, data);
+ if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) {
+ ret += perf_config_from_file(fn, perf_etc_perfconfig(),
+ data);
+ found += 1;
+ }
+
+ home = getenv("HOME");
+ if (perf_config_global() && home) {
+ char *user_config = strdup(mkpath("%s/.perfconfig", home));
+ if (!access(user_config, R_OK)) {
+ ret += perf_config_from_file(fn, user_config, data);
+ found += 1;
+ }
+ free(user_config);
+ }
+
+ repo_config = perf_pathdup("config");
+ if (!access(repo_config, R_OK)) {
+ ret += perf_config_from_file(fn, repo_config, data);
+ found += 1;
+ }
+ free(repo_config);
+ if (found == 0)
+ return -1;
+ return ret;
+}
+
+/*
+ * Call this to report an error for a variable that should not
+ * be given a boolean value (i.e. "[my] var" alone means "true").
+ */
+int config_error_nonbool(const char *var)
+{
+ return error("Missing value for '%s'", var);
+}
+
+struct buildid_dir_config {
+ char *dir;
+};
+
+static int buildid_dir_command_config(const char *var, const char *value,
+ void *data)
+{
+ struct buildid_dir_config *c = data;
+ const char *v;
+
+ /* same dir for all commands */
+ if (!prefixcmp(var, "buildid.") && !strcmp(var + 8, "dir")) {
+ v = perf_config_dirname(var, value);
+ if (!v)
+ return -1;
+ strncpy(c->dir, v, MAXPATHLEN-1);
+ c->dir[MAXPATHLEN-1] = '\0';
+ }
+ return 0;
+}
+
+static void check_buildid_dir_config(void)
+{
+ struct buildid_dir_config c;
+ c.dir = buildid_dir;
+ perf_config(buildid_dir_command_config, &c);
+}
+
+void set_buildid_dir(void)
+{
+ buildid_dir[0] = '\0';
+
+ /* try config file */
+ check_buildid_dir_config();
+
+ /* default to $HOME/.debug */
+ if (buildid_dir[0] == '\0') {
+ char *v = getenv("HOME");
+ if (v) {
+ snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s",
+ v, DEBUG_CACHE_DIR);
+ } else {
+ strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1);
+ }
+ buildid_dir[MAXPATHLEN-1] = '\0';
+ }
+ /* for communicating with external commands */
+ setenv("PERF_BUILDID_DIR", buildid_dir, 1);
+}
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
new file mode 100644
index 00000000..0f9b8d7a
--- /dev/null
+++ b/tools/perf/util/cpumap.c
@@ -0,0 +1,114 @@
+#include "util.h"
+#include "../perf.h"
+#include "cpumap.h"
+#include <assert.h>
+#include <stdio.h>
+
+int cpumap[MAX_NR_CPUS];
+
+static int default_cpu_map(void)
+{
+ int nr_cpus, i;
+
+ nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+ assert(nr_cpus <= MAX_NR_CPUS);
+ assert((int)nr_cpus >= 0);
+
+ for (i = 0; i < nr_cpus; ++i)
+ cpumap[i] = i;
+
+ return nr_cpus;
+}
+
+static int read_all_cpu_map(void)
+{
+ FILE *onlnf;
+ int nr_cpus = 0;
+ int n, cpu, prev;
+ char sep;
+
+ onlnf = fopen("/sys/devices/system/cpu/online", "r");
+ if (!onlnf)
+ return default_cpu_map();
+
+ sep = 0;
+ prev = -1;
+ for (;;) {
+ n = fscanf(onlnf, "%u%c", &cpu, &sep);
+ if (n <= 0)
+ break;
+ if (prev >= 0) {
+ assert(nr_cpus + cpu - prev - 1 < MAX_NR_CPUS);
+ while (++prev < cpu)
+ cpumap[nr_cpus++] = prev;
+ }
+ assert (nr_cpus < MAX_NR_CPUS);
+ cpumap[nr_cpus++] = cpu;
+ if (n == 2 && sep == '-')
+ prev = cpu;
+ else
+ prev = -1;
+ if (n == 1 || sep == '\n')
+ break;
+ }
+ fclose(onlnf);
+ if (nr_cpus > 0)
+ return nr_cpus;
+
+ return default_cpu_map();
+}
+
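+/*
+ * Parse a user supplied CPU list such as "0-3,6" (illustrative): that
+ * example fills cpumap[] with {0, 1, 2, 3, 6} and returns 5. A NULL
+ * list means "all online CPUs"; duplicates or malformed input return -1.
+ */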
+int read_cpu_map(const char *cpu_list)
+{
+ unsigned long start_cpu, end_cpu = 0;
+ char *p = NULL;
+ int i, nr_cpus = 0;
+
+ if (!cpu_list)
+ return read_all_cpu_map();
+
+ if (!isdigit(*cpu_list))
+ goto invalid;
+
+ while (isdigit(*cpu_list)) {
+ p = NULL;
+ start_cpu = strtoul(cpu_list, &p, 0);
+ if (start_cpu >= INT_MAX
+ || (*p != '\0' && *p != ',' && *p != '-'))
+ goto invalid;
+
+ if (*p == '-') {
+ cpu_list = ++p;
+ p = NULL;
+ end_cpu = strtoul(cpu_list, &p, 0);
+
+ if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
+ goto invalid;
+
+ if (end_cpu < start_cpu)
+ goto invalid;
+ } else {
+ end_cpu = start_cpu;
+ }
+
+ for (; start_cpu <= end_cpu; start_cpu++) {
+ /* check for duplicates */
+ for (i = 0; i < nr_cpus; i++)
+ if (cpumap[i] == (int)start_cpu)
+ goto invalid;
+
+ assert(nr_cpus < MAX_NR_CPUS);
+ cpumap[nr_cpus++] = (int)start_cpu;
+ }
+ if (*p)
+ ++p;
+
+ cpu_list = p;
+ }
+ if (nr_cpus > 0)
+ return nr_cpus;
+
+ return default_cpu_map();
+invalid:
+ return -1;
+}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
new file mode 100644
index 00000000..3e60f56e
--- /dev/null
+++ b/tools/perf/util/cpumap.h
@@ -0,0 +1,7 @@
+#ifndef __PERF_CPUMAP_H
+#define __PERF_CPUMAP_H
+
+extern int read_cpu_map(const char *cpu_list);
+extern int cpumap[];
+
+#endif /* __PERF_CPUMAP_H */
diff --git a/tools/perf/util/ctype.c b/tools/perf/util/ctype.c
new file mode 100644
index 00000000..35073621
--- /dev/null
+++ b/tools/perf/util/ctype.c
@@ -0,0 +1,39 @@
+/*
+ * Sane locale-independent, ASCII ctype.
+ *
+ * No surprises, and works with signed and unsigned chars.
+ */
+#include "cache.h"
+
+enum {
+ S = GIT_SPACE,
+ A = GIT_ALPHA,
+ D = GIT_DIGIT,
+ G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */
+ R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | * */
+ P = GIT_PRINT_EXTRA, /* printable - alpha - digit - glob - regex */
+
+ PS = GIT_SPACE | GIT_PRINT_EXTRA,
+};
+
+unsigned char sane_ctype[256] = {
+/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0, /* 0.. 15 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 16.. 31 */
+ PS,P, P, P, R, P, P, P, R, R, G, R, P, P, R, P, /* 32.. 47 */
+ D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, G, /* 48.. 63 */
+ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */
+ A, A, A, A, A, A, A, A, A, A, A, G, G, P, R, P, /* 80.. 95 */
+ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */
+ A, A, A, A, A, A, A, A, A, A, A, R, R, P, P, 0, /* 112..127 */
+ /* Nothing in the 128.. range */
+};
+
+const char *graph_line =
+ "_____________________________________________________________________"
+ "_____________________________________________________________________";
+const char *graph_dotted_line =
+ "---------------------------------------------------------------------"
+ "---------------------------------------------------------------------"
+ "---------------------------------------------------------------------";
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
new file mode 100644
index 00000000..f9c7e3ad
--- /dev/null
+++ b/tools/perf/util/debug.c
@@ -0,0 +1,98 @@
+/* For general debugging purposes */
+
+#include "../perf.h"
+
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "cache.h"
+#include "color.h"
+#include "event.h"
+#include "debug.h"
+#include "util.h"
+
+int verbose = 0;
+bool dump_trace = false;
+
+int eprintf(int level, const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (verbose >= level) {
+ va_start(args, fmt);
+ if (use_browser > 0)
+ ret = ui_helpline__show_help(fmt, args);
+ else
+ ret = vfprintf(stderr, fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
+int dump_printf(const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (dump_trace) {
+ va_start(args, fmt);
+ ret = vprintf(fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
+static int dump_printf_color(const char *fmt, const char *color, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (dump_trace) {
+ va_start(args, color);
+ ret = color_vfprintf(stdout, color, fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
+
+void trace_event(event_t *event)
+{
+ unsigned char *raw_event = (void *)event;
+ const char *color = PERF_COLOR_BLUE;
+ int i, j;
+
+ if (!dump_trace)
+ return;
+
+ dump_printf(".");
+ dump_printf_color("\n. ... raw event: size %d bytes\n", color,
+ event->header.size);
+
+ for (i = 0; i < event->header.size; i++) {
+ if ((i & 15) == 0) {
+ dump_printf(".");
+ dump_printf_color(" %04x: ", color, i);
+ }
+
+ dump_printf_color(" %02x", color, raw_event[i]);
+
+ if (((i & 15) == 15) || i == event->header.size-1) {
+ dump_printf_color(" ", color);
+ for (j = 0; j < 15-(i & 15); j++)
+ dump_printf_color(" ", color);
+ for (j = i & ~15; j <= i; j++) {
+ dump_printf_color("%c", color,
+ isprint(raw_event[j]) ?
+ raw_event[j] : '.');
+ }
+ dump_printf_color("\n", color);
+ }
+ }
+ dump_printf(".\n");
+}
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
new file mode 100644
index 00000000..7a17ee06
--- /dev/null
+++ b/tools/perf/util/debug.h
@@ -0,0 +1,38 @@
+/* For general debugging purposes */
+#ifndef __PERF_DEBUG_H
+#define __PERF_DEBUG_H
+
+#include <stdbool.h>
+#include "event.h"
+
+extern int verbose;
+extern bool dump_trace;
+
+int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+void trace_event(event_t *event);
+
+struct ui_progress;
+
+#ifdef NO_NEWT_SUPPORT
+static inline int ui_helpline__show_help(const char *format __used, va_list ap __used)
+{
+ return 0;
+}
+
+static inline struct ui_progress *ui_progress__new(const char *title __used,
+ u64 total __used)
+{
+ return (struct ui_progress *)1;
+}
+
+static inline void ui_progress__update(struct ui_progress *self __used,
+ u64 curr __used) {}
+
+static inline void ui_progress__delete(struct ui_progress *self __used) {}
+#else
+extern char ui_helpline__last_msg[];
+int ui_helpline__show_help(const char *format, va_list ap);
+#include "ui/progress.h"
+#endif
+
+#endif /* __PERF_DEBUG_H */
diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c
new file mode 100644
index 00000000..a88fefc0
--- /dev/null
+++ b/tools/perf/util/debugfs.c
@@ -0,0 +1,240 @@
+#include "util.h"
+#include "debugfs.h"
+#include "cache.h"
+
+static int debugfs_premounted;
+static char debugfs_mountpoint[MAX_PATH+1];
+
+static const char *debugfs_known_mountpoints[] = {
+ "/sys/kernel/debug/",
+ "/debug/",
+ 0,
+};
+
+/* use this to force a umount */
+void debugfs_force_cleanup(void)
+{
+ debugfs_find_mountpoint();
+ debugfs_premounted = 0;
+ debugfs_umount();
+}
+
+/* construct a full path to a debugfs element */
+int debugfs_make_path(const char *element, char *buffer, int size)
+{
+ int len;
+
+ if (strlen(debugfs_mountpoint) == 0) {
+ buffer[0] = '\0';
+ return -1;
+ }
+
+ len = strlen(debugfs_mountpoint) + strlen(element) + 1;
+ if (len >= size)
+ return len+1;
+
+ snprintf(buffer, size-1, "%s/%s", debugfs_mountpoint, element);
+ return 0;
+}
+
+static int debugfs_found;
+
+/* find the path to the mounted debugfs */
+const char *debugfs_find_mountpoint(void)
+{
+ const char **ptr;
+ char type[100];
+ FILE *fp;
+
+ if (debugfs_found)
+ return (const char *) debugfs_mountpoint;
+
+ ptr = debugfs_known_mountpoints;
+ while (*ptr) {
+ if (debugfs_valid_mountpoint(*ptr) == 0) {
+ debugfs_found = 1;
+ strcpy(debugfs_mountpoint, *ptr);
+ return debugfs_mountpoint;
+ }
+ ptr++;
+ }
+
+ /* give up and parse /proc/mounts */
+ fp = fopen("/proc/mounts", "r");
+ if (fp == NULL)
+ die("Can't open /proc/mounts for read");
+
+ while (fscanf(fp, "%*s %"
+ STR(MAX_PATH)
+ "s %99s %*s %*d %*d\n",
+ debugfs_mountpoint, type) == 2) {
+ if (strcmp(type, "debugfs") == 0)
+ break;
+ }
+ fclose(fp);
+
+ if (strcmp(type, "debugfs") != 0)
+ return NULL;
+
+ debugfs_found = 1;
+
+ return debugfs_mountpoint;
+}
+
+/* verify that a mountpoint is actually a debugfs instance */
+
+int debugfs_valid_mountpoint(const char *debugfs)
+{
+ struct statfs st_fs;
+
+ if (statfs(debugfs, &st_fs) < 0)
+ return -ENOENT;
+ else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
+ return -ENOENT;
+
+ return 0;
+}
+
+
+int debugfs_valid_entry(const char *path)
+{
+ struct stat st;
+
+ if (stat(path, &st))
+ return -errno;
+
+ return 0;
+}
+
+/* mount the debugfs somewhere if it's not mounted */
+
+char *debugfs_mount(const char *mountpoint)
+{
+ /* see if it's already mounted */
+ if (debugfs_find_mountpoint()) {
+ debugfs_premounted = 1;
+ return debugfs_mountpoint;
+ }
+
+ /* if not mounted and no argument */
+ if (mountpoint == NULL) {
+ /* see if environment variable set */
+ mountpoint = getenv(PERF_DEBUGFS_ENVIRONMENT);
+ /* if no environment variable, use default */
+ if (mountpoint == NULL)
+ mountpoint = "/sys/kernel/debug";
+ }
+
+ if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)
+ return NULL;
+
+ /* save the mountpoint */
+ strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
+ debugfs_found = 1;
+
+ return debugfs_mountpoint;
+}
+
+/* umount the debugfs */
+
+int debugfs_umount(void)
+{
+ char umountcmd[128];
+ int ret;
+
+ /* if it was already mounted, leave it */
+ if (debugfs_premounted)
+ return 0;
+
+ /* make sure it's a valid mount point */
+ ret = debugfs_valid_mountpoint(debugfs_mountpoint);
+ if (ret)
+ return ret;
+
+ snprintf(umountcmd, sizeof(umountcmd),
+ "/bin/umount %s", debugfs_mountpoint);
+ return system(umountcmd);
+}
+
+int debugfs_write(const char *entry, const char *value)
+{
+ char path[MAX_PATH+1];
+ int ret, count;
+ int fd;
+
+ /* construct the path */
+ snprintf(path, sizeof(path), "%s/%s", debugfs_mountpoint, entry);
+
+ /* verify that it exists */
+ ret = debugfs_valid_entry(path);
+ if (ret)
+ return ret;
+
+ /* get how many chars we're going to write */
+ count = strlen(value);
+
+ /* open the debugfs entry */
+ fd = open(path, O_RDWR);
+ if (fd < 0)
+ return -errno;
+
+ while (count > 0) {
+ /* write it */
+ ret = write(fd, value, count);
+ if (ret <= 0) {
+ if (errno == EAGAIN)
+ continue;
+ close(fd);
+ return -errno;
+ }
+ count -= ret;
+ }
+
+ /* close it */
+ close(fd);
+
+ /* return success */
+ return 0;
+}
+
+/*
+ * read a debugfs entry
+ * returns the number of chars read or a negative errno
+ */
+int debugfs_read(const char *entry, char *buffer, size_t size)
+{
+ char path[MAX_PATH+1];
+ int ret;
+ int fd;
+
+ /* construct the path */
+ snprintf(path, sizeof(path), "%s/%s", debugfs_mountpoint, entry);
+
+ /* verify that it exists */
+ ret = debugfs_valid_entry(path);
+ if (ret)
+ return ret;
+
+ /* open the debugfs entry */
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return -errno;
+
+ do {
+ /* read it */
+ ret = read(fd, buffer, size);
+ if (ret == 0) {
+ close(fd);
+ return EOF;
+ }
+ } while (ret < 0 && errno == EAGAIN);
+
+ /* close it */
+ close(fd);
+
+ /* make *sure* there's a null character at the end */
+ buffer[ret] = '\0';
+
+ /* return the number of chars read */
+ return ret;
+}
diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h
new file mode 100644
index 00000000..83a02879
--- /dev/null
+++ b/tools/perf/util/debugfs.h
@@ -0,0 +1,25 @@
+#ifndef __DEBUGFS_H__
+#define __DEBUGFS_H__
+
+#include <sys/mount.h>
+
+#ifndef MAX_PATH
+# define MAX_PATH 256
+#endif
+
+#ifndef STR
+# define _STR(x) #x
+# define STR(x) _STR(x)
+#endif
+
+extern const char *debugfs_find_mountpoint(void);
+extern int debugfs_valid_mountpoint(const char *debugfs);
+extern int debugfs_valid_entry(const char *path);
+extern char *debugfs_mount(const char *mountpoint);
+extern int debugfs_umount(void);
+extern int debugfs_write(const char *entry, const char *value);
+extern int debugfs_read(const char *entry, char *buffer, size_t size);
+extern void debugfs_force_cleanup(void);
+extern int debugfs_make_path(const char *element, char *buffer, int size);
+
+#endif /* __DEBUGFS_H__ */
diff --git a/tools/perf/util/environment.c b/tools/perf/util/environment.c
new file mode 100644
index 00000000..275b0ee3
--- /dev/null
+++ b/tools/perf/util/environment.c
@@ -0,0 +1,9 @@
+/*
+ * We put all the perf config variables in this same object
+ * file, so that programs can link against the config parser
+ * without having to link against all the rest of perf.
+ */
+#include "cache.h"
+
+const char *pager_program;
+int pager_use_color = 1;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
new file mode 100644
index 00000000..dab9e754
--- /dev/null
+++ b/tools/perf/util/event.c
@@ -0,0 +1,836 @@
+#include <linux/types.h>
+#include "event.h"
+#include "debug.h"
+#include "session.h"
+#include "sort.h"
+#include "string.h"
+#include "strlist.h"
+#include "thread.h"
+
+const char *event__name[] = {
+ [0] = "TOTAL",
+ [PERF_RECORD_MMAP] = "MMAP",
+ [PERF_RECORD_LOST] = "LOST",
+ [PERF_RECORD_COMM] = "COMM",
+ [PERF_RECORD_EXIT] = "EXIT",
+ [PERF_RECORD_THROTTLE] = "THROTTLE",
+ [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
+ [PERF_RECORD_FORK] = "FORK",
+ [PERF_RECORD_READ] = "READ",
+ [PERF_RECORD_SAMPLE] = "SAMPLE",
+ [PERF_RECORD_HEADER_ATTR] = "ATTR",
+ [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
+ [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
+ [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
+};
+
+static pid_t event__synthesize_comm(pid_t pid, int full,
+ event__handler_t process,
+ struct perf_session *session)
+{
+ event_t ev;
+ char filename[PATH_MAX];
+ char bf[BUFSIZ];
+ FILE *fp;
+ size_t size = 0;
+ DIR *tasks;
+ struct dirent dirent, *next;
+ pid_t tgid = 0;
+
+ snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
+
+ fp = fopen(filename, "r");
+ if (fp == NULL) {
+out_race:
+ /*
+ * We raced with a task exiting - just return:
+ */
+ pr_debug("couldn't open %s\n", filename);
+ return 0;
+ }
+
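+ /*
+ * Pick the process name and tgid out of /proc/<pid>/status lines
+ * such as "Name:  bash" and "Tgid:  1234" (illustrative values).
+ */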
+ memset(&ev.comm, 0, sizeof(ev.comm));
+ while (!ev.comm.comm[0] || !ev.comm.pid) {
+ if (fgets(bf, sizeof(bf), fp) == NULL)
+ goto out_failure;
+
+ if (memcmp(bf, "Name:", 5) == 0) {
+ char *name = bf + 5;
+ while (*name && isspace(*name))
+ ++name;
+ size = strlen(name) - 1;
+ memcpy(ev.comm.comm, name, size++);
+ } else if (memcmp(bf, "Tgid:", 5) == 0) {
+ char *tgids = bf + 5;
+ while (*tgids && isspace(*tgids))
+ ++tgids;
+ tgid = ev.comm.pid = atoi(tgids);
+ }
+ }
+
+ ev.comm.header.type = PERF_RECORD_COMM;
+ size = ALIGN(size, sizeof(u64));
+ ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);
+
+ if (!full) {
+ ev.comm.tid = pid;
+
+ process(&ev, session);
+ goto out_fclose;
+ }
+
+ snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
+
+ tasks = opendir(filename);
+ if (tasks == NULL)
+ goto out_race;
+
+ while (!readdir_r(tasks, &dirent, &next) && next) {
+ char *end;
+ pid = strtol(dirent.d_name, &end, 10);
+ if (*end)
+ continue;
+
+ ev.comm.tid = pid;
+
+ process(&ev, session);
+ }
+ closedir(tasks);
+
+out_fclose:
+ fclose(fp);
+ return tgid;
+
+out_failure:
+ pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
+ return -1;
+}
+
+static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
+ event__handler_t process,
+ struct perf_session *session)
+{
+ char filename[PATH_MAX];
+ FILE *fp;
+
+ snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);
+
+ fp = fopen(filename, "r");
+ if (fp == NULL) {
+ /*
+ * We raced with a task exiting - just return:
+ */
+ pr_debug("couldn't open %s\n", filename);
+ return -1;
+ }
+
+ while (1) {
+ char bf[BUFSIZ], *pbf = bf;
+ event_t ev = {
+ .header = {
+ .type = PERF_RECORD_MMAP,
+ /*
+ * Just like the kernel, see __perf_event_mmap
+ * in kernel/perf_event.c
+ */
+ .misc = PERF_RECORD_MISC_USER,
+ },
+ };
+ int n;
+ size_t size;
+ if (fgets(bf, sizeof(bf), fp) == NULL)
+ break;
+
+ /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
+ n = hex2u64(pbf, &ev.mmap.start);
+ if (n < 0)
+ continue;
+ pbf += n + 1;
+ n = hex2u64(pbf, &ev.mmap.len);
+ if (n < 0)
+ continue;
+ pbf += n + 3;
+ if (*pbf == 'x') { /* vm_exec */
+ char *execname = strchr(bf, '/');
+
+ /* Catch VDSO */
+ if (execname == NULL)
+ execname = strstr(bf, "[vdso]");
+
+ if (execname == NULL)
+ continue;
+
+ pbf += 3;
+ n = hex2u64(pbf, &ev.mmap.pgoff);
+
+ size = strlen(execname);
+ execname[size - 1] = '\0'; /* Remove \n */
+ memcpy(ev.mmap.filename, execname, size);
+ size = ALIGN(size, sizeof(u64));
+ ev.mmap.len -= ev.mmap.start;
+ ev.mmap.header.size = (sizeof(ev.mmap) -
+ (sizeof(ev.mmap.filename) - size));
+ ev.mmap.pid = tgid;
+ ev.mmap.tid = pid;
+
+ process(&ev, session);
+ }
+ }
+
+ fclose(fp);
+ return 0;
+}
+
+int event__synthesize_modules(event__handler_t process,
+ struct perf_session *session,
+ struct machine *machine)
+{
+ struct rb_node *nd;
+ struct map_groups *kmaps = &machine->kmaps;
+ u16 misc;
+
+ /*
+ * kernel uses 0 for user space maps, see kernel/perf_event.c
+ * __perf_event_mmap
+ */
+ if (machine__is_host(machine))
+ misc = PERF_RECORD_MISC_KERNEL;
+ else
+ misc = PERF_RECORD_MISC_GUEST_KERNEL;
+
+ for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
+ nd; nd = rb_next(nd)) {
+ event_t ev;
+ size_t size;
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+
+ if (pos->dso->kernel)
+ continue;
+
+ size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
+ memset(&ev, 0, sizeof(ev));
+ ev.mmap.header.misc = misc;
+ ev.mmap.header.type = PERF_RECORD_MMAP;
+ ev.mmap.header.size = (sizeof(ev.mmap) -
+ (sizeof(ev.mmap.filename) - size));
+ ev.mmap.start = pos->start;
+ ev.mmap.len = pos->end - pos->start;
+ ev.mmap.pid = machine->pid;
+
+ memcpy(ev.mmap.filename, pos->dso->long_name,
+ pos->dso->long_name_len + 1);
+ process(&ev, session);
+ }
+
+ return 0;
+}
+
+int event__synthesize_thread(pid_t pid, event__handler_t process,
+ struct perf_session *session)
+{
+ pid_t tgid = event__synthesize_comm(pid, 1, process, session);
+ if (tgid == -1)
+ return -1;
+ return event__synthesize_mmap_events(pid, tgid, process, session);
+}
+
+void event__synthesize_threads(event__handler_t process,
+ struct perf_session *session)
+{
+ DIR *proc;
+ struct dirent dirent, *next;
+
+ proc = opendir("/proc");
+
+ while (!readdir_r(proc, &dirent, &next) && next) {
+ char *end;
+ pid_t pid = strtol(dirent.d_name, &end, 10);
+
+ if (*end) /* only interested in proper numerical dirents */
+ continue;
+
+ event__synthesize_thread(pid, process, session);
+ }
+
+ closedir(proc);
+}
+
+struct process_symbol_args {
+ const char *name;
+ u64 start;
+};
+
+static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
+{
+ struct process_symbol_args *args = arg;
+
+ /*
+ * Must be a function or at least an alias, as in PARISC64, where "_text" is
+ * an 'A' to the same address as "_stext".
+ */
+ if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
+ type == 'A') || strcmp(name, args->name))
+ return 0;
+
+ args->start = start;
+ return 1;
+}
+
+int event__synthesize_kernel_mmap(event__handler_t process,
+ struct perf_session *session,
+ struct machine *machine,
+ const char *symbol_name)
+{
+ size_t size;
+ const char *filename, *mmap_name;
+ char path[PATH_MAX];
+ char name_buff[PATH_MAX];
+ struct map *map;
+
+ event_t ev = {
+ .header = {
+ .type = PERF_RECORD_MMAP,
+ },
+ };
+ /*
+ * We should get this from /sys/kernel/sections/.text, but until that is
+ * available use /proc/kallsyms, and once it is, keep this as a fallback
+ * for older kernels.
+ */
+ struct process_symbol_args args = { .name = symbol_name, };
+
+ mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
+ if (machine__is_host(machine)) {
+ /*
+ * kernel uses PERF_RECORD_MISC_USER for user space maps,
+ * see kernel/perf_event.c __perf_event_mmap
+ */
+ ev.header.misc = PERF_RECORD_MISC_KERNEL;
+ filename = "/proc/kallsyms";
+ } else {
+ ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+ if (machine__is_default_guest(machine))
+ filename = (char *) symbol_conf.default_guest_kallsyms;
+ else {
+ sprintf(path, "%s/proc/kallsyms", machine->root_dir);
+ filename = path;
+ }
+ }
+
+ if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
+ return -ENOENT;
+
+ map = machine->vmlinux_maps[MAP__FUNCTION];
+ size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
+ "%s%s", mmap_name, symbol_name) + 1;
+ size = ALIGN(size, sizeof(u64));
+ ev.mmap.header.size = (sizeof(ev.mmap) -
+ (sizeof(ev.mmap.filename) - size));
+ ev.mmap.pgoff = args.start;
+ ev.mmap.start = map->start;
+ ev.mmap.len = map->end - ev.mmap.start;
+ ev.mmap.pid = machine->pid;
+
+ return process(&ev, session);
+}
+
+static void thread__comm_adjust(struct thread *self, struct hists *hists)
+{
+ char *comm = self->comm;
+
+ if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
+ (!symbol_conf.comm_list ||
+ strlist__has_entry(symbol_conf.comm_list, comm))) {
+ u16 slen = strlen(comm);
+
+ if (hists__new_col_len(hists, HISTC_COMM, slen))
+ hists__set_col_len(hists, HISTC_THREAD, slen + 6);
+ }
+}
+
+static int thread__set_comm_adjust(struct thread *self, const char *comm,
+ struct hists *hists)
+{
+ int ret = thread__set_comm(self, comm);
+
+ if (ret)
+ return ret;
+
+ thread__comm_adjust(self, hists);
+
+ return 0;
+}
+
+int event__process_comm(event_t *self, struct perf_session *session)
+{
+ struct thread *thread = perf_session__findnew(session, self->comm.tid);
+
+ dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);
+
+ if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm,
+ &session->hists)) {
+ dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int event__process_lost(event_t *self, struct perf_session *session)
+{
+ dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
+ session->hists.stats.total_lost += self->lost.lost;
+ return 0;
+}
+
+static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
+{
+ maps[MAP__FUNCTION]->start = self->mmap.start;
+ maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
+ /*
+ * Be a bit paranoid here, some perf.data file came with
+ * a zero sized synthesized MMAP event for the kernel.
+ */
+ if (maps[MAP__FUNCTION]->end == 0)
+ maps[MAP__FUNCTION]->end = ~0UL;
+}
+
+static int event__process_kernel_mmap(event_t *self,
+ struct perf_session *session)
+{
+ struct map *map;
+ char kmmap_prefix[PATH_MAX];
+ struct machine *machine;
+ enum dso_kernel_type kernel_type;
+ bool is_kernel_mmap;
+
+ machine = perf_session__findnew_machine(session, self->mmap.pid);
+ if (!machine) {
+ pr_err("Can't find id %d's machine\n", self->mmap.pid);
+ goto out_problem;
+ }
+
+ machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
+ if (machine__is_host(machine))
+ kernel_type = DSO_TYPE_KERNEL;
+ else
+ kernel_type = DSO_TYPE_GUEST_KERNEL;
+
+ is_kernel_mmap = memcmp(self->mmap.filename,
+ kmmap_prefix,
+ strlen(kmmap_prefix)) == 0;
+ if (self->mmap.filename[0] == '/' ||
+ (!is_kernel_mmap && self->mmap.filename[0] == '[')) {
+
+ char short_module_name[1024];
+ char *name, *dot;
+
+ if (self->mmap.filename[0] == '/') {
+ name = strrchr(self->mmap.filename, '/');
+ if (name == NULL)
+ goto out_problem;
+
+ ++name; /* skip / */
+ dot = strrchr(name, '.');
+ if (dot == NULL)
+ goto out_problem;
+ snprintf(short_module_name, sizeof(short_module_name),
+ "[%.*s]", (int)(dot - name), name);
+ strxfrchar(short_module_name, '-', '_');
+ } else
+ strcpy(short_module_name, self->mmap.filename);
+
+ map = machine__new_module(machine, self->mmap.start,
+ self->mmap.filename);
+ if (map == NULL)
+ goto out_problem;
+
+ name = strdup(short_module_name);
+ if (name == NULL)
+ goto out_problem;
+
+ map->dso->short_name = name;
+ map->dso->sname_alloc = 1;
+ map->end = map->start + self->mmap.len;
+ } else if (is_kernel_mmap) {
+ const char *symbol_name = (self->mmap.filename +
+ strlen(kmmap_prefix));
+ /*
+ * Should be there already, from the build-id table in
+ * the header.
+ */
+ struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
+ kmmap_prefix);
+ if (kernel == NULL)
+ goto out_problem;
+
+ kernel->kernel = kernel_type;
+ if (__machine__create_kernel_maps(machine, kernel) < 0)
+ goto out_problem;
+
+ event_set_kernel_mmap_len(machine->vmlinux_maps, self);
+ perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
+ symbol_name,
+ self->mmap.pgoff);
+ if (machine__is_default_guest(machine)) {
+ /*
+ * preload dso of guest kernel and modules
+ */
+ dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
+ NULL);
+ }
+ }
+ return 0;
+out_problem:
+ return -1;
+}
+
+int event__process_mmap(event_t *self, struct perf_session *session)
+{
+ struct machine *machine;
+ struct thread *thread;
+ struct map *map;
+ u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ int ret = 0;
+
+ dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
+ self->mmap.pid, self->mmap.tid, self->mmap.start,
+ self->mmap.len, self->mmap.pgoff, self->mmap.filename);
+
+ if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
+ cpumode == PERF_RECORD_MISC_KERNEL) {
+ ret = event__process_kernel_mmap(self, session);
+ if (ret < 0)
+ goto out_problem;
+ return 0;
+ }
+
+ machine = perf_session__find_host_machine(session);
+ if (machine == NULL)
+ goto out_problem;
+ thread = perf_session__findnew(session, self->mmap.pid);
+ if (thread == NULL)
+ goto out_problem;
+ map = map__new(&machine->user_dsos, self->mmap.start,
+ self->mmap.len, self->mmap.pgoff,
+ self->mmap.pid, self->mmap.filename,
+ MAP__FUNCTION);
+ if (map == NULL)
+ goto out_problem;
+
+ thread__insert_map(thread, map);
+ return 0;
+
+out_problem:
+ dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
+ return 0;
+}
+
+int event__process_task(event_t *self, struct perf_session *session)
+{
+ struct thread *thread = perf_session__findnew(session, self->fork.tid);
+ struct thread *parent = perf_session__findnew(session, self->fork.ptid);
+
+ dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
+ self->fork.ppid, self->fork.ptid);
+
+ if (self->header.type == PERF_RECORD_EXIT) {
+ perf_session__remove_thread(session, thread);
+ return 0;
+ }
+
+ if (thread == NULL || parent == NULL ||
+ thread__fork(thread, parent) < 0) {
+ dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int event__process(event_t *event, struct perf_session *session)
+{
+ switch (event->header.type) {
+ case PERF_RECORD_COMM:
+ event__process_comm(event, session);
+ break;
+ case PERF_RECORD_MMAP:
+ event__process_mmap(event, session);
+ break;
+ case PERF_RECORD_FORK:
+ case PERF_RECORD_EXIT:
+ event__process_task(event, session);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
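+/*
+ * Resolve an address to the map it falls into. al->level encodes the
+ * address space: 'k' host kernel, '.' host user, 'g' guest kernel,
+ * 'u' guest user (not supported yet), 'H' hypervisor/unknown.
+ */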
+void thread__find_addr_map(struct thread *self,
+ struct perf_session *session, u8 cpumode,
+ enum map_type type, pid_t pid, u64 addr,
+ struct addr_location *al)
+{
+ struct map_groups *mg = &self->mg;
+ struct machine *machine = NULL;
+
+ al->thread = self;
+ al->addr = addr;
+ al->cpumode = cpumode;
+ al->filtered = false;
+
+ if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
+ al->level = 'k';
+ machine = perf_session__find_host_machine(session);
+ if (machine == NULL) {
+ al->map = NULL;
+ return;
+ }
+ mg = &machine->kmaps;
+ } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
+ al->level = '.';
+ machine = perf_session__find_host_machine(session);
+ } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
+ al->level = 'g';
+ machine = perf_session__find_machine(session, pid);
+ if (machine == NULL) {
+ al->map = NULL;
+ return;
+ }
+ mg = &machine->kmaps;
+ } else {
+ /*
+ * 'u' means guest os user space.
+ * TODO: We don't support guest user space yet. Might support it later.
+ */
+ if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
+ al->level = 'u';
+ else
+ al->level = 'H';
+ al->map = NULL;
+
+ if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
+ cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
+ !perf_guest)
+ al->filtered = true;
+ if ((cpumode == PERF_RECORD_MISC_USER ||
+ cpumode == PERF_RECORD_MISC_KERNEL) &&
+ !perf_host)
+ al->filtered = true;
+
+ return;
+ }
+try_again:
+ al->map = map_groups__find(mg, type, al->addr);
+ if (al->map == NULL) {
+ /*
+ * If this is outside of all known maps, and is a negative
+ * address, try to look it up in the kernel dso, as it might be
+ * a vsyscall or vdso (which executes in user-mode).
+ *
+ * XXX This is nasty, we should have a symbol list in the
+ * "[vdso]" dso, but for now lets use the old trick of looking
+ * in the whole kernel symbol list.
+ */
+ if ((long long)al->addr < 0 &&
+ cpumode == PERF_RECORD_MISC_KERNEL &&
+ machine && mg != &machine->kmaps) {
+ mg = &machine->kmaps;
+ goto try_again;
+ }
+ } else
+ al->addr = al->map->map_ip(al->map, al->addr);
+}
+
+void thread__find_addr_location(struct thread *self,
+ struct perf_session *session, u8 cpumode,
+ enum map_type type, pid_t pid, u64 addr,
+ struct addr_location *al,
+ symbol_filter_t filter)
+{
+ thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
+ if (al->map != NULL)
+ al->sym = map__find_symbol(al->map, al->addr, filter);
+ else
+ al->sym = NULL;
+}
+
+static void dso__calc_col_width(struct dso *self, struct hists *hists)
+{
+ if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
+ (!symbol_conf.dso_list ||
+ strlist__has_entry(symbol_conf.dso_list, self->name))) {
+ u16 slen = dso__name_len(self);
+ hists__new_col_len(hists, HISTC_DSO, slen);
+ }
+
+ self->slen_calculated = 1;
+}
+
+int event__preprocess_sample(const event_t *self, struct perf_session *session,
+ struct addr_location *al, struct sample_data *data,
+ symbol_filter_t filter)
+{
+ u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct thread *thread;
+
+ event__parse_sample(self, session->sample_type, data);
+
+ dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld cpu:%d\n",
+ self->header.misc, data->pid, data->tid, data->ip,
+ data->period, data->cpu);
+
+ if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
+ unsigned int i;
+
+ dump_printf("... chain: nr:%Lu\n", data->callchain->nr);
+
+ if (!ip_callchain__valid(data->callchain, self)) {
+ pr_debug("call-chain problem with event, "
+ "skipping it.\n");
+ goto out_filtered;
+ }
+
+ if (dump_trace) {
+ for (i = 0; i < data->callchain->nr; i++)
+ dump_printf("..... %2d: %016Lx\n",
+ i, data->callchain->ips[i]);
+ }
+ }
+ thread = perf_session__findnew(session, self->ip.pid);
+ if (thread == NULL)
+ return -1;
+
+ if (symbol_conf.comm_list &&
+ !strlist__has_entry(symbol_conf.comm_list, thread->comm))
+ goto out_filtered;
+
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+ /*
+ * Have we already created the kernel maps for the host machine?
+ *
+ * This should have happened earlier, when we processed the kernel MMAP
+ * events, but for older perf.data files there was no such thing, so do
+ * it now.
+ */
+ if (cpumode == PERF_RECORD_MISC_KERNEL &&
+ session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
+ machine__create_kernel_maps(&session->host_machine);
+
+ thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
+ self->ip.pid, self->ip.ip, al);
+ dump_printf(" ...... dso: %s\n",
+ al->map ? al->map->dso->long_name :
+ al->level == 'H' ? "[hypervisor]" : "<not found>");
+ al->sym = NULL;
+ al->cpu = data->cpu;
+
+ if (al->map) {
+ if (symbol_conf.dso_list &&
+ (!al->map || !al->map->dso ||
+ !(strlist__has_entry(symbol_conf.dso_list,
+ al->map->dso->short_name) ||
+ (al->map->dso->short_name != al->map->dso->long_name &&
+ strlist__has_entry(symbol_conf.dso_list,
+ al->map->dso->long_name)))))
+ goto out_filtered;
+ /*
+ * We have to do this here as we may have a dso with no symbol
+ * hit that has a name longer than the ones with symbols
+ * sampled.
+ */
+ if (!sort_dso.elide && !al->map->dso->slen_calculated)
+ dso__calc_col_width(al->map->dso, &session->hists);
+
+ al->sym = map__find_symbol(al->map, al->addr, filter);
+ } else {
+ const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
+
+ if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
+ !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
+ !symbol_conf.dso_list)
+ hists__set_col_len(&session->hists, HISTC_DSO,
+ unresolved_col_width);
+ }
+
+ if (symbol_conf.sym_list && al->sym &&
+ !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
+ goto out_filtered;
+
+ return 0;
+
+out_filtered:
+ al->filtered = true;
+ return 0;
+}
+
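+/*
+ * Decode a PERF_RECORD_SAMPLE body: the optional fields that follow the
+ * header are laid out back to back in the fixed order given by the
+ * sample_type bits, so walk the u64 array in that same order.
+ */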
+int event__parse_sample(const event_t *event, u64 type, struct sample_data *data)
+{
+ const u64 *array = event->sample.array;
+
+ if (type & PERF_SAMPLE_IP) {
+ data->ip = event->ip.ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u32 *p = (u32 *)array;
+ data->pid = p[0];
+ data->tid = p[1];
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ data->time = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ADDR) {
+ data->addr = *array;
+ array++;
+ }
+
+ data->id = -1ULL;
+ if (type & PERF_SAMPLE_ID) {
+ data->id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ data->stream_id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u32 *p = (u32 *)array;
+ data->cpu = *p;
+ array++;
+ } else
+ data->cpu = -1;
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ data->period = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_READ) {
+ pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ data->callchain = (struct ip_callchain *)array;
+ array += 1 + data->callchain->nr;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ u32 *p = (u32 *)array;
+ data->raw_size = *p;
+ p++;
+ data->raw_data = p;
+ }
+
+ return 0;
+}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
new file mode 100644
index 00000000..8e790dae
--- /dev/null
+++ b/tools/perf/util/event.h
@@ -0,0 +1,167 @@
+#ifndef __PERF_RECORD_H
+#define __PERF_RECORD_H
+
+#include <limits.h>
+
+#include "../perf.h"
+#include "map.h"
+
+/*
+ * PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
+ */
+struct ip_event {
+ struct perf_event_header header;
+ u64 ip;
+ u32 pid, tid;
+ unsigned char __more_data[];
+};
+
+struct mmap_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ u64 start;
+ u64 len;
+ u64 pgoff;
+ char filename[PATH_MAX];
+};
+
+struct comm_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ char comm[16];
+};
+
+struct fork_event {
+ struct perf_event_header header;
+ u32 pid, ppid;
+ u32 tid, ptid;
+ u64 time;
+};
+
+struct lost_event {
+ struct perf_event_header header;
+ u64 id;
+ u64 lost;
+};
+
+/*
+ * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
+ */
+struct read_event {
+ struct perf_event_header header;
+ u32 pid, tid;
+ u64 value;
+ u64 time_enabled;
+ u64 time_running;
+ u64 id;
+};
+
+struct sample_event {
+ struct perf_event_header header;
+ u64 array[];
+};
+
+struct sample_data {
+ u64 ip;
+ u32 pid, tid;
+ u64 time;
+ u64 addr;
+ u64 id;
+ u64 stream_id;
+ u64 period;
+ u32 cpu;
+ u32 raw_size;
+ void *raw_data;
+ struct ip_callchain *callchain;
+};
+
+#define BUILD_ID_SIZE 20
+
+struct build_id_event {
+ struct perf_event_header header;
+ pid_t pid;
+ u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
+ char filename[];
+};
+
+enum perf_user_event_type { /* above any possible kernel type */
+ PERF_RECORD_HEADER_ATTR = 64,
+ PERF_RECORD_HEADER_EVENT_TYPE = 65,
+ PERF_RECORD_HEADER_TRACING_DATA = 66,
+ PERF_RECORD_HEADER_BUILD_ID = 67,
+ PERF_RECORD_FINISHED_ROUND = 68,
+ PERF_RECORD_HEADER_MAX
+};
+
+struct attr_event {
+ struct perf_event_header header;
+ struct perf_event_attr attr;
+ u64 id[];
+};
+
+#define MAX_EVENT_NAME 64
+
+struct perf_trace_event_type {
+ u64 event_id;
+ char name[MAX_EVENT_NAME];
+};
+
+struct event_type_event {
+ struct perf_event_header header;
+ struct perf_trace_event_type event_type;
+};
+
+struct tracing_data_event {
+ struct perf_event_header header;
+ u32 size;
+};
+
+typedef union event_union {
+ struct perf_event_header header;
+ struct ip_event ip;
+ struct mmap_event mmap;
+ struct comm_event comm;
+ struct fork_event fork;
+ struct lost_event lost;
+ struct read_event read;
+ struct sample_event sample;
+ struct attr_event attr;
+ struct event_type_event event_type;
+ struct tracing_data_event tracing_data;
+ struct build_id_event build_id;
+} event_t;
+
+void event__print_totals(void);
+
+struct perf_session;
+
+typedef int (*event__handler_t)(event_t *event, struct perf_session *session);
+
+int event__synthesize_thread(pid_t pid, event__handler_t process,
+ struct perf_session *session);
+void event__synthesize_threads(event__handler_t process,
+ struct perf_session *session);
+int event__synthesize_kernel_mmap(event__handler_t process,
+ struct perf_session *session,
+ struct machine *machine,
+ const char *symbol_name);
+
+int event__synthesize_modules(event__handler_t process,
+ struct perf_session *session,
+ struct machine *machine);
+
+int event__process_comm(event_t *self, struct perf_session *session);
+int event__process_lost(event_t *self, struct perf_session *session);
+int event__process_mmap(event_t *self, struct perf_session *session);
+int event__process_task(event_t *self, struct perf_session *session);
+int event__process(event_t *event, struct perf_session *session);
+
+struct addr_location;
+int event__preprocess_sample(const event_t *self, struct perf_session *session,
+ struct addr_location *al, struct sample_data *data,
+ symbol_filter_t filter);
+int event__parse_sample(const event_t *event, u64 type, struct sample_data *data);
+
+extern const char *event__name[];
+
+#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c
new file mode 100644
index 00000000..67eeff57
--- /dev/null
+++ b/tools/perf/util/exec_cmd.c
@@ -0,0 +1,167 @@
+#include "cache.h"
+#include "exec_cmd.h"
+#include "quote.h"
+
+#include <string.h>
+
+#define MAX_ARGS 32
+
+static const char *argv_exec_path;
+static const char *argv0_path;
+
+const char *system_path(const char *path)
+{
+#ifdef RUNTIME_PREFIX
+ static const char *prefix;
+#else
+ static const char *prefix = PREFIX;
+#endif
+ struct strbuf d = STRBUF_INIT;
+
+ if (is_absolute_path(path))
+ return path;
+
+#ifdef RUNTIME_PREFIX
+ assert(argv0_path);
+ assert(is_absolute_path(argv0_path));
+
+ if (!prefix &&
+ !(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) &&
+ !(prefix = strip_path_suffix(argv0_path, BINDIR)) &&
+ !(prefix = strip_path_suffix(argv0_path, "perf"))) {
+ prefix = PREFIX;
+ fprintf(stderr, "RUNTIME_PREFIX requested, "
+ "but prefix computation failed. "
+ "Using static fallback '%s'.\n", prefix);
+ }
+#endif
+
+ strbuf_addf(&d, "%s/%s", prefix, path);
+ path = strbuf_detach(&d, NULL);
+ return path;
+}
+
+const char *perf_extract_argv0_path(const char *argv0)
+{
+ const char *slash;
+
+ if (!argv0 || !*argv0)
+ return NULL;
+ slash = argv0 + strlen(argv0);
+
+ while (argv0 <= slash && !is_dir_sep(*slash))
+ slash--;
+
+ if (slash >= argv0) {
+ argv0_path = strndup(argv0, slash - argv0);
+ return argv0_path ? slash + 1 : NULL;
+ }
+
+ return argv0;
+}
+
+void perf_set_argv_exec_path(const char *exec_path)
+{
+ argv_exec_path = exec_path;
+ /*
+ * Propagate this setting to external programs.
+ */
+ setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1);
+}
+
+
+/* Returns the highest-priority location to look for perf programs. */
+const char *perf_exec_path(void)
+{
+ const char *env;
+
+ if (argv_exec_path)
+ return argv_exec_path;
+
+ env = getenv(EXEC_PATH_ENVIRONMENT);
+ if (env && *env) {
+ return env;
+ }
+
+ return system_path(PERF_EXEC_PATH);
+}
+
+static void add_path(struct strbuf *out, const char *path)
+{
+ if (path && *path) {
+ if (is_absolute_path(path))
+ strbuf_addstr(out, path);
+ else
+ strbuf_addstr(out, make_nonrelative_path(path));
+
+ strbuf_addch(out, PATH_SEP);
+ }
+}
+
+void setup_path(void)
+{
+ const char *old_path = getenv("PATH");
+ struct strbuf new_path = STRBUF_INIT;
+
+ add_path(&new_path, perf_exec_path());
+ add_path(&new_path, argv0_path);
+
+ if (old_path)
+ strbuf_addstr(&new_path, old_path);
+ else
+ strbuf_addstr(&new_path, "/usr/local/bin:/usr/bin:/bin");
+
+ setenv("PATH", new_path.buf, 1);
+
+ strbuf_release(&new_path);
+}
+
+static const char **prepare_perf_cmd(const char **argv)
+{
+ int argc;
+ const char **nargv;
+
+ for (argc = 0; argv[argc]; argc++)
+ ; /* just counting */
+ nargv = malloc(sizeof(*nargv) * (argc + 2));
+
+ nargv[0] = "perf";
+ for (argc = 0; argv[argc]; argc++)
+ nargv[argc + 1] = argv[argc];
+ nargv[argc + 1] = NULL;
+ return nargv;
+}
+
+int execv_perf_cmd(const char **argv)
+{
+ const char **nargv = prepare_perf_cmd(argv);
+
+ /* execvp() can only ever return if it fails */
+ execvp("perf", (char **)nargv);
+
+ free(nargv);
+ return -1;
+}
+
+
+int execl_perf_cmd(const char *cmd, ...)
+{
+ int argc;
+ const char *argv[MAX_ARGS + 1];
+ const char *arg;
+ va_list param;
+
+ va_start(param, cmd);
+ argv[0] = cmd;
+ argc = 1;
+ while (argc < MAX_ARGS) {
+ arg = argv[argc++] = va_arg(param, char *);
+ if (!arg)
+ break;
+ }
+ va_end(param);
+ if (MAX_ARGS <= argc)
+ return error("too many args to run %s", cmd);
+
+ argv[argc] = NULL;
+ return execv_perf_cmd(argv);
+}
diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h
new file mode 100644
index 00000000..bc4b9159
--- /dev/null
+++ b/tools/perf/util/exec_cmd.h
@@ -0,0 +1,12 @@
+#ifndef __PERF_EXEC_CMD_H
+#define __PERF_EXEC_CMD_H
+
+extern void perf_set_argv_exec_path(const char *exec_path);
+extern const char *perf_extract_argv0_path(const char *path);
+extern const char *perf_exec_path(void);
+extern void setup_path(void);
+extern int execv_perf_cmd(const char **argv); /* NULL terminated */
+extern int execl_perf_cmd(const char *cmd, ...);
+extern const char *system_path(const char *path);
+
+#endif /* __PERF_EXEC_CMD_H */
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
new file mode 100755
index 00000000..f06f6fd1
--- /dev/null
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+echo "/* Automatically generated by $0 */
+struct cmdname_help
+{
+ char name[16];
+ char help[80];
+};
+
+static struct cmdname_help common_cmds[] = {"
+
+sed -n -e 's/^perf-\([^ ]*\)[ ].* common.*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+ sed -n '
+ /^NAME/,/perf-'"$cmd"'/H
+ ${
+ x
+ s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/
+ p
+ }' "Documentation/perf-$cmd.txt"
+done
+echo "};"
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
new file mode 100644
index 00000000..d7e67b16
--- /dev/null
+++ b/tools/perf/util/header.c
@@ -0,0 +1,1199 @@
+#define _FILE_OFFSET_BITS 64
+
+#include <sys/types.h>
+#include <byteswap.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+
+#include "util.h"
+#include "header.h"
+#include "../perf.h"
+#include "trace-event.h"
+#include "session.h"
+#include "symbol.h"
+#include "debug.h"
+
+static bool no_buildid_cache = false;
+
+/*
+ * Create new perf.data header attribute:
+ */
+struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr)
+{
+ struct perf_header_attr *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ self->attr = *attr;
+ self->ids = 0;
+ self->size = 1;
+ self->id = malloc(sizeof(u64));
+ if (self->id == NULL) {
+ free(self);
+ self = NULL;
+ }
+ }
+
+ return self;
+}
+
+void perf_header_attr__delete(struct perf_header_attr *self)
+{
+ free(self->id);
+ free(self);
+}
+
+int perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
+{
+ int pos = self->ids;
+
+ self->ids++;
+ if (self->ids > self->size) {
+ int nsize = self->size * 2;
+ u64 *nid = realloc(self->id, nsize * sizeof(u64));
+
+ if (nid == NULL)
+ return -1;
+
+ self->size = nsize;
+ self->id = nid;
+ }
+ self->id[pos] = id;
+ return 0;
+}
+
+int perf_header__init(struct perf_header *self)
+{
+ self->size = 1;
+ self->attr = malloc(sizeof(void *));
+ return self->attr == NULL ? -ENOMEM : 0;
+}
+
+void perf_header__exit(struct perf_header *self)
+{
+ int i;
+ for (i = 0; i < self->attrs; ++i)
+ perf_header_attr__delete(self->attr[i]);
+ free(self->attr);
+}
+
+int perf_header__add_attr(struct perf_header *self,
+ struct perf_header_attr *attr)
+{
+ if (self->frozen)
+ return -1;
+
+ if (self->attrs == self->size) {
+ int nsize = self->size * 2;
+ struct perf_header_attr **nattr;
+
+ nattr = realloc(self->attr, nsize * sizeof(void *));
+ if (nattr == NULL)
+ return -1;
+
+ self->size = nsize;
+ self->attr = nattr;
+ }
+
+ self->attr[self->attrs++] = attr;
+ return 0;
+}
+
+static int event_count;
+static struct perf_trace_event_type *events;
+
+int perf_header__push_event(u64 id, const char *name)
+{
+ if (strlen(name) >= MAX_EVENT_NAME)
+ pr_warning("Event %s will be truncated\n", name);
+
+ if (!events) {
+ events = malloc(sizeof(struct perf_trace_event_type));
+ if (events == NULL)
+ return -ENOMEM;
+ } else {
+ struct perf_trace_event_type *nevents;
+
+ nevents = realloc(events, (event_count + 1) * sizeof(*events));
+ if (nevents == NULL)
+ return -ENOMEM;
+ events = nevents;
+ }
+ memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
+ events[event_count].event_id = id;
+ strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
+ event_count++;
+ return 0;
+}
+
+char *perf_header__find_event(u64 id)
+{
+ int i;
+ for (i = 0 ; i < event_count; i++) {
+ if (events[i].event_id == id)
+ return events[i].name;
+ }
+ return NULL;
+}
+
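+/* The perf.data magic is the 8-byte string "PERFFILE" read as a host-endian u64. */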
+static const char *__perf_magic = "PERFFILE";
+
+#define PERF_MAGIC (*(u64 *)__perf_magic)
+
+struct perf_file_attr {
+ struct perf_event_attr attr;
+ struct perf_file_section ids;
+};
+
+void perf_header__set_feat(struct perf_header *self, int feat)
+{
+ set_bit(feat, self->adds_features);
+}
+
+bool perf_header__has_feat(const struct perf_header *self, int feat)
+{
+ return test_bit(feat, self->adds_features);
+}
+
+static int do_write(int fd, const void *buf, size_t size)
+{
+ while (size) {
+ int ret = write(fd, buf, size);
+
+ if (ret < 0)
+ return -errno;
+
+ size -= ret;
+ buf += ret;
+ }
+
+ return 0;
+}
+
+#define NAME_ALIGN 64
+
+static int write_padded(int fd, const void *bf, size_t count,
+ size_t count_aligned)
+{
+ static const char zero_buf[NAME_ALIGN];
+ int err = do_write(fd, bf, count);
+
+ if (!err)
+ err = do_write(fd, zero_buf, count_aligned - count);
+
+ return err;
+}
+
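+/*
+ * Iterate only over DSOs that carry a build-id; the trailing "else" lets the
+ * macro expand as a single statement so the caller's loop body attaches to it.
+ */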
+#define dsos__for_each_with_build_id(pos, head) \
+ list_for_each_entry(pos, head, node) \
+ if (!pos->has_build_id) \
+ continue; \
+ else
+
+static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
+ u16 misc, int fd)
+{
+ struct dso *pos;
+
+ dsos__for_each_with_build_id(pos, head) {
+ int err;
+ struct build_id_event b;
+ size_t len;
+
+ if (!pos->hit)
+ continue;
+ len = pos->long_name_len + 1;
+ len = ALIGN(len, NAME_ALIGN);
+ memset(&b, 0, sizeof(b));
+ memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
+ b.pid = pid;
+ b.header.misc = misc;
+ b.header.size = sizeof(b) + len;
+ err = do_write(fd, &b, sizeof(b));
+ if (err < 0)
+ return err;
+ err = write_padded(fd, pos->long_name,
+ pos->long_name_len + 1, len);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int machine__write_buildid_table(struct machine *self, int fd)
+{
+ int err;
+ u16 kmisc = PERF_RECORD_MISC_KERNEL,
+ umisc = PERF_RECORD_MISC_USER;
+
+ if (!machine__is_host(self)) {
+ kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
+ umisc = PERF_RECORD_MISC_GUEST_USER;
+ }
+
+ err = __dsos__write_buildid_table(&self->kernel_dsos, self->pid,
+ kmisc, fd);
+ if (err == 0)
+ err = __dsos__write_buildid_table(&self->user_dsos,
+ self->pid, umisc, fd);
+ return err;
+}
+
+static int dsos__write_buildid_table(struct perf_header *header, int fd)
+{
+ struct perf_session *session = container_of(header,
+ struct perf_session, header);
+ struct rb_node *nd;
+ int err = machine__write_buildid_table(&session->host_machine, fd);
+
+ if (err)
+ return err;
+
+ for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ err = machine__write_buildid_table(pos, fd);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ const char *name, bool is_kallsyms)
+{
+ const size_t size = PATH_MAX;
+ char *filename = malloc(size),
+ *linkname = malloc(size), *targetname;
+ int len, err = -1;
+
+ if (filename == NULL || linkname == NULL)
+ goto out_free;
+
+ len = snprintf(filename, size, "%s%s%s",
+ debugdir, is_kallsyms ? "/" : "", name);
+ if (mkdir_p(filename, 0755))
+ goto out_free;
+
+ snprintf(filename + len, size - len, "/%s", sbuild_id);
+
+ if (access(filename, F_OK)) {
+ if (is_kallsyms) {
+ if (copyfile("/proc/kallsyms", filename))
+ goto out_free;
+ } else if (link(name, filename) && copyfile(name, filename))
+ goto out_free;
+ }
+
+ len = snprintf(linkname, size, "%s/.build-id/%.2s",
+ debugdir, sbuild_id);
+
+ if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
+ goto out_free;
+
+ snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
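+ /*
+ * Make the symlink target relative: overwrite the last five characters of
+ * the debugdir prefix inside filename with "../..", so that from
+ * <debugdir>/.build-id/XX/ the link resolves back into the cache.
+ */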
+ targetname = filename + strlen(debugdir) - 5;
+ memcpy(targetname, "../..", 5);
+
+ if (symlink(targetname, linkname) == 0)
+ err = 0;
+out_free:
+ free(filename);
+ free(linkname);
+ return err;
+}
+
+static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
+ const char *name, const char *debugdir,
+ bool is_kallsyms)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(build_id, build_id_size, sbuild_id);
+
+ return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
+}
+
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
+{
+ const size_t size = PATH_MAX;
+ char *filename = malloc(size),
+ *linkname = malloc(size);
+ int err = -1;
+
+ if (filename == NULL || linkname == NULL)
+ goto out_free;
+
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, sbuild_id + 2);
+
+ if (access(linkname, F_OK))
+ goto out_free;
+
+ if (readlink(linkname, filename, size) < 0)
+ goto out_free;
+
+ if (unlink(linkname))
+ goto out_free;
+
+ /*
+ * Since the link is relative, we must make it absolute:
+ */
+ snprintf(linkname, size, "%s/.build-id/%.2s/%s",
+ debugdir, sbuild_id, filename);
+
+ if (unlink(linkname))
+ goto out_free;
+
+ err = 0;
+out_free:
+ free(filename);
+ free(linkname);
+ return err;
+}
+
+static int dso__cache_build_id(struct dso *self, const char *debugdir)
+{
+ bool is_kallsyms = self->kernel && self->long_name[0] != '/';
+
+ return build_id_cache__add_b(self->build_id, sizeof(self->build_id),
+ self->long_name, debugdir, is_kallsyms);
+}
+
+static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+{
+ struct dso *pos;
+ int err = 0;
+
+ dsos__for_each_with_build_id(pos, head)
+ if (dso__cache_build_id(pos, debugdir))
+ err = -1;
+
+ return err;
+}
+
+static int machine__cache_build_ids(struct machine *self, const char *debugdir)
+{
+ int ret = __dsos__cache_build_ids(&self->kernel_dsos, debugdir);
+ ret |= __dsos__cache_build_ids(&self->user_dsos, debugdir);
+ return ret;
+}
+
+static int perf_session__cache_build_ids(struct perf_session *self)
+{
+ struct rb_node *nd;
+ int ret;
+ char debugdir[PATH_MAX];
+
+ snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
+
+ if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
+ return -1;
+
+ ret = machine__cache_build_ids(&self->host_machine, debugdir);
+
+ for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= machine__cache_build_ids(pos, debugdir);
+ }
+ return ret ? -1 : 0;
+}
+
+static bool machine__read_build_ids(struct machine *self, bool with_hits)
+{
+ bool ret = __dsos__read_build_ids(&self->kernel_dsos, with_hits);
+ ret |= __dsos__read_build_ids(&self->user_dsos, with_hits);
+ return ret;
+}
+
+static bool perf_session__read_build_ids(struct perf_session *self, bool with_hits)
+{
+ struct rb_node *nd;
+ bool ret = machine__read_build_ids(&self->host_machine, with_hits);
+
+ for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret |= machine__read_build_ids(pos, with_hits);
+ }
+
+ return ret;
+}
+
+static int perf_header__adds_write(struct perf_header *self, int fd)
+{
+ int nr_sections;
+ struct perf_session *session;
+ struct perf_file_section *feat_sec;
+ int sec_size;
+ u64 sec_start;
+ int idx = 0, err;
+
+ session = container_of(self, struct perf_session, header);
+ if (perf_session__read_build_ids(session, true))
+ perf_header__set_feat(self, HEADER_BUILD_ID);
+
+ nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
+ if (!nr_sections)
+ return 0;
+
+ feat_sec = calloc(sizeof(*feat_sec), nr_sections);
+ if (feat_sec == NULL)
+ return -ENOMEM;
+
+ sec_size = sizeof(*feat_sec) * nr_sections;
+
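+ /*
+ * Leave a hole for the table of feature sections right after the data:
+ * the feature payloads are written first, then we seek back and fill in
+ * their (offset, size) pairs.
+ */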
+ sec_start = self->data_offset + self->data_size;
+ lseek(fd, sec_start + sec_size, SEEK_SET);
+
+ if (perf_header__has_feat(self, HEADER_TRACE_INFO)) {
+ struct perf_file_section *trace_sec;
+
+ trace_sec = &feat_sec[idx++];
+
+ /* Write trace info */
+ trace_sec->offset = lseek(fd, 0, SEEK_CUR);
+ read_tracing_data(fd, attrs, nr_counters);
+ trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
+ }
+
+ if (perf_header__has_feat(self, HEADER_BUILD_ID)) {
+ struct perf_file_section *buildid_sec;
+
+ buildid_sec = &feat_sec[idx++];
+
+ /* Write build-ids */
+ buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
+ err = dsos__write_buildid_table(self, fd);
+ if (err < 0) {
+ pr_debug("failed to write buildid table\n");
+ goto out_free;
+ }
+ buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
+ buildid_sec->offset;
+ if (!no_buildid_cache)
+ perf_session__cache_build_ids(session);
+ }
+
+ lseek(fd, sec_start, SEEK_SET);
+ err = do_write(fd, feat_sec, sec_size);
+ if (err < 0)
+ pr_debug("failed to write feature section\n");
+out_free:
+ free(feat_sec);
+ return err;
+}
+
+int perf_header__write_pipe(int fd)
+{
+ struct perf_pipe_file_header f_header;
+ int err;
+
+ f_header = (struct perf_pipe_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ };
+
+ err = do_write(fd, &f_header, sizeof(f_header));
+ if (err < 0) {
+ pr_debug("failed to write perf pipe header\n");
+ return err;
+ }
+
+ return 0;
+}
+
+int perf_header__write(struct perf_header *self, int fd, bool at_exit)
+{
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ struct perf_header_attr *attr;
+ int i, err;
+
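+ /*
+ * On-disk layout: file header, per-attribute id arrays, the attribute
+ * table, the event type table, then sample data. The header itself is
+ * rewritten at offset 0 at the end, once all offsets are known.
+ */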
+ lseek(fd, sizeof(f_header), SEEK_SET);
+
+ for (i = 0; i < self->attrs; i++) {
+ attr = self->attr[i];
+
+ attr->id_offset = lseek(fd, 0, SEEK_CUR);
+ err = do_write(fd, attr->id, attr->ids * sizeof(u64));
+ if (err < 0) {
+ pr_debug("failed to write perf header\n");
+ return err;
+ }
+ }
+
+
+ self->attr_offset = lseek(fd, 0, SEEK_CUR);
+
+ for (i = 0; i < self->attrs; i++) {
+ attr = self->attr[i];
+
+ f_attr = (struct perf_file_attr){
+ .attr = attr->attr,
+ .ids = {
+ .offset = attr->id_offset,
+ .size = attr->ids * sizeof(u64),
+ }
+ };
+ err = do_write(fd, &f_attr, sizeof(f_attr));
+ if (err < 0) {
+ pr_debug("failed to write perf header attribute\n");
+ return err;
+ }
+ }
+
+ self->event_offset = lseek(fd, 0, SEEK_CUR);
+ self->event_size = event_count * sizeof(struct perf_trace_event_type);
+ if (events) {
+ err = do_write(fd, events, self->event_size);
+ if (err < 0) {
+ pr_debug("failed to write perf header events\n");
+ return err;
+ }
+ }
+
+ self->data_offset = lseek(fd, 0, SEEK_CUR);
+
+ if (at_exit) {
+ err = perf_header__adds_write(self, fd);
+ if (err < 0)
+ return err;
+ }
+
+ f_header = (struct perf_file_header){
+ .magic = PERF_MAGIC,
+ .size = sizeof(f_header),
+ .attr_size = sizeof(f_attr),
+ .attrs = {
+ .offset = self->attr_offset,
+ .size = self->attrs * sizeof(f_attr),
+ },
+ .data = {
+ .offset = self->data_offset,
+ .size = self->data_size,
+ },
+ .event_types = {
+ .offset = self->event_offset,
+ .size = self->event_size,
+ },
+ };
+
+ memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features));
+
+ lseek(fd, 0, SEEK_SET);
+ err = do_write(fd, &f_header, sizeof(f_header));
+ if (err < 0) {
+ pr_debug("failed to write perf header\n");
+ return err;
+ }
+ lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+
+ self->frozen = 1;
+ return 0;
+}
+
+static int perf_header__getbuffer64(struct perf_header *self,
+ int fd, void *buf, size_t size)
+{
+ if (do_read(fd, buf, size) <= 0)
+ return -1;
+
+ if (self->needs_swap)
+ mem_bswap_64(buf, size);
+
+ return 0;
+}
+
+int perf_header__process_sections(struct perf_header *self, int fd,
+ int (*process)(struct perf_file_section *self,
+ struct perf_header *ph,
+ int feat, int fd))
+{
+ struct perf_file_section *feat_sec;
+ int nr_sections;
+ int sec_size;
+ int idx = 0;
+ int err = -1, feat = 1;
+
+ nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
+ if (!nr_sections)
+ return 0;
+
+ feat_sec = calloc(sizeof(*feat_sec), nr_sections);
+ if (!feat_sec)
+ return -1;
+
+ sec_size = sizeof(*feat_sec) * nr_sections;
+
+ lseek(fd, self->data_offset + self->data_size, SEEK_SET);
+
+ if (perf_header__getbuffer64(self, fd, feat_sec, sec_size))
+ goto out_free;
+
+ err = 0;
+ while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
+ if (perf_header__has_feat(self, feat)) {
+ struct perf_file_section *sec = &feat_sec[idx++];
+
+ err = process(sec, self, feat, fd);
+ if (err < 0)
+ break;
+ }
+ ++feat;
+ }
+out_free:
+ free(feat_sec);
+ return err;
+}
+
+int perf_file_header__read(struct perf_file_header *self,
+ struct perf_header *ph, int fd)
+{
+ lseek(fd, 0, SEEK_SET);
+
+ if (do_read(fd, self, sizeof(*self)) <= 0 ||
+ memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
+ return -1;
+
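+ /*
+ * If attr_size doesn't match, the file may have been written on a host
+ * with the opposite endianness; check the byte-swapped value before
+ * declaring the file incompatible.
+ */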
+ if (self->attr_size != sizeof(struct perf_file_attr)) {
+ u64 attr_size = bswap_64(self->attr_size);
+
+ if (attr_size != sizeof(struct perf_file_attr))
+ return -1;
+
+ mem_bswap_64(self, offsetof(struct perf_file_header,
+ adds_features));
+ ph->needs_swap = true;
+ }
+
+ if (self->size != sizeof(*self)) {
+ /* Support the previous format */
+ if (self->size == offsetof(typeof(*self), adds_features))
+ bitmap_zero(self->adds_features, HEADER_FEAT_BITS);
+ else
+ return -1;
+ }
+
+ memcpy(&ph->adds_features, &self->adds_features,
+ sizeof(ph->adds_features));
+ /*
+ * FIXME: hack that assumes that if we need to swap, the perf.data file
+ * may be coming from an arch with a different word-size, ergo a
+ * different DECLARE_BITMAP layout; investigate more later, but for now
+ * it's mostly safe to assume that we have a build-id section. Trace
+ * files probably have several other issues in this realm anyway...
+ */
+ if (ph->needs_swap) {
+ memset(&ph->adds_features, 0, sizeof(ph->adds_features));
+ perf_header__set_feat(ph, HEADER_BUILD_ID);
+ }
+
+ ph->event_offset = self->event_types.offset;
+ ph->event_size = self->event_types.size;
+ ph->data_offset = self->data.offset;
+ ph->data_size = self->data.size;
+ return 0;
+}
+
+static int __event_process_build_id(struct build_id_event *bev,
+ char *filename,
+ struct perf_session *session)
+{
+ int err = -1;
+ struct list_head *head;
+ struct machine *machine;
+ u16 misc;
+ struct dso *dso;
+ enum dso_kernel_type dso_type;
+
+ machine = perf_session__findnew_machine(session, bev->pid);
+ if (!machine)
+ goto out;
+
+ misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ switch (misc) {
+ case PERF_RECORD_MISC_KERNEL:
+ dso_type = DSO_TYPE_KERNEL;
+ head = &machine->kernel_dsos;
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ dso_type = DSO_TYPE_GUEST_KERNEL;
+ head = &machine->kernel_dsos;
+ break;
+ case PERF_RECORD_MISC_USER:
+ case PERF_RECORD_MISC_GUEST_USER:
+ dso_type = DSO_TYPE_USER;
+ head = &machine->user_dsos;
+ break;
+ default:
+ goto out;
+ }
+
+ dso = __dsos__findnew(head, filename);
+ if (dso != NULL) {
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ dso__set_build_id(dso, &bev->build_id);
+
+ if (filename[0] == '[')
+ dso->kernel = dso_type;
+
+ build_id__sprintf(dso->build_id, sizeof(dso->build_id),
+ sbuild_id);
+ pr_debug("build id event received for %s: %s\n",
+ dso->long_name, sbuild_id);
+ }
+
+ err = 0;
+out:
+ return err;
+}
+
+static int perf_header__read_build_ids(struct perf_header *self,
+ int input, u64 offset, u64 size)
+{
+ struct perf_session *session = container_of(self,
+ struct perf_session, header);
+ struct build_id_event bev;
+ char filename[PATH_MAX];
+ u64 limit = offset + size;
+ int err = -1;
+
+ while (offset < limit) {
+ ssize_t len;
+
+ if (read(input, &bev, sizeof(bev)) != sizeof(bev))
+ goto out;
+
+ if (self->needs_swap)
+ perf_event_header__bswap(&bev.header);
+
+ len = bev.header.size - sizeof(bev);
+ if (read(input, filename, len) != len)
+ goto out;
+
+ __event_process_build_id(&bev, filename, session);
+
+ offset += bev.header.size;
+ }
+ err = 0;
+out:
+ return err;
+}
+
+static int perf_file_section__process(struct perf_file_section *self,
+ struct perf_header *ph,
+ int feat, int fd)
+{
+ if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
+ pr_debug("Failed to lseek to %Ld offset for feature %d, "
+ "continuing...\n", self->offset, feat);
+ return 0;
+ }
+
+ switch (feat) {
+ case HEADER_TRACE_INFO:
+ trace_report(fd, false);
+ break;
+
+ case HEADER_BUILD_ID:
+ if (perf_header__read_build_ids(ph, fd, self->offset, self->size))
+ pr_debug("Failed to read buildids, continuing...\n");
+ break;
+ default:
+ pr_debug("unknown feature %d, continuing...\n", feat);
+ }
+
+ return 0;
+}
+
+static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
+ struct perf_header *ph, int fd,
+ bool repipe)
+{
+ if (do_read(fd, self, sizeof(*self)) <= 0 ||
+ memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
+ return -1;
+
+ if (repipe && do_write(STDOUT_FILENO, self, sizeof(*self)) < 0)
+ return -1;
+
+ if (self->size != sizeof(*self)) {
+ u64 size = bswap_64(self->size);
+
+ if (size != sizeof(*self))
+ return -1;
+
+ ph->needs_swap = true;
+ }
+
+ return 0;
+}
+
+static int perf_header__read_pipe(struct perf_session *session, int fd)
+{
+ struct perf_header *self = &session->header;
+ struct perf_pipe_file_header f_header;
+
+ if (perf_file_header__read_pipe(&f_header, self, fd,
+ session->repipe) < 0) {
+ pr_debug("incompatible file format\n");
+ return -EINVAL;
+ }
+
+ session->fd = fd;
+
+ return 0;
+}
+
+int perf_header__read(struct perf_session *session, int fd)
+{
+ struct perf_header *self = &session->header;
+ struct perf_file_header f_header;
+ struct perf_file_attr f_attr;
+ u64 f_id;
+ int nr_attrs, nr_ids, i, j;
+
+ if (session->fd_pipe)
+ return perf_header__read_pipe(session, fd);
+
+ if (perf_file_header__read(&f_header, self, fd) < 0) {
+ pr_debug("incompatible file format\n");
+ return -EINVAL;
+ }
+
+ nr_attrs = f_header.attrs.size / sizeof(f_attr);
+ lseek(fd, f_header.attrs.offset, SEEK_SET);
+
+ for (i = 0; i < nr_attrs; i++) {
+ struct perf_header_attr *attr;
+ off_t tmp;
+
+ if (perf_header__getbuffer64(self, fd, &f_attr, sizeof(f_attr)))
+ goto out_errno;
+
+ tmp = lseek(fd, 0, SEEK_CUR);
+
+ attr = perf_header_attr__new(&f_attr.attr);
+ if (attr == NULL)
+ return -ENOMEM;
+
+ nr_ids = f_attr.ids.size / sizeof(u64);
+ lseek(fd, f_attr.ids.offset, SEEK_SET);
+
+ for (j = 0; j < nr_ids; j++) {
+ if (perf_header__getbuffer64(self, fd, &f_id, sizeof(f_id)))
+ goto out_errno;
+
+ if (perf_header_attr__add_id(attr, f_id) < 0) {
+ perf_header_attr__delete(attr);
+ return -ENOMEM;
+ }
+ }
+ if (perf_header__add_attr(self, attr) < 0) {
+ perf_header_attr__delete(attr);
+ return -ENOMEM;
+ }
+
+ lseek(fd, tmp, SEEK_SET);
+ }
+
+ if (f_header.event_types.size) {
+ lseek(fd, f_header.event_types.offset, SEEK_SET);
+ events = malloc(f_header.event_types.size);
+ if (events == NULL)
+ return -ENOMEM;
+ if (perf_header__getbuffer64(self, fd, events,
+ f_header.event_types.size))
+ goto out_errno;
+ event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
+ }
+
+ perf_header__process_sections(self, fd, perf_file_section__process);
+
+ lseek(fd, self->data_offset, SEEK_SET);
+
+ self->frozen = 1;
+ return 0;
+out_errno:
+ return -errno;
+}
+
+u64 perf_header__sample_type(struct perf_header *header)
+{
+ u64 type = 0;
+ int i;
+
+ for (i = 0; i < header->attrs; i++) {
+ struct perf_header_attr *attr = header->attr[i];
+
+ if (!type)
+ type = attr->attr.sample_type;
+ else if (type != attr->attr.sample_type)
+ die("non matching sample_type");
+ }
+
+ return type;
+}
+
+struct perf_event_attr *
+perf_header__find_attr(u64 id, struct perf_header *header)
+{
+ int i;
+
+ /*
+ * We set id to -1 if the data file doesn't contain sample
+ * ids. Check for this and avoid walking through the entire
+ * list of ids which may be large.
+ */
+ if (id == -1ULL)
+ return NULL;
+
+ for (i = 0; i < header->attrs; i++) {
+ struct perf_header_attr *attr = header->attr[i];
+ int j;
+
+ for (j = 0; j < attr->ids; j++) {
+ if (attr->id[j] == id)
+ return &attr->attr;
+ }
+ }
+
+ return NULL;
+}
+
+int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
+ event__handler_t process,
+ struct perf_session *session)
+{
+ event_t *ev;
+ size_t size;
+ int err;
+
+ size = sizeof(struct perf_event_attr);
+ size = ALIGN(size, sizeof(u64));
+ size += sizeof(struct perf_event_header);
+ size += ids * sizeof(u64);
+
+ ev = malloc(size);
+
+ ev->attr.attr = *attr;
+ memcpy(ev->attr.id, id, ids * sizeof(u64));
+
+ ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
+ ev->attr.header.size = size;
+
+ err = process(ev, session);
+
+ free(ev);
+
+ return err;
+}
+
+int event__synthesize_attrs(struct perf_header *self,
+ event__handler_t process,
+ struct perf_session *session)
+{
+ struct perf_header_attr *attr;
+ int i, err = 0;
+
+ for (i = 0; i < self->attrs; i++) {
+ attr = self->attr[i];
+
+ err = event__synthesize_attr(&attr->attr, attr->ids, attr->id,
+ process, session);
+ if (err) {
+ pr_debug("failed to create perf header attribute\n");
+ return err;
+ }
+ }
+
+ return err;
+}
+
+int event__process_attr(event_t *self, struct perf_session *session)
+{
+ struct perf_header_attr *attr;
+ unsigned int i, ids, n_ids;
+
+ attr = perf_header_attr__new(&self->attr.attr);
+ if (attr == NULL)
+ return -ENOMEM;
+
+ ids = self->header.size;
+ ids -= (void *)&self->attr.id - (void *)self;
+ n_ids = ids / sizeof(u64);
+
+ for (i = 0; i < n_ids; i++) {
+ if (perf_header_attr__add_id(attr, self->attr.id[i]) < 0) {
+ perf_header_attr__delete(attr);
+ return -ENOMEM;
+ }
+ }
+
+ if (perf_header__add_attr(&session->header, attr) < 0) {
+ perf_header_attr__delete(attr);
+ return -ENOMEM;
+ }
+
+ perf_session__update_sample_type(session);
+
+ return 0;
+}
+
+int event__synthesize_event_type(u64 event_id, char *name,
+ event__handler_t process,
+ struct perf_session *session)
+{
+ event_t ev;
+ size_t size = 0;
+ int err = 0;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.event_type.event_type.event_id = event_id;
+ memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
+ strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
+
+ ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
+ size = strlen(name);
+ size = ALIGN(size, sizeof(u64));
+ ev.event_type.header.size = sizeof(ev.event_type) -
+ (sizeof(ev.event_type.event_type.name) - size);
+
+ err = process(&ev, session);
+
+ return err;
+}
+
+int event__synthesize_event_types(event__handler_t process,
+ struct perf_session *session)
+{
+ struct perf_trace_event_type *type;
+ int i, err = 0;
+
+ for (i = 0; i < event_count; i++) {
+ type = &events[i];
+
+ err = event__synthesize_event_type(type->event_id, type->name,
+ process, session);
+ if (err) {
+ pr_debug("failed to create perf header event type\n");
+ return err;
+ }
+ }
+
+ return err;
+}
+
+int event__process_event_type(event_t *self,
+ struct perf_session *session __unused)
+{
+ if (perf_header__push_event(self->event_type.event_type.event_id,
+ self->event_type.event_type.name) < 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
+ int nb_events,
+ event__handler_t process,
+ struct perf_session *session __unused)
+{
+ event_t ev;
+ ssize_t size = 0, aligned_size = 0, padding;
+ int err = 0;
+
+ memset(&ev, 0, sizeof(ev));
+
+ ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
+ size = read_tracing_data_size(fd, pattrs, nb_events);
+ if (size <= 0)
+ return size;
+ aligned_size = ALIGN(size, sizeof(u64));
+ padding = aligned_size - size;
+ ev.tracing_data.header.size = sizeof(ev.tracing_data);
+ ev.tracing_data.size = aligned_size;
+
+ process(&ev, session);
+
+ err = read_tracing_data(fd, pattrs, nb_events);
+ write_padded(fd, NULL, 0, padding);
+
+ return aligned_size;
+}
+
+int event__process_tracing_data(event_t *self,
+ struct perf_session *session)
+{
+ ssize_t size_read, padding, size = self->tracing_data.size;
+ off_t offset = lseek(session->fd, 0, SEEK_CUR);
+ char buf[BUFSIZ];
+
+ /* setup for reading amidst mmap */
+ lseek(session->fd, offset + sizeof(struct tracing_data_event),
+ SEEK_SET);
+
+ size_read = trace_report(session->fd, session->repipe);
+
+ padding = ALIGN(size_read, sizeof(u64)) - size_read;
+
+ if (read(session->fd, buf, padding) < 0)
+ die("reading input file");
+ if (session->repipe) {
+ int retw = write(STDOUT_FILENO, buf, padding);
+ if (retw <= 0 || retw != padding)
+ die("repiping tracing data padding");
+ }
+
+ if (size_read + padding != size)
+ die("tracing data size mismatch");
+
+ return size_read + padding;
+}
+
+int event__synthesize_build_id(struct dso *pos, u16 misc,
+ event__handler_t process,
+ struct machine *machine,
+ struct perf_session *session)
+{
+ event_t ev;
+ size_t len;
+ int err = 0;
+
+ if (!pos->hit)
+ return err;
+
+ memset(&ev, 0, sizeof(ev));
+
+ len = pos->long_name_len + 1;
+ len = ALIGN(len, NAME_ALIGN);
+ memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
+ ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
+ ev.build_id.header.misc = misc;
+ ev.build_id.pid = machine->pid;
+ ev.build_id.header.size = sizeof(ev.build_id) + len;
+ memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
+
+ err = process(&ev, session);
+
+ return err;
+}
+
+int event__process_build_id(event_t *self,
+ struct perf_session *session)
+{
+ __event_process_build_id(&self->build_id,
+ self->build_id.filename,
+ session);
+ return 0;
+}
+
+void disable_buildid_cache(void)
+{
+ no_buildid_cache = true;
+}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
new file mode 100644
index 00000000..402ac245
--- /dev/null
+++ b/tools/perf/util/header.h
@@ -0,0 +1,127 @@
+#ifndef __PERF_HEADER_H
+#define __PERF_HEADER_H
+
+#include "../../../include/linux/perf_event.h"
+#include <sys/types.h>
+#include <stdbool.h>
+#include "types.h"
+#include "event.h"
+
+#include <linux/bitmap.h>
+
+struct perf_header_attr {
+ struct perf_event_attr attr;
+ int ids, size;
+ u64 *id;
+ off_t id_offset;
+};
+
+enum {
+ HEADER_TRACE_INFO = 1,
+ HEADER_BUILD_ID,
+ HEADER_LAST_FEATURE,
+};
+
+#define HEADER_FEAT_BITS 256
+
+struct perf_file_section {
+ u64 offset;
+ u64 size;
+};
+
+struct perf_file_header {
+ u64 magic;
+ u64 size;
+ u64 attr_size;
+ struct perf_file_section attrs;
+ struct perf_file_section data;
+ struct perf_file_section event_types;
+ DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+};
+
+struct perf_pipe_file_header {
+ u64 magic;
+ u64 size;
+};
+
+struct perf_header;
+
+int perf_file_header__read(struct perf_file_header *self,
+ struct perf_header *ph, int fd);
+
+struct perf_header {
+ int frozen;
+ int attrs, size;
+ bool needs_swap;
+ struct perf_header_attr **attr;
+ s64 attr_offset;
+ u64 data_offset;
+ u64 data_size;
+ u64 event_offset;
+ u64 event_size;
+ DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
+};
+
+int perf_header__init(struct perf_header *self);
+void perf_header__exit(struct perf_header *self);
+
+int perf_header__read(struct perf_session *session, int fd);
+int perf_header__write(struct perf_header *self, int fd, bool at_exit);
+int perf_header__write_pipe(int fd);
+
+int perf_header__add_attr(struct perf_header *self,
+ struct perf_header_attr *attr);
+
+int perf_header__push_event(u64 id, const char *name);
+char *perf_header__find_event(u64 id);
+
+struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr);
+void perf_header_attr__delete(struct perf_header_attr *self);
+
+int perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
+
+u64 perf_header__sample_type(struct perf_header *header);
+struct perf_event_attr *
+perf_header__find_attr(u64 id, struct perf_header *header);
+void perf_header__set_feat(struct perf_header *self, int feat);
+bool perf_header__has_feat(const struct perf_header *self, int feat);
+
+int perf_header__process_sections(struct perf_header *self, int fd,
+ int (*process)(struct perf_file_section *self,
+ struct perf_header *ph,
+ int feat, int fd));
+
+int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ const char *name, bool is_kallsyms);
+int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
+
+int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
+ event__handler_t process,
+ struct perf_session *session);
+int event__synthesize_attrs(struct perf_header *self,
+ event__handler_t process,
+ struct perf_session *session);
+int event__process_attr(event_t *self, struct perf_session *session);
+
+int event__synthesize_event_type(u64 event_id, char *name,
+ event__handler_t process,
+ struct perf_session *session);
+int event__synthesize_event_types(event__handler_t process,
+ struct perf_session *session);
+int event__process_event_type(event_t *self,
+ struct perf_session *session);
+
+int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
+ int nb_events,
+ event__handler_t process,
+ struct perf_session *session);
+int event__process_tracing_data(event_t *self,
+ struct perf_session *session);
+
+int event__synthesize_build_id(struct dso *pos, u16 misc,
+ event__handler_t process,
+ struct machine *machine,
+ struct perf_session *session);
+int event__process_build_id(event_t *self, struct perf_session *session);
+
+#endif /* __PERF_HEADER_H */
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c
new file mode 100644
index 00000000..6f2975a0
--- /dev/null
+++ b/tools/perf/util/help.c
@@ -0,0 +1,338 @@
+#include "cache.h"
+#include "../builtin.h"
+#include "exec_cmd.h"
+#include "levenshtein.h"
+#include "help.h"
+
+void add_cmdname(struct cmdnames *cmds, const char *name, size_t len)
+{
+ struct cmdname *ent = malloc(sizeof(*ent) + len + 1);
+
+ ent->len = len;
+ memcpy(ent->name, name, len);
+ ent->name[len] = 0;
+
+ ALLOC_GROW(cmds->names, cmds->cnt + 1, cmds->alloc);
+ cmds->names[cmds->cnt++] = ent;
+}
+
+static void clean_cmdnames(struct cmdnames *cmds)
+{
+ unsigned int i;
+
+ for (i = 0; i < cmds->cnt; ++i)
+ free(cmds->names[i]);
+ free(cmds->names);
+ cmds->cnt = 0;
+ cmds->alloc = 0;
+}
+
+static int cmdname_compare(const void *a_, const void *b_)
+{
+ struct cmdname *a = *(struct cmdname **)a_;
+ struct cmdname *b = *(struct cmdname **)b_;
+ return strcmp(a->name, b->name);
+}
+
+static void uniq(struct cmdnames *cmds)
+{
+ unsigned int i, j;
+
+ if (!cmds->cnt)
+ return;
+
+ for (i = j = 1; i < cmds->cnt; i++)
+ if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name))
+ cmds->names[j++] = cmds->names[i];
+
+ cmds->cnt = j;
+}
+
+void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
+{
+ size_t ci, cj, ei;
+ int cmp;
+
+ ci = cj = ei = 0;
+ while (ci < cmds->cnt && ei < excludes->cnt) {
+ cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name);
+ if (cmp < 0)
+ cmds->names[cj++] = cmds->names[ci++];
+ else if (cmp == 0)
+ ci++, ei++;
+ else if (cmp > 0)
+ ei++;
+ }
+
+ while (ci < cmds->cnt)
+ cmds->names[cj++] = cmds->names[ci++];
+
+ cmds->cnt = cj;
+}
+
+static void pretty_print_string_list(struct cmdnames *cmds, int longest)
+{
+ int cols = 1, rows;
+ int space = longest + 1; /* min 1 SP between words */
+ struct winsize win;
+ int max_cols;
+ int i, j;
+
+ get_term_dimensions(&win);
+ max_cols = win.ws_col - 1; /* don't print *on* the edge */
+
+ if (space < max_cols)
+ cols = max_cols / space;
+ rows = (cmds->cnt + cols - 1) / cols;
+
+ for (i = 0; i < rows; i++) {
+ printf(" ");
+
+ for (j = 0; j < cols; j++) {
+ unsigned int n = j * rows + i;
+ unsigned int size = space;
+
+ if (n >= cmds->cnt)
+ break;
+ if (j == cols-1 || n + rows >= cmds->cnt)
+ size = 1;
+ printf("%-*s", size, cmds->names[n]->name);
+ }
+ putchar('\n');
+ }
+}
+
+static int is_executable(const char *name)
+{
+ struct stat st;
+
+ if (stat(name, &st) || /* stat, not lstat */
+ !S_ISREG(st.st_mode))
+ return 0;
+
+ return st.st_mode & S_IXUSR;
+}
+
+static void list_commands_in_dir(struct cmdnames *cmds,
+ const char *path,
+ const char *prefix)
+{
+ int prefix_len;
+ DIR *dir = opendir(path);
+ struct dirent *de;
+ struct strbuf buf = STRBUF_INIT;
+ int len;
+
+ if (!dir)
+ return;
+ if (!prefix)
+ prefix = "perf-";
+ prefix_len = strlen(prefix);
+
+ strbuf_addf(&buf, "%s/", path);
+ len = buf.len;
+
+ while ((de = readdir(dir)) != NULL) {
+ int entlen;
+
+ if (prefixcmp(de->d_name, prefix))
+ continue;
+
+ strbuf_setlen(&buf, len);
+ strbuf_addstr(&buf, de->d_name);
+ if (!is_executable(buf.buf))
+ continue;
+
+ entlen = strlen(de->d_name) - prefix_len;
+ if (has_extension(de->d_name, ".exe"))
+ entlen -= 4;
+
+ add_cmdname(cmds, de->d_name + prefix_len, entlen);
+ }
+ closedir(dir);
+ strbuf_release(&buf);
+}
+
+void load_command_list(const char *prefix,
+ struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds)
+{
+ const char *env_path = getenv("PATH");
+ const char *exec_path = perf_exec_path();
+
+ if (exec_path) {
+ list_commands_in_dir(main_cmds, exec_path, prefix);
+ qsort(main_cmds->names, main_cmds->cnt,
+ sizeof(*main_cmds->names), cmdname_compare);
+ uniq(main_cmds);
+ }
+
+ if (env_path) {
+ char *paths, *path, *colon;
+ path = paths = strdup(env_path);
+ while (1) {
+ if ((colon = strchr(path, PATH_SEP)))
+ *colon = 0;
+ if (!exec_path || strcmp(path, exec_path))
+ list_commands_in_dir(other_cmds, path, prefix);
+
+ if (!colon)
+ break;
+ path = colon + 1;
+ }
+ free(paths);
+
+ qsort(other_cmds->names, other_cmds->cnt,
+ sizeof(*other_cmds->names), cmdname_compare);
+ uniq(other_cmds);
+ }
+ exclude_cmds(other_cmds, main_cmds);
+}
+
+void list_commands(const char *title, struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds)
+{
+ unsigned int i, longest = 0;
+
+ for (i = 0; i < main_cmds->cnt; i++)
+ if (longest < main_cmds->names[i]->len)
+ longest = main_cmds->names[i]->len;
+ for (i = 0; i < other_cmds->cnt; i++)
+ if (longest < other_cmds->names[i]->len)
+ longest = other_cmds->names[i]->len;
+
+ if (main_cmds->cnt) {
+ const char *exec_path = perf_exec_path();
+ printf("available %s in '%s'\n", title, exec_path);
+ printf("----------------");
+ mput_char('-', strlen(title) + strlen(exec_path));
+ putchar('\n');
+ pretty_print_string_list(main_cmds, longest);
+ putchar('\n');
+ }
+
+ if (other_cmds->cnt) {
+ printf("%s available from elsewhere on your $PATH\n", title);
+ printf("---------------------------------------");
+ mput_char('-', strlen(title));
+ putchar('\n');
+ pretty_print_string_list(other_cmds, longest);
+ putchar('\n');
+ }
+}
+
+int is_in_cmdlist(struct cmdnames *c, const char *s)
+{
+ unsigned int i;
+
+ for (i = 0; i < c->cnt; i++)
+ if (!strcmp(s, c->names[i]->name))
+ return 1;
+ return 0;
+}
+
+static int autocorrect;
+static struct cmdnames aliases;
+
+static int perf_unknown_cmd_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "help.autocorrect"))
+ autocorrect = perf_config_int(var, value);
+ /* Also use aliases for command lookup */
+ if (!prefixcmp(var, "alias."))
+ add_cmdname(&aliases, var + 6, strlen(var + 6));
+
+ return perf_default_config(var, value, cb);
+}
+
+static int levenshtein_compare(const void *p1, const void *p2)
+{
+ const struct cmdname *const *c1 = p1, *const *c2 = p2;
+ const char *s1 = (*c1)->name, *s2 = (*c2)->name;
+ int l1 = (*c1)->len;
+ int l2 = (*c2)->len;
+ return l1 != l2 ? l1 - l2 : strcmp(s1, s2);
+}
+
+static void add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
+{
+ unsigned int i;
+
+ ALLOC_GROW(cmds->names, cmds->cnt + old->cnt, cmds->alloc);
+
+ for (i = 0; i < old->cnt; i++)
+ cmds->names[cmds->cnt++] = old->names[i];
+ free(old->names);
+ old->cnt = 0;
+ old->names = NULL;
+}
+
+const char *help_unknown_cmd(const char *cmd)
+{
+ unsigned int i, n = 0, best_similarity = 0;
+ struct cmdnames main_cmds, other_cmds;
+
+ memset(&main_cmds, 0, sizeof(main_cmds));
+ memset(&other_cmds, 0, sizeof(other_cmds));
+ memset(&aliases, 0, sizeof(aliases));
+
+ perf_config(perf_unknown_cmd_config, NULL);
+
+ load_command_list("perf-", &main_cmds, &other_cmds);
+
+ add_cmd_list(&main_cmds, &aliases);
+ add_cmd_list(&main_cmds, &other_cmds);
+ qsort(main_cmds.names, main_cmds.cnt,
+ sizeof(*main_cmds.names), cmdname_compare);
+ uniq(&main_cmds);
+
+ if (main_cmds.cnt) {
+ /* This reuses cmdname->len for similarity index */
+ for (i = 0; i < main_cmds.cnt; ++i)
+ main_cmds.names[i]->len =
+ levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4);
+
+ qsort(main_cmds.names, main_cmds.cnt,
+ sizeof(*main_cmds.names), levenshtein_compare);
+
+ best_similarity = main_cmds.names[0]->len;
+ n = 1;
+ while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len)
+ ++n;
+ }
+
+ if (autocorrect && n == 1) {
+ const char *assumed = main_cmds.names[0]->name;
+
+ main_cmds.names[0] = NULL;
+ clean_cmdnames(&main_cmds);
+ fprintf(stderr, "WARNING: You called a perf program named '%s', "
+ "which does not exist.\n"
+ "Continuing under the assumption that you meant '%s'\n",
+ cmd, assumed);
+ if (autocorrect > 0) {
+ fprintf(stderr, "in %0.1f seconds automatically...\n",
+ (float)autocorrect/10.0);
+ poll(NULL, 0, autocorrect * 100);
+ }
+ return assumed;
+ }
+
+ fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd);
+
+ if (main_cmds.cnt && best_similarity < 6) {
+ fprintf(stderr, "\nDid you mean %s?\n",
+ n < 2 ? "this": "one of these");
+
+ for (i = 0; i < n; i++)
+ fprintf(stderr, "\t%s\n", main_cmds.names[i]->name);
+ }
+
+ exit(1);
+}
+
+int cmd_version(int argc __used, const char **argv __used, const char *prefix __used)
+{
+ printf("perf version %s\n", perf_version_string);
+ return 0;
+}
diff --git a/tools/perf/util/help.h b/tools/perf/util/help.h
new file mode 100644
index 00000000..7f5c6ded
--- /dev/null
+++ b/tools/perf/util/help.h
@@ -0,0 +1,29 @@
+#ifndef __PERF_HELP_H
+#define __PERF_HELP_H
+
+struct cmdnames {
+ size_t alloc;
+ size_t cnt;
+ struct cmdname {
+ size_t len; /* also used for similarity index in help.c */
+ char name[FLEX_ARRAY];
+ } **names;
+};
+
+static inline void mput_char(char c, unsigned int num)
+{
+ while (num--)
+ putchar(c);
+}
+
+void load_command_list(const char *prefix,
+ struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds);
+void add_cmdname(struct cmdnames *cmds, const char *name, size_t len);
+/* Here we require that excludes is a sorted list. */
+void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes);
+int is_in_cmdlist(struct cmdnames *c, const char *s);
+void list_commands(const char *title, struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds);
+
+#endif /* __PERF_HELP_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
new file mode 100644
index 00000000..be22ae6e
--- /dev/null
+++ b/tools/perf/util/hist.c
@@ -0,0 +1,1176 @@
+#include "util.h"
+#include "build-id.h"
+#include "hist.h"
+#include "session.h"
+#include "sort.h"
+#include <math.h>
+
+enum hist_filter {
+ HIST_FILTER__DSO,
+ HIST_FILTER__THREAD,
+ HIST_FILTER__PARENT,
+};
+
+struct callchain_param callchain_param = {
+ .mode = CHAIN_GRAPH_REL,
+ .min_percent = 0.5
+};
+
+u16 hists__col_len(struct hists *self, enum hist_column col)
+{
+ return self->col_len[col];
+}
+
+void hists__set_col_len(struct hists *self, enum hist_column col, u16 len)
+{
+ self->col_len[col] = len;
+}
+
+bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len)
+{
+ if (len > hists__col_len(self, col)) {
+ hists__set_col_len(self, col, len);
+ return true;
+ }
+ return false;
+}
+
+static void hists__reset_col_len(struct hists *self)
+{
+ enum hist_column col;
+
+ for (col = 0; col < HISTC_NR_COLS; ++col)
+ hists__set_col_len(self, col, 0);
+}
+
+static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
+{
+ u16 len;
+
+ if (h->ms.sym)
+ hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);
+
+ len = thread__comm_len(h->thread);
+ if (hists__new_col_len(self, HISTC_COMM, len))
+ hists__set_col_len(self, HISTC_THREAD, len + 6);
+
+ if (h->ms.map) {
+ len = dso__name_len(h->ms.map->dso);
+ hists__new_col_len(self, HISTC_DSO, len);
+ }
+}
+
+static void hist_entry__add_cpumode_period(struct hist_entry *self,
+ unsigned int cpumode, u64 period)
+{
+ switch (cpumode) {
+ case PERF_RECORD_MISC_KERNEL:
+ self->period_sys += period;
+ break;
+ case PERF_RECORD_MISC_USER:
+ self->period_us += period;
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ self->period_guest_sys += period;
+ break;
+ case PERF_RECORD_MISC_GUEST_USER:
+ self->period_guest_us += period;
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * histogram, sorted on item, collects periods
+ */
+
+static struct hist_entry *hist_entry__new(struct hist_entry *template)
+{
+ size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_node) : 0;
+ struct hist_entry *self = malloc(sizeof(*self) + callchain_size);
+
+ if (self != NULL) {
+ *self = *template;
+ self->nr_events = 1;
+ if (self->ms.map)
+ self->ms.map->referenced = true;
+ if (symbol_conf.use_callchain)
+ callchain_init(self->callchain);
+ }
+
+ return self;
+}
+
+static void hists__inc_nr_entries(struct hists *self, struct hist_entry *h)
+{
+ if (!h->filtered) {
+ hists__calc_col_len(self, h);
+ ++self->nr_entries;
+ }
+}
+
+static u8 symbol__parent_filter(const struct symbol *parent)
+{
+ if (symbol_conf.exclude_other && parent == NULL)
+ return 1 << HIST_FILTER__PARENT;
+ return 0;
+}
+
+struct hist_entry *__hists__add_entry(struct hists *self,
+ struct addr_location *al,
+ struct symbol *sym_parent, u64 period)
+{
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *he;
+ struct hist_entry entry = {
+ .thread = al->thread,
+ .ms = {
+ .map = al->map,
+ .sym = al->sym,
+ },
+ .cpu = al->cpu,
+ .ip = al->addr,
+ .level = al->level,
+ .period = period,
+ .parent = sym_parent,
+ .filtered = symbol__parent_filter(sym_parent),
+ };
+ int cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ he = rb_entry(parent, struct hist_entry, rb_node);
+
+ cmp = hist_entry__cmp(&entry, he);
+
+ if (!cmp) {
+ he->period += period;
+ ++he->nr_events;
+ goto out;
+ }
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ he = hist_entry__new(&entry);
+ if (!he)
+ return NULL;
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, &self->entries);
+ hists__inc_nr_entries(self, he);
+out:
+ hist_entry__add_cpumode_period(he, al->cpumode, period);
+ return he;
+}
+
+int64_t
+hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct sort_entry *se;
+ int64_t cmp = 0;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ cmp = se->se_cmp(left, right);
+ if (cmp)
+ break;
+ }
+
+ return cmp;
+}
+
+int64_t
+hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ struct sort_entry *se;
+ int64_t cmp = 0;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ int64_t (*f)(struct hist_entry *, struct hist_entry *);
+
+ f = se->se_collapse ?: se->se_cmp;
+
+ cmp = f(left, right);
+ if (cmp)
+ break;
+ }
+
+ return cmp;
+}
+
+void hist_entry__free(struct hist_entry *he)
+{
+ free(he);
+}
+
+/*
+ * collapse the histogram
+ */
+
+static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+ int64_t cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+
+ cmp = hist_entry__collapse(iter, he);
+
+ if (!cmp) {
+ iter->period += he->period;
+ hist_entry__free(he);
+ return false;
+ }
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, root);
+ return true;
+}
+
+void hists__collapse_resort(struct hists *self)
+{
+ struct rb_root tmp;
+ struct rb_node *next;
+ struct hist_entry *n;
+
+ if (!sort__need_collapse)
+ return;
+
+ tmp = RB_ROOT;
+ next = rb_first(&self->entries);
+ self->nr_entries = 0;
+ hists__reset_col_len(self);
+
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ next = rb_next(&n->rb_node);
+
+ rb_erase(&n->rb_node, &self->entries);
+ if (collapse__insert_entry(&tmp, n))
+ hists__inc_nr_entries(self, n);
+ }
+
+ self->entries = tmp;
+}
+
+/*
+ * reverse the map, sort on period.
+ */
+
+static void __hists__insert_output_entry(struct rb_root *entries,
+ struct hist_entry *he,
+ u64 min_callchain_hits)
+{
+ struct rb_node **p = &entries->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+
+ if (symbol_conf.use_callchain)
+ callchain_param.sort(&he->sorted_chain, he->callchain,
+ min_callchain_hits, &callchain_param);
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+
+ if (he->period > iter->period)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, entries);
+}
+
+void hists__output_resort(struct hists *self)
+{
+ struct rb_root tmp;
+ struct rb_node *next;
+ struct hist_entry *n;
+ u64 min_callchain_hits;
+
+ min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);
+
+ tmp = RB_ROOT;
+ next = rb_first(&self->entries);
+
+ self->nr_entries = 0;
+ hists__reset_col_len(self);
+
+ while (next) {
+ n = rb_entry(next, struct hist_entry, rb_node);
+ next = rb_next(&n->rb_node);
+
+ rb_erase(&n->rb_node, &self->entries);
+ __hists__insert_output_entry(&tmp, n, min_callchain_hits);
+ hists__inc_nr_entries(self, n);
+ }
+
+ self->entries = tmp;
+}
+
+static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
+{
+ int i;
+ int ret = fprintf(fp, " ");
+
+ for (i = 0; i < left_margin; i++)
+ ret += fprintf(fp, " ");
+
+ return ret;
+}
+
+static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
+ int left_margin)
+{
+ int i;
+ size_t ret = callchain__fprintf_left_margin(fp, left_margin);
+
+ for (i = 0; i < depth; i++)
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "| ");
+ else
+ ret += fprintf(fp, " ");
+
+ ret += fprintf(fp, "\n");
+
+ return ret;
+}
+
+static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
+ int depth, int depth_mask, int period,
+ u64 total_samples, int hits,
+ int left_margin)
+{
+ int i;
+ size_t ret = 0;
+
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ for (i = 0; i < depth; i++) {
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "|");
+ else
+ ret += fprintf(fp, " ");
+ if (!period && i == depth - 1) {
+ double percent;
+
+ percent = hits * 100.0 / total_samples;
+ ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
+ } else
+ ret += fprintf(fp, "%s", " ");
+ }
+ if (chain->ms.sym)
+ ret += fprintf(fp, "%s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
+
+ return ret;
+}
+
+static struct symbol *rem_sq_bracket;
+static struct callchain_list rem_hits;
+
+static void init_rem_hits(void)
+{
+ rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
+ if (!rem_sq_bracket) {
+ fprintf(stderr, "Not enough memory to display remaining hits\n");
+ return;
+ }
+
+ strcpy(rem_sq_bracket->name, "[...]");
+ rem_hits.ms.sym = rem_sq_bracket;
+}
+
+static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+ u64 total_samples, int depth,
+ int depth_mask, int left_margin)
+{
+ struct rb_node *node, *next;
+ struct callchain_node *child;
+ struct callchain_list *chain;
+ int new_depth_mask = depth_mask;
+ u64 new_total;
+ u64 remaining;
+ size_t ret = 0;
+ int i;
+ uint entries_printed = 0;
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = self->children_hit;
+ else
+ new_total = total_samples;
+
+ remaining = new_total;
+
+ node = rb_first(&self->rb_root);
+ while (node) {
+ u64 cumul;
+
+ child = rb_entry(node, struct callchain_node, rb_node);
+ cumul = cumul_hits(child);
+ remaining -= cumul;
+
+ /*
+ * The depth mask manages the output of pipes that show
+ * the depth. We don't want to keep the pipes of the current
+ * level for the last child of this depth.
+ * Except if we have remaining filtered hits: they will
+ * supersede the last child.
+ */
+ next = rb_next(node);
+ if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
+ new_depth_mask &= ~(1 << (depth - 1));
+
+ /*
+ * But we keep the older depth mask for the line separator,
+ * to preserve the level link until we reach the last child.
+ */
+ ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
+ left_margin);
+ i = 0;
+ list_for_each_entry(chain, &child->val, list) {
+ ret += ipchain__fprintf_graph(fp, chain, depth,
+ new_depth_mask, i++,
+ new_total,
+ cumul,
+ left_margin);
+ }
+ ret += __callchain__fprintf_graph(fp, child, new_total,
+ depth + 1,
+ new_depth_mask | (1 << depth),
+ left_margin);
+ node = next;
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+ }
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL &&
+ remaining && remaining != new_total) {
+
+ if (!rem_sq_bracket)
+ return ret;
+
+ new_depth_mask &= ~(1 << (depth - 1));
+
+ ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
+ new_depth_mask, 0, new_total,
+ remaining, left_margin);
+ }
+
+ return ret;
+}
+
+static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+ u64 total_samples, int left_margin)
+{
+ struct callchain_list *chain;
+ bool printed = false;
+ int i = 0;
+ int ret = 0;
+ u32 entries_printed = 0;
+
+ list_for_each_entry(chain, &self->val, list) {
+ if (!i++ && sort__first_dimension == SORT_SYM)
+ continue;
+
+ if (!printed) {
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "|\n");
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "---");
+
+ left_margin += 3;
+ printed = true;
+ } else
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+
+ if (chain->ms.sym)
+ ret += fprintf(fp, " %s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
+
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+ }
+
+ ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
+
+ return ret;
+}
+
+static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
+ u64 total_samples)
+{
+ struct callchain_list *chain;
+ size_t ret = 0;
+
+ if (!self)
+ return 0;
+
+ ret += callchain__fprintf_flat(fp, self->parent, total_samples);
+
+ list_for_each_entry(chain, &self->val, list) {
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+ if (chain->ms.sym)
+ ret += fprintf(fp, " %s\n", chain->ms.sym->name);
+ else
+ ret += fprintf(fp, " %p\n",
+ (void *)(long)chain->ip);
+ }
+
+ return ret;
+}
+
+static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
+ u64 total_samples, int left_margin)
+{
+ struct rb_node *rb_node;
+ struct callchain_node *chain;
+ size_t ret = 0;
+ u32 entries_printed = 0;
+
+ rb_node = rb_first(&self->sorted_chain);
+ while (rb_node) {
+ double percent;
+
+ chain = rb_entry(rb_node, struct callchain_node, rb_node);
+ percent = chain->hit * 100.0 / total_samples;
+ switch (callchain_param.mode) {
+ case CHAIN_FLAT:
+ ret += percent_color_fprintf(fp, " %6.2f%%\n",
+ percent);
+ ret += callchain__fprintf_flat(fp, chain, total_samples);
+ break;
+ case CHAIN_GRAPH_ABS: /* Fall through */
+ case CHAIN_GRAPH_REL:
+ ret += callchain__fprintf_graph(fp, chain, total_samples,
+ left_margin);
+ case CHAIN_NONE:
+ default:
+ break;
+ }
+ ret += fprintf(fp, "\n");
+ if (++entries_printed == callchain_param.print_limit)
+ break;
+ rb_node = rb_next(rb_node);
+ }
+
+ return ret;
+}
+
+int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
+ struct hists *hists, struct hists *pair_hists,
+ bool show_displacement, long displacement,
+ bool color, u64 session_total)
+{
+ struct sort_entry *se;
+ u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
+ const char *sep = symbol_conf.field_sep;
+ int ret;
+
+ if (symbol_conf.exclude_other && !self->parent)
+ return 0;
+
+ if (pair_hists) {
+ period = self->pair ? self->pair->period : 0;
+ total = pair_hists->stats.total_period;
+ period_sys = self->pair ? self->pair->period_sys : 0;
+ period_us = self->pair ? self->pair->period_us : 0;
+ period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
+ period_guest_us = self->pair ? self->pair->period_guest_us : 0;
+ } else {
+ period = self->period;
+ total = session_total;
+ period_sys = self->period_sys;
+ period_us = self->period_us;
+ period_guest_sys = self->period_guest_sys;
+ period_guest_us = self->period_guest_us;
+ }
+
+ if (total) {
+ if (color)
+ ret = percent_color_snprintf(s, size,
+ sep ? "%.2f" : " %6.2f%%",
+ (period * 100.0) / total);
+ else
+ ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
+ (period * 100.0) / total);
+ if (symbol_conf.show_cpu_utilization) {
+ ret += percent_color_snprintf(s + ret, size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (period_sys * 100.0) / total);
+ ret += percent_color_snprintf(s + ret, size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (period_us * 100.0) / total);
+ if (perf_guest) {
+ ret += percent_color_snprintf(s + ret,
+ size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (period_guest_sys * 100.0) /
+ total);
+ ret += percent_color_snprintf(s + ret,
+ size - ret,
+ sep ? "%.2f" : " %6.2f%%",
+ (period_guest_us * 100.0) /
+ total);
+ }
+ }
+ } else
+ ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);
+
+ if (symbol_conf.show_nr_samples) {
+ if (sep)
+ ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
+ else
+ ret += snprintf(s + ret, size - ret, "%11lld", period);
+ }
+
+ if (pair_hists) {
+ char bf[32];
+ double old_percent = 0, new_percent = 0, diff;
+
+ if (total > 0)
+ old_percent = (period * 100.0) / total;
+ if (session_total > 0)
+ new_percent = (self->period * 100.0) / session_total;
+
+ diff = new_percent - old_percent;
+
+ if (fabs(diff) >= 0.01)
+ snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
+ else
+ snprintf(bf, sizeof(bf), " ");
+
+ if (sep)
+ ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
+ else
+ ret += snprintf(s + ret, size - ret, "%11.11s", bf);
+
+ if (show_displacement) {
+ if (displacement)
+ snprintf(bf, sizeof(bf), "%+4ld", displacement);
+ else
+ snprintf(bf, sizeof(bf), " ");
+
+ if (sep)
+ ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
+ else
+ ret += snprintf(s + ret, size - ret, "%6.6s", bf);
+ }
+ }
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+
+ ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
+ ret += se->se_snprintf(self, s + ret, size - ret,
+ hists__col_len(hists, se->se_width_idx));
+ }
+
+ return ret;
+}
+
+int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
+ struct hists *pair_hists, bool show_displacement,
+ long displacement, FILE *fp, u64 session_total)
+{
+ char bf[512];
+ hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists,
+ show_displacement, displacement,
+ true, session_total);
+ return fprintf(fp, "%s\n", bf);
+}
+
+static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
+ struct hists *hists, FILE *fp,
+ u64 session_total)
+{
+ int left_margin = 0;
+
+ if (sort__first_dimension == SORT_COMM) {
+ struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
+ typeof(*se), list);
+ left_margin = hists__col_len(hists, se->se_width_idx);
+ left_margin -= thread__comm_len(self->thread);
+ }
+
+ return hist_entry_callchain__fprintf(fp, self, session_total,
+ left_margin);
+}
+
+size_t hists__fprintf(struct hists *self, struct hists *pair,
+ bool show_displacement, FILE *fp)
+{
+ struct sort_entry *se;
+ struct rb_node *nd;
+ size_t ret = 0;
+ unsigned long position = 1;
+ long displacement = 0;
+ unsigned int width;
+ const char *sep = symbol_conf.field_sep;
+ const char *col_width = symbol_conf.col_width_list_str;
+
+ init_rem_hits();
+
+ fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");
+
+ if (symbol_conf.show_nr_samples) {
+ if (sep)
+ fprintf(fp, "%cSamples", *sep);
+ else
+ fputs(" Samples ", fp);
+ }
+
+ if (symbol_conf.show_cpu_utilization) {
+ if (sep) {
+ ret += fprintf(fp, "%csys", *sep);
+ ret += fprintf(fp, "%cus", *sep);
+ if (perf_guest) {
+ ret += fprintf(fp, "%cguest sys", *sep);
+ ret += fprintf(fp, "%cguest us", *sep);
+ }
+ } else {
+ ret += fprintf(fp, " sys ");
+ ret += fprintf(fp, " us ");
+ if (perf_guest) {
+ ret += fprintf(fp, " guest sys ");
+ ret += fprintf(fp, " guest us ");
+ }
+ }
+ }
+
+ if (pair) {
+ if (sep)
+ ret += fprintf(fp, "%cDelta", *sep);
+ else
+ ret += fprintf(fp, " Delta ");
+
+ if (show_displacement) {
+ if (sep)
+ ret += fprintf(fp, "%cDisplacement", *sep);
+ else
+ ret += fprintf(fp, " Displ");
+ }
+ }
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+ if (sep) {
+ fprintf(fp, "%c%s", *sep, se->se_header);
+ continue;
+ }
+ width = strlen(se->se_header);
+ if (symbol_conf.col_width_list_str) {
+ if (col_width) {
+ hists__set_col_len(self, se->se_width_idx,
+ atoi(col_width));
+ col_width = strchr(col_width, ',');
+ if (col_width)
+ ++col_width;
+ }
+ }
+ if (!hists__new_col_len(self, se->se_width_idx, width))
+ width = hists__col_len(self, se->se_width_idx);
+ fprintf(fp, " %*s", width, se->se_header);
+ }
+ fprintf(fp, "\n");
+
+ if (sep)
+ goto print_entries;
+
+ fprintf(fp, "# ........");
+ if (symbol_conf.show_nr_samples)
+ fprintf(fp, " ..........");
+ if (pair) {
+ fprintf(fp, " ..........");
+ if (show_displacement)
+ fprintf(fp, " .....");
+ }
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ unsigned int i;
+
+ if (se->elide)
+ continue;
+
+ fprintf(fp, " ");
+ width = hists__col_len(self, se->se_width_idx);
+ if (width == 0)
+ width = strlen(se->se_header);
+ for (i = 0; i < width; i++)
+ fprintf(fp, ".");
+ }
+
+ fprintf(fp, "\n#\n");
+
+print_entries:
+ for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (show_displacement) {
+ if (h->pair != NULL)
+ displacement = ((long)h->pair->position -
+ (long)position);
+ else
+ displacement = 0;
+ ++position;
+ }
+ ret += hist_entry__fprintf(h, self, pair, show_displacement,
+ displacement, fp, self->stats.total_period);
+
+ if (symbol_conf.use_callchain)
+ ret += hist_entry__fprintf_callchain(h, self, fp,
+ self->stats.total_period);
+ if (h->ms.map == NULL && verbose > 1) {
+ __map_groups__fprintf_maps(&h->thread->mg,
+ MAP__FUNCTION, verbose, fp);
+ fprintf(fp, "%.10s end\n", graph_dotted_line);
+ }
+ }
+
+ free(rem_sq_bracket);
+
+ return ret;
+}
+
+/*
+ * See hists__fprintf to match the column widths
+ */
+unsigned int hists__sort_list_width(struct hists *self)
+{
+ struct sort_entry *se;
+ int ret = 9; /* total % */
+
+ if (symbol_conf.show_cpu_utilization) {
+ ret += 7; /* count_sys % */
+ ret += 6; /* count_us % */
+ if (perf_guest) {
+ ret += 13; /* count_guest_sys % */
+ ret += 12; /* count_guest_us % */
+ }
+ }
+
+ if (symbol_conf.show_nr_samples)
+ ret += 11;
+
+ list_for_each_entry(se, &hist_entry__sort_list, list)
+ if (!se->elide)
+ ret += 2 + hists__col_len(self, se->se_width_idx);
+
+ if (verbose) /* Addr + origin */
+ ret += 3 + BITS_PER_LONG / 4;
+
+ return ret;
+}
+
+static void hists__remove_entry_filter(struct hists *self, struct hist_entry *h,
+ enum hist_filter filter)
+{
+ h->filtered &= ~(1 << filter);
+ if (h->filtered)
+ return;
+
+ ++self->nr_entries;
+ if (h->ms.unfolded)
+ self->nr_entries += h->nr_rows;
+ h->row_offset = 0;
+ self->stats.total_period += h->period;
+ self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
+
+ hists__calc_col_len(self, h);
+}
+
+void hists__filter_by_dso(struct hists *self, const struct dso *dso)
+{
+ struct rb_node *nd;
+
+ self->nr_entries = self->stats.total_period = 0;
+ self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
+ hists__reset_col_len(self);
+
+ for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (symbol_conf.exclude_other && !h->parent)
+ continue;
+
+ if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
+ h->filtered |= (1 << HIST_FILTER__DSO);
+ continue;
+ }
+
+ hists__remove_entry_filter(self, h, HIST_FILTER__DSO);
+ }
+}
+
+void hists__filter_by_thread(struct hists *self, const struct thread *thread)
+{
+ struct rb_node *nd;
+
+ self->nr_entries = self->stats.total_period = 0;
+ self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
+ hists__reset_col_len(self);
+
+ for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (thread != NULL && h->thread != thread) {
+ h->filtered |= (1 << HIST_FILTER__THREAD);
+ continue;
+ }
+
+ hists__remove_entry_filter(self, h, HIST_FILTER__THREAD);
+ }
+}
+
+static int symbol__alloc_hist(struct symbol *self)
+{
+ struct sym_priv *priv = symbol__priv(self);
+ const int size = (sizeof(*priv->hist) +
+ (self->end - self->start) * sizeof(u64));
+
+ priv->hist = zalloc(size);
+ return priv->hist == NULL ? -1 : 0;
+}
+
+int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
+{
+ unsigned int sym_size, offset;
+ struct symbol *sym = self->ms.sym;
+ struct sym_priv *priv;
+ struct sym_hist *h;
+
+ if (!sym || !self->ms.map)
+ return 0;
+
+ priv = symbol__priv(sym);
+ if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
+ return -ENOMEM;
+
+ sym_size = sym->end - sym->start;
+ offset = ip - sym->start;
+
+ pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
+
+ if (offset >= sym_size)
+ return 0;
+
+ h = priv->hist;
+ h->sum++;
+ h->ip[offset]++;
+
+ pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
+ self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
+ return 0;
+}
+
+static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
+{
+ struct objdump_line *self = malloc(sizeof(*self) + privsize);
+
+ if (self != NULL) {
+ self->offset = offset;
+ self->line = line;
+ }
+
+ return self;
+}
+
+void objdump_line__free(struct objdump_line *self)
+{
+ free(self->line);
+ free(self);
+}
+
+static void objdump__add_line(struct list_head *head, struct objdump_line *line)
+{
+ list_add_tail(&line->node, head);
+}
+
+struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
+ struct objdump_line *pos)
+{
+ list_for_each_entry_continue(pos, head, node)
+ if (pos->offset >= 0)
+ return pos;
+
+ return NULL;
+}
+
+static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
+ struct list_head *head, size_t privsize)
+{
+ struct symbol *sym = self->ms.sym;
+ struct objdump_line *objdump_line;
+ char *line = NULL, *tmp, *tmp2, *c;
+ size_t line_len;
+ s64 line_ip, offset = -1;
+
+ if (getline(&line, &line_len, file) < 0)
+ return -1;
+
+ if (!line)
+ return -1;
+
+ while (line_len != 0 && isspace(line[line_len - 1]))
+ line[--line_len] = '\0';
+
+ c = strchr(line, '\n');
+ if (c)
+ *c = 0;
+
+ line_ip = -1;
+
+ /*
+ * Strip leading spaces:
+ */
+ tmp = line;
+ while (*tmp) {
+ if (*tmp != ' ')
+ break;
+ tmp++;
+ }
+
+ if (*tmp) {
+ /*
+ * Parse hex addresses followed by ':'
+ */
+ line_ip = strtoull(tmp, &tmp2, 16);
+ if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
+ line_ip = -1;
+ }
+
+ if (line_ip != -1) {
+ u64 start = map__rip_2objdump(self->ms.map, sym->start),
+ end = map__rip_2objdump(self->ms.map, sym->end);
+
+ offset = line_ip - start;
+ if (offset < 0 || (u64)line_ip > end)
+ offset = -1;
+ }
+
+ objdump_line = objdump_line__new(offset, line, privsize);
+ if (objdump_line == NULL) {
+ free(line);
+ return -1;
+ }
+ objdump__add_line(head, objdump_line);
+
+ return 0;
+}
+
+int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
+ size_t privsize)
+{
+ struct symbol *sym = self->ms.sym;
+ struct map *map = self->ms.map;
+ struct dso *dso = map->dso;
+ char *filename = dso__build_id_filename(dso, NULL, 0);
+ bool free_filename = true;
+ char command[PATH_MAX * 2];
+ FILE *file;
+ int err = 0;
+ u64 len;
+
+ if (filename == NULL) {
+ if (dso->has_build_id) {
+ pr_err("Can't annotate %s: not enough memory\n",
+ sym->name);
+ return -ENOMEM;
+ }
+ goto fallback;
+ } else if (readlink(filename, command, sizeof(command)) < 0 ||
+ strstr(command, "[kernel.kallsyms]") ||
+ access(filename, R_OK)) {
+ free(filename);
+fallback:
+ /*
+ * If we don't have build-ids or the build-id file isn't in the
+ * cache, or is just a kallsyms file, well, let's hope that this
+ * DSO is the same as when 'perf record' ran.
+ */
+ filename = dso->long_name;
+ free_filename = false;
+ }
+
+ if (dso->origin == DSO__ORIG_KERNEL) {
+ if (dso->annotate_warned)
+ goto out_free_filename;
+ err = -ENOENT;
+ dso->annotate_warned = 1;
+ pr_err("Can't annotate %s: No vmlinux file was found in the "
+ "path\n", sym->name);
+ goto out_free_filename;
+ }
+
+ pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
+ filename, sym->name, map->unmap_ip(map, sym->start),
+ map->unmap_ip(map, sym->end));
+
+ len = sym->end - sym->start;
+
+ pr_debug("annotating [%p] %30s : [%p] %30s\n",
+ dso, dso->long_name, sym, sym->name);
+
+ snprintf(command, sizeof(command),
+ "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand",
+ map__rip_2objdump(map, sym->start),
+ map__rip_2objdump(map, sym->end),
+ filename, filename);
+
+ pr_debug("Executing: %s\n", command);
+
+ file = popen(command, "r");
+ if (!file)
+ goto out_free_filename;
+
+ while (!feof(file))
+ if (hist_entry__parse_objdump_line(self, file, head, privsize) < 0)
+ break;
+
+ pclose(file);
+out_free_filename:
+ if (free_filename)
+ free(filename);
+ return err;
+}
+
+void hists__inc_nr_events(struct hists *self, u32 type)
+{
+ ++self->stats.nr_events[0];
+ ++self->stats.nr_events[type];
+}
+
+size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
+{
+ int i;
+ size_t ret = 0;
+
+ for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
+ if (!event__name[i])
+ continue;
+ ret += fprintf(fp, "%10s events: %10d\n",
+ event__name[i], self->stats.nr_events[i]);
+ }
+
+ return ret;
+}
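Taken together, hist.c implements the pipeline the section comments above describe: add entries keyed by the sort order, collapse entries that compare equal, re-sort by period, then print. A hedged sketch of how a report-style caller might drive it follows; the perf-tree context is assumed, the addr_location is taken as already resolved by the event layer, and process_sample()/print_report() are hypothetical names.

/* Sketch only: one plausible way to feed and print a struct hists. */
static int process_sample(struct hists *hists, struct addr_location *al,
			  u64 period)
{
	struct hist_entry *he = __hists__add_entry(hists, al, NULL, period);

	if (he == NULL)
		return -ENOMEM;

	/* Callers are assumed to account the period and the sample count. */
	hists->stats.total_period += period;
	hists__inc_nr_events(hists, PERF_RECORD_SAMPLE);
	return 0;
}

static void print_report(struct hists *hists, FILE *fp)
{
	hists__collapse_resort(hists);	/* merge entries that compare equal */
	hists__output_resort(hists);	/* sort by period, descending */
	hists__fprintf(hists, NULL, false, fp);
	hists__fprintf_nr_events(hists, fp);
}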
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
new file mode 100644
index 00000000..587d375d
--- /dev/null
+++ b/tools/perf/util/hist.h
@@ -0,0 +1,148 @@
+#ifndef __PERF_HIST_H
+#define __PERF_HIST_H
+
+#include <linux/types.h>
+#include "callchain.h"
+
+extern struct callchain_param callchain_param;
+
+struct hist_entry;
+struct addr_location;
+struct symbol;
+struct rb_root;
+
+struct objdump_line {
+ struct list_head node;
+ s64 offset;
+ char *line;
+};
+
+void objdump_line__free(struct objdump_line *self);
+struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
+ struct objdump_line *pos);
+
+struct sym_hist {
+ u64 sum;
+ u64 ip[0];
+};
+
+struct sym_ext {
+ struct rb_node node;
+ double percent;
+ char *path;
+};
+
+struct sym_priv {
+ struct sym_hist *hist;
+ struct sym_ext *ext;
+};
+
+/*
+ * The kernel collects the number of events it couldn't send in a stretch and
+ * when possible sends this number in a PERF_RECORD_LOST event. The number of
+ * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
+ * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
+ * the sum of all struct lost_event.lost fields reported.
+ *
+ * The total_period is needed because by default auto-freq is used, so
+ * multiplying nr_events[PERF_RECORD_SAMPLE] by a fixed frequency can't give
+ * the total number of low level events; instead it is necessary to sum all
+ * struct sample_event.period fields and stash the result in total_period.
+ */
+struct events_stats {
+ u64 total_period;
+ u64 total_lost;
+ u32 nr_events[PERF_RECORD_HEADER_MAX];
+ u32 nr_unknown_events;
+};
+
+enum hist_column {
+ HISTC_SYMBOL,
+ HISTC_DSO,
+ HISTC_THREAD,
+ HISTC_COMM,
+ HISTC_PARENT,
+ HISTC_CPU,
+ HISTC_NR_COLS, /* Last entry */
+};
+
+struct hists {
+ struct rb_node rb_node;
+ struct rb_root entries;
+ u64 nr_entries;
+ struct events_stats stats;
+ u64 config;
+ u64 event_stream;
+ u32 type;
+ u16 col_len[HISTC_NR_COLS];
+};
+
+struct hist_entry *__hists__add_entry(struct hists *self,
+ struct addr_location *al,
+ struct symbol *parent, u64 period);
+extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
+int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
+ struct hists *pair_hists, bool show_displacement,
+ long displacement, FILE *fp, u64 total);
+int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size,
+ struct hists *hists, struct hists *pair_hists,
+ bool show_displacement, long displacement,
+ bool color, u64 total);
+void hist_entry__free(struct hist_entry *);
+
+void hists__output_resort(struct hists *self);
+void hists__collapse_resort(struct hists *self);
+
+void hists__inc_nr_events(struct hists *self, u32 type);
+size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);
+
+size_t hists__fprintf(struct hists *self, struct hists *pair,
+ bool show_displacement, FILE *fp);
+
+int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip);
+int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
+ size_t privsize);
+
+void hists__filter_by_dso(struct hists *self, const struct dso *dso);
+void hists__filter_by_thread(struct hists *self, const struct thread *thread);
+
+u16 hists__col_len(struct hists *self, enum hist_column col);
+void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
+bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len);
+
+#ifdef NO_NEWT_SUPPORT
+static inline int hists__browse(struct hists *self __used,
+ const char *helpline __used,
+ const char *ev_name __used)
+{
+ return 0;
+}
+
+static inline int hists__tui_browse_tree(struct rb_root *self __used,
+ const char *help __used)
+{
+ return 0;
+}
+
+static inline int hist_entry__tui_annotate(struct hist_entry *self __used)
+{
+ return 0;
+}
+#define KEY_LEFT -1
+#define KEY_RIGHT -2
+#else
+#include <newt.h>
+int hists__browse(struct hists *self, const char *helpline,
+ const char *ev_name);
+int hist_entry__tui_annotate(struct hist_entry *self);
+
+#define KEY_LEFT NEWT_KEY_LEFT
+#define KEY_RIGHT NEWT_KEY_RIGHT
+
+int hists__tui_browse_tree(struct rb_root *self, const char *help);
+#endif
+
+unsigned int hists__sort_list_width(struct hists *self);
+
+#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/hweight.c b/tools/perf/util/hweight.c
new file mode 100644
index 00000000..5c1d0d09
--- /dev/null
+++ b/tools/perf/util/hweight.c
@@ -0,0 +1,31 @@
+#include <linux/bitops.h>
+
+/**
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+unsigned int hweight32(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x55555555);
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+}
+
+unsigned long hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+ return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+ __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+ res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+#endif
+}
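A small standalone check of the two population-count helpers (a sketch, assuming the perf util/include search path and linkage against util/hweight.o):

#include <stdio.h>
#include <asm/hweight.h>

int main(void)
{
	printf("%u\n", hweight32(0x0000000F));			/* 4 */
	printf("%u\n", hweight32(0xF0F0F0F0));			/* 16 */
	printf("%lu\n", hweight64(0xFFFFFFFFFFFFFFFFULL));	/* 64 */
	return 0;
}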
diff --git a/tools/perf/util/include/asm/asm-offsets.h b/tools/perf/util/include/asm/asm-offsets.h
new file mode 100644
index 00000000..ed538942
--- /dev/null
+++ b/tools/perf/util/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+/* stub */
diff --git a/tools/perf/util/include/asm/bug.h b/tools/perf/util/include/asm/bug.h
new file mode 100644
index 00000000..7fcc6810
--- /dev/null
+++ b/tools/perf/util/include/asm/bug.h
@@ -0,0 +1,22 @@
+#ifndef _PERF_ASM_GENERIC_BUG_H
+#define _PERF_ASM_GENERIC_BUG_H
+
+#define __WARN_printf(arg...) do { fprintf(stderr, arg); } while (0)
+
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf(format); \
+ unlikely(__ret_warn_on); \
+})
+
+#define WARN_ONCE(condition, format...) ({ \
+ static int __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once)) \
+ if (WARN(!__warned, format)) \
+ __warned = 1; \
+ unlikely(__ret_warn_once); \
+})
+#endif
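These mirror the kernel's WARN()/WARN_ONCE() but report to stderr and evaluate to the truth value of the condition, so they can guard an early return. A usage sketch; unlikely() is normally provided elsewhere in the perf headers, so the fallback define below is an assumption for a standalone build:

#include <stdio.h>

#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)	/* assumed; normally from perf.h */
#endif

#include <asm/bug.h>

static int read_config(int fd)
{
	if (WARN(fd < 0, "bad config fd: %d\n", fd))
		return -1;

	WARN_ONCE(fd > 1024, "suspiciously large fd: %d\n", fd);	/* prints once */
	return 0;
}

int main(void)
{
	return read_config(-1);
}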
diff --git a/tools/perf/util/include/asm/byteorder.h b/tools/perf/util/include/asm/byteorder.h
new file mode 100644
index 00000000..b722abe3
--- /dev/null
+++ b/tools/perf/util/include/asm/byteorder.h
@@ -0,0 +1,2 @@
+#include <asm/types.h>
+#include "../../../../include/linux/swab.h"
diff --git a/tools/perf/util/include/asm/hweight.h b/tools/perf/util/include/asm/hweight.h
new file mode 100644
index 00000000..36cf26d4
--- /dev/null
+++ b/tools/perf/util/include/asm/hweight.h
@@ -0,0 +1,8 @@
+#ifndef PERF_HWEIGHT_H
+#define PERF_HWEIGHT_H
+
+#include <linux/types.h>
+unsigned int hweight32(unsigned int w);
+unsigned long hweight64(__u64 w);
+
+#endif /* PERF_HWEIGHT_H */
diff --git a/tools/perf/util/include/asm/swab.h b/tools/perf/util/include/asm/swab.h
new file mode 100644
index 00000000..ed538942
--- /dev/null
+++ b/tools/perf/util/include/asm/swab.h
@@ -0,0 +1 @@
+/* stub */
diff --git a/tools/perf/util/include/asm/system.h b/tools/perf/util/include/asm/system.h
new file mode 100644
index 00000000..710cecca
--- /dev/null
+++ b/tools/perf/util/include/asm/system.h
@@ -0,0 +1 @@
+/* Empty */
diff --git a/tools/perf/util/include/asm/uaccess.h b/tools/perf/util/include/asm/uaccess.h
new file mode 100644
index 00000000..d0f72b8f
--- /dev/null
+++ b/tools/perf/util/include/asm/uaccess.h
@@ -0,0 +1,14 @@
+#ifndef _PERF_ASM_UACCESS_H_
+#define _PERF_ASM_UACCESS_H_
+
+#define __get_user(src, dest) \
+({ \
+ (src) = *dest; \
+ 0; \
+})
+
+#define get_user __get_user
+
+#define access_ok(type, addr, size) 1
+
+#endif
diff --git a/tools/perf/util/include/dwarf-regs.h b/tools/perf/util/include/dwarf-regs.h
new file mode 100644
index 00000000..cf6727e9
--- /dev/null
+++ b/tools/perf/util/include/dwarf-regs.h
@@ -0,0 +1,8 @@
+#ifndef _PERF_DWARF_REGS_H_
+#define _PERF_DWARF_REGS_H_
+
+#ifdef DWARF_SUPPORT
+const char *get_arch_regstr(unsigned int n);
+#endif
+
+#endif
diff --git a/tools/perf/util/include/linux/bitmap.h b/tools/perf/util/include/linux/bitmap.h
new file mode 100644
index 00000000..eda4416e
--- /dev/null
+++ b/tools/perf/util/include/linux/bitmap.h
@@ -0,0 +1,35 @@
+#ifndef _PERF_BITOPS_H
+#define _PERF_BITOPS_H
+
+#include <string.h>
+#include <linux/bitops.h>
+
+int __bitmap_weight(const unsigned long *bitmap, int bits);
+
+#define BITMAP_LAST_WORD_MASK(nbits) \
+( \
+ ((nbits) % BITS_PER_LONG) ? \
+ (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
+)
+
+#define small_const_nbits(nbits) \
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+
+static inline void bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+
+static inline int bitmap_weight(const unsigned long *src, int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight(src, nbits);
+}
+
+#endif /* _PERF_BITOPS_H */
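A sketch combining these helpers with DECLARE_BITMAP() (from the linux/types.h stub added later in this patch) and set_bit() (from linux/bitops.h); the perf util/include search path is assumed, as is util/bitmap.o providing __bitmap_weight() for the non-constant case:

#include <stdio.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>

int main(void)
{
	DECLARE_BITMAP(cpus, 64);

	bitmap_zero(cpus, 64);
	set_bit(3, cpus);
	set_bit(11, cpus);
	set_bit(42, cpus);

	printf("%d cpus set\n", bitmap_weight(cpus, 64));	/* 3 cpus set */
	return 0;
}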
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
new file mode 100644
index 00000000..bb4ac2e0
--- /dev/null
+++ b/tools/perf/util/include/linux/bitops.h
@@ -0,0 +1,27 @@
+#ifndef _PERF_LINUX_BITOPS_H_
+#define _PERF_LINUX_BITOPS_H_
+
+#include <linux/kernel.h>
+#include <asm/hweight.h>
+
+#define BITS_PER_LONG __WORDSIZE
+#define BITS_PER_BYTE 8
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+
+static inline void set_bit(int nr, unsigned long *addr)
+{
+ addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
+}
+
+static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
+{
+ return ((1UL << (nr % BITS_PER_LONG)) &
+ (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
+}
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+ return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
+
+#endif
diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
new file mode 100644
index 00000000..791f9dd2
--- /dev/null
+++ b/tools/perf/util/include/linux/compiler.h
@@ -0,0 +1,12 @@
+#ifndef _PERF_LINUX_COMPILER_H_
+#define _PERF_LINUX_COMPILER_H_
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
+#define __user
+#define __attribute_const__
+
+#define __used __attribute__((__unused__))
+
+#endif
diff --git a/tools/perf/util/include/linux/ctype.h b/tools/perf/util/include/linux/ctype.h
new file mode 100644
index 00000000..a53d4ee1
--- /dev/null
+++ b/tools/perf/util/include/linux/ctype.h
@@ -0,0 +1 @@
+#include "../util.h"
diff --git a/tools/perf/util/include/linux/hash.h b/tools/perf/util/include/linux/hash.h
new file mode 100644
index 00000000..201f5739
--- /dev/null
+++ b/tools/perf/util/include/linux/hash.h
@@ -0,0 +1,5 @@
+#include "../../../../include/linux/hash.h"
+
+#ifndef PERF_HASH_H
+#define PERF_HASH_H
+#endif
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
new file mode 100644
index 00000000..1eb804fd
--- /dev/null
+++ b/tools/perf/util/include/linux/kernel.h
@@ -0,0 +1,111 @@
+#ifndef PERF_LINUX_KERNEL_H_
+#define PERF_LINUX_KERNEL_H_
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
+#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+#ifndef container_of
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof(((type *)0)->member) * __mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+#endif
+
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+
+#ifndef max
+#define max(x, y) ({ \
+ typeof(x) _max1 = (x); \
+ typeof(y) _max2 = (y); \
+ (void) (&_max1 == &_max2); \
+ _max1 > _max2 ? _max1 : _max2; })
+#endif
+
+#ifndef min
+#define min(x, y) ({ \
+ typeof(x) _min1 = (x); \
+ typeof(y) _min2 = (y); \
+ (void) (&_min1 == &_min2); \
+ _min1 < _min2 ? _min1 : _min2; })
+#endif
+
+#ifndef BUG_ON
+#define BUG_ON(cond) assert(!(cond))
+#endif
+
+/*
+ * Both need more care to handle endianness
+ * (Don't use bitmap_copy_le() for now)
+ */
+#define cpu_to_le64(x) (x)
+#define cpu_to_le32(x) (x)
+
+static inline int
+vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ int i;
+ ssize_t ssize = size;
+
+ i = vsnprintf(buf, size, fmt, args);
+
+ return (i >= ssize) ? (ssize - 1) : i;
+}
+
+static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
+{
+ va_list args;
+ ssize_t ssize = size;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ return (i >= ssize) ? (ssize - 1) : i;
+}
+
+static inline unsigned long
+simple_strtoul(const char *nptr, char **endptr, int base)
+{
+ return strtoul(nptr, endptr, base);
+}
+
+int eprintf(int level,
+ const char *fmt, ...) __attribute__((format(printf, 2, 3)));
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_err(fmt, ...) \
+ eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+ eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+ eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug(fmt, ...) \
+ eprintf(1, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debugN(n, fmt, ...) \
+ eprintf(n, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
+
+#endif
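Unlike plain snprintf(), the scnprintf()/vscnprintf() wrappers above clamp their return value to what actually fits in the buffer, which makes them safe for building a string piecewise at an advancing offset. A short sketch (the perf util/include search path is assumed):

#include <stdio.h>
#include <linux/kernel.h>

int main(void)
{
	char buf[8];
	int len = 0;

	len += scnprintf(buf + len, sizeof(buf) - len, "perf ");
	len += scnprintf(buf + len, sizeof(buf) - len, "top");	/* truncated */

	printf("'%s' (len=%d)\n", buf, len);	/* 'perf to' (len=7) */
	return 0;
}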
diff --git a/tools/perf/util/include/linux/list.h b/tools/perf/util/include/linux/list.h
new file mode 100644
index 00000000..f5ca26e5
--- /dev/null
+++ b/tools/perf/util/include/linux/list.h
@@ -0,0 +1,26 @@
+#include "../../../../include/linux/list.h"
+
+#ifndef PERF_LIST_H
+#define PERF_LIST_H
+/**
+ * list_del_range - deletes range of entries from list.
+ * @begin: first element in the range to delete from the list.
+ * @end: last element in the range to delete from the list.
+ * Note: list_empty on the range of entries does not return true after this,
+ * the entries are in an undefined state.
+ */
+static inline void list_del_range(struct list_head *begin,
+ struct list_head *end)
+{
+ begin->prev->next = end->next;
+ end->next->prev = begin->prev;
+}
+
+/**
+ * list_for_each_from - iterate over a list from one of its nodes
+ * @pos: the &struct list_head to use as a loop cursor, from where to start
+ * @head: the head for your list.
+ */
+#define list_for_each_from(pos, head) \
+ for (; prefetch(pos->next), pos != (head); pos = pos->next)
+#endif
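list_del_range() unlinks a whole run of nodes in O(1); as the note above says, the removed nodes themselves are left in an undefined state. A sketch, assuming the perf util/include search path so that linux/list.h and the stub headers it pulls in (prefetch.h, poison.h) all resolve:

#include <stdio.h>
#include <linux/kernel.h>
#include <linux/list.h>

struct item {
	struct list_head node;
	int val;
};

int main(void)
{
	LIST_HEAD(head);
	struct item items[4] = { { .val = 0 }, { .val = 1 },
				 { .val = 2 }, { .val = 3 } };
	struct item *pos;
	int i;

	for (i = 0; i < 4; i++)
		list_add_tail(&items[i].node, &head);

	/* Drop items 1..2 from the list in constant time. */
	list_del_range(&items[1].node, &items[2].node);

	list_for_each_entry(pos, &head, node)
		printf("%d ", pos->val);	/* prints: 0 3 */
	printf("\n");
	return 0;
}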
diff --git a/tools/perf/util/include/linux/module.h b/tools/perf/util/include/linux/module.h
new file mode 100644
index 00000000..b43e2dc2
--- /dev/null
+++ b/tools/perf/util/include/linux/module.h
@@ -0,0 +1,6 @@
+#ifndef PERF_LINUX_MODULE_H
+#define PERF_LINUX_MODULE_H
+
+#define EXPORT_SYMBOL(name)
+
+#endif
diff --git a/tools/perf/util/include/linux/poison.h b/tools/perf/util/include/linux/poison.h
new file mode 100644
index 00000000..fef6dbc9
--- /dev/null
+++ b/tools/perf/util/include/linux/poison.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/poison.h"
diff --git a/tools/perf/util/include/linux/prefetch.h b/tools/perf/util/include/linux/prefetch.h
new file mode 100644
index 00000000..7841e485
--- /dev/null
+++ b/tools/perf/util/include/linux/prefetch.h
@@ -0,0 +1,6 @@
+#ifndef PERF_LINUX_PREFETCH_H
+#define PERF_LINUX_PREFETCH_H
+
+static inline void prefetch(void *a __attribute__((unused))) { }
+
+#endif
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h
new file mode 100644
index 00000000..7a243a14
--- /dev/null
+++ b/tools/perf/util/include/linux/rbtree.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/rbtree.h"
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h
new file mode 100644
index 00000000..3b2f5900
--- /dev/null
+++ b/tools/perf/util/include/linux/string.h
@@ -0,0 +1 @@
+#include <string.h>
diff --git a/tools/perf/util/include/linux/types.h b/tools/perf/util/include/linux/types.h
new file mode 100644
index 00000000..12de3b81
--- /dev/null
+++ b/tools/perf/util/include/linux/types.h
@@ -0,0 +1,21 @@
+#ifndef _PERF_LINUX_TYPES_H_
+#define _PERF_LINUX_TYPES_H_
+
+#include <asm/types.h>
+
+#define DECLARE_BITMAP(name,bits) \
+ unsigned long name[BITS_TO_LONGS(bits)]
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+#endif
diff --git a/tools/perf/util/levenshtein.c b/tools/perf/util/levenshtein.c
new file mode 100644
index 00000000..e521d151
--- /dev/null
+++ b/tools/perf/util/levenshtein.c
@@ -0,0 +1,84 @@
+#include "cache.h"
+#include "levenshtein.h"
+
+/*
+ * This function implements the Damerau-Levenshtein algorithm to
+ * calculate a distance between strings.
+ *
+ * Basically, it says how many letters need to be swapped, substituted,
+ * deleted from, or added to string1, at least, to get string2.
+ *
+ * The idea is to build a distance matrix for the substrings of both
+ * strings. To avoid a large space complexity, only the last three rows
+ * are kept in memory (if swaps had the same or higher cost as one deletion
+ * plus one insertion, only two rows would be needed).
+ *
+ * At any stage, "i + 1" denotes the length of the current substring of
+ * string1 that the distance is calculated for.
+ *
+ * row2 holds the current row, row1 the previous row (i.e. for the substring
+ * of string1 of length "i"), and row0 the row before that.
+ *
+ * In other words, at the start of the big loop, row2[j + 1] contains the
+ * Damerau-Levenshtein distance between the substring of string1 of length
+ * "i" and the substring of string2 of length "j + 1".
+ *
+ * All the big loop does is determine the partial minimum-cost paths.
+ *
+ * It does so by calculating the costs of the path ending in characters
+ * i (in string1) and j (in string2), respectively, given that the last
+ * operation is a substitution, a swap, a deletion, or an insertion.
+ *
+ * This implementation allows the costs to be weighted:
+ *
+ * - w (as in "sWap")
+ * - s (as in "Substitution")
+ * - a (for insertion, AKA "Add")
+ * - d (as in "Deletion")
+ *
+ * Note that this algorithm calculates a distance _iff_ d == a.
+ */
+int levenshtein(const char *string1, const char *string2,
+ int w, int s, int a, int d)
+{
+ int len1 = strlen(string1), len2 = strlen(string2);
+ int *row0 = malloc(sizeof(int) * (len2 + 1));
+ int *row1 = malloc(sizeof(int) * (len2 + 1));
+ int *row2 = malloc(sizeof(int) * (len2 + 1));
+ int i, j;
+
+ for (j = 0; j <= len2; j++)
+ row1[j] = j * a;
+ for (i = 0; i < len1; i++) {
+ int *dummy;
+
+ row2[0] = (i + 1) * d;
+ for (j = 0; j < len2; j++) {
+ /* substitution */
+ row2[j + 1] = row1[j] + s * (string1[i] != string2[j]);
+ /* swap */
+ if (i > 0 && j > 0 && string1[i - 1] == string2[j] &&
+ string1[i] == string2[j - 1] &&
+ row2[j + 1] > row0[j - 1] + w)
+ row2[j + 1] = row0[j - 1] + w;
+ /* deletion */
+ if (row2[j + 1] > row1[j + 1] + d)
+ row2[j + 1] = row1[j + 1] + d;
+ /* insertion */
+ if (row2[j + 1] > row2[j] + a)
+ row2[j + 1] = row2[j] + a;
+ }
+
+ dummy = row0;
+ row0 = row1;
+ row1 = row2;
+ row2 = dummy;
+ }
+
+ i = row1[len2];
+ free(row0);
+ free(row1);
+ free(row2);
+
+ return i;
+}
diff --git a/tools/perf/util/levenshtein.h b/tools/perf/util/levenshtein.h
new file mode 100644
index 00000000..b0fcb6d8
--- /dev/null
+++ b/tools/perf/util/levenshtein.h
@@ -0,0 +1,8 @@
+#ifndef __PERF_LEVENSHTEIN_H
+#define __PERF_LEVENSHTEIN_H
+
+int levenshtein(const char *string1, const char *string2,
+ int swap_penalty, int substitution_penalty,
+ int insertion_penalty, int deletion_penalty);
+
+#endif /* __PERF_LEVENSHTEIN_H */
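A short usage sketch of the weighted distance; the weight values below are illustrative only (the values the unknown-command suggestion code actually passes are not reproduced here), and linkage against util/levenshtein.o is assumed:

#include <stdio.h>
#include "util/levenshtein.h"

int main(void)
{
	/* "pref" -> "perf" is one adjacent transposition, so the distance
	 * is 1 when all four weights are 1. */
	printf("%d\n", levenshtein("perf", "pref", 1, 1, 1, 1));	/* 1 */

	/* "stat" -> "stats" is one insertion. */
	printf("%d\n", levenshtein("stat", "stats", 1, 1, 1, 1));	/* 1 */
	return 0;
}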
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
new file mode 100644
index 00000000..3a7eb6ec
--- /dev/null
+++ b/tools/perf/util/map.c
@@ -0,0 +1,682 @@
+#include "symbol.h"
+#include <errno.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include "map.h"
+
+const char *map_type__name[MAP__NR_TYPES] = {
+ [MAP__FUNCTION] = "Functions",
+ [MAP__VARIABLE] = "Variables",
+};
+
+static inline int is_anon_memory(const char *filename)
+{
+ return strcmp(filename, "//anon") == 0;
+}
+
+void map__init(struct map *self, enum map_type type,
+ u64 start, u64 end, u64 pgoff, struct dso *dso)
+{
+ self->type = type;
+ self->start = start;
+ self->end = end;
+ self->pgoff = pgoff;
+ self->dso = dso;
+ self->map_ip = map__map_ip;
+ self->unmap_ip = map__unmap_ip;
+ RB_CLEAR_NODE(&self->rb_node);
+ self->groups = NULL;
+ self->referenced = false;
+}
+
+struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
+ u64 pgoff, u32 pid, char *filename,
+ enum map_type type)
+{
+ struct map *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ char newfilename[PATH_MAX];
+ struct dso *dso;
+ int anon;
+
+ anon = is_anon_memory(filename);
+
+ if (anon) {
+ snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
+ filename = newfilename;
+ }
+
+ dso = __dsos__findnew(dsos__list, filename);
+ if (dso == NULL)
+ goto out_delete;
+
+ map__init(self, type, start, start + len, pgoff, dso);
+
+ if (anon) {
+set_identity:
+ self->map_ip = self->unmap_ip = identity__map_ip;
+ } else if (strcmp(filename, "[vdso]") == 0) {
+ dso__set_loaded(dso, self->type);
+ goto set_identity;
+ }
+ }
+ return self;
+out_delete:
+ free(self);
+ return NULL;
+}
+
+void map__delete(struct map *self)
+{
+ free(self);
+}
+
+void map__fixup_start(struct map *self)
+{
+ struct rb_root *symbols = &self->dso->symbols[self->type];
+ struct rb_node *nd = rb_first(symbols);
+ if (nd != NULL) {
+ struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
+ self->start = sym->start;
+ }
+}
+
+void map__fixup_end(struct map *self)
+{
+ struct rb_root *symbols = &self->dso->symbols[self->type];
+ struct rb_node *nd = rb_last(symbols);
+ if (nd != NULL) {
+ struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
+ self->end = sym->end;
+ }
+}
+
+#define DSO__DELETED "(deleted)"
+
+int map__load(struct map *self, symbol_filter_t filter)
+{
+ const char *name = self->dso->long_name;
+ int nr;
+
+ if (dso__loaded(self->dso, self->type))
+ return 0;
+
+ nr = dso__load(self->dso, self, filter);
+ if (nr < 0) {
+ if (self->dso->has_build_id) {
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(self->dso->build_id,
+ sizeof(self->dso->build_id),
+ sbuild_id);
+ pr_warning("%s with build id %s not found",
+ name, sbuild_id);
+ } else
+ pr_warning("Failed to open %s", name);
+
+ pr_warning(", continuing without symbols\n");
+ return -1;
+ } else if (nr == 0) {
+ const size_t len = strlen(name);
+ const size_t real_len = len - sizeof(DSO__DELETED);
+
+ if (len > sizeof(DSO__DELETED) &&
+ strcmp(name + real_len + 1, DSO__DELETED) == 0) {
+ pr_warning("%.*s was updated, restart the long "
+ "running apps that use it!\n",
+ (int)real_len, name);
+ } else {
+ pr_warning("no symbols found in %s, maybe install "
+ "a debug package?\n", name);
+ }
+
+ return -1;
+ }
+ /*
+ * Only applies to the kernel, as its symtabs aren't relative like the
+ * module ones.
+ */
+ if (self->dso->kernel)
+ map__reloc_vmlinux(self);
+
+ return 0;
+}
+
+struct symbol *map__find_symbol(struct map *self, u64 addr,
+ symbol_filter_t filter)
+{
+ if (map__load(self, filter) < 0)
+ return NULL;
+
+ return dso__find_symbol(self->dso, self->type, addr);
+}
+
+struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+ symbol_filter_t filter)
+{
+ if (map__load(self, filter) < 0)
+ return NULL;
+
+ if (!dso__sorted_by_name(self->dso, self->type))
+ dso__sort_by_name(self->dso, self->type);
+
+ return dso__find_symbol_by_name(self->dso, self->type, name);
+}
+
+struct map *map__clone(struct map *self)
+{
+ struct map *map = malloc(sizeof(*self));
+
+ if (!map)
+ return NULL;
+
+ memcpy(map, self, sizeof(*self));
+
+ return map;
+}
+
+int map__overlap(struct map *l, struct map *r)
+{
+ if (l->start > r->start) {
+ struct map *t = l;
+ l = r;
+ r = t;
+ }
+
+ if (l->end > r->start)
+ return 1;
+
+ return 0;
+}
+
+size_t map__fprintf(struct map *self, FILE *fp)
+{
+ return fprintf(fp, " %Lx-%Lx %Lx %s\n",
+ self->start, self->end, self->pgoff, self->dso->name);
+}
+
+/*
+ * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
+ * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
+ */
+u64 map__rip_2objdump(struct map *map, u64 rip)
+{
+ u64 addr = map->dso->adjust_symbols ?
+ map->unmap_ip(map, rip) : /* RIP -> IP */
+ rip;
+ return addr;
+}
+
+u64 map__objdump_2ip(struct map *map, u64 addr)
+{
+ u64 ip = map->dso->adjust_symbols ?
+ addr :
+ map->unmap_ip(map, addr); /* RIP -> IP */
+ return ip;
+}
+
+void map_groups__init(struct map_groups *self)
+{
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i) {
+ self->maps[i] = RB_ROOT;
+ INIT_LIST_HEAD(&self->removed_maps[i]);
+ }
+ self->machine = NULL;
+}
+
+static void maps__delete(struct rb_root *self)
+{
+ struct rb_node *next = rb_first(self);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node);
+
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, self);
+ map__delete(pos);
+ }
+}
+
+static void maps__delete_removed(struct list_head *self)
+{
+ struct map *pos, *n;
+
+ list_for_each_entry_safe(pos, n, self, node) {
+ list_del(&pos->node);
+ map__delete(pos);
+ }
+}
+
+void map_groups__exit(struct map_groups *self)
+{
+ int i;
+
+ for (i = 0; i < MAP__NR_TYPES; ++i) {
+ maps__delete(&self->maps[i]);
+ maps__delete_removed(&self->removed_maps[i]);
+ }
+}
+
+void map_groups__flush(struct map_groups *self)
+{
+ int type;
+
+ for (type = 0; type < MAP__NR_TYPES; type++) {
+ struct rb_root *root = &self->maps[type];
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, root);
+ /*
+ * We may have references to this map, for
+ * instance in some hist_entry instances, so
+ * just move them to a separate list.
+ */
+ list_add_tail(&pos->node, &self->removed_maps[pos->type]);
+ }
+ }
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *self,
+ enum map_type type, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ struct map *map = map_groups__find(self, type, addr);
+
+ if (map != NULL) {
+ if (mapp != NULL)
+ *mapp = map;
+ return map__find_symbol(map, map->map_ip(map, addr), filter);
+ }
+
+ return NULL;
+}
+
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
+ enum map_type type,
+ const char *name,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+ struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
+
+ if (sym == NULL)
+ continue;
+ if (mapp != NULL)
+ *mapp = pos;
+ return sym;
+ }
+
+ return NULL;
+}
+
+size_t __map_groups__fprintf_maps(struct map_groups *self,
+ enum map_type type, int verbose, FILE *fp)
+{
+ size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+ struct map *pos = rb_entry(nd, struct map, rb_node);
+ printed += fprintf(fp, "Map:");
+ printed += map__fprintf(pos, fp);
+ if (verbose > 2) {
+ printed += dso__fprintf(pos->dso, type, fp);
+ printed += fprintf(fp, "--\n");
+ }
+ }
+
+ return printed;
+}
+
+size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp)
+{
+ size_t printed = 0, i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ printed += __map_groups__fprintf_maps(self, i, verbose, fp);
+ return printed;
+}
+
+static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
+ enum map_type type,
+ int verbose, FILE *fp)
+{
+ struct map *pos;
+ size_t printed = 0;
+
+ list_for_each_entry(pos, &self->removed_maps[type], node) {
+ printed += fprintf(fp, "Map:");
+ printed += map__fprintf(pos, fp);
+ if (verbose > 1) {
+ printed += dso__fprintf(pos->dso, type, fp);
+ printed += fprintf(fp, "--\n");
+ }
+ }
+ return printed;
+}
+
+static size_t map_groups__fprintf_removed_maps(struct map_groups *self,
+ int verbose, FILE *fp)
+{
+ size_t printed = 0, i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ printed += __map_groups__fprintf_removed_maps(self, i, verbose, fp);
+ return printed;
+}
+
+size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp)
+{
+ size_t printed = map_groups__fprintf_maps(self, verbose, fp);
+ printed += fprintf(fp, "Removed maps:\n");
+ return printed + map_groups__fprintf_removed_maps(self, verbose, fp);
+}
+
+int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
+ int verbose, FILE *fp)
+{
+ struct rb_root *root = &self->maps[map->type];
+ struct rb_node *next = rb_first(root);
+ int err = 0;
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node);
+ next = rb_next(&pos->rb_node);
+
+ if (!map__overlap(pos, map))
+ continue;
+
+ if (verbose >= 2) {
+ fputs("overlapping maps:\n", fp);
+ map__fprintf(map, fp);
+ map__fprintf(pos, fp);
+ }
+
+ rb_erase(&pos->rb_node, root);
+ /*
+ * Now check if we need to create new maps for areas not
+ * overlapped by the new map:
+ */
+ if (map->start > pos->start) {
+ struct map *before = map__clone(pos);
+
+ if (before == NULL) {
+ err = -ENOMEM;
+ goto move_map;
+ }
+
+ before->end = map->start - 1;
+ map_groups__insert(self, before);
+ if (verbose >= 2)
+ map__fprintf(before, fp);
+ }
+
+ if (map->end < pos->end) {
+ struct map *after = map__clone(pos);
+
+ if (after == NULL) {
+ err = -ENOMEM;
+ goto move_map;
+ }
+
+ after->start = map->end + 1;
+ map_groups__insert(self, after);
+ if (verbose >= 2)
+ map__fprintf(after, fp);
+ }
+move_map:
+ /*
+ * If we have references, just move them to a separate list.
+ */
+ if (pos->referenced)
+ list_add_tail(&pos->node, &self->removed_maps[map->type]);
+ else
+ map__delete(pos);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * XXX This should not really _copy_ the maps, but refcount them.
+ */
+int map_groups__clone(struct map_groups *self,
+ struct map_groups *parent, enum map_type type)
+{
+ struct rb_node *nd;
+ for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
+ struct map *map = rb_entry(nd, struct map, rb_node);
+ struct map *new = map__clone(map);
+ if (new == NULL)
+ return -ENOMEM;
+ map_groups__insert(self, new);
+ }
+ return 0;
+}
+
+static u64 map__reloc_map_ip(struct map *map, u64 ip)
+{
+ return ip + (s64)map->pgoff;
+}
+
+static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
+{
+ return ip - (s64)map->pgoff;
+}
+
+void map__reloc_vmlinux(struct map *self)
+{
+ struct kmap *kmap = map__kmap(self);
+ s64 reloc;
+
+ if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
+ return;
+
+ reloc = (kmap->ref_reloc_sym->unrelocated_addr -
+ kmap->ref_reloc_sym->addr);
+
+ if (!reloc)
+ return;
+
+ self->map_ip = map__reloc_map_ip;
+ self->unmap_ip = map__reloc_unmap_ip;
+ self->pgoff = reloc;
+}
+
+void maps__insert(struct rb_root *maps, struct map *map)
+{
+ struct rb_node **p = &maps->rb_node;
+ struct rb_node *parent = NULL;
+ const u64 ip = map->start;
+ struct map *m;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct map, rb_node);
+ if (ip < m->start)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&map->rb_node, parent, p);
+ rb_insert_color(&map->rb_node, maps);
+}
+
+void maps__remove(struct rb_root *self, struct map *map)
+{
+ rb_erase(&map->rb_node, self);
+}
+
+struct map *maps__find(struct rb_root *maps, u64 ip)
+{
+ struct rb_node **p = &maps->rb_node;
+ struct rb_node *parent = NULL;
+ struct map *m;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct map, rb_node);
+ if (ip < m->start)
+ p = &(*p)->rb_left;
+ else if (ip > m->end)
+ p = &(*p)->rb_right;
+ else
+ return m;
+ }
+
+ return NULL;
+}
+
+int machine__init(struct machine *self, const char *root_dir, pid_t pid)
+{
+ map_groups__init(&self->kmaps);
+ RB_CLEAR_NODE(&self->rb_node);
+ INIT_LIST_HEAD(&self->user_dsos);
+ INIT_LIST_HEAD(&self->kernel_dsos);
+
+ self->kmaps.machine = self;
+ self->pid = pid;
+ self->root_dir = strdup(root_dir);
+ return self->root_dir == NULL ? -ENOMEM : 0;
+}
+
+static void dsos__delete(struct list_head *self)
+{
+ struct dso *pos, *n;
+
+ list_for_each_entry_safe(pos, n, self, node) {
+ list_del(&pos->node);
+ dso__delete(pos);
+ }
+}
+
+void machine__exit(struct machine *self)
+{
+ map_groups__exit(&self->kmaps);
+ dsos__delete(&self->user_dsos);
+ dsos__delete(&self->kernel_dsos);
+ free(self->root_dir);
+ self->root_dir = NULL;
+}
+
+void machine__delete(struct machine *self)
+{
+ machine__exit(self);
+ free(self);
+}
+
+struct machine *machines__add(struct rb_root *self, pid_t pid,
+ const char *root_dir)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ struct machine *pos, *machine = malloc(sizeof(*machine));
+
+ if (!machine)
+ return NULL;
+
+ if (machine__init(machine, root_dir, pid) != 0) {
+ free(machine);
+ return NULL;
+ }
+
+ while (*p != NULL) {
+ parent = *p;
+ pos = rb_entry(parent, struct machine, rb_node);
+ if (pid < pos->pid)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&machine->rb_node, parent, p);
+ rb_insert_color(&machine->rb_node, self);
+
+ return machine;
+}
+
+struct machine *machines__find(struct rb_root *self, pid_t pid)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ struct machine *machine;
+ struct machine *default_machine = NULL;
+
+ while (*p != NULL) {
+ parent = *p;
+ machine = rb_entry(parent, struct machine, rb_node);
+ if (pid < machine->pid)
+ p = &(*p)->rb_left;
+ else if (pid > machine->pid)
+ p = &(*p)->rb_right;
+ else
+ return machine;
+ if (!machine->pid)
+ default_machine = machine;
+ }
+
+ return default_machine;
+}
+
+struct machine *machines__findnew(struct rb_root *self, pid_t pid)
+{
+ char path[PATH_MAX];
+ const char *root_dir;
+ struct machine *machine = machines__find(self, pid);
+
+ if (!machine || machine->pid != pid) {
+ if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
+ root_dir = "";
+ else {
+ if (!symbol_conf.guestmount)
+ goto out;
+ sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
+ if (access(path, R_OK)) {
+ pr_err("Can't access file %s\n", path);
+ goto out;
+ }
+ root_dir = path;
+ }
+ machine = machines__add(self, pid, root_dir);
+ }
+
+out:
+ return machine;
+}
+
+void machines__process(struct rb_root *self, machine__process_t process, void *data)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ process(pos, data);
+ }
+}
+
+char *machine__mmap_name(struct machine *self, char *bf, size_t size)
+{
+ if (machine__is_host(self))
+ snprintf(bf, size, "[%s]", "kernel.kallsyms");
+ else if (machine__is_default_guest(self))
+ snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
+ else
+ snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);
+
+ return bf;
+}
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
new file mode 100644
index 00000000..78575796
--- /dev/null
+++ b/tools/perf/util/map.h
@@ -0,0 +1,227 @@
+#ifndef __PERF_MAP_H
+#define __PERF_MAP_H
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include "types.h"
+
+enum map_type {
+ MAP__FUNCTION = 0,
+ MAP__VARIABLE,
+};
+
+#define MAP__NR_TYPES (MAP__VARIABLE + 1)
+
+extern const char *map_type__name[MAP__NR_TYPES];
+
+struct dso;
+struct ref_reloc_sym;
+struct map_groups;
+struct machine;
+
+struct map {
+ union {
+ struct rb_node rb_node;
+ struct list_head node;
+ };
+ u64 start;
+ u64 end;
+ u8 /* enum map_type */ type;
+ bool referenced;
+ u32 priv;
+ u64 pgoff;
+
+ /* ip -> dso rip */
+ u64 (*map_ip)(struct map *, u64);
+ /* dso rip -> ip */
+ u64 (*unmap_ip)(struct map *, u64);
+
+ struct dso *dso;
+ struct map_groups *groups;
+};
+
+struct kmap {
+ struct ref_reloc_sym *ref_reloc_sym;
+ struct map_groups *kmaps;
+};
+
+struct map_groups {
+ struct rb_root maps[MAP__NR_TYPES];
+ struct list_head removed_maps[MAP__NR_TYPES];
+ struct machine *machine;
+};
+
+/* Native host kernel uses -1 as pid index in machine */
+#define HOST_KERNEL_ID (-1)
+#define DEFAULT_GUEST_KERNEL_ID (0)
+
+struct machine {
+ struct rb_node rb_node;
+ pid_t pid;
+ char *root_dir;
+ struct list_head user_dsos;
+ struct list_head kernel_dsos;
+ struct map_groups kmaps;
+ struct map *vmlinux_maps[MAP__NR_TYPES];
+};
+
+static inline
+struct map *machine__kernel_map(struct machine *self, enum map_type type)
+{
+ return self->vmlinux_maps[type];
+}
+
+static inline struct kmap *map__kmap(struct map *self)
+{
+ return (struct kmap *)(self + 1);
+}
+
+static inline u64 map__map_ip(struct map *map, u64 ip)
+{
+ return ip - map->start + map->pgoff;
+}
+
+static inline u64 map__unmap_ip(struct map *map, u64 ip)
+{
+ return ip + map->start - map->pgoff;
+}
+
+static inline u64 identity__map_ip(struct map *map __used, u64 ip)
+{
+ return ip;
+}
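+
+/*
+ * Illustrative example for the helpers above (addresses invented for
+ * documentation): with map->start = 0x400000 and map->pgoff = 0x1000,
+ * map__map_ip(map, 0x400123) returns the dso-relative address 0x1123,
+ * and map__unmap_ip(map, 0x1123) maps it back to 0x400123.
+ */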
+
+
+/* rip/ip <-> addr suitable for passing to `objdump --start-address=` */
+u64 map__rip_2objdump(struct map *map, u64 rip);
+u64 map__objdump_2ip(struct map *map, u64 addr);
+
+struct symbol;
+
+typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
+
+void map__init(struct map *self, enum map_type type,
+ u64 start, u64 end, u64 pgoff, struct dso *dso);
+struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
+ u64 pgoff, u32 pid, char *filename,
+ enum map_type type);
+void map__delete(struct map *self);
+struct map *map__clone(struct map *self);
+int map__overlap(struct map *l, struct map *r);
+size_t map__fprintf(struct map *self, FILE *fp);
+
+int map__load(struct map *self, symbol_filter_t filter);
+struct symbol *map__find_symbol(struct map *self,
+ u64 addr, symbol_filter_t filter);
+struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+ symbol_filter_t filter);
+void map__fixup_start(struct map *self);
+void map__fixup_end(struct map *self);
+
+void map__reloc_vmlinux(struct map *self);
+
+size_t __map_groups__fprintf_maps(struct map_groups *self,
+ enum map_type type, int verbose, FILE *fp);
+void maps__insert(struct rb_root *maps, struct map *map);
+void maps__remove(struct rb_root *self, struct map *map);
+struct map *maps__find(struct rb_root *maps, u64 addr);
+void map_groups__init(struct map_groups *self);
+void map_groups__exit(struct map_groups *self);
+int map_groups__clone(struct map_groups *self,
+ struct map_groups *parent, enum map_type type);
+size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp);
+size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp);
+
+typedef void (*machine__process_t)(struct machine *self, void *data);
+
+void machines__process(struct rb_root *self, machine__process_t process, void *data);
+struct machine *machines__add(struct rb_root *self, pid_t pid,
+ const char *root_dir);
+struct machine *machines__find_host(struct rb_root *self);
+struct machine *machines__find(struct rb_root *self, pid_t pid);
+struct machine *machines__findnew(struct rb_root *self, pid_t pid);
+char *machine__mmap_name(struct machine *self, char *bf, size_t size);
+int machine__init(struct machine *self, const char *root_dir, pid_t pid);
+void machine__exit(struct machine *self);
+void machine__delete(struct machine *self);
+
+/*
+ * Default guest kernel is defined by parameter --guestkallsyms
+ * and --guestmodules
+ */
+static inline bool machine__is_default_guest(struct machine *self)
+{
+ return self ? self->pid == DEFAULT_GUEST_KERNEL_ID : false;
+}
+
+static inline bool machine__is_host(struct machine *self)
+{
+ return self ? self->pid == HOST_KERNEL_ID : false;
+}
+
+static inline void map_groups__insert(struct map_groups *self, struct map *map)
+{
+ maps__insert(&self->maps[map->type], map);
+ map->groups = self;
+}
+
+static inline void map_groups__remove(struct map_groups *self, struct map *map)
+{
+ maps__remove(&self->maps[map->type], map);
+}
+
+static inline struct map *map_groups__find(struct map_groups *self,
+ enum map_type type, u64 addr)
+{
+ return maps__find(&self->maps[type], addr);
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *self,
+ enum map_type type, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter);
+
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
+ enum map_type type,
+ const char *name,
+ struct map **mapp,
+ symbol_filter_t filter);
+
+static inline
+struct symbol *machine__find_kernel_symbol(struct machine *self,
+ enum map_type type, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return map_groups__find_symbol(&self->kmaps, type, addr, mapp, filter);
+}
+
+static inline
+struct symbol *machine__find_kernel_function(struct machine *self, u64 addr,
+ struct map **mapp,
+ symbol_filter_t filter)
+{
+ return machine__find_kernel_symbol(self, MAP__FUNCTION, addr, mapp, filter);
+}
+
+static inline
+struct symbol *map_groups__find_function_by_name(struct map_groups *self,
+ const char *name, struct map **mapp,
+ symbol_filter_t filter)
+{
+ return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter);
+}
+
+int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
+ int verbose, FILE *fp);
+
+struct map *map_groups__find_by_name(struct map_groups *self,
+ enum map_type type, const char *name);
+struct map *machine__new_module(struct machine *self, u64 start, const char *filename);
+
+void map_groups__flush(struct map_groups *self);
+
+#endif /* __PERF_MAP_H */
diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c
new file mode 100644
index 00000000..1915de20
--- /dev/null
+++ b/tools/perf/util/pager.c
@@ -0,0 +1,96 @@
+#include "cache.h"
+#include "run-command.h"
+#include "sigchain.h"
+
+/*
+ * This is split up from the rest of git so that we can do
+ * something different on Windows.
+ */
+
+static int spawned_pager;
+
+static void pager_preexec(void)
+{
+ /*
+ * Work around bug in "less" by not starting it until we
+ * have real input
+ */
+ fd_set in;
+
+ FD_ZERO(&in);
+ FD_SET(0, &in);
+ select(1, &in, NULL, &in, NULL);
+
+ setenv("LESS", "FRSX", 0);
+}
+
+static const char *pager_argv[] = { "sh", "-c", NULL, NULL };
+static struct child_process pager_process;
+
+static void wait_for_pager(void)
+{
+ fflush(stdout);
+ fflush(stderr);
+ /* signal EOF to pager */
+ close(1);
+ close(2);
+ finish_command(&pager_process);
+}
+
+static void wait_for_pager_signal(int signo)
+{
+ wait_for_pager();
+ sigchain_pop(signo);
+ raise(signo);
+}
+
+void setup_pager(void)
+{
+ const char *pager = getenv("PERF_PAGER");
+
+ if (!isatty(1))
+ return;
+ if (!pager) {
+ if (!pager_program)
+ perf_config(perf_default_config, NULL);
+ pager = pager_program;
+ }
+ if (!pager)
+ pager = getenv("PAGER");
+ if (!pager)
+ pager = "less";
+ else if (!*pager || !strcmp(pager, "cat"))
+ return;
+
+ spawned_pager = 1; /* means we are emitting to terminal */
+
+ /* spawn the pager */
+ pager_argv[2] = pager;
+ pager_process.argv = pager_argv;
+ pager_process.in = -1;
+ pager_process.preexec_cb = pager_preexec;
+
+ if (start_command(&pager_process))
+ return;
+
+ /* original process continues, but writes to the pipe */
+ dup2(pager_process.in, 1);
+ if (isatty(2))
+ dup2(pager_process.in, 2);
+ close(pager_process.in);
+
+ /* this makes sure that the parent terminates after the pager */
+ sigchain_push_common(wait_for_pager_signal);
+ atexit(wait_for_pager);
+}
+
+int pager_in_use(void)
+{
+ const char *env;
+
+ if (spawned_pager)
+ return 1;
+
+ env = getenv("PERF_PAGER_IN_USE");
+ return env ? perf_config_bool("PERF_PAGER_IN_USE", env) : 0;
+}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
new file mode 100644
index 00000000..4af5bd59
--- /dev/null
+++ b/tools/perf/util/parse-events.c
@@ -0,0 +1,965 @@
+#include "../../../include/linux/hw_breakpoint.h"
+#include "util.h"
+#include "../perf.h"
+#include "parse-options.h"
+#include "parse-events.h"
+#include "exec_cmd.h"
+#include "string.h"
+#include "symbol.h"
+#include "cache.h"
+#include "header.h"
+#include "debugfs.h"
+
+int nr_counters;
+
+struct perf_event_attr attrs[MAX_COUNTERS];
+char *filters[MAX_COUNTERS];
+
+struct event_symbol {
+ u8 type;
+ u64 config;
+ const char *symbol;
+ const char *alias;
+};
+
+enum event_result {
+ EVT_FAILED,
+ EVT_HANDLED,
+ EVT_HANDLED_ALL
+};
+
+char debugfs_path[MAXPATHLEN];
+
+#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
+#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
+
+static struct event_symbol event_symbols[] = {
+ { CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
+ { CHW(INSTRUCTIONS), "instructions", "" },
+ { CHW(CACHE_REFERENCES), "cache-references", "" },
+ { CHW(CACHE_MISSES), "cache-misses", "" },
+ { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
+ { CHW(BRANCH_MISSES), "branch-misses", "" },
+ { CHW(BUS_CYCLES), "bus-cycles", "" },
+
+ { CSW(CPU_CLOCK), "cpu-clock", "" },
+ { CSW(TASK_CLOCK), "task-clock", "" },
+ { CSW(PAGE_FAULTS), "page-faults", "faults" },
+ { CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
+ { CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
+ { CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
+ { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
+ { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" },
+ { CSW(EMULATION_FAULTS), "emulation-faults", "" },
+};
+
+#define __PERF_EVENT_FIELD(config, name) \
+ ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
+
+#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
+#define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
+#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
+#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
+
+static const char *hw_event_names[] = {
+ "cycles",
+ "instructions",
+ "cache-references",
+ "cache-misses",
+ "branches",
+ "branch-misses",
+ "bus-cycles",
+};
+
+static const char *sw_event_names[] = {
+ "cpu-clock-msecs",
+ "task-clock-msecs",
+ "page-faults",
+ "context-switches",
+ "CPU-migrations",
+ "minor-faults",
+ "major-faults",
+ "alignment-faults",
+ "emulation-faults",
+};
+
+#define MAX_ALIASES 8
+
+static const char *hw_cache[][MAX_ALIASES] = {
+ { "L1-dcache", "l1-d", "l1d", "L1-data", },
+ { "L1-icache", "l1-i", "l1i", "L1-instruction", },
+ { "LLC", "L2" },
+ { "dTLB", "d-tlb", "Data-TLB", },
+ { "iTLB", "i-tlb", "Instruction-TLB", },
+ { "branch", "branches", "bpu", "btb", "bpc", },
+};
+
+static const char *hw_cache_op[][MAX_ALIASES] = {
+ { "load", "loads", "read", },
+ { "store", "stores", "write", },
+ { "prefetch", "prefetches", "speculative-read", "speculative-load", },
+};
+
+static const char *hw_cache_result[][MAX_ALIASES] = {
+ { "refs", "Reference", "ops", "access", },
+ { "misses", "miss", },
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define CACHE_READ (1 << C(OP_READ))
+#define CACHE_WRITE (1 << C(OP_WRITE))
+#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
+#define COP(x) (1 << x)
+
+/*
+ * cache operation stat
+ * L1I : Read and prefetch only
+ * ITLB and BPU : Read-only
+ */
+static unsigned long hw_cache_stat[C(MAX)] = {
+ [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
+ [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(ITLB)] = (CACHE_READ),
+ [C(BPU)] = (CACHE_READ),
+};
+
+#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
+ while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
+ if (sys_dirent.d_type == DT_DIR && \
+ (strcmp(sys_dirent.d_name, ".")) && \
+ (strcmp(sys_dirent.d_name, "..")))
+
+static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
+{
+ char evt_path[MAXPATHLEN];
+ int fd;
+
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
+ sys_dir->d_name, evt_dir->d_name);
+ fd = open(evt_path, O_RDONLY);
+ if (fd < 0)
+ return -EINVAL;
+ close(fd);
+
+ return 0;
+}
+
+#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \
+ while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \
+ if (evt_dirent.d_type == DT_DIR && \
+ (strcmp(evt_dirent.d_name, ".")) && \
+ (strcmp(evt_dirent.d_name, "..")) && \
+ (!tp_event_has_id(&sys_dirent, &evt_dirent)))
+
+#define MAX_EVENT_LENGTH 512
+
+
+struct tracepoint_path *tracepoint_id_to_path(u64 config)
+{
+ struct tracepoint_path *path = NULL;
+ DIR *sys_dir, *evt_dir;
+ struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ char id_buf[4];
+ int fd;
+ u64 id;
+ char evt_path[MAXPATHLEN];
+ char dir_path[MAXPATHLEN];
+
+ if (debugfs_valid_mountpoint(debugfs_path))
+ return NULL;
+
+ sys_dir = opendir(debugfs_path);
+ if (!sys_dir)
+ return NULL;
+
+ for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+ sys_dirent.d_name);
+ evt_dir = opendir(dir_path);
+ if (!evt_dir)
+ continue;
+
+ for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
+ evt_dirent.d_name);
+ fd = open(evt_path, O_RDONLY);
+ if (fd < 0)
+ continue;
+ if (read(fd, id_buf, sizeof(id_buf)) < 0) {
+ close(fd);
+ continue;
+ }
+ close(fd);
+ id = atoll(id_buf);
+ if (id == config) {
+ closedir(evt_dir);
+ closedir(sys_dir);
+ path = zalloc(sizeof(*path));
+ if (!path)
+ return NULL;
+ path->system = malloc(MAX_EVENT_LENGTH);
+ if (!path->system) {
+ free(path);
+ return NULL;
+ }
+ path->name = malloc(MAX_EVENT_LENGTH);
+ if (!path->name) {
+ free(path->system);
+ free(path);
+ return NULL;
+ }
+ strncpy(path->system, sys_dirent.d_name,
+ MAX_EVENT_LENGTH);
+ strncpy(path->name, evt_dirent.d_name,
+ MAX_EVENT_LENGTH);
+ return path;
+ }
+ }
+ closedir(evt_dir);
+ }
+
+ closedir(sys_dir);
+ return NULL;
+}
+
+#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
+static const char *tracepoint_id_to_name(u64 config)
+{
+ static char buf[TP_PATH_LEN];
+ struct tracepoint_path *path;
+
+ path = tracepoint_id_to_path(config);
+ if (path) {
+ snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
+ free(path->name);
+ free(path->system);
+ free(path);
+ } else
+ snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
+
+ return buf;
+}
+
+static int is_cache_op_valid(u8 cache_type, u8 cache_op)
+{
+ if (hw_cache_stat[cache_type] & COP(cache_op))
+ return 1; /* valid */
+ else
+ return 0; /* invalid */
+}
+
+static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
+{
+ static char name[50];
+
+ if (cache_result) {
+ sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
+ hw_cache_op[cache_op][0],
+ hw_cache_result[cache_result][0]);
+ } else {
+ sprintf(name, "%s-%s", hw_cache[cache_type][0],
+ hw_cache_op[cache_op][1]);
+ }
+
+ return name;
+}
+
+const char *event_name(int counter)
+{
+ u64 config = attrs[counter].config;
+ int type = attrs[counter].type;
+
+ return __event_name(type, config);
+}
+
+const char *__event_name(int type, u64 config)
+{
+ static char buf[32];
+
+ if (type == PERF_TYPE_RAW) {
+ sprintf(buf, "raw 0x%llx", config);
+ return buf;
+ }
+
+ switch (type) {
+ case PERF_TYPE_HARDWARE:
+ if (config < PERF_COUNT_HW_MAX)
+ return hw_event_names[config];
+ return "unknown-hardware";
+
+ case PERF_TYPE_HW_CACHE: {
+ u8 cache_type, cache_op, cache_result;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return "unknown-ext-hardware-cache-type";
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return "unknown-ext-hardware-cache-op";
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return "unknown-ext-hardware-cache-result";
+
+ if (!is_cache_op_valid(cache_type, cache_op))
+ return "invalid-cache";
+
+ return event_cache_name(cache_type, cache_op, cache_result);
+ }
+
+ case PERF_TYPE_SOFTWARE:
+ if (config < PERF_COUNT_SW_MAX)
+ return sw_event_names[config];
+ return "unknown-software";
+
+ case PERF_TYPE_TRACEPOINT:
+ return tracepoint_id_to_name(config);
+
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
+static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
+{
+ int i, j;
+ int n, longest = -1;
+
+ for (i = 0; i < size; i++) {
+ for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
+ n = strlen(names[i][j]);
+ if (n > longest && !strncasecmp(*str, names[i][j], n))
+ longest = n;
+ }
+ if (longest > 0) {
+ *str += longest;
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static enum event_result
+parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
+{
+ const char *s = *str;
+ int cache_type = -1, cache_op = -1, cache_result = -1;
+
+ cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
+ /*
+ * No fallback - if we cannot get a clear cache type
+ * then bail out:
+ */
+ if (cache_type == -1)
+ return EVT_FAILED;
+
+ while ((cache_op == -1 || cache_result == -1) && *s == '-') {
+ ++s;
+
+ if (cache_op == -1) {
+ cache_op = parse_aliases(&s, hw_cache_op,
+ PERF_COUNT_HW_CACHE_OP_MAX);
+ if (cache_op >= 0) {
+ if (!is_cache_op_valid(cache_type, cache_op))
+ return 0;
+ continue;
+ }
+ }
+
+ if (cache_result == -1) {
+ cache_result = parse_aliases(&s, hw_cache_result,
+ PERF_COUNT_HW_CACHE_RESULT_MAX);
+ if (cache_result >= 0)
+ continue;
+ }
+
+ /*
+ * Can't parse this as a cache op or result, so back up
+ * to the '-'.
+ */
+ --s;
+ break;
+ }
+
+ /*
+ * Fall back to reads:
+ */
+ if (cache_op == -1)
+ cache_op = PERF_COUNT_HW_CACHE_OP_READ;
+
+ /*
+ * Fall back to accesses:
+ */
+ if (cache_result == -1)
+ cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
+
+ attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
+ attr->type = PERF_TYPE_HW_CACHE;
+
+ *str = s;
+ return EVT_HANDLED;
+}
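+
+/*
+ * Illustrative example of the generic cache event parsing above, assuming
+ * the usual perf_event.h encodings (L1D = 0, OP_READ = 0, RESULT_MISS = 1):
+ * "L1-dcache-load-misses" yields cache_type 0, cache_op 0 and cache_result 1,
+ * i.e. attr->config = 0 | (0 << 8) | (1 << 16) = 0x10000 with
+ * attr->type = PERF_TYPE_HW_CACHE.
+ */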
+
+static enum event_result
+parse_single_tracepoint_event(char *sys_name,
+ const char *evt_name,
+ unsigned int evt_length,
+ struct perf_event_attr *attr,
+ const char **strp)
+{
+ char evt_path[MAXPATHLEN];
+ char id_buf[4];
+ u64 id;
+ int fd;
+
+ snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
+ sys_name, evt_name);
+
+ fd = open(evt_path, O_RDONLY);
+ if (fd < 0)
+ return EVT_FAILED;
+
+ if (read(fd, id_buf, sizeof(id_buf)) < 0) {
+ close(fd);
+ return EVT_FAILED;
+ }
+
+ close(fd);
+ id = atoll(id_buf);
+ attr->config = id;
+ attr->type = PERF_TYPE_TRACEPOINT;
+ *strp = evt_name + evt_length;
+
+ attr->sample_type |= PERF_SAMPLE_RAW;
+ attr->sample_type |= PERF_SAMPLE_TIME;
+ attr->sample_type |= PERF_SAMPLE_CPU;
+
+ attr->sample_period = 1;
+
+
+ return EVT_HANDLED;
+}
+
+/* sys + ':' + event + ':' + flags */
+#define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
+static enum event_result
+parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
+ char *flags)
+{
+ char evt_path[MAXPATHLEN];
+ struct dirent *evt_ent;
+ DIR *evt_dir;
+
+ snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name);
+ evt_dir = opendir(evt_path);
+
+ if (!evt_dir) {
+ perror("Can't open event dir");
+ return EVT_FAILED;
+ }
+
+ while ((evt_ent = readdir(evt_dir))) {
+ char event_opt[MAX_EVOPT_LEN + 1];
+ int len;
+
+ if (!strcmp(evt_ent->d_name, ".")
+ || !strcmp(evt_ent->d_name, "..")
+ || !strcmp(evt_ent->d_name, "enable")
+ || !strcmp(evt_ent->d_name, "filter"))
+ continue;
+
+ if (!strglobmatch(evt_ent->d_name, evt_exp))
+ continue;
+
+ len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name,
+ evt_ent->d_name, flags ? ":" : "",
+ flags ?: "");
+ if (len < 0) {
+ closedir(evt_dir);
+ return EVT_FAILED;
+ }
+
+ if (parse_events(NULL, event_opt, 0)) {
+ closedir(evt_dir);
+ return EVT_FAILED;
+ }
+ }
+
+ closedir(evt_dir);
+ return EVT_HANDLED_ALL;
+}
+
+
+static enum event_result parse_tracepoint_event(const char **strp,
+ struct perf_event_attr *attr)
+{
+ const char *evt_name;
+ char *flags;
+ char sys_name[MAX_EVENT_LENGTH];
+ unsigned int sys_length, evt_length;
+
+ if (debugfs_valid_mountpoint(debugfs_path))
+ return 0;
+
+ evt_name = strchr(*strp, ':');
+ if (!evt_name)
+ return EVT_FAILED;
+
+ sys_length = evt_name - *strp;
+ if (sys_length >= MAX_EVENT_LENGTH)
+ return 0;
+
+ strncpy(sys_name, *strp, sys_length);
+ sys_name[sys_length] = '\0';
+ evt_name = evt_name + 1;
+
+ flags = strchr(evt_name, ':');
+ if (flags) {
+ /* split it out: */
+ evt_name = strndup(evt_name, flags - evt_name);
+ flags++;
+ }
+
+ evt_length = strlen(evt_name);
+ if (evt_length >= MAX_EVENT_LENGTH)
+ return EVT_FAILED;
+
+ if (strpbrk(evt_name, "*?")) {
+ *strp = evt_name + evt_length;
+ return parse_multiple_tracepoint_event(sys_name, evt_name,
+ flags);
+ } else
+ return parse_single_tracepoint_event(sys_name, evt_name,
+ evt_length, attr, strp);
+}
+
+static enum event_result
+parse_breakpoint_type(const char *type, const char **strp,
+ struct perf_event_attr *attr)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (!type[i])
+ break;
+
+ switch (type[i]) {
+ case 'r':
+ attr->bp_type |= HW_BREAKPOINT_R;
+ break;
+ case 'w':
+ attr->bp_type |= HW_BREAKPOINT_W;
+ break;
+ case 'x':
+ attr->bp_type |= HW_BREAKPOINT_X;
+ break;
+ default:
+ return EVT_FAILED;
+ }
+ }
+ if (!attr->bp_type) /* Default */
+ attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
+
+ *strp = type + i;
+
+ return EVT_HANDLED;
+}
+
+static enum event_result
+parse_breakpoint_event(const char **strp, struct perf_event_attr *attr)
+{
+ const char *target;
+ const char *type;
+ char *endaddr;
+ u64 addr;
+ enum event_result err;
+
+ target = strchr(*strp, ':');
+ if (!target)
+ return EVT_FAILED;
+
+ if (strncmp(*strp, "mem", target - *strp) != 0)
+ return EVT_FAILED;
+
+ target++;
+
+ addr = strtoull(target, &endaddr, 0);
+ if (target == endaddr)
+ return EVT_FAILED;
+
+ attr->bp_addr = addr;
+ *strp = endaddr;
+
+ type = strchr(target, ':');
+
+ /* If no type is defined, just rw as default */
+ if (!type) {
+ attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
+ } else {
+ err = parse_breakpoint_type(++type, strp, attr);
+ if (err == EVT_FAILED)
+ return EVT_FAILED;
+ }
+
+ /*
+ * We should find a nice way to override the access length
+ * Provide some defaults for now
+ */
+ if (attr->bp_type == HW_BREAKPOINT_X)
+ attr->bp_len = sizeof(long);
+ else
+ attr->bp_len = HW_BREAKPOINT_LEN_4;
+
+ attr->type = PERF_TYPE_BREAKPOINT;
+
+ return EVT_HANDLED;
+}
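+
+/*
+ * Illustrative examples for the breakpoint parsing above (addresses are
+ * made up): "mem:0x1000:w" requests a write-only breakpoint at 0x1000,
+ * while a plain "mem:0x1000" falls back to the default read/write type
+ * (HW_BREAKPOINT_R | HW_BREAKPOINT_W).
+ */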
+
+static int check_events(const char *str, unsigned int i)
+{
+ int n;
+
+ n = strlen(event_symbols[i].symbol);
+ if (!strncmp(str, event_symbols[i].symbol, n))
+ return n;
+
+ n = strlen(event_symbols[i].alias);
+ if (n)
+ if (!strncmp(str, event_symbols[i].alias, n))
+ return n;
+ return 0;
+}
+
+static enum event_result
+parse_symbolic_event(const char **strp, struct perf_event_attr *attr)
+{
+ const char *str = *strp;
+ unsigned int i;
+ int n;
+
+ for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
+ n = check_events(str, i);
+ if (n > 0) {
+ attr->type = event_symbols[i].type;
+ attr->config = event_symbols[i].config;
+ *strp = str + n;
+ return EVT_HANDLED;
+ }
+ }
+ return EVT_FAILED;
+}
+
+static enum event_result
+parse_raw_event(const char **strp, struct perf_event_attr *attr)
+{
+ const char *str = *strp;
+ u64 config;
+ int n;
+
+ if (*str != 'r')
+ return EVT_FAILED;
+ n = hex2u64(str + 1, &config);
+ if (n > 0) {
+ *strp = str + n + 1;
+ attr->type = PERF_TYPE_RAW;
+ attr->config = config;
+ return EVT_HANDLED;
+ }
+ return EVT_FAILED;
+}
+
+static enum event_result
+parse_numeric_event(const char **strp, struct perf_event_attr *attr)
+{
+ const char *str = *strp;
+ char *endp;
+ unsigned long type;
+ u64 config;
+
+ type = strtoul(str, &endp, 0);
+ if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
+ str = endp + 1;
+ config = strtoul(str, &endp, 0);
+ if (endp > str) {
+ attr->type = type;
+ attr->config = config;
+ *strp = endp;
+ return EVT_HANDLED;
+ }
+ }
+ return EVT_FAILED;
+}
+
+static enum event_result
+parse_event_modifier(const char **strp, struct perf_event_attr *attr)
+{
+ const char *str = *strp;
+ int exclude = 0;
+ int eu = 0, ek = 0, eh = 0, precise = 0;
+
+ if (*str++ != ':')
+ return 0;
+ while (*str) {
+ if (*str == 'u') {
+ if (!exclude)
+ exclude = eu = ek = eh = 1;
+ eu = 0;
+ } else if (*str == 'k') {
+ if (!exclude)
+ exclude = eu = ek = eh = 1;
+ ek = 0;
+ } else if (*str == 'h') {
+ if (!exclude)
+ exclude = eu = ek = eh = 1;
+ eh = 0;
+ } else if (*str == 'p') {
+ precise++;
+ } else
+ break;
+
+ ++str;
+ }
+ if (str >= *strp + 2) {
+ *strp = str;
+ attr->exclude_user = eu;
+ attr->exclude_kernel = ek;
+ attr->exclude_hv = eh;
+ attr->precise_ip = precise;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Each event can have multiple symbolic names.
+ * Symbolic names are (almost) exactly matched.
+ */
+static enum event_result
+parse_event_symbols(const char **str, struct perf_event_attr *attr)
+{
+ enum event_result ret;
+
+ ret = parse_tracepoint_event(str, attr);
+ if (ret != EVT_FAILED)
+ goto modifier;
+
+ ret = parse_raw_event(str, attr);
+ if (ret != EVT_FAILED)
+ goto modifier;
+
+ ret = parse_numeric_event(str, attr);
+ if (ret != EVT_FAILED)
+ goto modifier;
+
+ ret = parse_symbolic_event(str, attr);
+ if (ret != EVT_FAILED)
+ goto modifier;
+
+ ret = parse_generic_hw_event(str, attr);
+ if (ret != EVT_FAILED)
+ goto modifier;
+
+ ret = parse_breakpoint_event(str, attr);
+ if (ret != EVT_FAILED)
+ goto modifier;
+
+ fprintf(stderr, "invalid or unsupported event: '%s'\n", *str);
+ fprintf(stderr, "Run 'perf list' for a list of valid events\n");
+ return EVT_FAILED;
+
+modifier:
+ parse_event_modifier(str, attr);
+
+ return ret;
+}
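+
+/*
+ * Illustrative event strings accepted by the parsers above (not an
+ * exhaustive list): "cycles" or equivalently "cpu-cycles", a modifier such
+ * as "cache-misses:u" for user space only, a raw descriptor like "r003c",
+ * a numeric "<type>:<config>" pair, and tracepoint globs such as
+ * "sched:sched_*".
+ */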
+
+static int store_event_type(const char *orgname)
+{
+ char filename[PATH_MAX], *c;
+ FILE *file;
+ int id, n;
+
+ sprintf(filename, "%s/", debugfs_path);
+ strncat(filename, orgname, strlen(orgname));
+ strcat(filename, "/id");
+
+ c = strchr(filename, ':');
+ if (c)
+ *c = '/';
+
+ file = fopen(filename, "r");
+ if (!file)
+ return 0;
+ n = fscanf(file, "%i", &id);
+ fclose(file);
+ if (n < 1) {
+ pr_err("cannot store event ID\n");
+ return -EINVAL;
+ }
+ return perf_header__push_event(id, orgname);
+}
+
+int parse_events(const struct option *opt __used, const char *str, int unset __used)
+{
+ struct perf_event_attr attr;
+ enum event_result ret;
+
+ if (strchr(str, ':'))
+ if (store_event_type(str) < 0)
+ return -1;
+
+ for (;;) {
+ if (nr_counters == MAX_COUNTERS)
+ return -1;
+
+ memset(&attr, 0, sizeof(attr));
+ ret = parse_event_symbols(&str, &attr);
+ if (ret == EVT_FAILED)
+ return -1;
+
+ if (!(*str == 0 || *str == ',' || isspace(*str)))
+ return -1;
+
+ if (ret != EVT_HANDLED_ALL) {
+ attrs[nr_counters] = attr;
+ nr_counters++;
+ }
+
+ if (*str == 0)
+ break;
+ if (*str == ',')
+ ++str;
+ while (isspace(*str))
+ ++str;
+ }
+
+ return 0;
+}
+
+int parse_filter(const struct option *opt __used, const char *str,
+ int unset __used)
+{
+ int i = nr_counters - 1;
+ int len = strlen(str);
+
+ if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) {
+ fprintf(stderr,
+ "-F option should follow a -e tracepoint option\n");
+ return -1;
+ }
+
+ filters[i] = malloc(len + 1);
+ if (!filters[i]) {
+ fprintf(stderr, "not enough memory to hold filter string\n");
+ return -1;
+ }
+ strcpy(filters[i], str);
+
+ return 0;
+}
+
+static const char * const event_type_descriptors[] = {
+ "Hardware event",
+ "Software event",
+ "Tracepoint event",
+ "Hardware cache event",
+ "Raw hardware event descriptor",
+ "Hardware breakpoint",
+};
+
+/*
+ * Print the events from <debugfs_mount_point>/tracing/events
+ */
+
+static void print_tracepoint_events(void)
+{
+ DIR *sys_dir, *evt_dir;
+ struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ char evt_path[MAXPATHLEN];
+ char dir_path[MAXPATHLEN];
+
+ if (debugfs_valid_mountpoint(debugfs_path))
+ return;
+
+ sys_dir = opendir(debugfs_path);
+ if (!sys_dir)
+ return;
+
+ for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+
+ snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+ sys_dirent.d_name);
+ evt_dir = opendir(dir_path);
+ if (!evt_dir)
+ continue;
+
+ for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ snprintf(evt_path, MAXPATHLEN, "%s:%s",
+ sys_dirent.d_name, evt_dirent.d_name);
+ printf(" %-42s [%s]\n", evt_path,
+ event_type_descriptors[PERF_TYPE_TRACEPOINT]);
+ }
+ closedir(evt_dir);
+ }
+ closedir(sys_dir);
+}
+
+/*
+ * Print the help text for the event symbols:
+ */
+void print_events(void)
+{
+ struct event_symbol *syms = event_symbols;
+ unsigned int i, type, op, prev_type = -1;
+ char name[40];
+
+ printf("\n");
+ printf("List of pre-defined events (to be used in -e):\n");
+
+ for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
+ type = syms->type;
+
+ if (type != prev_type)
+ printf("\n");
+
+ if (strlen(syms->alias))
+ sprintf(name, "%s OR %s", syms->symbol, syms->alias);
+ else
+ strcpy(name, syms->symbol);
+ printf(" %-42s [%s]\n", name,
+ event_type_descriptors[type]);
+
+ prev_type = type;
+ }
+
+ printf("\n");
+ for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+ for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+ /* skip invalid cache type */
+ if (!is_cache_op_valid(type, op))
+ continue;
+
+ for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ printf(" %-42s [%s]\n",
+ event_cache_name(type, op, i),
+ event_type_descriptors[PERF_TYPE_HW_CACHE]);
+ }
+ }
+ }
+
+ printf("\n");
+ printf(" %-42s [%s]\n",
+ "rNNN (see 'perf list --help' on how to encode it)",
+ event_type_descriptors[PERF_TYPE_RAW]);
+ printf("\n");
+
+ printf(" %-42s [%s]\n",
+ "mem:<addr>[:access]",
+ event_type_descriptors[PERF_TYPE_BREAKPOINT]);
+ printf("\n");
+
+ print_tracepoint_events();
+
+ exit(129);
+}
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
new file mode 100644
index 00000000..fc4ab3fe
--- /dev/null
+++ b/tools/perf/util/parse-events.h
@@ -0,0 +1,37 @@
+#ifndef __PERF_PARSE_EVENTS_H
+#define __PERF_PARSE_EVENTS_H
+/*
+ * Parse symbolic events/counts passed in as options:
+ */
+
+struct option;
+
+struct tracepoint_path {
+ char *system;
+ char *name;
+ struct tracepoint_path *next;
+};
+
+extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
+extern bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events);
+
+extern int nr_counters;
+
+extern struct perf_event_attr attrs[MAX_COUNTERS];
+extern char *filters[MAX_COUNTERS];
+
+extern const char *event_name(int ctr);
+extern const char *__event_name(int type, u64 config);
+
+extern int parse_events(const struct option *opt, const char *str, int unset);
+extern int parse_filter(const struct option *opt, const char *str, int unset);
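+
+/*
+ * A minimal sketch of how a builtin command can hook these parsers up to
+ * its option table (the help strings are illustrative, not copied from any
+ * particular tool):
+ *
+ * OPT_CALLBACK('e', "event", NULL, "event", "event selector", parse_events),
+ * OPT_CALLBACK('F', "filter", NULL, "filter", "event filter", parse_filter),
+ */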
+
+#define EVENTS_HELP_MAX (128*1024)
+
+extern void print_events(void);
+
+extern char debugfs_path[];
+extern int valid_debugfs_mount(const char *debugfs);
+
+
+#endif /* __PERF_PARSE_EVENTS_H */
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
new file mode 100644
index 00000000..99d02aa5
--- /dev/null
+++ b/tools/perf/util/parse-options.c
@@ -0,0 +1,577 @@
+#include "util.h"
+#include "parse-options.h"
+#include "cache.h"
+
+#define OPT_SHORT 1
+#define OPT_UNSET 2
+
+static int opterror(const struct option *opt, const char *reason, int flags)
+{
+ if (flags & OPT_SHORT)
+ return error("switch `%c' %s", opt->short_name, reason);
+ if (flags & OPT_UNSET)
+ return error("option `no-%s' %s", opt->long_name, reason);
+ return error("option `%s' %s", opt->long_name, reason);
+}
+
+static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt,
+ int flags, const char **arg)
+{
+ if (p->opt) {
+ *arg = p->opt;
+ p->opt = NULL;
+ } else if ((opt->flags & PARSE_OPT_LASTARG_DEFAULT) && (p->argc == 1 ||
+ **(p->argv + 1) == '-')) {
+ *arg = (const char *)opt->defval;
+ } else if (p->argc > 1) {
+ p->argc--;
+ *arg = *++p->argv;
+ } else
+ return opterror(opt, "requires a value", flags);
+ return 0;
+}
+
+static int get_value(struct parse_opt_ctx_t *p,
+ const struct option *opt, int flags)
+{
+ const char *s, *arg = NULL;
+ const int unset = flags & OPT_UNSET;
+
+ if (unset && p->opt)
+ return opterror(opt, "takes no value", flags);
+ if (unset && (opt->flags & PARSE_OPT_NONEG))
+ return opterror(opt, "isn't available", flags);
+
+ if (!(flags & OPT_SHORT) && p->opt) {
+ switch (opt->type) {
+ case OPTION_CALLBACK:
+ if (!(opt->flags & PARSE_OPT_NOARG))
+ break;
+ /* FALLTHROUGH */
+ case OPTION_BOOLEAN:
+ case OPTION_INCR:
+ case OPTION_BIT:
+ case OPTION_SET_UINT:
+ case OPTION_SET_PTR:
+ return opterror(opt, "takes no value", flags);
+ case OPTION_END:
+ case OPTION_ARGUMENT:
+ case OPTION_GROUP:
+ case OPTION_STRING:
+ case OPTION_INTEGER:
+ case OPTION_UINTEGER:
+ case OPTION_LONG:
+ case OPTION_U64:
+ default:
+ break;
+ }
+ }
+
+ switch (opt->type) {
+ case OPTION_BIT:
+ if (unset)
+ *(int *)opt->value &= ~opt->defval;
+ else
+ *(int *)opt->value |= opt->defval;
+ return 0;
+
+ case OPTION_BOOLEAN:
+ *(bool *)opt->value = unset ? false : true;
+ return 0;
+
+ case OPTION_INCR:
+ *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1;
+ return 0;
+
+ case OPTION_SET_UINT:
+ *(unsigned int *)opt->value = unset ? 0 : opt->defval;
+ return 0;
+
+ case OPTION_SET_PTR:
+ *(void **)opt->value = unset ? NULL : (void *)opt->defval;
+ return 0;
+
+ case OPTION_STRING:
+ if (unset)
+ *(const char **)opt->value = NULL;
+ else if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
+ *(const char **)opt->value = (const char *)opt->defval;
+ else
+ return get_arg(p, opt, flags, (const char **)opt->value);
+ return 0;
+
+ case OPTION_CALLBACK:
+ if (unset)
+ return (*opt->callback)(opt, NULL, 1) ? (-1) : 0;
+ if (opt->flags & PARSE_OPT_NOARG)
+ return (*opt->callback)(opt, NULL, 0) ? (-1) : 0;
+ if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
+ return (*opt->callback)(opt, NULL, 0) ? (-1) : 0;
+ if (get_arg(p, opt, flags, &arg))
+ return -1;
+ return (*opt->callback)(opt, arg, 0) ? (-1) : 0;
+
+ case OPTION_INTEGER:
+ if (unset) {
+ *(int *)opt->value = 0;
+ return 0;
+ }
+ if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
+ *(int *)opt->value = opt->defval;
+ return 0;
+ }
+ if (get_arg(p, opt, flags, &arg))
+ return -1;
+ *(int *)opt->value = strtol(arg, (char **)&s, 10);
+ if (*s)
+ return opterror(opt, "expects a numerical value", flags);
+ return 0;
+
+ case OPTION_UINTEGER:
+ if (unset) {
+ *(unsigned int *)opt->value = 0;
+ return 0;
+ }
+ if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
+ *(unsigned int *)opt->value = opt->defval;
+ return 0;
+ }
+ if (get_arg(p, opt, flags, &arg))
+ return -1;
+ *(unsigned int *)opt->value = strtol(arg, (char **)&s, 10);
+ if (*s)
+ return opterror(opt, "expects a numerical value", flags);
+ return 0;
+
+ case OPTION_LONG:
+ if (unset) {
+ *(long *)opt->value = 0;
+ return 0;
+ }
+ if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
+ *(long *)opt->value = opt->defval;
+ return 0;
+ }
+ if (get_arg(p, opt, flags, &arg))
+ return -1;
+ *(long *)opt->value = strtol(arg, (char **)&s, 10);
+ if (*s)
+ return opterror(opt, "expects a numerical value", flags);
+ return 0;
+
+ case OPTION_U64:
+ if (unset) {
+ *(u64 *)opt->value = 0;
+ return 0;
+ }
+ if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
+ *(u64 *)opt->value = opt->defval;
+ return 0;
+ }
+ if (get_arg(p, opt, flags, &arg))
+ return -1;
+ *(u64 *)opt->value = strtoull(arg, (char **)&s, 10);
+ if (*s)
+ return opterror(opt, "expects a numerical value", flags);
+ return 0;
+
+ case OPTION_END:
+ case OPTION_ARGUMENT:
+ case OPTION_GROUP:
+ default:
+ die("should not happen, someone must be hit on the forehead");
+ }
+}
+
+static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options)
+{
+ for (; options->type != OPTION_END; options++) {
+ if (options->short_name == *p->opt) {
+ p->opt = p->opt[1] ? p->opt + 1 : NULL;
+ return get_value(p, options, OPT_SHORT);
+ }
+ }
+ return -2;
+}
+
+static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
+ const struct option *options)
+{
+ const char *arg_end = strchr(arg, '=');
+ const struct option *abbrev_option = NULL, *ambiguous_option = NULL;
+ int abbrev_flags = 0, ambiguous_flags = 0;
+
+ if (!arg_end)
+ arg_end = arg + strlen(arg);
+
+ for (; options->type != OPTION_END; options++) {
+ const char *rest;
+ int flags = 0;
+
+ if (!options->long_name)
+ continue;
+
+ rest = skip_prefix(arg, options->long_name);
+ if (options->type == OPTION_ARGUMENT) {
+ if (!rest)
+ continue;
+ if (*rest == '=')
+ return opterror(options, "takes no value", flags);
+ if (*rest)
+ continue;
+ p->out[p->cpidx++] = arg - 2;
+ return 0;
+ }
+ if (!rest) {
+ /* abbreviated? */
+ if (!strncmp(options->long_name, arg, arg_end - arg)) {
+is_abbreviated:
+ if (abbrev_option) {
+ /*
+ * If this is abbreviated, it is
+ * ambiguous. So when there is no
+ * exact match later, we need to
+ * error out.
+ */
+ ambiguous_option = abbrev_option;
+ ambiguous_flags = abbrev_flags;
+ }
+ if (!(flags & OPT_UNSET) && *arg_end)
+ p->opt = arg_end + 1;
+ abbrev_option = options;
+ abbrev_flags = flags;
+ continue;
+ }
+ /* negated and abbreviated very much? */
+ if (!prefixcmp("no-", arg)) {
+ flags |= OPT_UNSET;
+ goto is_abbreviated;
+ }
+ /* negated? */
+ if (strncmp(arg, "no-", 3))
+ continue;
+ flags |= OPT_UNSET;
+ rest = skip_prefix(arg + 3, options->long_name);
+ /* abbreviated and negated? */
+ if (!rest && !prefixcmp(options->long_name, arg + 3))
+ goto is_abbreviated;
+ if (!rest)
+ continue;
+ }
+ if (*rest) {
+ if (*rest != '=')
+ continue;
+ p->opt = rest + 1;
+ }
+ return get_value(p, options, flags);
+ }
+
+ if (ambiguous_option)
+ return error("Ambiguous option: %s "
+ "(could be --%s%s or --%s%s)",
+ arg,
+ (ambiguous_flags & OPT_UNSET) ? "no-" : "",
+ ambiguous_option->long_name,
+ (abbrev_flags & OPT_UNSET) ? "no-" : "",
+ abbrev_option->long_name);
+ if (abbrev_option)
+ return get_value(p, abbrev_option, abbrev_flags);
+ return -2;
+}
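+
+/*
+ * Illustrative example of the abbreviation handling above (option names
+ * invented for documentation): with long options "verbose" and "version",
+ * "--verb" abbreviates exactly one of them and is accepted, while "--ver"
+ * matches both and is rejected as ambiguous unless some option name
+ * matches it exactly.
+ */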
+
+static void check_typos(const char *arg, const struct option *options)
+{
+ if (strlen(arg) < 3)
+ return;
+
+ if (!prefixcmp(arg, "no-")) {
+ error ("did you mean `--%s` (with two dashes ?)", arg);
+ exit(129);
+ }
+
+ for (; options->type != OPTION_END; options++) {
+ if (!options->long_name)
+ continue;
+ if (!prefixcmp(options->long_name, arg)) {
+ error ("did you mean `--%s` (with two dashes ?)", arg);
+ exit(129);
+ }
+ }
+}
+
+void parse_options_start(struct parse_opt_ctx_t *ctx,
+ int argc, const char **argv, int flags)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->argc = argc - 1;
+ ctx->argv = argv + 1;
+ ctx->out = argv;
+ ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0);
+ ctx->flags = flags;
+ if ((flags & PARSE_OPT_KEEP_UNKNOWN) &&
+ (flags & PARSE_OPT_STOP_AT_NON_OPTION))
+ die("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together");
+}
+
+static int usage_with_options_internal(const char * const *,
+ const struct option *, int);
+
+int parse_options_step(struct parse_opt_ctx_t *ctx,
+ const struct option *options,
+ const char * const usagestr[])
+{
+ int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP);
+
+ /* we must reset ->opt, since an unknown short option leaves it dangling */
+ ctx->opt = NULL;
+
+ for (; ctx->argc; ctx->argc--, ctx->argv++) {
+ const char *arg = ctx->argv[0];
+
+ if (*arg != '-' || !arg[1]) {
+ if (ctx->flags & PARSE_OPT_STOP_AT_NON_OPTION)
+ break;
+ ctx->out[ctx->cpidx++] = ctx->argv[0];
+ continue;
+ }
+
+ if (arg[1] != '-') {
+ ctx->opt = arg + 1;
+ if (internal_help && *ctx->opt == 'h')
+ return parse_options_usage(usagestr, options);
+ switch (parse_short_opt(ctx, options)) {
+ case -1:
+ return parse_options_usage(usagestr, options);
+ case -2:
+ goto unknown;
+ default:
+ break;
+ }
+ if (ctx->opt)
+ check_typos(arg + 1, options);
+ while (ctx->opt) {
+ if (internal_help && *ctx->opt == 'h')
+ return parse_options_usage(usagestr, options);
+ switch (parse_short_opt(ctx, options)) {
+ case -1:
+ return parse_options_usage(usagestr, options);
+ case -2:
+ /* fake a short option thing to hide the fact that we may have
+ * started to parse aggregated stuff
+ *
+ * This is leaky, too bad.
+ */
+ ctx->argv[0] = strdup(ctx->opt - 1);
+ *(char *)ctx->argv[0] = '-';
+ goto unknown;
+ default:
+ break;
+ }
+ }
+ continue;
+ }
+
+ if (!arg[2]) { /* "--" */
+ if (!(ctx->flags & PARSE_OPT_KEEP_DASHDASH)) {
+ ctx->argc--;
+ ctx->argv++;
+ }
+ break;
+ }
+
+ if (internal_help && !strcmp(arg + 2, "help-all"))
+ return usage_with_options_internal(usagestr, options, 1);
+ if (internal_help && !strcmp(arg + 2, "help"))
+ return parse_options_usage(usagestr, options);
+ switch (parse_long_opt(ctx, arg + 2, options)) {
+ case -1:
+ return parse_options_usage(usagestr, options);
+ case -2:
+ goto unknown;
+ default:
+ break;
+ }
+ continue;
+unknown:
+ if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN))
+ return PARSE_OPT_UNKNOWN;
+ ctx->out[ctx->cpidx++] = ctx->argv[0];
+ ctx->opt = NULL;
+ }
+ return PARSE_OPT_DONE;
+}
+
+int parse_options_end(struct parse_opt_ctx_t *ctx)
+{
+ memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out));
+ ctx->out[ctx->cpidx + ctx->argc] = NULL;
+ return ctx->cpidx + ctx->argc;
+}
+
+int parse_options(int argc, const char **argv, const struct option *options,
+ const char * const usagestr[], int flags)
+{
+ struct parse_opt_ctx_t ctx;
+
+ parse_options_start(&ctx, argc, argv, flags);
+ switch (parse_options_step(&ctx, options, usagestr)) {
+ case PARSE_OPT_HELP:
+ exit(129);
+ case PARSE_OPT_DONE:
+ break;
+ default: /* PARSE_OPT_UNKNOWN */
+ if (ctx.argv[0][1] == '-') {
+ error("unknown option `%s'", ctx.argv[0] + 2);
+ } else {
+ error("unknown switch `%c'", *ctx.opt);
+ }
+ usage_with_options(usagestr, options);
+ }
+
+ return parse_options_end(&ctx);
+}
+
+#define USAGE_OPTS_WIDTH 24
+#define USAGE_GAP 2
+
+int usage_with_options_internal(const char * const *usagestr,
+ const struct option *opts, int full)
+{
+ if (!usagestr)
+ return PARSE_OPT_HELP;
+
+ fprintf(stderr, "\n usage: %s\n", *usagestr++);
+ while (*usagestr && **usagestr)
+ fprintf(stderr, " or: %s\n", *usagestr++);
+ while (*usagestr) {
+ fprintf(stderr, "%s%s\n",
+ **usagestr ? " " : "",
+ *usagestr);
+ usagestr++;
+ }
+
+ if (opts->type != OPTION_GROUP)
+ fputc('\n', stderr);
+
+ for (; opts->type != OPTION_END; opts++) {
+ size_t pos;
+ int pad;
+
+ if (opts->type == OPTION_GROUP) {
+ fputc('\n', stderr);
+ if (*opts->help)
+ fprintf(stderr, "%s\n", opts->help);
+ continue;
+ }
+ if (!full && (opts->flags & PARSE_OPT_HIDDEN))
+ continue;
+
+ pos = fprintf(stderr, " ");
+ if (opts->short_name)
+ pos += fprintf(stderr, "-%c", opts->short_name);
+ else
+ pos += fprintf(stderr, " ");
+
+ if (opts->long_name && opts->short_name)
+ pos += fprintf(stderr, ", ");
+ if (opts->long_name)
+ pos += fprintf(stderr, "--%s", opts->long_name);
+
+ switch (opts->type) {
+ case OPTION_ARGUMENT:
+ break;
+ case OPTION_LONG:
+ case OPTION_U64:
+ case OPTION_INTEGER:
+ case OPTION_UINTEGER:
+ if (opts->flags & PARSE_OPT_OPTARG)
+ if (opts->long_name)
+ pos += fprintf(stderr, "[=<n>]");
+ else
+ pos += fprintf(stderr, "[<n>]");
+ else
+ pos += fprintf(stderr, " <n>");
+ break;
+ case OPTION_CALLBACK:
+ if (opts->flags & PARSE_OPT_NOARG)
+ break;
+ /* FALLTHROUGH */
+ case OPTION_STRING:
+ if (opts->argh) {
+ if (opts->flags & PARSE_OPT_OPTARG)
+ if (opts->long_name)
+ pos += fprintf(stderr, "[=<%s>]", opts->argh);
+ else
+ pos += fprintf(stderr, "[<%s>]", opts->argh);
+ else
+ pos += fprintf(stderr, " <%s>", opts->argh);
+ } else {
+ if (opts->flags & PARSE_OPT_OPTARG)
+ if (opts->long_name)
+ pos += fprintf(stderr, "[=...]");
+ else
+ pos += fprintf(stderr, "[...]");
+ else
+ pos += fprintf(stderr, " ...");
+ }
+ break;
+ default: /* OPTION_{BIT,BOOLEAN,SET_UINT,SET_PTR} */
+ case OPTION_END:
+ case OPTION_GROUP:
+ case OPTION_BIT:
+ case OPTION_BOOLEAN:
+ case OPTION_INCR:
+ case OPTION_SET_UINT:
+ case OPTION_SET_PTR:
+ break;
+ }
+
+ if (pos <= USAGE_OPTS_WIDTH)
+ pad = USAGE_OPTS_WIDTH - pos;
+ else {
+ fputc('\n', stderr);
+ pad = USAGE_OPTS_WIDTH;
+ }
+ fprintf(stderr, "%*s%s\n", pad + USAGE_GAP, "", opts->help);
+ }
+ fputc('\n', stderr);
+
+ return PARSE_OPT_HELP;
+}
+
+void usage_with_options(const char * const *usagestr,
+ const struct option *opts)
+{
+ exit_browser(false);
+ usage_with_options_internal(usagestr, opts, 0);
+ exit(129);
+}
+
+int parse_options_usage(const char * const *usagestr,
+ const struct option *opts)
+{
+ return usage_with_options_internal(usagestr, opts, 0);
+}
+
+
+int parse_opt_verbosity_cb(const struct option *opt, const char *arg __used,
+ int unset)
+{
+ int *target = opt->value;
+
+ if (unset)
+ /* --no-quiet, --no-verbose */
+ *target = 0;
+ else if (opt->short_name == 'v') {
+ if (*target >= 0)
+ (*target)++;
+ else
+ *target = 1;
+ } else {
+ if (*target <= 0)
+ (*target)--;
+ else
+ *target = -1;
+ }
+ return 0;
+}
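+
+/*
+ * Illustrative behaviour of the callback above when wired up through
+ * OPT__VERBOSITY(): starting from 0, "-v -v" leaves the target at 2, while
+ * a subsequent "-q" forces it to -1, so positive values mean "more verbose"
+ * and negative values mean "quiet".
+ */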
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
new file mode 100644
index 00000000..c7d72dce
--- /dev/null
+++ b/tools/perf/util/parse-options.h
@@ -0,0 +1,188 @@
+#ifndef __PERF_PARSE_OPTIONS_H
+#define __PERF_PARSE_OPTIONS_H
+
+#include <linux/kernel.h>
+#include <stdbool.h>
+
+enum parse_opt_type {
+ /* special types */
+ OPTION_END,
+ OPTION_ARGUMENT,
+ OPTION_GROUP,
+ /* options with no arguments */
+ OPTION_BIT,
+ OPTION_BOOLEAN,
+ OPTION_INCR,
+ OPTION_SET_UINT,
+ OPTION_SET_PTR,
+ /* options with arguments (usually) */
+ OPTION_STRING,
+ OPTION_INTEGER,
+ OPTION_LONG,
+ OPTION_CALLBACK,
+ OPTION_U64,
+ OPTION_UINTEGER,
+};
+
+enum parse_opt_flags {
+ PARSE_OPT_KEEP_DASHDASH = 1,
+ PARSE_OPT_STOP_AT_NON_OPTION = 2,
+ PARSE_OPT_KEEP_ARGV0 = 4,
+ PARSE_OPT_KEEP_UNKNOWN = 8,
+ PARSE_OPT_NO_INTERNAL_HELP = 16,
+};
+
+enum parse_opt_option_flags {
+ PARSE_OPT_OPTARG = 1,
+ PARSE_OPT_NOARG = 2,
+ PARSE_OPT_NONEG = 4,
+ PARSE_OPT_HIDDEN = 8,
+ PARSE_OPT_LASTARG_DEFAULT = 16,
+};
+
+struct option;
+typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
+
+/*
+ * `type`::
+ * holds the type of the option; you must have an OPTION_END last in your
+ * array.
+ *
+ * `short_name`::
+ * the character to use as a short option name, '\0' if none.
+ *
+ * `long_name`::
+ * the long option name, without the leading dashes, NULL if none.
+ *
+ * `value`::
+ * stores pointers to the values to be filled.
+ *
+ * `argh`::
+ * token to explain the kind of argument this option wants. Keep it
+ * homogeneous across the repository.
+ *
+ * `help`::
+ * the short help associated with what the option does.
+ * Must never be NULL (except for OPTION_END).
+ * OPTION_GROUP uses this pointer to store the group header.
+ *
+ * `flags`::
+ * mask of parse_opt_option_flags.
+ * PARSE_OPT_OPTARG: says that the argument is optional (not for BOOLEANs)
+ * PARSE_OPT_NOARG: says that this option takes no argument, for CALLBACKs
+ * PARSE_OPT_NONEG: says that this option cannot be negated
+ * PARSE_OPT_HIDDEN: this option is skipped in the default usage and only
+ * shown in the full --help-all output.
+ *
+ * `callback`::
+ * pointer to the callback to use for OPTION_CALLBACK.
+ *
+ * `defval`::
+ * default value to fill (*->value) with for PARSE_OPT_OPTARG.
+ * OPTION_{BIT,SET_UINT,SET_PTR} store the {mask,integer,pointer} to put in
+ * the value when met.
+ * CALLBACKS can use it like they want.
+ */
+struct option {
+ enum parse_opt_type type;
+ int short_name;
+ const char *long_name;
+ void *value;
+ const char *argh;
+ const char *help;
+
+ int flags;
+ parse_opt_cb *callback;
+ intptr_t defval;
+};
+
+#define check_vtype(v, type) ( BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(typeof(v), type)) + v )
+
+#define OPT_END() { .type = OPTION_END }
+#define OPT_ARGUMENT(l, h) { .type = OPTION_ARGUMENT, .long_name = (l), .help = (h) }
+#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
+#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) }
+#define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) }
+#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
+#define OPT_SET_UINT(s, l, v, h, i) { .type = OPTION_SET_UINT, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h), .defval = (i) }
+#define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
+#define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
+#define OPT_UINTEGER(s, l, v, h) { .type = OPTION_UINTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h) }
+#define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) }
+#define OPT_U64(s, l, v, h) { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) }
+#define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h) }
+#define OPT_DATE(s, l, v, h) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
+#define OPT_CALLBACK(s, l, v, a, h, f) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f) }
+#define OPT_CALLBACK_NOOPT(s, l, v, a, h, f) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG }
+#define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT }
+
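+/*
+ * A minimal, illustrative options table (the variables and wording are
+ * invented for this sketch and do not come from a specific command):
+ *
+ * static bool force;
+ * static const char *input_name;
+ *
+ * static const struct option options[] = {
+ * OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ * OPT_STRING('i', "input", &input_name, "file", "input file name"),
+ * OPT_END()
+ * };
+ */
+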
+/* parse_options() will filter out the processed options and leave the
+ * non-option arguments in argv[].
+ * Returns the number of arguments left in argv[].
+ */
+extern int parse_options(int argc, const char **argv,
+ const struct option *options,
+ const char * const usagestr[], int flags);
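+
+/*
+ * Sketch of typical use with a table like the one above (illustrative):
+ *
+ * argc = parse_options(argc, argv, options, usage_msg, 0);
+ *
+ * On return argv[] holds only the non-option arguments and the return value
+ * is their count; an unknown option prints the usage text and exits unless
+ * PARSE_OPT_KEEP_UNKNOWN was passed in flags.
+ */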
+
+extern NORETURN void usage_with_options(const char * const *usagestr,
+ const struct option *options);
+
+/*----- incremental advanced APIs -----*/
+
+enum {
+ PARSE_OPT_HELP = -1,
+ PARSE_OPT_DONE,
+ PARSE_OPT_UNKNOWN,
+};
+
+/*
+ * It's okay for the caller to consume argv/argc in the usual way.
+ * Other fields of that structure are private to parse-options and should not
+ * be modified in any way.
+ */
+struct parse_opt_ctx_t {
+ const char **argv;
+ const char **out;
+ int argc, cpidx;
+ const char *opt;
+ int flags;
+};
+
+extern int parse_options_usage(const char * const *usagestr,
+ const struct option *opts);
+
+extern void parse_options_start(struct parse_opt_ctx_t *ctx,
+ int argc, const char **argv, int flags);
+
+extern int parse_options_step(struct parse_opt_ctx_t *ctx,
+ const struct option *options,
+ const char * const usagestr[]);
+
+extern int parse_options_end(struct parse_opt_ctx_t *ctx);
+
+
+/*----- some often used options -----*/
+extern int parse_opt_abbrev_cb(const struct option *, const char *, int);
+extern int parse_opt_approxidate_cb(const struct option *, const char *, int);
+extern int parse_opt_verbosity_cb(const struct option *, const char *, int);
+
+#define OPT__VERBOSE(var) OPT_BOOLEAN('v', "verbose", (var), "be verbose")
+#define OPT__QUIET(var) OPT_BOOLEAN('q', "quiet", (var), "be quiet")
+#define OPT__VERBOSITY(var) \
+ { OPTION_CALLBACK, 'v', "verbose", (var), NULL, "be more verbose", \
+ PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }, \
+ { OPTION_CALLBACK, 'q', "quiet", (var), NULL, "be more quiet", \
+ PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }
+#define OPT__DRY_RUN(var) OPT_BOOLEAN('n', "dry-run", (var), "dry run")
+#define OPT__ABBREV(var) \
+ { OPTION_CALLBACK, 0, "abbrev", (var), "n", \
+ "use <n> digits to display SHA-1s", \
+ PARSE_OPT_OPTARG, &parse_opt_abbrev_cb, 0 }
+
+extern const char *parse_options_fix_filename(const char *prefix, const char *file);
+
+#endif /* __PERF_PARSE_OPTIONS_H */
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
new file mode 100644
index 00000000..58a470d0
--- /dev/null
+++ b/tools/perf/util/path.c
@@ -0,0 +1,156 @@
+/*
+ * I'm tired of doing "vsnprintf()" etc just to open a
+ * file, so here's a "return static buffer with printf"
+ * interface for paths.
+ *
+ * It's obviously not thread-safe. Sue me. But it's quite
+ * useful for doing things like
+ *
+ * f = open(mkpath("%s/%s.perf", base, name), O_RDONLY);
+ *
+ * which is what it's designed for.
+ */
+#include "cache.h"
+
+static char bad_path[] = "/bad-path/";
+/*
+ * Two hacks:
+ */
+
+static const char *get_perf_dir(void)
+{
+ return ".";
+}
+
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+ size_t ret = strlen(src);
+
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ memcpy(dest, src, len);
+ dest[len] = '\0';
+ }
+ return ret;
+}
+
+
+static char *get_pathname(void)
+{
+ static char pathname_array[4][PATH_MAX];
+ static int idx;
+
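+ /*
+  * Rotate through four static buffers so that a few results from
+  * mkpath()/perf_path() can be alive at the same time (e.g. two calls
+  * in one printf()). Not thread-safe, as noted in the file header.
+  */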
+ return pathname_array[3 & ++idx];
+}
+
+static char *cleanup_path(char *path)
+{
+ /* Clean it up */
+ if (!memcmp(path, "./", 2)) {
+ path += 2;
+ while (*path == '/')
+ path++;
+ }
+ return path;
+}
+
+static char *perf_vsnpath(char *buf, size_t n, const char *fmt, va_list args)
+{
+ const char *perf_dir = get_perf_dir();
+ size_t len;
+
+ len = strlen(perf_dir);
+ if (n < len + 1)
+ goto bad;
+ memcpy(buf, perf_dir, len);
+ if (len && !is_dir_sep(perf_dir[len-1]))
+ buf[len++] = '/';
+ len += vsnprintf(buf + len, n - len, fmt, args);
+ if (len >= n)
+ goto bad;
+ return cleanup_path(buf);
+bad:
+ strlcpy(buf, bad_path, n);
+ return buf;
+}
+
+char *perf_pathdup(const char *fmt, ...)
+{
+ char path[PATH_MAX];
+ va_list args;
+ va_start(args, fmt);
+ (void)perf_vsnpath(path, sizeof(path), fmt, args);
+ va_end(args);
+ return xstrdup(path);
+}
+
+char *mkpath(const char *fmt, ...)
+{
+ va_list args;
+ unsigned len;
+ char *pathname = get_pathname();
+
+ va_start(args, fmt);
+ len = vsnprintf(pathname, PATH_MAX, fmt, args);
+ va_end(args);
+ if (len >= PATH_MAX)
+ return bad_path;
+ return cleanup_path(pathname);
+}
+
+char *perf_path(const char *fmt, ...)
+{
+ const char *perf_dir = get_perf_dir();
+ char *pathname = get_pathname();
+ va_list args;
+ unsigned len;
+
+ len = strlen(perf_dir);
+ if (len > PATH_MAX-100)
+ return bad_path;
+ memcpy(pathname, perf_dir, len);
+ if (len && perf_dir[len-1] != '/')
+ pathname[len++] = '/';
+ va_start(args, fmt);
+ len += vsnprintf(pathname + len, PATH_MAX - len, fmt, args);
+ va_end(args);
+ if (len >= PATH_MAX)
+ return bad_path;
+ return cleanup_path(pathname);
+}
+
+/* strip arbitrary amount of directory separators at end of path */
+static inline int chomp_trailing_dir_sep(const char *path, int len)
+{
+ while (len && is_dir_sep(path[len - 1]))
+ len--;
+ return len;
+}
+
+/*
+ * If path ends with suffix (complete path components), returns the
+ * part before suffix (sans trailing directory separators).
+ * Otherwise returns NULL.
+ */
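+/*
+ * For example (illustrative): strip_path_suffix("/usr/libexec/perf-core",
+ * "libexec/perf-core") would return "/usr", while a partial-component match
+ * such as the suffix "exec/perf-core" would return NULL.
+ */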
+char *strip_path_suffix(const char *path, const char *suffix)
+{
+ int path_len = strlen(path), suffix_len = strlen(suffix);
+
+ while (suffix_len) {
+ if (!path_len)
+ return NULL;
+
+ if (is_dir_sep(path[path_len - 1])) {
+ if (!is_dir_sep(suffix[suffix_len - 1]))
+ return NULL;
+ path_len = chomp_trailing_dir_sep(path, path_len);
+ suffix_len = chomp_trailing_dir_sep(suffix, suffix_len);
+ }
+ else if (path[--path_len] != suffix[--suffix_len])
+ return NULL;
+ }
+
+ if (path_len && !is_dir_sep(path[path_len - 1]))
+ return NULL;
+ return strndup(path, chomp_trailing_dir_sep(path, path_len));
+}
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
new file mode 100644
index 00000000..fcc16e43
--- /dev/null
+++ b/tools/perf/util/probe-event.c
@@ -0,0 +1,1755 @@
+/*
+ * probe-event.c : perf-probe definition to probe_events format converter
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#define _GNU_SOURCE
+#include <sys/utsname.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <limits.h>
+
+#undef _GNU_SOURCE
+#include "util.h"
+#include "event.h"
+#include "string.h"
+#include "strlist.h"
+#include "debug.h"
+#include "cache.h"
+#include "color.h"
+#include "symbol.h"
+#include "thread.h"
+#include "debugfs.h"
+#include "trace-event.h" /* For __unused */
+#include "probe-event.h"
+#include "probe-finder.h"
+
+#define MAX_CMDLEN 256
+#define MAX_PROBE_ARGS 128
+#define PERFPROBE_GROUP "probe"
+
+bool probe_event_dry_run; /* Dry run flag */
+
+#define semantic_error(msg ...) pr_err("Semantic error: " msg)
+
+/* If there is no space to write, returns -E2BIG. */
+static int e_snprintf(char *str, size_t size, const char *format, ...)
+ __attribute__((format(printf, 3, 4)));
+
+static int e_snprintf(char *str, size_t size, const char *format, ...)
+{
+ int ret;
+ va_list ap;
+ va_start(ap, format);
+ ret = vsnprintf(str, size, format, ap);
+ va_end(ap);
+ if (ret >= (int)size)
+ ret = -E2BIG;
+ return ret;
+}
+
+static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
+static struct machine machine;
+
+/* Initialize symbol maps and path of vmlinux */
+static int init_vmlinux(void)
+{
+ struct dso *kernel;
+ int ret;
+
+ symbol_conf.sort_by_name = true;
+ if (symbol_conf.vmlinux_name == NULL)
+ symbol_conf.try_vmlinux_path = true;
+ else
+ pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
+ ret = symbol__init();
+ if (ret < 0) {
+ pr_debug("Failed to init symbol map.\n");
+ goto out;
+ }
+
+ ret = machine__init(&machine, "/", 0);
+ if (ret < 0)
+ goto out;
+
+ kernel = dso__new_kernel(symbol_conf.vmlinux_name);
+ if (kernel == NULL)
+ die("Failed to create kernel dso.");
+
+ ret = __machine__create_kernel_maps(&machine, kernel);
+ if (ret < 0)
+ pr_debug("Failed to create kernel maps.\n");
+
+out:
+ if (ret < 0)
+ pr_warning("Failed to init vmlinux path.\n");
+ return ret;
+}
+
+#ifdef DWARF_SUPPORT
+static int open_vmlinux(void)
+{
+ if (map__load(machine.vmlinux_maps[MAP__FUNCTION], NULL) < 0) {
+ pr_debug("Failed to load kernel map.\n");
+ return -EINVAL;
+ }
+ pr_debug("Try to open %s\n", machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name);
+ return open(machine.vmlinux_maps[MAP__FUNCTION]->dso->long_name, O_RDONLY);
+}
+
+/*
+ * Convert trace point to probe point with debuginfo
+ * Currently only handles kprobes.
+ */
+static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
+ struct perf_probe_point *pp)
+{
+ struct symbol *sym;
+ int fd, ret = -ENOENT;
+
+ sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
+ tp->symbol, NULL);
+ if (sym) {
+ fd = open_vmlinux();
+ if (fd >= 0) {
+ ret = find_perf_probe_point(fd,
+ sym->start + tp->offset, pp);
+ close(fd);
+ }
+ }
+ if (ret <= 0) {
+ pr_debug("Failed to find corresponding probes from "
+ "debuginfo. Use kprobe event information.\n");
+ pp->function = strdup(tp->symbol);
+ if (pp->function == NULL)
+ return -ENOMEM;
+ pp->offset = tp->offset;
+ }
+ pp->retprobe = tp->retprobe;
+
+ return 0;
+}
+
+/* Try to find perf_probe_event with debuginfo */
+static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
+ struct probe_trace_event **tevs,
+ int max_tevs)
+{
+ bool need_dwarf = perf_probe_event_need_dwarf(pev);
+ int fd, ntevs;
+
+ fd = open_vmlinux();
+ if (fd < 0) {
+ if (need_dwarf) {
+ pr_warning("Failed to open debuginfo file.\n");
+ return fd;
+ }
+ pr_debug("Could not open vmlinux. Try to use symbols.\n");
+ return 0;
+ }
+
+ /* Searching trace events corresponding to probe event */
+ ntevs = find_probe_trace_events(fd, pev, tevs, max_tevs);
+ close(fd);
+
+ if (ntevs > 0) { /* Succeeded in finding trace events */
+ pr_debug("found %d probe_trace_events.\n", ntevs);
+ return ntevs;
+ }
+
+ if (ntevs == 0) { /* No error but failed to find probe point. */
+ pr_warning("Probe point '%s' not found.\n",
+ synthesize_perf_probe_point(&pev->point));
+ return -ENOENT;
+ }
+ /* Error path : ntevs < 0 */
+ pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
+ if (ntevs == -EBADF) {
+ pr_warning("Warning: No dwarf info found in the vmlinux - "
+ "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
+ if (!need_dwarf) {
+ pr_debug("Trying to use symbols.\nn");
+ return 0;
+ }
+ }
+ return ntevs;
+}
+
+/*
+ * Find a src file from a DWARF tag path. Prepend optional source path prefix
+ * and chop off leading directories that do not exist. Result is passed back as
+ * a newly allocated path on success.
+ * Return 0 if file was found and readable, -errno otherwise.
+ */
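+/*
+ * For example (illustrative): with source_prefix "/usr/src/debug" and
+ * raw_path "/build/obj/kernel/sched.c", the leading directories that do not
+ * exist under the prefix are chopped off one by one until something like
+ * "/usr/src/debug/kernel/sched.c" becomes readable.
+ */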
+static int get_real_path(const char *raw_path, const char *comp_dir,
+ char **new_path)
+{
+ const char *prefix = symbol_conf.source_prefix;
+
+ if (!prefix) {
+ if (raw_path[0] != '/' && comp_dir)
+ /* If not an absolute path, try to use comp_dir */
+ prefix = comp_dir;
+ else {
+ if (access(raw_path, R_OK) == 0) {
+ *new_path = strdup(raw_path);
+ return 0;
+ } else
+ return -errno;
+ }
+ }
+
+ *new_path = malloc((strlen(prefix) + strlen(raw_path) + 2));
+ if (!*new_path)
+ return -ENOMEM;
+
+ for (;;) {
+ sprintf(*new_path, "%s/%s", prefix, raw_path);
+
+ if (access(*new_path, R_OK) == 0)
+ return 0;
+
+ if (!symbol_conf.source_prefix)
+ /* In case of searching comp_dir, don't retry */
+ return -errno;
+
+ switch (errno) {
+ case ENAMETOOLONG:
+ case ENOENT:
+ case EROFS:
+ case EFAULT:
+ raw_path = strchr(++raw_path, '/');
+ if (!raw_path) {
+ free(*new_path);
+ *new_path = NULL;
+ return -ENOENT;
+ }
+ continue;
+
+ default:
+ free(*new_path);
+ *new_path = NULL;
+ return -errno;
+ }
+ }
+}
+
+#define LINEBUF_SIZE 256
+#define NR_ADDITIONAL_LINES 2
+
+static int show_one_line(FILE *fp, int l, bool skip, bool show_num)
+{
+ char buf[LINEBUF_SIZE];
+ const char *color = PERF_COLOR_BLUE;
+
+ if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
+ goto error;
+ if (!skip) {
+ if (show_num)
+ fprintf(stdout, "%7d %s", l, buf);
+ else
+ color_fprintf(stdout, color, " %s", buf);
+ }
+
+ while (strlen(buf) == LINEBUF_SIZE - 1 &&
+ buf[LINEBUF_SIZE - 2] != '\n') {
+ if (fgets(buf, LINEBUF_SIZE, fp) == NULL)
+ goto error;
+ if (!skip) {
+ if (show_num)
+ fprintf(stdout, "%s", buf);
+ else
+ color_fprintf(stdout, color, "%s", buf);
+ }
+ }
+
+ return 0;
+error:
+ if (feof(fp))
+ pr_warning("Source file is shorter than expected.\n");
+ else
+ pr_warning("File read error: %s\n", strerror(errno));
+
+ return -1;
+}
+
+/*
+ * Showing a line range always requires debuginfo to find the source file
+ * and line number.
+ */
+int show_line_range(struct line_range *lr)
+{
+ int l = 1;
+ struct line_node *ln;
+ FILE *fp;
+ int fd, ret;
+ char *tmp;
+
+ /* Search a line range */
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+
+ fd = open_vmlinux();
+ if (fd < 0) {
+ pr_warning("Failed to open debuginfo file.\n");
+ return fd;
+ }
+
+ ret = find_line_range(fd, lr);
+ close(fd);
+ if (ret == 0) {
+ pr_warning("Specified source line is not found.\n");
+ return -ENOENT;
+ } else if (ret < 0) {
+ pr_warning("Debuginfo analysis failed. (%d)\n", ret);
+ return ret;
+ }
+
+ /* Convert source file path */
+ tmp = lr->path;
+ ret = get_real_path(tmp, lr->comp_dir, &lr->path);
+ free(tmp); /* Free old path */
+ if (ret < 0) {
+ pr_warning("Failed to find source file. (%d)\n", ret);
+ return ret;
+ }
+
+ setup_pager();
+
+ if (lr->function)
+ fprintf(stdout, "<%s:%d>\n", lr->function,
+ lr->start - lr->offset);
+ else
+ fprintf(stdout, "<%s:%d>\n", lr->file, lr->start);
+
+ fp = fopen(lr->path, "r");
+ if (fp == NULL) {
+ pr_warning("Failed to open %s: %s\n", lr->path,
+ strerror(errno));
+ return -errno;
+ }
+ /* Skip to starting line number */
+ while (l < lr->start && ret >= 0)
+ ret = show_one_line(fp, l++, true, false);
+ if (ret < 0)
+ goto end;
+
+ list_for_each_entry(ln, &lr->line_list, list) {
+ while (ln->line > l && ret >= 0)
+ ret = show_one_line(fp, (l++) - lr->offset,
+ false, false);
+ if (ret >= 0)
+ ret = show_one_line(fp, (l++) - lr->offset,
+ false, true);
+ if (ret < 0)
+ goto end;
+ }
+
+ if (lr->end == INT_MAX)
+ lr->end = l + NR_ADDITIONAL_LINES;
+ while (l <= lr->end && !feof(fp) && ret >= 0)
+ ret = show_one_line(fp, (l++) - lr->offset, false, false);
+end:
+ fclose(fp);
+ return ret;
+}
+
+#else /* !DWARF_SUPPORT */
+
+static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
+ struct perf_probe_point *pp)
+{
+ pp->function = strdup(tp->symbol);
+ if (pp->function == NULL)
+ return -ENOMEM;
+ pp->offset = tp->offset;
+ pp->retprobe = tp->retprobe;
+
+ return 0;
+}
+
+static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
+ struct probe_trace_event **tevs __unused,
+ int max_tevs __unused)
+{
+ if (perf_probe_event_need_dwarf(pev)) {
+ pr_warning("Debuginfo-analysis is not supported.\n");
+ return -ENOSYS;
+ }
+ return 0;
+}
+
+int show_line_range(struct line_range *lr __unused)
+{
+ pr_warning("Debuginfo-analysis is not supported.\n");
+ return -ENOSYS;
+}
+
+#endif
+
+int parse_line_range_desc(const char *arg, struct line_range *lr)
+{
+ const char *ptr;
+ char *tmp;
+ /*
+ * <Syntax>
+ * SRC:SLN[+NUM|-ELN]
+ * FUNC[:SLN[+NUM|-ELN]]
+ */
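+ /*
+  * For example (illustrative): "schedule:12+5" shows 5 lines starting at
+  * line 12 of schedule(), "sched.c:100-120" shows lines 100 to 120 of
+  * sched.c, and a bare "schedule" shows the whole function.
+  */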
+ ptr = strchr(arg, ':');
+ if (ptr) {
+ lr->start = (int)strtoul(ptr + 1, &tmp, 0);
+ if (*tmp == '+') {
+ lr->end = lr->start + (int)strtoul(tmp + 1, &tmp, 0);
+ lr->end--; /*
+ * Adjust the number of lines here.
+ * If the number of lines == 1,
+ * the end line should be equal to
+ * the start line.
+ */
+ } else if (*tmp == '-')
+ lr->end = (int)strtoul(tmp + 1, &tmp, 0);
+ else
+ lr->end = INT_MAX;
+ pr_debug("Line range is %d to %d\n", lr->start, lr->end);
+ if (lr->start > lr->end) {
+ semantic_error("Start line must be smaller"
+ " than end line.\n");
+ return -EINVAL;
+ }
+ if (*tmp != '\0') {
+ semantic_error("Tailing with invalid character '%d'.\n",
+ *tmp);
+ return -EINVAL;
+ }
+ tmp = strndup(arg, (ptr - arg));
+ } else {
+ tmp = strdup(arg);
+ lr->end = INT_MAX;
+ }
+
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ if (strchr(tmp, '.'))
+ lr->file = tmp;
+ else
+ lr->function = tmp;
+
+ return 0;
+}
+
+/* Check whether the name is valid for an event/group */
+static bool check_event_name(const char *name)
+{
+ if (!isalpha(*name) && *name != '_')
+ return false;
+ while (*++name != '\0') {
+ if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+ return false;
+ }
+ return true;
+}
+
+/* Parse a probe point definition. */
+static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
+{
+ struct perf_probe_point *pp = &pev->point;
+ char *ptr, *tmp;
+ char c, nc = 0;
+ /*
+ * <Syntax>
+ * perf probe [EVENT=]SRC[:LN|;PTN]
+ * perf probe [EVENT=]FUNC[@SRC][+OFFS|%return|:LN|;PAT]
+ *
+ * TODO: Group name support
+ */
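+ /*
+  * Some illustrative examples (not exhaustive):
+  *   "do_sys_open"           probe at the function entry
+  *   "do_sys_open+8"         probe at offset 8 from the entry
+  *   "do_sys_open%return"    probe at the function return
+  *   "myopen=do_sys_open:3"  probe named "myopen" at relative line 3
+  */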
+
+ ptr = strpbrk(arg, ";=@+%");
+ if (ptr && *ptr == '=') { /* Event name */
+ *ptr = '\0';
+ tmp = ptr + 1;
+ if (strchr(arg, ':')) {
+ semantic_error("Group name is not supported yet.\n");
+ return -ENOTSUP;
+ }
+ if (!check_event_name(arg)) {
+ semantic_error("%s is bad for event name -it must "
+ "follow C symbol-naming rule.\n", arg);
+ return -EINVAL;
+ }
+ pev->event = strdup(arg);
+ if (pev->event == NULL)
+ return -ENOMEM;
+ pev->group = NULL;
+ arg = tmp;
+ }
+
+ ptr = strpbrk(arg, ";:+@%");
+ if (ptr) {
+ nc = *ptr;
+ *ptr++ = '\0';
+ }
+
+ tmp = strdup(arg);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ /* Check whether arg is a function or a file name, and copy it */
+ if (strchr(tmp, '.')) /* File */
+ pp->file = tmp;
+ else /* Function */
+ pp->function = tmp;
+
+ /* Parse other options */
+ while (ptr) {
+ arg = ptr;
+ c = nc;
+ if (c == ';') { /* Lazy pattern must be the last part */
+ pp->lazy_line = strdup(arg);
+ if (pp->lazy_line == NULL)
+ return -ENOMEM;
+ break;
+ }
+ ptr = strpbrk(arg, ";:+@%");
+ if (ptr) {
+ nc = *ptr;
+ *ptr++ = '\0';
+ }
+ switch (c) {
+ case ':': /* Line number */
+ pp->line = strtoul(arg, &tmp, 0);
+ if (*tmp != '\0') {
+ semantic_error("There is non-digit char"
+ " in line number.\n");
+ return -EINVAL;
+ }
+ break;
+ case '+': /* Byte offset from a symbol */
+ pp->offset = strtoul(arg, &tmp, 0);
+ if (*tmp != '\0') {
+ semantic_error("There is non-digit character"
+ " in offset.\n");
+ return -EINVAL;
+ }
+ break;
+ case '@': /* File name */
+ if (pp->file) {
+ semantic_error("SRC@SRC is not allowed.\n");
+ return -EINVAL;
+ }
+ pp->file = strdup(arg);
+ if (pp->file == NULL)
+ return -ENOMEM;
+ break;
+ case '%': /* Probe places */
+ if (strcmp(arg, "return") == 0) {
+ pp->retprobe = 1;
+ } else { /* Others not supported yet */
+ semantic_error("%%%s is not supported.\n", arg);
+ return -ENOTSUP;
+ }
+ break;
+ default: /* Buggy case */
+ pr_err("This program has a bug at %s:%d.\n",
+ __FILE__, __LINE__);
+ return -ENOTSUP;
+ break;
+ }
+ }
+
+ /* Exclusion check */
+ if (pp->lazy_line && pp->line) {
+ semantic_error("Lazy pattern can't be used with line number.");
+ return -EINVAL;
+ }
+
+ if (pp->lazy_line && pp->offset) {
+ semantic_error("Lazy pattern can't be used with offset.");
+ return -EINVAL;
+ }
+
+ if (pp->line && pp->offset) {
+ semantic_error("Offset can't be used with line number.");
+ return -EINVAL;
+ }
+
+ if (!pp->line && !pp->lazy_line && pp->file && !pp->function) {
+ semantic_error("File always requires line number or "
+ "lazy pattern.");
+ return -EINVAL;
+ }
+
+ if (pp->offset && !pp->function) {
+ semantic_error("Offset requires an entry function.");
+ return -EINVAL;
+ }
+
+ if (pp->retprobe && !pp->function) {
+ semantic_error("Return probe requires an entry function.");
+ return -EINVAL;
+ }
+
+ if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) {
+ semantic_error("Offset/Line/Lazy pattern can't be used with "
+ "return probe.");
+ return -EINVAL;
+ }
+
+ pr_debug("symbol:%s file:%s line:%d offset:%lu return:%d lazy:%s\n",
+ pp->function, pp->file, pp->line, pp->offset, pp->retprobe,
+ pp->lazy_line);
+ return 0;
+}
+
+/* Parse perf-probe event argument */
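+/*
+ * An argument string can be as simple as a variable name ("fd") or carry a
+ * name, a field chain and a type, e.g. (illustrative)
+ *   "name=file->f_path.dentry->d_name.name:string"
+ * which is split into name, var, field chain and type below.
+ */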
+static int parse_perf_probe_arg(char *str, struct perf_probe_arg *arg)
+{
+ char *tmp, *goodname;
+ struct perf_probe_arg_field **fieldp;
+
+ pr_debug("parsing arg: %s into ", str);
+
+ tmp = strchr(str, '=');
+ if (tmp) {
+ arg->name = strndup(str, tmp - str);
+ if (arg->name == NULL)
+ return -ENOMEM;
+ pr_debug("name:%s ", arg->name);
+ str = tmp + 1;
+ }
+
+ tmp = strchr(str, ':');
+ if (tmp) { /* Type setting */
+ *tmp = '\0';
+ arg->type = strdup(tmp + 1);
+ if (arg->type == NULL)
+ return -ENOMEM;
+ pr_debug("type:%s ", arg->type);
+ }
+
+ tmp = strpbrk(str, "-.[");
+ if (!is_c_varname(str) || !tmp) {
+ /* A variable, register, symbol or special value */
+ arg->var = strdup(str);
+ if (arg->var == NULL)
+ return -ENOMEM;
+ pr_debug("%s\n", arg->var);
+ return 0;
+ }
+
+ /* Structure fields or array element */
+ arg->var = strndup(str, tmp - str);
+ if (arg->var == NULL)
+ return -ENOMEM;
+ goodname = arg->var;
+ pr_debug("%s, ", arg->var);
+ fieldp = &arg->field;
+
+ do {
+ *fieldp = zalloc(sizeof(struct perf_probe_arg_field));
+ if (*fieldp == NULL)
+ return -ENOMEM;
+ if (*tmp == '[') { /* Array */
+ str = tmp;
+ (*fieldp)->index = strtol(str + 1, &tmp, 0);
+ (*fieldp)->ref = true;
+ if (*tmp != ']' || tmp == str + 1) {
+ semantic_error("Array index must be a"
+ " number.\n");
+ return -EINVAL;
+ }
+ tmp++;
+ if (*tmp == '\0')
+ tmp = NULL;
+ } else { /* Structure */
+ if (*tmp == '.') {
+ str = tmp + 1;
+ (*fieldp)->ref = false;
+ } else if (tmp[1] == '>') {
+ str = tmp + 2;
+ (*fieldp)->ref = true;
+ } else {
+ semantic_error("Argument parse error: %s\n",
+ str);
+ return -EINVAL;
+ }
+ tmp = strpbrk(str, "-.[");
+ }
+ if (tmp) {
+ (*fieldp)->name = strndup(str, tmp - str);
+ if ((*fieldp)->name == NULL)
+ return -ENOMEM;
+ if (*str != '[')
+ goodname = (*fieldp)->name;
+ pr_debug("%s(%d), ", (*fieldp)->name, (*fieldp)->ref);
+ fieldp = &(*fieldp)->next;
+ }
+ } while (tmp);
+ (*fieldp)->name = strdup(str);
+ if ((*fieldp)->name == NULL)
+ return -ENOMEM;
+ if (*str != '[')
+ goodname = (*fieldp)->name;
+ pr_debug("%s(%d)\n", (*fieldp)->name, (*fieldp)->ref);
+
+ /* If no name is specified, set the last field name (not the array index) */
+ if (!arg->name) {
+ arg->name = strdup(goodname);
+ if (arg->name == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Parse perf-probe event command */
+int parse_perf_probe_command(const char *cmd, struct perf_probe_event *pev)
+{
+ char **argv;
+ int argc, i, ret = 0;
+
+ argv = argv_split(cmd, &argc);
+ if (!argv) {
+ pr_debug("Failed to split arguments.\n");
+ return -ENOMEM;
+ }
+ if (argc - 1 > MAX_PROBE_ARGS) {
+ semantic_error("Too many probe arguments (%d).\n", argc - 1);
+ ret = -ERANGE;
+ goto out;
+ }
+ /* Parse probe point */
+ ret = parse_perf_probe_point(argv[0], pev);
+ if (ret < 0)
+ goto out;
+
+ /* Copy arguments and ensure return probe has no C argument */
+ pev->nargs = argc - 1;
+ pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
+ if (pev->args == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < pev->nargs && ret >= 0; i++) {
+ ret = parse_perf_probe_arg(argv[i + 1], &pev->args[i]);
+ if (ret >= 0 &&
+ is_c_varname(pev->args[i].var) && pev->point.retprobe) {
+ semantic_error("You can't specify local variable for"
+ " kretprobe.\n");
+ ret = -EINVAL;
+ }
+ }
+out:
+ argv_free(argv);
+
+ return ret;
+}
+
+/* Return true if this perf_probe_event requires debuginfo */
+bool perf_probe_event_need_dwarf(struct perf_probe_event *pev)
+{
+ int i;
+
+ if (pev->point.file || pev->point.line || pev->point.lazy_line)
+ return true;
+
+ for (i = 0; i < pev->nargs; i++)
+ if (is_c_varname(pev->args[i].var))
+ return true;
+
+ return false;
+}
+
+/* Parse a probe_events line into struct probe_trace_event */
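+/*
+ * A line in kprobe_events typically looks like (illustrative):
+ *   p:probe/do_sys_open do_sys_open+0 filename=%di
+ * or, for a return probe:
+ *   r:probe/do_sys_open_ret do_sys_open arg1=%ax
+ */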
+static int parse_probe_trace_command(const char *cmd,
+ struct probe_trace_event *tev)
+{
+ struct probe_trace_point *tp = &tev->point;
+ char pr;
+ char *p;
+ int ret, i, argc;
+ char **argv;
+
+ pr_debug("Parsing probe_events: %s\n", cmd);
+ argv = argv_split(cmd, &argc);
+ if (!argv) {
+ pr_debug("Failed to split arguments.\n");
+ return -ENOMEM;
+ }
+ if (argc < 2) {
+ semantic_error("Too few probe arguments.\n");
+ ret = -ERANGE;
+ goto out;
+ }
+
+ /* Scan event and group name. */
+ ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]",
+ &pr, (float *)(void *)&tev->group,
+ (float *)(void *)&tev->event);
+ if (ret != 3) {
+ semantic_error("Failed to parse event name: %s\n", argv[0]);
+ ret = -EINVAL;
+ goto out;
+ }
+ pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr);
+
+ tp->retprobe = (pr == 'r');
+
+ /* Scan function name and offset */
+ ret = sscanf(argv[1], "%a[^+]+%lu", (float *)(void *)&tp->symbol,
+ &tp->offset);
+ if (ret == 1)
+ tp->offset = 0;
+
+ tev->nargs = argc - 2;
+ tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+ if (tev->args == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < tev->nargs; i++) {
+ p = strchr(argv[i + 2], '=');
+ if (p) /* We don't need to know which register is assigned. */
+ *p++ = '\0';
+ else
+ p = argv[i + 2];
+ tev->args[i].name = strdup(argv[i + 2]);
+ /* TODO: parse regs and offset */
+ tev->args[i].value = strdup(p);
+ if (tev->args[i].name == NULL || tev->args[i].value == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ ret = 0;
+out:
+ argv_free(argv);
+ return ret;
+}
+
+/* Compose only probe arg */
+int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf, size_t len)
+{
+ struct perf_probe_arg_field *field = pa->field;
+ int ret;
+ char *tmp = buf;
+
+ if (pa->name && pa->var)
+ ret = e_snprintf(tmp, len, "%s=%s", pa->name, pa->var);
+ else
+ ret = e_snprintf(tmp, len, "%s", pa->name ? pa->name : pa->var);
+ if (ret <= 0)
+ goto error;
+ tmp += ret;
+ len -= ret;
+
+ while (field) {
+ if (field->name[0] == '[')
+ ret = e_snprintf(tmp, len, "%s", field->name);
+ else
+ ret = e_snprintf(tmp, len, "%s%s",
+ field->ref ? "->" : ".", field->name);
+ if (ret <= 0)
+ goto error;
+ tmp += ret;
+ len -= ret;
+ field = field->next;
+ }
+
+ if (pa->type) {
+ ret = e_snprintf(tmp, len, ":%s", pa->type);
+ if (ret <= 0)
+ goto error;
+ tmp += ret;
+ len -= ret;
+ }
+
+ return tmp - buf;
+error:
+ pr_debug("Failed to synthesize perf probe argument: %s",
+ strerror(-ret));
+ return ret;
+}
+
+/* Compose only probe point (not argument) */
+static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
+{
+ char *buf, *tmp;
+ char offs[32] = "", line[32] = "", file[32] = "";
+ int ret, len;
+
+ buf = zalloc(MAX_CMDLEN);
+ if (buf == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ if (pp->offset) {
+ ret = e_snprintf(offs, 32, "+%lu", pp->offset);
+ if (ret <= 0)
+ goto error;
+ }
+ if (pp->line) {
+ ret = e_snprintf(line, 32, ":%d", pp->line);
+ if (ret <= 0)
+ goto error;
+ }
+ if (pp->file) {
+ len = strlen(pp->file) - 31;
+ if (len < 0)
+ len = 0;
+ tmp = strchr(pp->file + len, '/');
+ if (!tmp)
+ tmp = pp->file + len;
+ ret = e_snprintf(file, 32, "@%s", tmp + 1);
+ if (ret <= 0)
+ goto error;
+ }
+
+ if (pp->function)
+ ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s%s", pp->function,
+ offs, pp->retprobe ? "%return" : "", line,
+ file);
+ else
+ ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", file, line);
+ if (ret <= 0)
+ goto error;
+
+ return buf;
+error:
+ pr_debug("Failed to synthesize perf probe point: %s",
+ strerror(-ret));
+ if (buf)
+ free(buf);
+ return NULL;
+}
+
+#if 0
+char *synthesize_perf_probe_command(struct perf_probe_event *pev)
+{
+ char *buf;
+ int i, len, ret;
+
+ buf = synthesize_perf_probe_point(&pev->point);
+ if (!buf)
+ return NULL;
+
+ len = strlen(buf);
+ for (i = 0; i < pev->nargs; i++) {
+ ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s",
+ pev->args[i].name);
+ if (ret <= 0) {
+ free(buf);
+ return NULL;
+ }
+ len += ret;
+ }
+
+ return buf;
+}
+#endif
+
+static int __synthesize_probe_trace_arg_ref(struct probe_trace_arg_ref *ref,
+ char **buf, size_t *buflen,
+ int depth)
+{
+ int ret;
+ if (ref->next) {
+ depth = __synthesize_probe_trace_arg_ref(ref->next, buf,
+ buflen, depth + 1);
+ if (depth < 0)
+ goto out;
+ }
+
+ ret = e_snprintf(*buf, *buflen, "%+ld(", ref->offset);
+ if (ret < 0)
+ depth = ret;
+ else {
+ *buf += ret;
+ *buflen -= ret;
+ }
+out:
+ return depth;
+
+}
+
+static int synthesize_probe_trace_arg(struct probe_trace_arg *arg,
+ char *buf, size_t buflen)
+{
+ struct probe_trace_arg_ref *ref = arg->ref;
+ int ret, depth = 0;
+ char *tmp = buf;
+
+ /* Argument name or separator */
+ if (arg->name)
+ ret = e_snprintf(buf, buflen, " %s=", arg->name);
+ else
+ ret = e_snprintf(buf, buflen, " ");
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ buflen -= ret;
+
+ /* Special case: @XXX */
+ if (arg->value[0] == '@' && arg->ref)
+ ref = ref->next;
+
+ /* Dereferencing arguments */
+ if (ref) {
+ depth = __synthesize_probe_trace_arg_ref(ref, &buf,
+ &buflen, 1);
+ if (depth < 0)
+ return depth;
+ }
+
+ /* Print argument value */
+ if (arg->value[0] == '@' && arg->ref)
+ ret = e_snprintf(buf, buflen, "%s%+ld", arg->value,
+ arg->ref->offset);
+ else
+ ret = e_snprintf(buf, buflen, "%s", arg->value);
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ buflen -= ret;
+
+ /* Closing */
+ while (depth--) {
+ ret = e_snprintf(buf, buflen, ")");
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ buflen -= ret;
+ }
+ /* Print argument type */
+ if (arg->type) {
+ ret = e_snprintf(buf, buflen, ":%s", arg->type);
+ if (ret <= 0)
+ return ret;
+ buf += ret;
+ }
+
+ return buf - tmp;
+}
+
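+/*
+ * The resulting command string is what gets written to kprobe_events,
+ * e.g. (illustrative):
+ *   "p:probe/vfs_read vfs_read+0 count=%dx:u64"
+ * for a probe on vfs_read with one typed argument.
+ */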
+char *synthesize_probe_trace_command(struct probe_trace_event *tev)
+{
+ struct probe_trace_point *tp = &tev->point;
+ char *buf;
+ int i, len, ret;
+
+ buf = zalloc(MAX_CMDLEN);
+ if (buf == NULL)
+ return NULL;
+
+ len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s+%lu",
+ tp->retprobe ? 'r' : 'p',
+ tev->group, tev->event,
+ tp->symbol, tp->offset);
+ if (len <= 0)
+ goto error;
+
+ for (i = 0; i < tev->nargs; i++) {
+ ret = synthesize_probe_trace_arg(&tev->args[i], buf + len,
+ MAX_CMDLEN - len);
+ if (ret <= 0)
+ goto error;
+ len += ret;
+ }
+
+ return buf;
+error:
+ free(buf);
+ return NULL;
+}
+
+static int convert_to_perf_probe_event(struct probe_trace_event *tev,
+ struct perf_probe_event *pev)
+{
+ char buf[64] = "";
+ int i, ret;
+
+ /* Convert event/group name */
+ pev->event = strdup(tev->event);
+ pev->group = strdup(tev->group);
+ if (pev->event == NULL || pev->group == NULL)
+ return -ENOMEM;
+
+ /* Convert trace_point to probe_point */
+ ret = kprobe_convert_to_perf_probe(&tev->point, &pev->point);
+ if (ret < 0)
+ return ret;
+
+ /* Convert trace_arg to probe_arg */
+ pev->nargs = tev->nargs;
+ pev->args = zalloc(sizeof(struct perf_probe_arg) * pev->nargs);
+ if (pev->args == NULL)
+ return -ENOMEM;
+ for (i = 0; i < tev->nargs && ret >= 0; i++) {
+ if (tev->args[i].name)
+ pev->args[i].name = strdup(tev->args[i].name);
+ else {
+ ret = synthesize_probe_trace_arg(&tev->args[i],
+ buf, 64);
+ pev->args[i].name = strdup(buf);
+ }
+ if (pev->args[i].name == NULL && ret >= 0)
+ ret = -ENOMEM;
+ }
+
+ if (ret < 0)
+ clear_perf_probe_event(pev);
+
+ return ret;
+}
+
+void clear_perf_probe_event(struct perf_probe_event *pev)
+{
+ struct perf_probe_point *pp = &pev->point;
+ struct perf_probe_arg_field *field, *next;
+ int i;
+
+ if (pev->event)
+ free(pev->event);
+ if (pev->group)
+ free(pev->group);
+ if (pp->file)
+ free(pp->file);
+ if (pp->function)
+ free(pp->function);
+ if (pp->lazy_line)
+ free(pp->lazy_line);
+ for (i = 0; i < pev->nargs; i++) {
+ if (pev->args[i].name)
+ free(pev->args[i].name);
+ if (pev->args[i].var)
+ free(pev->args[i].var);
+ if (pev->args[i].type)
+ free(pev->args[i].type);
+ field = pev->args[i].field;
+ while (field) {
+ next = field->next;
+ if (field->name)
+ free(field->name);
+ free(field);
+ field = next;
+ }
+ }
+ if (pev->args)
+ free(pev->args);
+ memset(pev, 0, sizeof(*pev));
+}
+
+static void clear_probe_trace_event(struct probe_trace_event *tev)
+{
+ struct probe_trace_arg_ref *ref, *next;
+ int i;
+
+ if (tev->event)
+ free(tev->event);
+ if (tev->group)
+ free(tev->group);
+ if (tev->point.symbol)
+ free(tev->point.symbol);
+ for (i = 0; i < tev->nargs; i++) {
+ if (tev->args[i].name)
+ free(tev->args[i].name);
+ if (tev->args[i].value)
+ free(tev->args[i].value);
+ if (tev->args[i].type)
+ free(tev->args[i].type);
+ ref = tev->args[i].ref;
+ while (ref) {
+ next = ref->next;
+ free(ref);
+ ref = next;
+ }
+ }
+ if (tev->args)
+ free(tev->args);
+ memset(tev, 0, sizeof(*tev));
+}
+
+static int open_kprobe_events(bool readwrite)
+{
+ char buf[PATH_MAX];
+ const char *__debugfs;
+ int ret;
+
+ __debugfs = debugfs_find_mountpoint();
+ if (__debugfs == NULL) {
+ pr_warning("Debugfs is not mounted.\n");
+ return -ENOENT;
+ }
+
+ ret = e_snprintf(buf, PATH_MAX, "%stracing/kprobe_events", __debugfs);
+ if (ret >= 0) {
+ pr_debug("Opening %s write=%d\n", buf, readwrite);
+ if (readwrite && !probe_event_dry_run)
+ ret = open(buf, O_RDWR, O_APPEND);
+ else
+ ret = open(buf, O_RDONLY, 0);
+ }
+
+ if (ret < 0) {
+ if (errno == ENOENT)
+ pr_warning("kprobe_events file does not exist - please"
+ " rebuild kernel with CONFIG_KPROBE_EVENT.\n");
+ else
+ pr_warning("Failed to open kprobe_events file: %s\n",
+ strerror(errno));
+ }
+ return ret;
+}
+
+/* Get raw string list of current kprobe_events */
+static struct strlist *get_probe_trace_command_rawlist(int fd)
+{
+ int ret, idx;
+ FILE *fp;
+ char buf[MAX_CMDLEN];
+ char *p;
+ struct strlist *sl;
+
+ sl = strlist__new(true, NULL);
+
+ fp = fdopen(dup(fd), "r");
+ while (!feof(fp)) {
+ p = fgets(buf, MAX_CMDLEN, fp);
+ if (!p)
+ break;
+
+ idx = strlen(p) - 1;
+ if (p[idx] == '\n')
+ p[idx] = '\0';
+ ret = strlist__add(sl, buf);
+ if (ret < 0) {
+ pr_debug("strlist__add failed: %s\n", strerror(-ret));
+ strlist__delete(sl);
+ return NULL;
+ }
+ }
+ fclose(fp);
+
+ return sl;
+}
+
+/* Show an event */
+static int show_perf_probe_event(struct perf_probe_event *pev)
+{
+ int i, ret;
+ char buf[128];
+ char *place;
+
+ /* Synthesize only event probe point */
+ place = synthesize_perf_probe_point(&pev->point);
+ if (!place)
+ return -EINVAL;
+
+ ret = e_snprintf(buf, 128, "%s:%s", pev->group, pev->event);
+ if (ret < 0)
+ return ret;
+
+ printf(" %-20s (on %s", buf, place);
+
+ if (pev->nargs > 0) {
+ printf(" with");
+ for (i = 0; i < pev->nargs; i++) {
+ ret = synthesize_perf_probe_arg(&pev->args[i],
+ buf, 128);
+ if (ret < 0)
+ break;
+ printf(" %s", buf);
+ }
+ }
+ printf(")\n");
+ free(place);
+ return ret;
+}
+
+/* List the current perf-probe events */
+int show_perf_probe_events(void)
+{
+ int fd, ret;
+ struct probe_trace_event tev;
+ struct perf_probe_event pev;
+ struct strlist *rawlist;
+ struct str_node *ent;
+
+ setup_pager();
+ ret = init_vmlinux();
+ if (ret < 0)
+ return ret;
+
+ memset(&tev, 0, sizeof(tev));
+ memset(&pev, 0, sizeof(pev));
+
+ fd = open_kprobe_events(false);
+ if (fd < 0)
+ return fd;
+
+ rawlist = get_probe_trace_command_rawlist(fd);
+ close(fd);
+ if (!rawlist)
+ return -ENOENT;
+
+ strlist__for_each(ent, rawlist) {
+ ret = parse_probe_trace_command(ent->s, &tev);
+ if (ret >= 0) {
+ ret = convert_to_perf_probe_event(&tev, &pev);
+ if (ret >= 0)
+ ret = show_perf_probe_event(&pev);
+ }
+ clear_perf_probe_event(&pev);
+ clear_probe_trace_event(&tev);
+ if (ret < 0)
+ break;
+ }
+ strlist__delete(rawlist);
+
+ return ret;
+}
+
+/* Get current perf-probe event names */
+static struct strlist *get_probe_trace_event_names(int fd, bool include_group)
+{
+ char buf[128];
+ struct strlist *sl, *rawlist;
+ struct str_node *ent;
+ struct probe_trace_event tev;
+ int ret = 0;
+
+ memset(&tev, 0, sizeof(tev));
+ rawlist = get_probe_trace_command_rawlist(fd);
+ sl = strlist__new(true, NULL);
+ strlist__for_each(ent, rawlist) {
+ ret = parse_probe_trace_command(ent->s, &tev);
+ if (ret < 0)
+ break;
+ if (include_group) {
+ ret = e_snprintf(buf, 128, "%s:%s", tev.group,
+ tev.event);
+ if (ret >= 0)
+ ret = strlist__add(sl, buf);
+ } else
+ ret = strlist__add(sl, tev.event);
+ clear_probe_trace_event(&tev);
+ if (ret < 0)
+ break;
+ }
+ strlist__delete(rawlist);
+
+ if (ret < 0) {
+ strlist__delete(sl);
+ return NULL;
+ }
+ return sl;
+}
+
+static int write_probe_trace_event(int fd, struct probe_trace_event *tev)
+{
+ int ret = 0;
+ char *buf = synthesize_probe_trace_command(tev);
+
+ if (!buf) {
+ pr_debug("Failed to synthesize probe trace event.\n");
+ return -EINVAL;
+ }
+
+ pr_debug("Writing event: %s\n", buf);
+ if (!probe_event_dry_run) {
+ ret = write(fd, buf, strlen(buf));
+ if (ret <= 0)
+ pr_warning("Failed to write event: %s\n",
+ strerror(errno));
+ }
+ free(buf);
+ return ret;
+}
+
+static int get_new_event_name(char *buf, size_t len, const char *base,
+ struct strlist *namelist, bool allow_suffix)
+{
+ int i, ret;
+
+ /* Try no suffix */
+ ret = e_snprintf(buf, len, "%s", base);
+ if (ret < 0) {
+ pr_debug("snprintf() failed: %s\n", strerror(-ret));
+ return ret;
+ }
+ if (!strlist__has_entry(namelist, buf))
+ return 0;
+
+ if (!allow_suffix) {
+ pr_warning("Error: event \"%s\" already exists. "
+ "(Use -f to force duplicates.)\n", base);
+ return -EEXIST;
+ }
+
+ /* Try to add suffix */
+ for (i = 1; i < MAX_EVENT_INDEX; i++) {
+ ret = e_snprintf(buf, len, "%s_%d", base, i);
+ if (ret < 0) {
+ pr_debug("snprintf() failed: %s\n", strerror(-ret));
+ return ret;
+ }
+ if (!strlist__has_entry(namelist, buf))
+ break;
+ }
+ if (i == MAX_EVENT_INDEX) {
+ pr_warning("Too many events are on the same function.\n");
+ ret = -ERANGE;
+ }
+
+ return ret;
+}
+
+static int __add_probe_trace_events(struct perf_probe_event *pev,
+ struct probe_trace_event *tevs,
+ int ntevs, bool allow_suffix)
+{
+ int i, fd, ret;
+ struct probe_trace_event *tev = NULL;
+ char buf[64];
+ const char *event, *group;
+ struct strlist *namelist;
+
+ fd = open_kprobe_events(true);
+ if (fd < 0)
+ return fd;
+ /* Get current event names */
+ namelist = get_probe_trace_event_names(fd, false);
+ if (!namelist) {
+ pr_debug("Failed to get current event list.\n");
+ return -EIO;
+ }
+
+ ret = 0;
+ printf("Add new event%s\n", (ntevs > 1) ? "s:" : ":");
+ for (i = 0; i < ntevs; i++) {
+ tev = &tevs[i];
+ if (pev->event)
+ event = pev->event;
+ else
+ if (pev->point.function)
+ event = pev->point.function;
+ else
+ event = tev->point.symbol;
+ if (pev->group)
+ group = pev->group;
+ else
+ group = PERFPROBE_GROUP;
+
+ /* Get an unused new event name */
+ ret = get_new_event_name(buf, 64, event,
+ namelist, allow_suffix);
+ if (ret < 0)
+ break;
+ event = buf;
+
+ tev->event = strdup(event);
+ tev->group = strdup(group);
+ if (tev->event == NULL || tev->group == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = write_probe_trace_event(fd, tev);
+ if (ret < 0)
+ break;
+ /* Add the new event name to namelist */
+ strlist__add(namelist, event);
+
+ /* Trick here - save current event/group */
+ event = pev->event;
+ group = pev->group;
+ pev->event = tev->event;
+ pev->group = tev->group;
+ show_perf_probe_event(pev);
+ /* Trick here - restore current event/group */
+ pev->event = (char *)event;
+ pev->group = (char *)group;
+
+ /*
+ * Probes after the first one generated from the same
+ * user input are always allowed a suffix, because
+ * several addresses may correspond to one source line.
+ */
+ allow_suffix = true;
+ }
+
+ if (ret >= 0) {
+ /* Show how to use the event. */
+ printf("\nYou can now use it on all perf tools, such as:\n\n");
+ printf("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group,
+ tev->event);
+ }
+
+ strlist__delete(namelist);
+ close(fd);
+ return ret;
+}
+
+static int convert_to_probe_trace_events(struct perf_probe_event *pev,
+ struct probe_trace_event **tevs,
+ int max_tevs)
+{
+ struct symbol *sym;
+ int ret = 0, i;
+ struct probe_trace_event *tev;
+
+ /* Convert perf_probe_event with debuginfo */
+ ret = try_to_find_probe_trace_events(pev, tevs, max_tevs);
+ if (ret != 0)
+ return ret;
+
+ /* Allocate trace event buffer */
+ tev = *tevs = zalloc(sizeof(struct probe_trace_event));
+ if (tev == NULL)
+ return -ENOMEM;
+
+ /* Copy parameters */
+ tev->point.symbol = strdup(pev->point.function);
+ if (tev->point.symbol == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ tev->point.offset = pev->point.offset;
+ tev->point.retprobe = pev->point.retprobe;
+ tev->nargs = pev->nargs;
+ if (tev->nargs) {
+ tev->args = zalloc(sizeof(struct probe_trace_arg)
+ * tev->nargs);
+ if (tev->args == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ for (i = 0; i < tev->nargs; i++) {
+ if (pev->args[i].name) {
+ tev->args[i].name = strdup(pev->args[i].name);
+ if (tev->args[i].name == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+ tev->args[i].value = strdup(pev->args[i].var);
+ if (tev->args[i].value == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ if (pev->args[i].type) {
+ tev->args[i].type = strdup(pev->args[i].type);
+ if (tev->args[i].type == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+ }
+ }
+
+ /* Currently just checking function name from symbol map */
+ sym = map__find_symbol_by_name(machine.vmlinux_maps[MAP__FUNCTION],
+ tev->point.symbol, NULL);
+ if (!sym) {
+ pr_warning("Kernel symbol \'%s\' not found.\n",
+ tev->point.symbol);
+ ret = -ENOENT;
+ goto error;
+ }
+
+ return 1;
+error:
+ clear_probe_trace_event(tev);
+ free(tev);
+ *tevs = NULL;
+ return ret;
+}
+
+struct __event_package {
+ struct perf_probe_event *pev;
+ struct probe_trace_event *tevs;
+ int ntevs;
+};
+
+int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
+ bool force_add, int max_tevs)
+{
+ int i, j, ret;
+ struct __event_package *pkgs;
+
+ pkgs = zalloc(sizeof(struct __event_package) * npevs);
+ if (pkgs == NULL)
+ return -ENOMEM;
+
+ /* Init vmlinux path */
+ ret = init_vmlinux();
+ if (ret < 0) {
+ free(pkgs);
+ return ret;
+ }
+
+ /* Loop 1: convert all events */
+ for (i = 0; i < npevs; i++) {
+ pkgs[i].pev = &pevs[i];
+ /* Convert with or without debuginfo */
+ ret = convert_to_probe_trace_events(pkgs[i].pev,
+ &pkgs[i].tevs, max_tevs);
+ if (ret < 0)
+ goto end;
+ pkgs[i].ntevs = ret;
+ }
+
+ /* Loop 2: add all events */
+ for (i = 0; i < npevs && ret >= 0; i++)
+ ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs,
+ pkgs[i].ntevs, force_add);
+end:
+ /* Loop 3: cleanup and free trace events */
+ for (i = 0; i < npevs; i++) {
+ for (j = 0; j < pkgs[i].ntevs; j++)
+ clear_probe_trace_event(&pkgs[i].tevs[j]);
+ free(pkgs[i].tevs);
+ }
+ free(pkgs);
+
+ return ret;
+}
+
+static int __del_trace_probe_event(int fd, struct str_node *ent)
+{
+ char *p;
+ char buf[128];
+ int ret;
+
+ /* Convert from perf-probe event to trace-probe event */
+ ret = e_snprintf(buf, 128, "-:%s", ent->s);
+ if (ret < 0)
+ goto error;
+
+ p = strchr(buf + 2, ':');
+ if (!p) {
+ pr_debug("Internal error: %s should have ':' but not.\n",
+ ent->s);
+ ret = -ENOTSUP;
+ goto error;
+ }
+ *p = '/';
+
+ pr_debug("Writing event: %s\n", buf);
+ ret = write(fd, buf, strlen(buf));
+ if (ret < 0)
+ goto error;
+
+ printf("Remove event: %s\n", ent->s);
+ return 0;
+error:
+ pr_warning("Failed to delete event: %s\n", strerror(-ret));
+ return ret;
+}
+
+static int del_trace_probe_event(int fd, const char *group,
+ const char *event, struct strlist *namelist)
+{
+ char buf[128];
+ struct str_node *ent, *n;
+ int found = 0, ret = 0;
+
+ ret = e_snprintf(buf, 128, "%s:%s", group, event);
+ if (ret < 0) {
+ pr_err("Failed to copy event.");
+ return ret;
+ }
+
+ if (strpbrk(buf, "*?")) { /* Glob-exp */
+ strlist__for_each_safe(ent, n, namelist)
+ if (strglobmatch(ent->s, buf)) {
+ found++;
+ ret = __del_trace_probe_event(fd, ent);
+ if (ret < 0)
+ break;
+ strlist__remove(namelist, ent);
+ }
+ } else {
+ ent = strlist__find(namelist, buf);
+ if (ent) {
+ found++;
+ ret = __del_trace_probe_event(fd, ent);
+ if (ret >= 0)
+ strlist__remove(namelist, ent);
+ }
+ }
+ if (found == 0 && ret >= 0)
+ pr_info("Info: Event \"%s\" does not exist.\n", buf);
+
+ return ret;
+}
+
+int del_perf_probe_events(struct strlist *dellist)
+{
+ int fd, ret = 0;
+ const char *group, *event;
+ char *p, *str;
+ struct str_node *ent;
+ struct strlist *namelist;
+
+ fd = open_kprobe_events(true);
+ if (fd < 0)
+ return fd;
+
+ /* Get current event names */
+ namelist = get_probe_trace_event_names(fd, true);
+ if (namelist == NULL)
+ return -EINVAL;
+
+ strlist__for_each(ent, dellist) {
+ str = strdup(ent->s);
+ if (str == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+ pr_debug("Parsing: %s\n", str);
+ p = strchr(str, ':');
+ if (p) {
+ group = str;
+ *p = '\0';
+ event = p + 1;
+ } else {
+ group = "*";
+ event = str;
+ }
+ pr_debug("Group: %s, Event: %s\n", group, event);
+ ret = del_trace_probe_event(fd, group, event, namelist);
+ free(str);
+ if (ret < 0)
+ break;
+ }
+ strlist__delete(namelist);
+ close(fd);
+
+ return ret;
+}
+
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
new file mode 100644
index 00000000..5af39243
--- /dev/null
+++ b/tools/perf/util/probe-event.h
@@ -0,0 +1,123 @@
+#ifndef _PROBE_EVENT_H
+#define _PROBE_EVENT_H
+
+#include <stdbool.h>
+#include "strlist.h"
+
+extern bool probe_event_dry_run;
+
+/* kprobe-tracer tracing point */
+struct probe_trace_point {
+ char *symbol; /* Base symbol */
+ unsigned long offset; /* Offset from symbol */
+ bool retprobe; /* Return probe flag */
+};
+
+/* kprobe-tracer tracing argument referencing offset */
+struct probe_trace_arg_ref {
+ struct probe_trace_arg_ref *next; /* Next reference */
+ long offset; /* Offset value */
+};
+
+/* kprobe-tracer tracing argument */
+struct probe_trace_arg {
+ char *name; /* Argument name */
+ char *value; /* Base value */
+ char *type; /* Type name */
+ struct probe_trace_arg_ref *ref; /* Referencing offset */
+};
+
+/* kprobe-tracer tracing event (point + arg) */
+struct probe_trace_event {
+ char *event; /* Event name */
+ char *group; /* Group name */
+ struct probe_trace_point point; /* Trace point */
+ int nargs; /* Number of args */
+ struct probe_trace_arg *args; /* Arguments */
+};
+
+/* Perf probe probing point */
+struct perf_probe_point {
+ char *file; /* File path */
+ char *function; /* Function name */
+ int line; /* Line number */
+ bool retprobe; /* Return probe flag */
+ char *lazy_line; /* Lazy matching pattern */
+ unsigned long offset; /* Offset from function entry */
+};
+
+/* Perf probe probing argument field chain */
+struct perf_probe_arg_field {
+ struct perf_probe_arg_field *next; /* Next field */
+ char *name; /* Name of the field */
+ long index; /* Array index number */
+ bool ref; /* Referencing flag */
+};
+
+/* Perf probe probing argument */
+struct perf_probe_arg {
+ char *name; /* Argument name */
+ char *var; /* Variable name */
+ char *type; /* Type name */
+ struct perf_probe_arg_field *field; /* Structure fields */
+};
+
+/* Perf probe probing event (point + arg) */
+struct perf_probe_event {
+ char *event; /* Event name */
+ char *group; /* Group name */
+ struct perf_probe_point point; /* Probe point */
+ int nargs; /* Number of arguments */
+ struct perf_probe_arg *args; /* Arguments */
+};
+
+
+/* Line number container */
+struct line_node {
+ struct list_head list;
+ int line;
+};
+
+/* Line range */
+struct line_range {
+ char *file; /* File name */
+ char *function; /* Function name */
+ int start; /* Start line number */
+ int end; /* End line number */
+ int offset; /* Start line offset */
+ char *path; /* Real path name */
+ char *comp_dir; /* Compile directory */
+ struct list_head line_list; /* Visible lines */
+};
+
+/* Command string to events */
+extern int parse_perf_probe_command(const char *cmd,
+ struct perf_probe_event *pev);
+
+/* Events to command string */
+extern char *synthesize_perf_probe_command(struct perf_probe_event *pev);
+extern char *synthesize_probe_trace_command(struct probe_trace_event *tev);
+extern int synthesize_perf_probe_arg(struct perf_probe_arg *pa, char *buf,
+ size_t len);
+
+/* Check whether the perf_probe_event needs debuginfo */
+extern bool perf_probe_event_need_dwarf(struct perf_probe_event *pev);
+
+/* Release event contents */
+extern void clear_perf_probe_event(struct perf_probe_event *pev);
+
+/* Command string to line-range */
+extern int parse_line_range_desc(const char *cmd, struct line_range *lr);
+
+
+extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
+ bool force_add, int max_probe_points);
+extern int del_perf_probe_events(struct strlist *dellist);
+extern int show_perf_probe_events(void);
+extern int show_line_range(struct line_range *lr);
+
+
+/* Maximum index number of event-name postfix */
+#define MAX_EVENT_INDEX 1024
+
+#endif /*_PROBE_EVENT_H */
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
new file mode 100644
index 00000000..32b81f70
--- /dev/null
+++ b/tools/perf/util/probe-finder.c
@@ -0,0 +1,1457 @@
+/*
+ * probe-finder.c : C expression to kprobe event converter
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <sys/utsname.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <dwarf-regs.h>
+
+#include "event.h"
+#include "debug.h"
+#include "util.h"
+#include "symbol.h"
+#include "probe-finder.h"
+
+/* Kprobe tracer basic type is up to u64 */
+#define MAX_BASIC_TYPE_BITS 64
+
+/*
+ * Compare the tail of two strings.
+ * Return 0 if the whole of either string matches the tail of the other.
+ */
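+/*
+ * For example (illustrative): strtailcmp("kernel/sched.c", "sched.c")
+ * returns 0, as does strtailcmp("sched.c", "kernel/sched.c").
+ */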
+static int strtailcmp(const char *s1, const char *s2)
+{
+ int i1 = strlen(s1);
+ int i2 = strlen(s2);
+ while (--i1 >= 0 && --i2 >= 0) {
+ if (s1[i1] != s2[i2])
+ return s1[i1] - s2[i2];
+ }
+ return 0;
+}
+
+/* Line number list operations */
+
+/* Add a line to line number list */
+static int line_list__add_line(struct list_head *head, int line)
+{
+ struct line_node *ln;
+ struct list_head *p;
+
+ /* Reverse search, because new line will be the last one */
+ list_for_each_entry_reverse(ln, head, list) {
+ if (ln->line < line) {
+ p = &ln->list;
+ goto found;
+ } else if (ln->line == line) /* Already exist */
+ return 1;
+ }
+ /* List is empty, or the smallest entry */
+ p = head;
+found:
+ pr_debug("line list: add a line %u\n", line);
+ ln = zalloc(sizeof(struct line_node));
+ if (ln == NULL)
+ return -ENOMEM;
+ ln->line = line;
+ INIT_LIST_HEAD(&ln->list);
+ list_add(&ln->list, p);
+ return 0;
+}
+
+/* Check if the line is in the line number list */
+static int line_list__has_line(struct list_head *head, int line)
+{
+ struct line_node *ln;
+
+ /* Walk the list to see whether the line is already there */
+ list_for_each_entry(ln, head, list)
+ if (ln->line == line)
+ return 1;
+
+ return 0;
+}
+
+/* Init line number list */
+static void line_list__init(struct list_head *head)
+{
+ INIT_LIST_HEAD(head);
+}
+
+/* Free line number list */
+static void line_list__free(struct list_head *head)
+{
+ struct line_node *ln;
+ while (!list_empty(head)) {
+ ln = list_first_entry(head, struct line_node, list);
+ list_del(&ln->list);
+ free(ln);
+ }
+}
+
+/* Dwarf wrappers */
+
+/* Find the realpath of the target file. */
+static const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname)
+{
+ Dwarf_Files *files;
+ size_t nfiles, i;
+ const char *src = NULL;
+ int ret;
+
+ if (!fname)
+ return NULL;
+
+ ret = dwarf_getsrcfiles(cu_die, &files, &nfiles);
+ if (ret != 0)
+ return NULL;
+
+ for (i = 0; i < nfiles; i++) {
+ src = dwarf_filesrc(files, i, NULL, NULL);
+ if (strtailcmp(src, fname) == 0)
+ break;
+ }
+ if (i == nfiles)
+ return NULL;
+ return src;
+}
+
+/* Get DW_AT_comp_dir (should be NULL with older gcc) */
+static const char *cu_get_comp_dir(Dwarf_Die *cu_die)
+{
+ Dwarf_Attribute attr;
+ if (dwarf_attr(cu_die, DW_AT_comp_dir, &attr) == NULL)
+ return NULL;
+ return dwarf_formstring(&attr);
+}
+
+/* Compare diename and tname */
+static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
+{
+ const char *name;
+ name = dwarf_diename(dw_die);
+ return name ? (strcmp(tname, name) == 0) : false;
+}
+
+/* Get type die, but skip qualifiers and typedef */
+static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
+{
+ Dwarf_Attribute attr;
+ int tag;
+
+ do {
+ if (dwarf_attr(vr_die, DW_AT_type, &attr) == NULL ||
+ dwarf_formref_die(&attr, die_mem) == NULL)
+ return NULL;
+
+ tag = dwarf_tag(die_mem);
+ vr_die = die_mem;
+ } while (tag == DW_TAG_const_type ||
+ tag == DW_TAG_restrict_type ||
+ tag == DW_TAG_volatile_type ||
+ tag == DW_TAG_shared_type ||
+ tag == DW_TAG_typedef);
+
+ return die_mem;
+}
+
+static bool die_is_signed_type(Dwarf_Die *tp_die)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Word ret;
+
+ if (dwarf_attr(tp_die, DW_AT_encoding, &attr) == NULL ||
+ dwarf_formudata(&attr, &ret) != 0)
+ return false;
+
+ return (ret == DW_ATE_signed_char || ret == DW_ATE_signed ||
+ ret == DW_ATE_signed_fixed);
+}
+
+static int die_get_byte_size(Dwarf_Die *tp_die)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Word ret;
+
+ if (dwarf_attr(tp_die, DW_AT_byte_size, &attr) == NULL ||
+ dwarf_formudata(&attr, &ret) != 0)
+ return 0;
+
+ return (int)ret;
+}
+
+/* Get data_member_location offset */
+static int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Op *expr;
+ size_t nexpr;
+ int ret;
+
+ if (dwarf_attr(mb_die, DW_AT_data_member_location, &attr) == NULL)
+ return -ENOENT;
+
+ if (dwarf_formudata(&attr, offs) != 0) {
+ /* DW_AT_data_member_location should be DW_OP_plus_uconst */
+ ret = dwarf_getlocation(&attr, &expr, &nexpr);
+ if (ret < 0 || nexpr == 0)
+ return -ENOENT;
+
+ if (expr[0].atom != DW_OP_plus_uconst || nexpr != 1) {
+ pr_debug("Unable to get offset:Unexpected OP %x (%zd)\n",
+ expr[0].atom, nexpr);
+ return -ENOTSUP;
+ }
+ *offs = (Dwarf_Word)expr[0].number;
+ }
+ return 0;
+}
+
+/* Return values for die_find callbacks */
+enum {
+ DIE_FIND_CB_FOUND = 0, /* End of Search */
+ DIE_FIND_CB_CHILD = 1, /* Search only children */
+ DIE_FIND_CB_SIBLING = 2, /* Search only siblings */
+ DIE_FIND_CB_CONTINUE = 3, /* Search children and siblings */
+};
+
+/* Search a child die */
+static Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
+ int (*callback)(Dwarf_Die *, void *),
+ void *data, Dwarf_Die *die_mem)
+{
+ Dwarf_Die child_die;
+ int ret;
+
+ ret = dwarf_child(rt_die, die_mem);
+ if (ret != 0)
+ return NULL;
+
+ do {
+ ret = callback(die_mem, data);
+ if (ret == DIE_FIND_CB_FOUND)
+ return die_mem;
+
+ if ((ret & DIE_FIND_CB_CHILD) &&
+ die_find_child(die_mem, callback, data, &child_die)) {
+ memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
+ return die_mem;
+ }
+ } while ((ret & DIE_FIND_CB_SIBLING) &&
+ dwarf_siblingof(die_mem, die_mem) == 0);
+
+ return NULL;
+}
+
+struct __addr_die_search_param {
+ Dwarf_Addr addr;
+ Dwarf_Die *die_mem;
+};
+
+static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
+{
+ struct __addr_die_search_param *ad = data;
+
+ if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
+ dwarf_haspc(fn_die, ad->addr)) {
+ memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
+ return DWARF_CB_ABORT;
+ }
+ return DWARF_CB_OK;
+}
+
+/* Search for a real subprogram that includes this address */
+static Dwarf_Die *die_find_real_subprogram(Dwarf_Die *cu_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
+{
+ struct __addr_die_search_param ad;
+ ad.addr = addr;
+ ad.die_mem = die_mem;
+ /* dwarf_getscopes can't find subprogram. */
+ if (!dwarf_getfuncs(cu_die, __die_search_func_cb, &ad, 0))
+ return NULL;
+ else
+ return die_mem;
+}
+
+/* die_find callback for inline function search */
+static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
+{
+ Dwarf_Addr *addr = data;
+
+ if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
+ dwarf_haspc(die_mem, *addr))
+ return DIE_FIND_CB_FOUND;
+
+ return DIE_FIND_CB_CONTINUE;
+}
+
+/* Similar to dwarf_getfuncs, but returns inlined_subroutine if one exists. */
+static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem)
+{
+ return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
+}
+
+static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
+{
+ const char *name = data;
+ int tag;
+
+ tag = dwarf_tag(die_mem);
+ if ((tag == DW_TAG_formal_parameter ||
+ tag == DW_TAG_variable) &&
+ die_compare_name(die_mem, name))
+ return DIE_FIND_CB_FOUND;
+
+ return DIE_FIND_CB_CONTINUE;
+}
+
+/* Find a variable called 'name' */
+static Dwarf_Die *die_find_variable(Dwarf_Die *sp_die, const char *name,
+ Dwarf_Die *die_mem)
+{
+ return die_find_child(sp_die, __die_find_variable_cb, (void *)name,
+ die_mem);
+}
+
+static int __die_find_member_cb(Dwarf_Die *die_mem, void *data)
+{
+ const char *name = data;
+
+ if ((dwarf_tag(die_mem) == DW_TAG_member) &&
+ die_compare_name(die_mem, name))
+ return DIE_FIND_CB_FOUND;
+
+ return DIE_FIND_CB_SIBLING;
+}
+
+/* Find a member called 'name' */
+static Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
+ Dwarf_Die *die_mem)
+{
+ return die_find_child(st_die, __die_find_member_cb, (void *)name,
+ die_mem);
+}
+
+/*
+ * Probe finder related functions
+ */
+
+static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs)
+{
+ struct probe_trace_arg_ref *ref;
+ ref = zalloc(sizeof(struct probe_trace_arg_ref));
+ if (ref != NULL)
+ ref->offset = offs;
+ return ref;
+}
+
+/* Convert a variable location into a trace argument */
+static int convert_variable_location(Dwarf_Die *vr_die, struct probe_finder *pf)
+{
+ Dwarf_Attribute attr;
+ Dwarf_Op *op;
+ size_t nops;
+ unsigned int regn;
+ Dwarf_Word offs = 0;
+ bool ref = false;
+ const char *regs;
+ struct probe_trace_arg *tvar = pf->tvar;
+ int ret;
+
+ /* TODO: handle more than one expression */
+ if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL ||
+ dwarf_getlocation_addr(&attr, pf->addr, &op, &nops, 1) <= 0 ||
+ nops == 0) {
+ /* TODO: Support const_value */
+ pr_err("Failed to find the location of %s at this address.\n"
+ " Perhaps, it has been optimized out.\n", pf->pvar->var);
+ return -ENOENT;
+ }
+
+ if (op->atom == DW_OP_addr) {
+ /* Static variables live in memory (not on the stack), so emit "@varname" */
+ ret = strlen(dwarf_diename(vr_die));
+ tvar->value = zalloc(ret + 2);
+ if (tvar->value == NULL)
+ return -ENOMEM;
+ snprintf(tvar->value, ret + 2, "@%s", dwarf_diename(vr_die));
+ tvar->ref = alloc_trace_arg_ref((long)offs);
+ if (tvar->ref == NULL)
+ return -ENOMEM;
+ return 0;
+ }
+
+ /* If this is based on the frame base, set the offset */
+ if (op->atom == DW_OP_fbreg) {
+ if (pf->fb_ops == NULL) {
+ pr_warning("The attribute of frame base is not "
+ "supported.\n");
+ return -ENOTSUP;
+ }
+ ref = true;
+ offs = op->number;
+ op = &pf->fb_ops[0];
+ }
+
+ if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) {
+ regn = op->atom - DW_OP_breg0;
+ offs += op->number;
+ ref = true;
+ } else if (op->atom >= DW_OP_reg0 && op->atom <= DW_OP_reg31) {
+ regn = op->atom - DW_OP_reg0;
+ } else if (op->atom == DW_OP_bregx) {
+ regn = op->number;
+ offs += op->number2;
+ ref = true;
+ } else if (op->atom == DW_OP_regx) {
+ regn = op->number;
+ } else {
+ pr_warning("DW_OP %x is not supported.\n", op->atom);
+ return -ENOTSUP;
+ }
+
+ regs = get_arch_regstr(regn);
+ if (!regs) {
+ pr_warning("Mapping for DWARF register number %u missing on this architecture.", regn);
+ return -ERANGE;
+ }
+
+ tvar->value = strdup(regs);
+ if (tvar->value == NULL)
+ return -ENOMEM;
+
+ if (ref) {
+ tvar->ref = alloc_trace_arg_ref((long)offs);
+ if (tvar->ref == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int convert_variable_type(Dwarf_Die *vr_die,
+ struct probe_trace_arg *tvar,
+ const char *cast)
+{
+ struct probe_trace_arg_ref **ref_ptr = &tvar->ref;
+ Dwarf_Die type;
+ char buf[16];
+ int ret;
+
+ /* TODO: check all types */
+ if (cast && strcmp(cast, "string") != 0) {
+ /* Non string type is OK */
+ tvar->type = strdup(cast);
+ return (tvar->type == NULL) ? -ENOMEM : 0;
+ }
+
+ if (die_get_real_type(vr_die, &type) == NULL) {
+ pr_warning("Failed to get a type information of %s.\n",
+ dwarf_diename(vr_die));
+ return -ENOENT;
+ }
+
+ pr_debug("%s type is %s.\n",
+ dwarf_diename(vr_die), dwarf_diename(&type));
+
+ if (cast && strcmp(cast, "string") == 0) { /* String type */
+ ret = dwarf_tag(&type);
+ if (ret != DW_TAG_pointer_type &&
+ ret != DW_TAG_array_type) {
+ pr_warning("Failed to cast into string: "
+ "%s(%s) is not a pointer nor array.",
+ dwarf_diename(vr_die), dwarf_diename(&type));
+ return -EINVAL;
+ }
+ if (ret == DW_TAG_pointer_type) {
+ if (die_get_real_type(&type, &type) == NULL) {
+ pr_warning("Failed to get a type information.");
+ return -ENOENT;
+ }
+ while (*ref_ptr)
+ ref_ptr = &(*ref_ptr)->next;
+ /* Add new reference with offset +0 */
+ *ref_ptr = zalloc(sizeof(struct probe_trace_arg_ref));
+ if (*ref_ptr == NULL) {
+ pr_warning("Out of memory error\n");
+ return -ENOMEM;
+ }
+ }
+ if (!die_compare_name(&type, "char") &&
+ !die_compare_name(&type, "unsigned char")) {
+ pr_warning("Failed to cast into string: "
+ "%s is not (unsigned) char *.",
+ dwarf_diename(vr_die));
+ return -EINVAL;
+ }
+ tvar->type = strdup(cast);
+ return (tvar->type == NULL) ? -ENOMEM : 0;
+ }
+
+ ret = die_get_byte_size(&type) * 8;
+ if (ret) {
+ /* Check the bitwidth */
+ if (ret > MAX_BASIC_TYPE_BITS) {
+ pr_info("%s exceeds max-bitwidth."
+ " Cut down to %d bits.\n",
+ dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
+ ret = MAX_BASIC_TYPE_BITS;
+ }
+
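+ /* e.g. a 4-byte signed member is rendered as "s32", an 8-byte unsigned one as "u64" */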
+ ret = snprintf(buf, 16, "%c%d",
+ die_is_signed_type(&type) ? 's' : 'u', ret);
+ if (ret < 0 || ret >= 16) {
+ if (ret >= 16)
+ ret = -E2BIG;
+ pr_warning("Failed to convert variable type: %s\n",
+ strerror(-ret));
+ return ret;
+ }
+ tvar->type = strdup(buf);
+ if (tvar->type == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
+ struct perf_probe_arg_field *field,
+ struct probe_trace_arg_ref **ref_ptr,
+ Dwarf_Die *die_mem)
+{
+ struct probe_trace_arg_ref *ref = *ref_ptr;
+ Dwarf_Die type;
+ Dwarf_Word offs;
+ int ret, tag;
+
+ pr_debug("converting %s in %s\n", field->name, varname);
+ if (die_get_real_type(vr_die, &type) == NULL) {
+ pr_warning("Failed to get the type of %s.\n", varname);
+ return -ENOENT;
+ }
+ pr_debug2("Var real type: (%x)\n", (unsigned)dwarf_dieoffset(&type));
+ tag = dwarf_tag(&type);
+
+ if (field->name[0] == '[' &&
+ (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)) {
+ if (field->next)
+ /* Save original type for next field */
+ memcpy(die_mem, &type, sizeof(*die_mem));
+ /* Get the type of this array */
+ if (die_get_real_type(&type, &type) == NULL) {
+ pr_warning("Failed to get the type of %s.\n", varname);
+ return -ENOENT;
+ }
+ pr_debug2("Array real type: (%x)\n",
+ (unsigned)dwarf_dieoffset(&type));
+ if (tag == DW_TAG_pointer_type) {
+ ref = zalloc(sizeof(struct probe_trace_arg_ref));
+ if (ref == NULL)
+ return -ENOMEM;
+ if (*ref_ptr)
+ (*ref_ptr)->next = ref;
+ else
+ *ref_ptr = ref;
+ }
+ ref->offset += die_get_byte_size(&type) * field->index;
+ if (!field->next)
+ /* Save vr_die for converting types */
+ memcpy(die_mem, vr_die, sizeof(*die_mem));
+ goto next;
+ } else if (tag == DW_TAG_pointer_type) {
+ /* Check the pointer and dereference */
+ if (!field->ref) {
+ pr_err("Semantic error: %s must be referred by '->'\n",
+ field->name);
+ return -EINVAL;
+ }
+ /* Get the type pointed by this pointer */
+ if (die_get_real_type(&type, &type) == NULL) {
+ pr_warning("Failed to get the type of %s.\n", varname);
+ return -ENOENT;
+ }
+ /* Verify it is a data structure */
+ if (dwarf_tag(&type) != DW_TAG_structure_type) {
+ pr_warning("%s is not a data structure.\n", varname);
+ return -EINVAL;
+ }
+
+ ref = zalloc(sizeof(struct probe_trace_arg_ref));
+ if (ref == NULL)
+ return -ENOMEM;
+ if (*ref_ptr)
+ (*ref_ptr)->next = ref;
+ else
+ *ref_ptr = ref;
+ } else {
+ /* Verify it is a data structure */
+ if (tag != DW_TAG_structure_type) {
+ pr_warning("%s is not a data structure.\n", varname);
+ return -EINVAL;
+ }
+ if (field->name[0] == '[') {
+ pr_err("Semantic error: %s is not a pointor nor array.",
+ varname);
+ return -EINVAL;
+ }
+ if (field->ref) {
+ pr_err("Semantic error: %s must be referred by '.'\n",
+ field->name);
+ return -EINVAL;
+ }
+ if (!ref) {
+ pr_warning("Structure on a register is not "
+ "supported yet.\n");
+ return -ENOTSUP;
+ }
+ }
+
+ if (die_find_member(&type, field->name, die_mem) == NULL) {
+ pr_warning("%s(tyep:%s) has no member %s.\n", varname,
+ dwarf_diename(&type), field->name);
+ return -EINVAL;
+ }
+
+ /* Get the offset of the field */
+ ret = die_get_data_member_location(die_mem, &offs);
+ if (ret < 0) {
+ pr_warning("Failed to get the offset of %s.\n", field->name);
+ return ret;
+ }
+ ref->offset += (long)offs;
+
+next:
+ /* Converting next field */
+ if (field->next)
+ return convert_variable_fields(die_mem, field->name,
+ field->next, &ref, die_mem);
+ else
+ return 0;
+}
+
+/* Convert a variable into kprobe event format */
+static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
+{
+ Dwarf_Die die_mem;
+ int ret;
+
+ pr_debug("Converting variable %s into trace event.\n",
+ dwarf_diename(vr_die));
+
+ ret = convert_variable_location(vr_die, pf);
+ if (ret == 0 && pf->pvar->field) {
+ ret = convert_variable_fields(vr_die, pf->pvar->var,
+ pf->pvar->field, &pf->tvar->ref,
+ &die_mem);
+ vr_die = &die_mem;
+ }
+ if (ret == 0)
+ ret = convert_variable_type(vr_die, pf->tvar, pf->pvar->type);
+ /* *expr will be cached in libdw. Don't free it. */
+ return ret;
+}
+
+/* Find a variable in a subprogram die */
+static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+ Dwarf_Die vr_die, *scopes;
+ char buf[32], *ptr;
+ int ret, nscopes;
+
+ if (!is_c_varname(pf->pvar->var)) {
+ /* Copy raw parameters */
+ pf->tvar->value = strdup(pf->pvar->var);
+ if (pf->tvar->value == NULL)
+ return -ENOMEM;
+ if (pf->pvar->type) {
+ pf->tvar->type = strdup(pf->pvar->type);
+ if (pf->tvar->type == NULL)
+ return -ENOMEM;
+ }
+ if (pf->pvar->name) {
+ pf->tvar->name = strdup(pf->pvar->name);
+ if (pf->tvar->name == NULL)
+ return -ENOMEM;
+ } else
+ pf->tvar->name = NULL;
+ return 0;
+ }
+
+ if (pf->pvar->name)
+ pf->tvar->name = strdup(pf->pvar->name);
+ else {
+ ret = synthesize_perf_probe_arg(pf->pvar, buf, 32);
+ if (ret < 0)
+ return ret;
+ ptr = strchr(buf, ':'); /* Change type separator to _ */
+ if (ptr)
+ *ptr = '_';
+ pf->tvar->name = strdup(buf);
+ }
+ if (pf->tvar->name == NULL)
+ return -ENOMEM;
+
+ pr_debug("Searching '%s' variable in context.\n",
+ pf->pvar->var);
+ /* Search child die for local variables and parameters. */
+ if (die_find_variable(sp_die, pf->pvar->var, &vr_die))
+ ret = convert_variable(&vr_die, pf);
+ else {
+ /* Search outer scopes */
+ nscopes = dwarf_getscopes_die(sp_die, &scopes);
+ if (nscopes > 0) {
+ ret = dwarf_getscopevar(scopes, nscopes, pf->pvar->var,
+ 0, NULL, 0, 0, &vr_die);
+ if (ret >= 0)
+ ret = convert_variable(&vr_die, pf);
+ else
+ ret = -ENOENT;
+ free(scopes);
+ } else
+ ret = -ENOENT;
+ }
+ if (ret < 0)
+ pr_warning("Failed to find '%s' in this function.\n",
+ pf->pvar->var);
+ return ret;
+}
+
+/* Convert a probe point into a trace event */
+static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+ struct probe_trace_event *tev;
+ Dwarf_Addr eaddr;
+ Dwarf_Die die_mem;
+ const char *name;
+ int ret, i;
+ Dwarf_Attribute fb_attr;
+ size_t nops;
+
+ if (pf->ntevs == pf->max_tevs) {
+ pr_warning("Too many( > %d) probe point found.\n",
+ pf->max_tevs);
+ return -ERANGE;
+ }
+ tev = &pf->tevs[pf->ntevs++];
+
+ /* If no real subprogram, find a real one */
+ if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) {
+ sp_die = die_find_real_subprogram(&pf->cu_die,
+ pf->addr, &die_mem);
+ if (!sp_die) {
+ pr_warning("Failed to find probe point in any "
+ "functions.\n");
+ return -ENOENT;
+ }
+ }
+
+ /* Copy the name of probe point */
+ name = dwarf_diename(sp_die);
+ if (name) {
+ if (dwarf_entrypc(sp_die, &eaddr) != 0) {
+ pr_warning("Failed to get entry pc of %s\n",
+ dwarf_diename(sp_die));
+ return -ENOENT;
+ }
+ tev->point.symbol = strdup(name);
+ if (tev->point.symbol == NULL)
+ return -ENOMEM;
+ tev->point.offset = (unsigned long)(pf->addr - eaddr);
+ } else
+ /* This function has no name. */
+ tev->point.offset = (unsigned long)pf->addr;
+
+ /* Return probe must be on the head of a subprogram */
+ if (pf->pev->point.retprobe) {
+ if (tev->point.offset != 0) {
+ pr_warning("Return probe must be on the head of"
+ " a real function\n");
+ return -EINVAL;
+ }
+ tev->point.retprobe = true;
+ }
+
+ pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
+ tev->point.offset);
+
+ /* Get the frame base attribute/ops */
+ dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr);
+ ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
+ if (ret <= 0 || nops == 0) {
+ pf->fb_ops = NULL;
+#if _ELFUTILS_PREREQ(0, 142)
+ } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
+ pf->cfi != NULL) {
+ Dwarf_Frame *frame;
+ if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 ||
+ dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
+ pr_warning("Failed to get CFA on 0x%jx\n",
+ (uintmax_t)pf->addr);
+ return -ENOENT;
+ }
+#endif
+ }
+
+ /* Find each argument */
+ tev->nargs = pf->pev->nargs;
+ tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+ if (tev->args == NULL)
+ return -ENOMEM;
+ for (i = 0; i < pf->pev->nargs; i++) {
+ pf->pvar = &pf->pev->args[i];
+ pf->tvar = &tev->args[i];
+ ret = find_variable(sp_die, pf);
+ if (ret != 0)
+ return ret;
+ }
+
+ /* *pf->fb_ops will be cached in libdw. Don't free it. */
+ pf->fb_ops = NULL;
+ return 0;
+}
+
+/* Find probe point from its line number */
+static int find_probe_point_by_line(struct probe_finder *pf)
+{
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ size_t nlines, i;
+ Dwarf_Addr addr;
+ int lineno;
+ int ret = 0;
+
+ if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
+ pr_warning("No source lines found in this CU.\n");
+ return -ENOENT;
+ }
+
+ for (i = 0; i < nlines && ret == 0; i++) {
+ line = dwarf_onesrcline(lines, i);
+ if (dwarf_lineno(line, &lineno) != 0 ||
+ lineno != pf->lno)
+ continue;
+
+ /* TODO: Get fileno from line, but how? */
+ if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
+ continue;
+
+ if (dwarf_lineaddr(line, &addr) != 0) {
+ pr_warning("Failed to get the address of the line.\n");
+ return -ENOENT;
+ }
+ pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n",
+ (int)i, lineno, (uintmax_t)addr);
+ pf->addr = addr;
+
+ ret = convert_probe_point(NULL, pf);
+ /* Continue, because the target line might be inlined. */
+ }
+ return ret;
+}
+
+/* Find lines which match lazy pattern */
+static int find_lazy_match_lines(struct list_head *head,
+ const char *fname, const char *pat)
+{
+ char *fbuf, *p1, *p2;
+ int fd, line, nlines = -1;
+ struct stat st;
+
+ fd = open(fname, O_RDONLY);
+ if (fd < 0) {
+ pr_warning("Failed to open %s: %s\n", fname, strerror(-fd));
+ return -errno;
+ }
+
+ if (fstat(fd, &st) < 0) {
+ pr_warning("Failed to get the size of %s: %s\n",
+ fname, strerror(errno));
+ nlines = -errno;
+ goto out_close;
+ }
+
+ nlines = -ENOMEM;
+ fbuf = malloc(st.st_size + 2);
+ if (fbuf == NULL)
+ goto out_close;
+ if (read(fd, fbuf, st.st_size) < 0) {
+ pr_warning("Failed to read %s: %s\n", fname, strerror(errno));
+ nlines = -errno;
+ goto out_free_fbuf;
+ }
+ fbuf[st.st_size] = '\n'; /* Dummy line */
+ fbuf[st.st_size + 1] = '\0';
+ p1 = fbuf;
+ line = 1;
+ nlines = 0;
+ while ((p2 = strchr(p1, '\n')) != NULL) {
+ *p2 = '\0';
+ if (strlazymatch(p1, pat)) {
+ line_list__add_line(head, line);
+ nlines++;
+ }
+ line++;
+ p1 = p2 + 1;
+ }
+out_free_fbuf:
+ free(fbuf);
+out_close:
+ close(fd);
+ return nlines;
+}
+
+/* Find probe points from lazy pattern */
+static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
+{
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ size_t nlines, i;
+ Dwarf_Addr addr;
+ Dwarf_Die die_mem;
+ int lineno;
+ int ret = 0;
+
+ if (list_empty(&pf->lcache)) {
+ /* Matching lazy line pattern */
+ ret = find_lazy_match_lines(&pf->lcache, pf->fname,
+ pf->pev->point.lazy_line);
+ if (ret == 0) {
+ pr_debug("No matched lines found in %s.\n", pf->fname);
+ return 0;
+ } else if (ret < 0)
+ return ret;
+ }
+
+ if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
+ pr_warning("No source lines found in this CU.\n");
+ return -ENOENT;
+ }
+
+ for (i = 0; i < nlines && ret >= 0; i++) {
+ line = dwarf_onesrcline(lines, i);
+
+ if (dwarf_lineno(line, &lineno) != 0 ||
+ !line_list__has_line(&pf->lcache, lineno))
+ continue;
+
+ /* TODO: Get fileno from line, but how? */
+ if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
+ continue;
+
+ if (dwarf_lineaddr(line, &addr) != 0) {
+ pr_debug("Failed to get the address of line %d.\n",
+ lineno);
+ continue;
+ }
+ if (sp_die) {
+ /* Address filtering 1: does sp_die include addr? */
+ if (!dwarf_haspc(sp_die, addr))
+ continue;
+ /* Address filtering 2: no inlined child includes addr? */
+ if (die_find_inlinefunc(sp_die, addr, &die_mem))
+ continue;
+ }
+
+ pr_debug("Probe line found: line[%d]:%d addr:0x%llx\n",
+ (int)i, lineno, (unsigned long long)addr);
+ pf->addr = addr;
+
+ ret = convert_probe_point(sp_die, pf);
+ /* Continue, because the target line might be inlined. */
+ }
+ /* TODO: deallocate lines, but how? */
+ return ret;
+}
+
+/* Callback parameter with return value */
+struct dwarf_callback_param {
+ void *data;
+ int retval;
+};
+
+static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
+{
+ struct dwarf_callback_param *param = data;
+ struct probe_finder *pf = param->data;
+ struct perf_probe_point *pp = &pf->pev->point;
+ Dwarf_Addr addr;
+
+ if (pp->lazy_line)
+ param->retval = find_probe_point_lazy(in_die, pf);
+ else {
+ /* Get probe address */
+ if (dwarf_entrypc(in_die, &addr) != 0) {
+ pr_warning("Failed to get entry pc of %s.\n",
+ dwarf_diename(in_die));
+ param->retval = -ENOENT;
+ return DWARF_CB_ABORT;
+ }
+ pf->addr = addr;
+ pf->addr += pp->offset;
+ pr_debug("found inline addr: 0x%jx\n",
+ (uintmax_t)pf->addr);
+
+ param->retval = convert_probe_point(in_die, pf);
+ if (param->retval < 0)
+ return DWARF_CB_ABORT;
+ }
+
+ return DWARF_CB_OK;
+}
+
+/* Search function from function name */
+static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
+{
+ struct dwarf_callback_param *param = data;
+ struct probe_finder *pf = param->data;
+ struct perf_probe_point *pp = &pf->pev->point;
+
+ /* Check tag and diename */
+ if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
+ !die_compare_name(sp_die, pp->function))
+ return DWARF_CB_OK;
+
+ pf->fname = dwarf_decl_file(sp_die);
+ if (pp->line) { /* Function relative line */
+ dwarf_decl_line(sp_die, &pf->lno);
+ pf->lno += pp->line;
+ param->retval = find_probe_point_by_line(pf);
+ } else if (!dwarf_func_inline(sp_die)) {
+ /* Real function */
+ if (pp->lazy_line)
+ param->retval = find_probe_point_lazy(sp_die, pf);
+ else {
+ if (dwarf_entrypc(sp_die, &pf->addr) != 0) {
+ pr_warning("Failed to get entry pc of %s.\n",
+ dwarf_diename(sp_die));
+ param->retval = -ENOENT;
+ return DWARF_CB_ABORT;
+ }
+ pf->addr += pp->offset;
+ /* TODO: Check the address in this function */
+ param->retval = convert_probe_point(sp_die, pf);
+ }
+ } else {
+ struct dwarf_callback_param _param = {.data = (void *)pf,
+ .retval = 0};
+ /* Inlined function: search instances */
+ dwarf_func_inline_instances(sp_die, probe_point_inline_cb,
+ &_param);
+ param->retval = _param.retval;
+ }
+
+ return DWARF_CB_ABORT; /* Exit; the same symbol cannot appear again in this CU. */
+}
+
+static int find_probe_point_by_func(struct probe_finder *pf)
+{
+ struct dwarf_callback_param _param = {.data = (void *)pf,
+ .retval = 0};
+ dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, &_param, 0);
+ return _param.retval;
+}
+
+/* Find probe_trace_events specified by perf_probe_event from debuginfo */
+int find_probe_trace_events(int fd, struct perf_probe_event *pev,
+ struct probe_trace_event **tevs, int max_tevs)
+{
+ struct probe_finder pf = {.pev = pev, .max_tevs = max_tevs};
+ struct perf_probe_point *pp = &pev->point;
+ Dwarf_Off off, noff;
+ size_t cuhl;
+ Dwarf_Die *diep;
+ Dwarf *dbg;
+ int ret = 0;
+
+ pf.tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs);
+ if (pf.tevs == NULL)
+ return -ENOMEM;
+ *tevs = pf.tevs;
+ pf.ntevs = 0;
+
+ dbg = dwarf_begin(fd, DWARF_C_READ);
+ if (!dbg) {
+ pr_warning("No dwarf info found in the vmlinux - "
+ "please rebuild with CONFIG_DEBUG_INFO=y.\n");
+ free(pf.tevs);
+ *tevs = NULL;
+ return -EBADF;
+ }
+
+#if _ELFUTILS_PREREQ(0, 142)
+ /* Get the call frame information from this dwarf */
+ pf.cfi = dwarf_getcfi(dbg);
+#endif
+
+ off = 0;
+ line_list__init(&pf.lcache);
+ /* Loop on CUs (Compilation Unit) */
+ while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) &&
+ ret >= 0) {
+ /* Get the DIE(Debugging Information Entry) of this CU */
+ diep = dwarf_offdie(dbg, off + cuhl, &pf.cu_die);
+ if (!diep)
+ continue;
+
+ /* Check if target file is included. */
+ if (pp->file)
+ pf.fname = cu_find_realpath(&pf.cu_die, pp->file);
+ else
+ pf.fname = NULL;
+
+ if (!pp->file || pf.fname) {
+ if (pp->function)
+ ret = find_probe_point_by_func(&pf);
+ else if (pp->lazy_line)
+ ret = find_probe_point_lazy(NULL, &pf);
+ else {
+ pf.lno = pp->line;
+ ret = find_probe_point_by_line(&pf);
+ }
+ }
+ off = noff;
+ }
+ line_list__free(&pf.lcache);
+ dwarf_end(dbg);
+
+ return (ret < 0) ? ret : pf.ntevs;
+}
+
+/* Reverse search */
+int find_perf_probe_point(int fd, unsigned long addr,
+ struct perf_probe_point *ppt)
+{
+ Dwarf_Die cudie, spdie, indie;
+ Dwarf *dbg;
+ Dwarf_Line *line;
+ Dwarf_Addr laddr, eaddr;
+ const char *tmp;
+ int lineno, ret = 0;
+ bool found = false;
+
+ dbg = dwarf_begin(fd, DWARF_C_READ);
+ if (!dbg)
+ return -EBADF;
+
+ /* Find cu die */
+ if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr, &cudie)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Find a corresponding line */
+ line = dwarf_getsrc_die(&cudie, (Dwarf_Addr)addr);
+ if (line) {
+ if (dwarf_lineaddr(line, &laddr) == 0 &&
+ (Dwarf_Addr)addr == laddr &&
+ dwarf_lineno(line, &lineno) == 0) {
+ tmp = dwarf_linesrc(line, NULL, NULL);
+ if (tmp) {
+ ppt->line = lineno;
+ ppt->file = strdup(tmp);
+ if (ppt->file == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ found = true;
+ }
+ }
+ }
+
+ /* Find a corresponding function */
+ if (die_find_real_subprogram(&cudie, (Dwarf_Addr)addr, &spdie)) {
+ tmp = dwarf_diename(&spdie);
+ if (!tmp || dwarf_entrypc(&spdie, &eaddr) != 0)
+ goto end;
+
+ if (ppt->line) {
+ if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr,
+ &indie)) {
+ /* addr in an inline function */
+ tmp = dwarf_diename(&indie);
+ if (!tmp)
+ goto end;
+ ret = dwarf_decl_line(&indie, &lineno);
+ } else {
+ if (eaddr == addr) { /* Function entry */
+ lineno = ppt->line;
+ ret = 0;
+ } else
+ ret = dwarf_decl_line(&spdie, &lineno);
+ }
+ if (ret == 0) {
+ /* Make a relative line number */
+ ppt->line -= lineno;
+ goto found;
+ }
+ }
+ /* We don't have a line number, let's use offset */
+ ppt->offset = addr - (unsigned long)eaddr;
+found:
+ ppt->function = strdup(tmp);
+ if (ppt->function == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ found = true;
+ }
+
+end:
+ dwarf_end(dbg);
+ if (ret >= 0)
+ ret = found ? 1 : 0;
+ return ret;
+}
+
+/* Add a line and store the src path */
+static int line_range_add_line(const char *src, unsigned int lineno,
+ struct line_range *lr)
+{
+ /* Copy source path */
+ if (!lr->path) {
+ lr->path = strdup(src);
+ if (lr->path == NULL)
+ return -ENOMEM;
+ }
+ return line_list__add_line(&lr->line_list, lineno);
+}
+
+/* Search function declaration lines */
+static int line_range_funcdecl_cb(Dwarf_Die *sp_die, void *data)
+{
+ struct dwarf_callback_param *param = data;
+ struct line_finder *lf = param->data;
+ const char *src;
+ int lineno;
+
+ src = dwarf_decl_file(sp_die);
+ if (src && strtailcmp(src, lf->fname) != 0)
+ return DWARF_CB_OK;
+
+ if (dwarf_decl_line(sp_die, &lineno) != 0 ||
+ (lf->lno_s > lineno || lf->lno_e < lineno))
+ return DWARF_CB_OK;
+
+ param->retval = line_range_add_line(src, lineno, lf->lr);
+ if (param->retval < 0)
+ return DWARF_CB_ABORT;
+ return DWARF_CB_OK;
+}
+
+static int find_line_range_func_decl_lines(struct line_finder *lf)
+{
+ struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
+ dwarf_getfuncs(&lf->cu_die, line_range_funcdecl_cb, &param, 0);
+ return param.retval;
+}
+
+/* Find line range from its line number */
+static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
+{
+ Dwarf_Lines *lines;
+ Dwarf_Line *line;
+ size_t nlines, i;
+ Dwarf_Addr addr;
+ int lineno, ret = 0;
+ const char *src;
+ Dwarf_Die die_mem;
+
+ line_list__init(&lf->lr->line_list);
+ if (dwarf_getsrclines(&lf->cu_die, &lines, &nlines) != 0) {
+ pr_warning("No source lines found in this CU.\n");
+ return -ENOENT;
+ }
+
+ /* Search probable lines on lines list */
+ for (i = 0; i < nlines; i++) {
+ line = dwarf_onesrcline(lines, i);
+ if (dwarf_lineno(line, &lineno) != 0 ||
+ (lf->lno_s > lineno || lf->lno_e < lineno))
+ continue;
+
+ if (sp_die) {
+ /* Address filtering 1: does sp_die include addr? */
+ if (dwarf_lineaddr(line, &addr) != 0 ||
+ !dwarf_haspc(sp_die, addr))
+ continue;
+
+ /* Address filtering 2: no inlined child includes addr? */
+ if (die_find_inlinefunc(sp_die, addr, &die_mem))
+ continue;
+ }
+
+ /* TODO: Get fileno from line, but how? */
+ src = dwarf_linesrc(line, NULL, NULL);
+ if (strtailcmp(src, lf->fname) != 0)
+ continue;
+
+ ret = line_range_add_line(src, lineno, lf->lr);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * DWARF lines don't include function declaration lines. We have to
+ * check the function list or the given function instead.
+ */
+ if (sp_die) {
+ src = dwarf_decl_file(sp_die);
+ if (src && dwarf_decl_line(sp_die, &lineno) == 0 &&
+ (lf->lno_s <= lineno && lf->lno_e >= lineno))
+ ret = line_range_add_line(src, lineno, lf->lr);
+ } else
+ ret = find_line_range_func_decl_lines(lf);
+
+ /* Update status */
+ if (ret >= 0)
+ if (!list_empty(&lf->lr->line_list))
+ ret = lf->found = 1;
+ else
+ ret = 0; /* Lines are not found */
+ else {
+ free(lf->lr->path);
+ lf->lr->path = NULL;
+ }
+ return ret;
+}
+
+static int line_range_inline_cb(Dwarf_Die *in_die, void *data)
+{
+ struct dwarf_callback_param *param = data;
+
+ param->retval = find_line_range_by_line(in_die, param->data);
+ return DWARF_CB_ABORT; /* No need to find other instances */
+}
+
+/* Search function from function name */
+static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
+{
+ struct dwarf_callback_param *param = data;
+ struct line_finder *lf = param->data;
+ struct line_range *lr = lf->lr;
+
+ if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
+ die_compare_name(sp_die, lr->function)) {
+ lf->fname = dwarf_decl_file(sp_die);
+ dwarf_decl_line(sp_die, &lr->offset);
+ pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset);
+ lf->lno_s = lr->offset + lr->start;
+ if (lf->lno_s < 0) /* Overflow */
+ lf->lno_s = INT_MAX;
+ lf->lno_e = lr->offset + lr->end;
+ if (lf->lno_e < 0) /* Overflow */
+ lf->lno_e = INT_MAX;
+ pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e);
+ lr->start = lf->lno_s;
+ lr->end = lf->lno_e;
+ if (dwarf_func_inline(sp_die)) {
+ struct dwarf_callback_param _param;
+ _param.data = (void *)lf;
+ _param.retval = 0;
+ dwarf_func_inline_instances(sp_die,
+ line_range_inline_cb,
+ &_param);
+ param->retval = _param.retval;
+ } else
+ param->retval = find_line_range_by_line(sp_die, lf);
+ return DWARF_CB_ABORT;
+ }
+ return DWARF_CB_OK;
+}
+
+static int find_line_range_by_func(struct line_finder *lf)
+{
+ struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
+ dwarf_getfuncs(&lf->cu_die, line_range_search_cb, &param, 0);
+ return param.retval;
+}
+
+int find_line_range(int fd, struct line_range *lr)
+{
+ struct line_finder lf = {.lr = lr, .found = 0};
+ int ret = 0;
+ Dwarf_Off off = 0, noff;
+ size_t cuhl;
+ Dwarf_Die *diep;
+ Dwarf *dbg;
+ const char *comp_dir;
+
+ dbg = dwarf_begin(fd, DWARF_C_READ);
+ if (!dbg) {
+ pr_warning("No dwarf info found in the vmlinux - "
+ "please rebuild with CONFIG_DEBUG_INFO=y.\n");
+ return -EBADF;
+ }
+
+ /* Loop on CUs (Compilation Unit) */
+ while (!lf.found && ret >= 0) {
+ if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0)
+ break;
+
+ /* Get the DIE(Debugging Information Entry) of this CU */
+ diep = dwarf_offdie(dbg, off + cuhl, &lf.cu_die);
+ if (!diep)
+ continue;
+
+ /* Check if target file is included. */
+ if (lr->file)
+ lf.fname = cu_find_realpath(&lf.cu_die, lr->file);
+ else
+ lf.fname = NULL;
+
+ if (!lr->file || lf.fname) {
+ if (lr->function)
+ ret = find_line_range_by_func(&lf);
+ else {
+ lf.lno_s = lr->start;
+ lf.lno_e = lr->end;
+ ret = find_line_range_by_line(NULL, &lf);
+ }
+ }
+ off = noff;
+ }
+
+ /* Store comp_dir */
+ if (lf.found) {
+ comp_dir = cu_get_comp_dir(&lf.cu_die);
+ if (comp_dir) {
+ lr->comp_dir = strdup(comp_dir);
+ if (!lr->comp_dir)
+ ret = -ENOMEM;
+ }
+ }
+
+ pr_debug("path: %s\n", lr->path);
+ dwarf_end(dbg);
+
+ return (ret < 0) ? ret : lf.found;
+}
+
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
new file mode 100644
index 00000000..d47883f1
--- /dev/null
+++ b/tools/perf/util/probe-finder.h
@@ -0,0 +1,68 @@
+#ifndef _PROBE_FINDER_H
+#define _PROBE_FINDER_H
+
+#include <stdbool.h>
+#include "util.h"
+#include "probe-event.h"
+
+#define MAX_PATH_LEN 256
+#define MAX_PROBE_BUFFER 1024
+#define MAX_PROBES 128
+
+static inline int is_c_varname(const char *name)
+{
+ /* TODO */
+ return isalpha(name[0]) || name[0] == '_';
+}
+
+#ifdef DWARF_SUPPORT
+/* Find probe_trace_events specified by perf_probe_event from debuginfo */
+extern int find_probe_trace_events(int fd, struct perf_probe_event *pev,
+ struct probe_trace_event **tevs,
+ int max_tevs);
+
+/* Find a perf_probe_point from debuginfo */
+extern int find_perf_probe_point(int fd, unsigned long addr,
+ struct perf_probe_point *ppt);
+
+extern int find_line_range(int fd, struct line_range *lr);
+
+#include <dwarf.h>
+#include <elfutils/libdw.h>
+#include <elfutils/version.h>
+
+struct probe_finder {
+ struct perf_probe_event *pev; /* Target probe event */
+ struct probe_trace_event *tevs; /* Result trace events */
+ int ntevs; /* Number of trace events */
+ int max_tevs; /* Max number of trace events */
+
+ /* For function searching */
+ int lno; /* Line number */
+ Dwarf_Addr addr; /* Address */
+ const char *fname; /* Real file name */
+ Dwarf_Die cu_die; /* Current CU */
+ struct list_head lcache; /* Line cache for lazy match */
+
+ /* For variable searching */
+#if _ELFUTILS_PREREQ(0, 142)
+ Dwarf_CFI *cfi; /* Call Frame Information */
+#endif
+ Dwarf_Op *fb_ops; /* Frame base attribute */
+ struct perf_probe_arg *pvar; /* Current target variable */
+ struct probe_trace_arg *tvar; /* Current result variable */
+};
+
+struct line_finder {
+ struct line_range *lr; /* Target line range */
+
+ const char *fname; /* File name */
+ int lno_s; /* Start line number */
+ int lno_e; /* End line number */
+ Dwarf_Die cu_die; /* Current CU */
+ int found;
+};
+
+#endif /* DWARF_SUPPORT */
+
+#endif /*_PROBE_FINDER_H */
diff --git a/tools/perf/util/pstack.c b/tools/perf/util/pstack.c
new file mode 100644
index 00000000..13d36faf
--- /dev/null
+++ b/tools/perf/util/pstack.c
@@ -0,0 +1,75 @@
+/*
+ * Simple pointer stack
+ *
+ * (c) 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+
+#include "util.h"
+#include "pstack.h"
+#include <linux/kernel.h>
+#include <stdlib.h>
+
+struct pstack {
+ unsigned short top;
+ unsigned short max_nr_entries;
+ void *entries[0];
+};
+
+struct pstack *pstack__new(unsigned short max_nr_entries)
+{
+ struct pstack *self = zalloc((sizeof(*self) +
+ max_nr_entries * sizeof(void *)));
+ if (self != NULL)
+ self->max_nr_entries = max_nr_entries;
+ return self;
+}
+
+void pstack__delete(struct pstack *self)
+{
+ free(self);
+}
+
+bool pstack__empty(const struct pstack *self)
+{
+ return self->top == 0;
+}
+
+void pstack__remove(struct pstack *self, void *key)
+{
+ unsigned short i = self->top, last_index = self->top - 1;
+
+ while (i-- != 0) {
+ if (self->entries[i] == key) {
+ if (i < last_index)
+ memmove(self->entries + i,
+ self->entries + i + 1,
+ (last_index - i) * sizeof(void *));
+ --self->top;
+ return;
+ }
+ }
+ pr_err("%s: %p not on the pstack!\n", __func__, key);
+}
+
+void pstack__push(struct pstack *self, void *key)
+{
+ if (self->top == self->max_nr_entries) {
+ pr_err("%s: top=%d, overflow!\n", __func__, self->top);
+ return;
+ }
+ self->entries[self->top++] = key;
+}
+
+void *pstack__pop(struct pstack *self)
+{
+ void *ret;
+
+ if (self->top == 0) {
+ pr_err("%s: underflow!\n", __func__);
+ return NULL;
+ }
+
+ ret = self->entries[--self->top];
+ self->entries[self->top] = NULL;
+ return ret;
+}
diff --git a/tools/perf/util/pstack.h b/tools/perf/util/pstack.h
new file mode 100644
index 00000000..4cedea59
--- /dev/null
+++ b/tools/perf/util/pstack.h
@@ -0,0 +1,14 @@
+#ifndef _PERF_PSTACK_
+#define _PERF_PSTACK_
+
+#include <stdbool.h>
+
+struct pstack;
+struct pstack *pstack__new(unsigned short max_nr_entries);
+void pstack__delete(struct pstack *self);
+bool pstack__empty(const struct pstack *self);
+void pstack__remove(struct pstack *self, void *key);
+void pstack__push(struct pstack *self, void *key);
+void *pstack__pop(struct pstack *self);
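+
+/*
+ * Illustrative usage sketch (the names and the depth of 8 below are made
+ * up for this example, not part of the API):
+ *
+ *	struct pstack *fstack = pstack__new(8);
+ *
+ *	pstack__push(fstack, some_entry);
+ *	...
+ *	if (!pstack__empty(fstack))
+ *		some_entry = pstack__pop(fstack);
+ *	pstack__delete(fstack);
+ */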
+
+#endif /* _PERF_PSTACK_ */
diff --git a/tools/perf/util/quote.c b/tools/perf/util/quote.c
new file mode 100644
index 00000000..01f03242
--- /dev/null
+++ b/tools/perf/util/quote.c
@@ -0,0 +1,54 @@
+#include "cache.h"
+#include "quote.h"
+
+/* Help to copy the thing properly quoted for shell safety:
+ * any single quote is replaced with '\'', any exclamation point
+ * is replaced with '\!', and the whole thing is enclosed in a
+ * single quote pair.
+ *
+ * E.g.
+ * original sq_quote result
+ * name ==> name ==> 'name'
+ * a b ==> a b ==> 'a b'
+ * a'b ==> a'\''b ==> 'a'\''b'
+ * a!b ==> a'\!'b ==> 'a'\!'b'
+ */
+static inline int need_bs_quote(char c)
+{
+ return (c == '\'' || c == '!');
+}
+
+static void sq_quote_buf(struct strbuf *dst, const char *src)
+{
+ char *to_free = NULL;
+
+ if (dst->buf == src)
+ to_free = strbuf_detach(dst, NULL);
+
+ strbuf_addch(dst, '\'');
+ while (*src) {
+ size_t len = strcspn(src, "'!");
+ strbuf_add(dst, src, len);
+ src += len;
+ while (need_bs_quote(*src)) {
+ strbuf_addstr(dst, "'\\");
+ strbuf_addch(dst, *src++);
+ strbuf_addch(dst, '\'');
+ }
+ }
+ strbuf_addch(dst, '\'');
+ free(to_free);
+}
+
+void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
+{
+ int i;
+
+ /* Copy into destination buffer. */
+ strbuf_grow(dst, 255);
+ for (i = 0; argv[i]; ++i) {
+ strbuf_addch(dst, ' ');
+ sq_quote_buf(dst, argv[i]);
+ if (maxlen && dst->len > maxlen)
+ die("Too many or long arguments");
+ }
+}
diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h
new file mode 100644
index 00000000..172889ea
--- /dev/null
+++ b/tools/perf/util/quote.h
@@ -0,0 +1,29 @@
+#ifndef __PERF_QUOTE_H
+#define __PERF_QUOTE_H
+
+#include <stddef.h>
+#include <stdio.h>
+
+/* Help to copy the thing properly quoted for shell safety:
+ * any single quote is replaced with '\'', any exclamation point
+ * is replaced with '\!', and the whole thing is enclosed in a
+ * single quote pair.
+ *
+ * For example, if you are passing the result to system() as an
+ * argument:
+ *
+ * sprintf(cmd, "foobar %s %s", sq_quote(arg0), sq_quote(arg1))
+ *
+ * would be appropriate. If the system() is going to call ssh to
+ * run the command on the other side:
+ *
+ * sprintf(cmd, "git-diff-tree %s %s", sq_quote(arg0), sq_quote(arg1));
+ * sprintf(rcmd, "ssh %s %s", sq_util/quote.host), sq_quote(cmd));
+ *
+ * Note that the above examples leak memory! Remember to free result from
+ * sq_quote() in a real application.
+ */
+
+extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
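+
+/*
+ * Minimal illustrative sketch (the argv values are made up, and it
+ * assumes the usual STRBUF_INIT initializer from strbuf.h):
+ *
+ *	struct strbuf cmd = STRBUF_INIT;
+ *	const char *argv[] = { "perf", "record", "-e", "cycles", NULL };
+ *
+ *	sq_quote_argv(&cmd, argv, 0);
+ *	// cmd.buf now holds " 'perf' 'record' '-e' 'cycles'"
+ */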
+
+#endif /* __PERF_QUOTE_H */
diff --git a/tools/perf/util/run-command.c b/tools/perf/util/run-command.c
new file mode 100644
index 00000000..da8e9b28
--- /dev/null
+++ b/tools/perf/util/run-command.c
@@ -0,0 +1,214 @@
+#include "cache.h"
+#include "run-command.h"
+#include "exec_cmd.h"
+
+static inline void close_pair(int fd[2])
+{
+ close(fd[0]);
+ close(fd[1]);
+}
+
+static inline void dup_devnull(int to)
+{
+ int fd = open("/dev/null", O_RDWR);
+ dup2(fd, to);
+ close(fd);
+}
+
+int start_command(struct child_process *cmd)
+{
+ int need_in, need_out, need_err;
+ int fdin[2], fdout[2], fderr[2];
+
+ /*
+ * In case of errors we must keep the promise to close FDs
+ * that have been passed in via ->in and ->out.
+ */
+
+ need_in = !cmd->no_stdin && cmd->in < 0;
+ if (need_in) {
+ if (pipe(fdin) < 0) {
+ if (cmd->out > 0)
+ close(cmd->out);
+ return -ERR_RUN_COMMAND_PIPE;
+ }
+ cmd->in = fdin[1];
+ }
+
+ need_out = !cmd->no_stdout
+ && !cmd->stdout_to_stderr
+ && cmd->out < 0;
+ if (need_out) {
+ if (pipe(fdout) < 0) {
+ if (need_in)
+ close_pair(fdin);
+ else if (cmd->in)
+ close(cmd->in);
+ return -ERR_RUN_COMMAND_PIPE;
+ }
+ cmd->out = fdout[0];
+ }
+
+ need_err = !cmd->no_stderr && cmd->err < 0;
+ if (need_err) {
+ if (pipe(fderr) < 0) {
+ if (need_in)
+ close_pair(fdin);
+ else if (cmd->in)
+ close(cmd->in);
+ if (need_out)
+ close_pair(fdout);
+ else if (cmd->out)
+ close(cmd->out);
+ return -ERR_RUN_COMMAND_PIPE;
+ }
+ cmd->err = fderr[0];
+ }
+
+ fflush(NULL);
+ cmd->pid = fork();
+ if (!cmd->pid) {
+ if (cmd->no_stdin)
+ dup_devnull(0);
+ else if (need_in) {
+ dup2(fdin[0], 0);
+ close_pair(fdin);
+ } else if (cmd->in) {
+ dup2(cmd->in, 0);
+ close(cmd->in);
+ }
+
+ if (cmd->no_stderr)
+ dup_devnull(2);
+ else if (need_err) {
+ dup2(fderr[1], 2);
+ close_pair(fderr);
+ }
+
+ if (cmd->no_stdout)
+ dup_devnull(1);
+ else if (cmd->stdout_to_stderr)
+ dup2(2, 1);
+ else if (need_out) {
+ dup2(fdout[1], 1);
+ close_pair(fdout);
+ } else if (cmd->out > 1) {
+ dup2(cmd->out, 1);
+ close(cmd->out);
+ }
+
+ if (cmd->dir && chdir(cmd->dir))
+ die("exec %s: cd to %s failed (%s)", cmd->argv[0],
+ cmd->dir, strerror(errno));
+ if (cmd->env) {
+ for (; *cmd->env; cmd->env++) {
+ if (strchr(*cmd->env, '='))
+ putenv((char*)*cmd->env);
+ else
+ unsetenv(*cmd->env);
+ }
+ }
+ if (cmd->preexec_cb)
+ cmd->preexec_cb();
+ if (cmd->perf_cmd) {
+ execv_perf_cmd(cmd->argv);
+ } else {
+ execvp(cmd->argv[0], (char *const*) cmd->argv);
+ }
+ exit(127);
+ }
+
+ if (cmd->pid < 0) {
+ int err = errno;
+ if (need_in)
+ close_pair(fdin);
+ else if (cmd->in)
+ close(cmd->in);
+ if (need_out)
+ close_pair(fdout);
+ else if (cmd->out)
+ close(cmd->out);
+ if (need_err)
+ close_pair(fderr);
+ return err == ENOENT ?
+ -ERR_RUN_COMMAND_EXEC :
+ -ERR_RUN_COMMAND_FORK;
+ }
+
+ if (need_in)
+ close(fdin[0]);
+ else if (cmd->in)
+ close(cmd->in);
+
+ if (need_out)
+ close(fdout[1]);
+ else if (cmd->out)
+ close(cmd->out);
+
+ if (need_err)
+ close(fderr[1]);
+
+ return 0;
+}
+
+static int wait_or_whine(pid_t pid)
+{
+ for (;;) {
+ int status, code;
+ pid_t waiting = waitpid(pid, &status, 0);
+
+ if (waiting < 0) {
+ if (errno == EINTR)
+ continue;
+ error("waitpid failed (%s)", strerror(errno));
+ return -ERR_RUN_COMMAND_WAITPID;
+ }
+ if (waiting != pid)
+ return -ERR_RUN_COMMAND_WAITPID_WRONG_PID;
+ if (WIFSIGNALED(status))
+ return -ERR_RUN_COMMAND_WAITPID_SIGNAL;
+
+ if (!WIFEXITED(status))
+ return -ERR_RUN_COMMAND_WAITPID_NOEXIT;
+ code = WEXITSTATUS(status);
+ switch (code) {
+ case 127:
+ return -ERR_RUN_COMMAND_EXEC;
+ case 0:
+ return 0;
+ default:
+ return -code;
+ }
+ }
+}
+
+int finish_command(struct child_process *cmd)
+{
+ return wait_or_whine(cmd->pid);
+}
+
+int run_command(struct child_process *cmd)
+{
+ int code = start_command(cmd);
+ if (code)
+ return code;
+ return finish_command(cmd);
+}
+
+static void prepare_run_command_v_opt(struct child_process *cmd,
+ const char **argv,
+ int opt)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->argv = argv;
+ cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0;
+ cmd->perf_cmd = opt & RUN_PERF_CMD ? 1 : 0;
+ cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0;
+}
+
+int run_command_v_opt(const char **argv, int opt)
+{
+ struct child_process cmd;
+ prepare_run_command_v_opt(&cmd, argv, opt);
+ return run_command(&cmd);
+}
diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h
new file mode 100644
index 00000000..1ef264d5
--- /dev/null
+++ b/tools/perf/util/run-command.h
@@ -0,0 +1,58 @@
+#ifndef __PERF_RUN_COMMAND_H
+#define __PERF_RUN_COMMAND_H
+
+enum {
+ ERR_RUN_COMMAND_FORK = 10000,
+ ERR_RUN_COMMAND_EXEC,
+ ERR_RUN_COMMAND_PIPE,
+ ERR_RUN_COMMAND_WAITPID,
+ ERR_RUN_COMMAND_WAITPID_WRONG_PID,
+ ERR_RUN_COMMAND_WAITPID_SIGNAL,
+ ERR_RUN_COMMAND_WAITPID_NOEXIT,
+};
+#define IS_RUN_COMMAND_ERR(x) (-(x) >= ERR_RUN_COMMAND_FORK)
+
+struct child_process {
+ const char **argv;
+ pid_t pid;
+ /*
+ * Using .in, .out, .err:
+ * - Specify 0 for no redirections (child inherits stdin, stdout,
+ * stderr from parent).
+ * - Specify -1 to have a pipe allocated as follows:
+ * .in: returns the writable pipe end; parent writes to it,
+ * the readable pipe end becomes child's stdin
+ * .out, .err: returns the readable pipe end; parent reads from
+ * it, the writable pipe end becomes child's stdout/stderr
+ * The caller of start_command() must close the returned FDs
+ * after it has completed reading from/writing to it!
+ * - Specify > 0 to set a channel to a particular FD as follows:
+ * .in: a readable FD, becomes child's stdin
+ * .out: a writable FD, becomes child's stdout/stderr
+ * .err > 0 not supported
+ * The specified FD is closed by start_command(), even in case
+ * of errors!
+ */
+ int in;
+ int out;
+ int err;
+ const char *dir;
+ const char *const *env;
+ unsigned no_stdin:1;
+ unsigned no_stdout:1;
+ unsigned no_stderr:1;
+ unsigned perf_cmd:1; /* if this is to be perf sub-command */
+ unsigned stdout_to_stderr:1;
+ void (*preexec_cb)(void);
+};
+
+int start_command(struct child_process *);
+int finish_command(struct child_process *);
+int run_command(struct child_process *);
+
+#define RUN_COMMAND_NO_STDIN 1
+#define RUN_PERF_CMD 2 /* If this is to be a perf sub-command */
+#define RUN_COMMAND_STDOUT_TO_STDERR 4
+int run_command_v_opt(const char **argv, int opt);
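+
+/*
+ * Illustrative sketch of the pipe-capture pattern described above for
+ * struct child_process (argv and the stdout handling are made up for
+ * this example):
+ *
+ *	struct child_process child;
+ *
+ *	memset(&child, 0, sizeof(child));
+ *	child.argv = argv;	// NULL-terminated argument vector
+ *	child.out = -1;		// have start_command() allocate a pipe
+ *	if (start_command(&child))
+ *		return -1;
+ *	// ... read the child's stdout from child.out, then close it ...
+ *	close(child.out);
+ *	return finish_command(&child);
+ */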
+
+#endif /* __PERF_RUN_COMMAND_H */
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
new file mode 100644
index 00000000..b059dc50
--- /dev/null
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -0,0 +1,565 @@
+/*
+ * trace-event-perl. Feed perf trace events to an embedded Perl interpreter.
+ *
+ * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+#include "../../perf.h"
+#include "../util.h"
+#include "../trace-event.h"
+
+#include <EXTERN.h>
+#include <perl.h>
+
+void boot_Perf__Trace__Context(pTHX_ CV *cv);
+void boot_DynaLoader(pTHX_ CV *cv);
+typedef PerlInterpreter * INTERP;
+
+void xs_init(pTHX);
+
+void xs_init(pTHX)
+{
+ const char *file = __FILE__;
+ dXSUB_SYS;
+
+ newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context,
+ file);
+ newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file);
+}
+
+INTERP my_perl;
+
+#define FTRACE_MAX_EVENT \
+ ((1 << (sizeof(unsigned short) * 8)) - 1)
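+/* i.e. 65535: event type ids are 16-bit values */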
+
+struct event *events[FTRACE_MAX_EVENT];
+
+extern struct scripting_context *scripting_context;
+
+static char *cur_field_name;
+static int zero_flag_atom;
+
+static void define_symbolic_value(const char *ev_name,
+ const char *field_name,
+ const char *field_value,
+ const char *field_str)
+{
+ unsigned long long value;
+ dSP;
+
+ value = eval_flag(field_value);
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+
+ XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
+ XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
+ XPUSHs(sv_2mortal(newSVuv(value)));
+ XPUSHs(sv_2mortal(newSVpv(field_str, 0)));
+
+ PUTBACK;
+ if (get_cv("main::define_symbolic_value", 0))
+ call_pv("main::define_symbolic_value", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void define_symbolic_values(struct print_flag_sym *field,
+ const char *ev_name,
+ const char *field_name)
+{
+ define_symbolic_value(ev_name, field_name, field->value, field->str);
+ if (field->next)
+ define_symbolic_values(field->next, ev_name, field_name);
+}
+
+static void define_symbolic_field(const char *ev_name,
+ const char *field_name)
+{
+ dSP;
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+
+ XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
+ XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
+
+ PUTBACK;
+ if (get_cv("main::define_symbolic_field", 0))
+ call_pv("main::define_symbolic_field", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void define_flag_value(const char *ev_name,
+ const char *field_name,
+ const char *field_value,
+ const char *field_str)
+{
+ unsigned long long value;
+ dSP;
+
+ value = eval_flag(field_value);
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+
+ XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
+ XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
+ XPUSHs(sv_2mortal(newSVuv(value)));
+ XPUSHs(sv_2mortal(newSVpv(field_str, 0)));
+
+ PUTBACK;
+ if (get_cv("main::define_flag_value", 0))
+ call_pv("main::define_flag_value", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void define_flag_values(struct print_flag_sym *field,
+ const char *ev_name,
+ const char *field_name)
+{
+ define_flag_value(ev_name, field_name, field->value, field->str);
+ if (field->next)
+ define_flag_values(field->next, ev_name, field_name);
+}
+
+static void define_flag_field(const char *ev_name,
+ const char *field_name,
+ const char *delim)
+{
+ dSP;
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+
+ XPUSHs(sv_2mortal(newSVpv(ev_name, 0)));
+ XPUSHs(sv_2mortal(newSVpv(field_name, 0)));
+ XPUSHs(sv_2mortal(newSVpv(delim, 0)));
+
+ PUTBACK;
+ if (get_cv("main::define_flag_field", 0))
+ call_pv("main::define_flag_field", G_SCALAR);
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void define_event_symbols(struct event *event,
+ const char *ev_name,
+ struct print_arg *args)
+{
+ switch (args->type) {
+ case PRINT_NULL:
+ break;
+ case PRINT_ATOM:
+ define_flag_value(ev_name, cur_field_name, "0",
+ args->atom.atom);
+ zero_flag_atom = 0;
+ break;
+ case PRINT_FIELD:
+ if (cur_field_name)
+ free(cur_field_name);
+ cur_field_name = strdup(args->field.name);
+ break;
+ case PRINT_FLAGS:
+ define_event_symbols(event, ev_name, args->flags.field);
+ define_flag_field(ev_name, cur_field_name, args->flags.delim);
+ define_flag_values(args->flags.flags, ev_name, cur_field_name);
+ break;
+ case PRINT_SYMBOL:
+ define_event_symbols(event, ev_name, args->symbol.field);
+ define_symbolic_field(ev_name, cur_field_name);
+ define_symbolic_values(args->symbol.symbols, ev_name,
+ cur_field_name);
+ break;
+ case PRINT_STRING:
+ break;
+ case PRINT_TYPE:
+ define_event_symbols(event, ev_name, args->typecast.item);
+ break;
+ case PRINT_OP:
+ if (strcmp(args->op.op, ":") == 0)
+ zero_flag_atom = 1;
+ define_event_symbols(event, ev_name, args->op.left);
+ define_event_symbols(event, ev_name, args->op.right);
+ break;
+ default:
+ /* we should warn... */
+ return;
+ }
+
+ if (args->next)
+ define_event_symbols(event, ev_name, args->next);
+}
+
+static inline struct event *find_cache_event(int type)
+{
+ static char ev_name[256];
+ struct event *event;
+
+ if (events[type])
+ return events[type];
+
+ events[type] = event = trace_find_event(type);
+ if (!event)
+ return NULL;
+
+ sprintf(ev_name, "%s::%s", event->system, event->name);
+
+ define_event_symbols(event, ev_name, event->print_fmt.args);
+
+ return event;
+}
+
+static void perl_process_event(int cpu, void *data,
+ int size __unused,
+ unsigned long long nsecs, char *comm)
+{
+ struct format_field *field;
+ static char handler[256];
+ unsigned long long val;
+ unsigned long s, ns;
+ struct event *event;
+ int type;
+ int pid;
+
+ dSP;
+
+ type = trace_parse_common_type(data);
+
+ event = find_cache_event(type);
+ if (!event)
+ die("ug! no event found for type %d", type);
+
+ pid = trace_parse_common_pid(data);
+
+ sprintf(handler, "%s::%s", event->system, event->name);
+
+ s = nsecs / NSECS_PER_SEC;
+ ns = nsecs - s * NSECS_PER_SEC;
+
+ scripting_context->event_data = data;
+
+ ENTER;
+ SAVETMPS;
+ PUSHMARK(SP);
+
+ XPUSHs(sv_2mortal(newSVpv(handler, 0)));
+ XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context))));
+ XPUSHs(sv_2mortal(newSVuv(cpu)));
+ XPUSHs(sv_2mortal(newSVuv(s)));
+ XPUSHs(sv_2mortal(newSVuv(ns)));
+ XPUSHs(sv_2mortal(newSViv(pid)));
+ XPUSHs(sv_2mortal(newSVpv(comm, 0)));
+
+ /* common fields other than pid can be accessed via xsub fns */
+
+ for (field = event->format.fields; field; field = field->next) {
+ if (field->flags & FIELD_IS_STRING) {
+ int offset;
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ offset = *(int *)(data + field->offset);
+ offset &= 0xffff;
+ } else
+ offset = field->offset;
+ XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0)));
+ } else { /* FIELD_IS_NUMERIC */
+ val = read_size(data + field->offset, field->size);
+ if (field->flags & FIELD_IS_SIGNED) {
+ XPUSHs(sv_2mortal(newSViv(val)));
+ } else {
+ XPUSHs(sv_2mortal(newSVuv(val)));
+ }
+ }
+ }
+
+ PUTBACK;
+
+ if (get_cv(handler, 0))
+ call_pv(handler, G_SCALAR);
+ else if (get_cv("main::trace_unhandled", 0)) {
+ XPUSHs(sv_2mortal(newSVpv(handler, 0)));
+ XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context))));
+ XPUSHs(sv_2mortal(newSVuv(cpu)));
+ XPUSHs(sv_2mortal(newSVuv(nsecs)));
+ XPUSHs(sv_2mortal(newSViv(pid)));
+ XPUSHs(sv_2mortal(newSVpv(comm, 0)));
+ call_pv("main::trace_unhandled", G_SCALAR);
+ }
+ SPAGAIN;
+ PUTBACK;
+ FREETMPS;
+ LEAVE;
+}
+
+static void run_start_sub(void)
+{
+ dSP; /* access to Perl stack */
+ PUSHMARK(SP);
+
+ if (get_cv("main::trace_begin", 0))
+ call_pv("main::trace_begin", G_DISCARD | G_NOARGS);
+}
+
+/*
+ * Start trace script
+ */
+static int perl_start_script(const char *script, int argc, const char **argv)
+{
+ const char **command_line;
+ int i, err = 0;
+
+ command_line = malloc((argc + 2) * sizeof(const char *));
+ if (!command_line)
+ return -ENOMEM;
+ command_line[0] = "";
+ command_line[1] = script;
+ for (i = 2; i < argc + 2; i++)
+ command_line[i] = argv[i - 2];
+
+ my_perl = perl_alloc();
+ perl_construct(my_perl);
+
+ if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line,
+ (char **)NULL)) {
+ err = -1;
+ goto error;
+ }
+
+ if (perl_run(my_perl)) {
+ err = -1;
+ goto error;
+ }
+
+ if (SvTRUE(ERRSV)) {
+ err = -1;
+ goto error;
+ }
+
+ run_start_sub();
+
+ free(command_line);
+ return 0;
+error:
+ perl_free(my_perl);
+ free(command_line);
+
+ return err;
+}
+
+/*
+ * Stop trace script
+ */
+static int perl_stop_script(void)
+{
+ dSP; /* access to Perl stack */
+ PUSHMARK(SP);
+
+ if (get_cv("main::trace_end", 0))
+ call_pv("main::trace_end", G_DISCARD | G_NOARGS);
+
+ perl_destruct(my_perl);
+ perl_free(my_perl);
+
+ return 0;
+}
+
+static int perl_generate_script(const char *outfile)
+{
+ struct event *event = NULL;
+ struct format_field *f;
+ char fname[PATH_MAX];
+ int not_first, count;
+ FILE *ofp;
+
+ sprintf(fname, "%s.pl", outfile);
+ ofp = fopen(fname, "w");
+ if (ofp == NULL) {
+ fprintf(stderr, "couldn't open %s\n", fname);
+ return -1;
+ }
+
+ fprintf(ofp, "# perf trace event handlers, "
+ "generated by perf trace -g perl\n");
+
+ fprintf(ofp, "# Licensed under the terms of the GNU GPL"
+ " License version 2\n\n");
+
+ fprintf(ofp, "# The common_* event handler fields are the most useful "
+ "fields common to\n");
+
+ fprintf(ofp, "# all events. They don't necessarily correspond to "
+ "the 'common_*' fields\n");
+
+ fprintf(ofp, "# in the format files. Those fields not available as "
+ "handler params can\n");
+
+ fprintf(ofp, "# be retrieved using Perl functions of the form "
+ "common_*($context).\n");
+
+ fprintf(ofp, "# See Context.pm for the list of available "
+ "functions.\n\n");
+
+ fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/"
+ "Perf-Trace-Util/lib\";\n");
+
+ fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n");
+ fprintf(ofp, "use Perf::Trace::Core;\n");
+ fprintf(ofp, "use Perf::Trace::Context;\n");
+ fprintf(ofp, "use Perf::Trace::Util;\n\n");
+
+ fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n");
+ fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n");
+
+ while ((event = trace_find_next_event(event))) {
+ fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
+ fprintf(ofp, "\tmy (");
+
+ fprintf(ofp, "$event_name, ");
+ fprintf(ofp, "$context, ");
+ fprintf(ofp, "$common_cpu, ");
+ fprintf(ofp, "$common_secs, ");
+ fprintf(ofp, "$common_nsecs,\n");
+ fprintf(ofp, "\t $common_pid, ");
+ fprintf(ofp, "$common_comm,\n\t ");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+ if (++count % 5 == 0)
+ fprintf(ofp, "\n\t ");
+
+ fprintf(ofp, "$%s", f->name);
+ }
+ fprintf(ofp, ") = @_;\n\n");
+
+ fprintf(ofp, "\tprint_header($event_name, $common_cpu, "
+ "$common_secs, $common_nsecs,\n\t "
+ "$common_pid, $common_comm);\n\n");
+
+ fprintf(ofp, "\tprintf(\"");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+ if (count && count % 4 == 0) {
+ fprintf(ofp, "\".\n\t \"");
+ }
+ count++;
+
+ fprintf(ofp, "%s=", f->name);
+ if (f->flags & FIELD_IS_STRING ||
+ f->flags & FIELD_IS_FLAG ||
+ f->flags & FIELD_IS_SYMBOLIC)
+ fprintf(ofp, "%%s");
+ else if (f->flags & FIELD_IS_SIGNED)
+ fprintf(ofp, "%%d");
+ else
+ fprintf(ofp, "%%u");
+ }
+
+ fprintf(ofp, "\\n\",\n\t ");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+
+ if (++count % 5 == 0)
+ fprintf(ofp, "\n\t ");
+
+ if (f->flags & FIELD_IS_FLAG) {
+ if ((count - 1) % 5 != 0) {
+ fprintf(ofp, "\n\t ");
+ count = 4;
+ }
+ fprintf(ofp, "flag_str(\"");
+ fprintf(ofp, "%s::%s\", ", event->system,
+ event->name);
+ fprintf(ofp, "\"%s\", $%s)", f->name,
+ f->name);
+ } else if (f->flags & FIELD_IS_SYMBOLIC) {
+ if ((count - 1) % 5 != 0) {
+ fprintf(ofp, "\n\t ");
+ count = 4;
+ }
+ fprintf(ofp, "symbol_str(\"");
+ fprintf(ofp, "%s::%s\", ", event->system,
+ event->name);
+ fprintf(ofp, "\"%s\", $%s)", f->name,
+ f->name);
+ } else
+ fprintf(ofp, "$%s", f->name);
+ }
+
+ fprintf(ofp, ");\n");
+ fprintf(ofp, "}\n\n");
+ }
+
+ fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, "
+ "$common_cpu, $common_secs, $common_nsecs,\n\t "
+ "$common_pid, $common_comm) = @_;\n\n");
+
+ fprintf(ofp, "\tprint_header($event_name, $common_cpu, "
+ "$common_secs, $common_nsecs,\n\t $common_pid, "
+ "$common_comm);\n}\n\n");
+
+ fprintf(ofp, "sub print_header\n{\n"
+ "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n"
+ "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t "
+ "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}");
+
+ fclose(ofp);
+
+ fprintf(stderr, "generated Perl script: %s\n", fname);
+
+ return 0;
+}
+
+struct scripting_ops perl_scripting_ops = {
+ .name = "Perl",
+ .start_script = perl_start_script,
+ .stop_script = perl_stop_script,
+ .process_event = perl_process_event,
+ .generate_script = perl_generate_script,
+};
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
new file mode 100644
index 00000000..33a63252
--- /dev/null
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -0,0 +1,594 @@
+/*
+ * trace-event-python. Feed trace events to an embedded Python interpreter.
+ *
+ * Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <Python.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+#include "../../perf.h"
+#include "../util.h"
+#include "../trace-event.h"
+
+PyMODINIT_FUNC initperf_trace_context(void);
+
+#define FTRACE_MAX_EVENT \
+ ((1 << (sizeof(unsigned short) * 8)) - 1)
+
+struct event *events[FTRACE_MAX_EVENT];
+
+#define MAX_FIELDS 64
+#define N_COMMON_FIELDS 7
+
+extern struct scripting_context *scripting_context;
+
+static char *cur_field_name;
+static int zero_flag_atom;
+
+static PyObject *main_module, *main_dict;
+
+static void handler_call_die(const char *handler_name)
+{
+ PyErr_Print();
+ Py_FatalError("problem in Python trace event handler");
+}
+
+static void define_value(enum print_arg_type field_type,
+ const char *ev_name,
+ const char *field_name,
+ const char *field_value,
+ const char *field_str)
+{
+ const char *handler_name = "define_flag_value";
+ PyObject *handler, *t, *retval;
+ unsigned long long value;
+ unsigned n = 0;
+
+ if (field_type == PRINT_SYMBOL)
+ handler_name = "define_symbolic_value";
+
+ t = PyTuple_New(4);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ value = eval_flag(field_value);
+
+ PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
+ PyTuple_SetItem(t, n++, PyString_FromString(field_name));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(value));
+ PyTuple_SetItem(t, n++, PyString_FromString(field_str));
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (handler && PyCallable_Check(handler)) {
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die(handler_name);
+ }
+
+ Py_DECREF(t);
+}
+
+static void define_values(enum print_arg_type field_type,
+ struct print_flag_sym *field,
+ const char *ev_name,
+ const char *field_name)
+{
+ define_value(field_type, ev_name, field_name, field->value,
+ field->str);
+
+ if (field->next)
+ define_values(field_type, field->next, ev_name, field_name);
+}
+
+static void define_field(enum print_arg_type field_type,
+ const char *ev_name,
+ const char *field_name,
+ const char *delim)
+{
+ const char *handler_name = "define_flag_field";
+ PyObject *handler, *t, *retval;
+ unsigned n = 0;
+
+ if (field_type == PRINT_SYMBOL)
+ handler_name = "define_symbolic_field";
+
+ if (field_type == PRINT_FLAGS)
+ t = PyTuple_New(3);
+ else
+ t = PyTuple_New(2);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
+ PyTuple_SetItem(t, n++, PyString_FromString(field_name));
+ if (field_type == PRINT_FLAGS)
+ PyTuple_SetItem(t, n++, PyString_FromString(delim));
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (handler && PyCallable_Check(handler)) {
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die(handler_name);
+ }
+
+ Py_DECREF(t);
+}
+
+static void define_event_symbols(struct event *event,
+ const char *ev_name,
+ struct print_arg *args)
+{
+ switch (args->type) {
+ case PRINT_NULL:
+ break;
+ case PRINT_ATOM:
+ define_value(PRINT_FLAGS, ev_name, cur_field_name, "0",
+ args->atom.atom);
+ zero_flag_atom = 0;
+ break;
+ case PRINT_FIELD:
+ if (cur_field_name)
+ free(cur_field_name);
+ cur_field_name = strdup(args->field.name);
+ break;
+ case PRINT_FLAGS:
+ define_event_symbols(event, ev_name, args->flags.field);
+ define_field(PRINT_FLAGS, ev_name, cur_field_name,
+ args->flags.delim);
+ define_values(PRINT_FLAGS, args->flags.flags, ev_name,
+ cur_field_name);
+ break;
+ case PRINT_SYMBOL:
+ define_event_symbols(event, ev_name, args->symbol.field);
+ define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL);
+ define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
+ cur_field_name);
+ break;
+ case PRINT_STRING:
+ break;
+ case PRINT_TYPE:
+ define_event_symbols(event, ev_name, args->typecast.item);
+ break;
+ case PRINT_OP:
+ if (strcmp(args->op.op, ":") == 0)
+ zero_flag_atom = 1;
+ define_event_symbols(event, ev_name, args->op.left);
+ define_event_symbols(event, ev_name, args->op.right);
+ break;
+ default:
+ /* we should warn... */
+ return;
+ }
+
+ if (args->next)
+ define_event_symbols(event, ev_name, args->next);
+}
+
+static inline struct event *find_cache_event(int type)
+{
+ static char ev_name[256];
+ struct event *event;
+
+ if (events[type])
+ return events[type];
+
+ events[type] = event = trace_find_event(type);
+ if (!event)
+ return NULL;
+
+ sprintf(ev_name, "%s__%s", event->system, event->name);
+
+ define_event_symbols(event, ev_name, event->print_fmt.args);
+
+ return event;
+}
+
+static void python_process_event(int cpu, void *data,
+ int size __unused,
+ unsigned long long nsecs, char *comm)
+{
+ PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
+ static char handler_name[256];
+ struct format_field *field;
+ unsigned long long val;
+ unsigned long s, ns;
+ struct event *event;
+ unsigned n = 0;
+ int type;
+ int pid;
+
+ t = PyTuple_New(MAX_FIELDS);
+ if (!t)
+ Py_FatalError("couldn't create Python tuple");
+
+ type = trace_parse_common_type(data);
+
+ event = find_cache_event(type);
+ if (!event)
+ die("ug! no event found for type %d", type);
+
+ pid = trace_parse_common_pid(data);
+
+ sprintf(handler_name, "%s__%s", event->system, event->name);
+
+ handler = PyDict_GetItemString(main_dict, handler_name);
+ if (handler && !PyCallable_Check(handler))
+ handler = NULL;
+ if (!handler) {
+ dict = PyDict_New();
+ if (!dict)
+ Py_FatalError("couldn't create Python dict");
+ }
+ s = nsecs / NSECS_PER_SEC;
+ ns = nsecs - s * NSECS_PER_SEC;
+
+ scripting_context->event_data = data;
+
+ context = PyCObject_FromVoidPtr(scripting_context, NULL);
+
+ PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
+ PyTuple_SetItem(t, n++,
+ PyCObject_FromVoidPtr(scripting_context, NULL));
+
+ if (handler) {
+ PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(s));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
+ PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
+ PyTuple_SetItem(t, n++, PyString_FromString(comm));
+ } else {
+ PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu));
+ PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s));
+ PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns));
+ PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid));
+ PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm));
+ }
+ for (field = event->format.fields; field; field = field->next) {
+ if (field->flags & FIELD_IS_STRING) {
+ int offset;
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ offset = *(int *)(data + field->offset);
+ offset &= 0xffff;
+ } else
+ offset = field->offset;
+ obj = PyString_FromString((char *)data + offset);
+ } else { /* FIELD_IS_NUMERIC */
+ val = read_size(data + field->offset, field->size);
+ if (field->flags & FIELD_IS_SIGNED) {
+ if ((long long)val >= LONG_MIN &&
+ (long long)val <= LONG_MAX)
+ obj = PyInt_FromLong(val);
+ else
+ obj = PyLong_FromLongLong(val);
+ } else {
+ if (val <= LONG_MAX)
+ obj = PyInt_FromLong(val);
+ else
+ obj = PyLong_FromUnsignedLongLong(val);
+ }
+ }
+ if (handler)
+ PyTuple_SetItem(t, n++, obj);
+ else
+ PyDict_SetItemString(dict, field->name, obj);
+
+ }
+ if (!handler)
+ PyTuple_SetItem(t, n++, dict);
+
+ if (_PyTuple_Resize(&t, n) == -1)
+ Py_FatalError("error resizing Python tuple");
+
+ if (handler) {
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die(handler_name);
+ } else {
+ handler = PyDict_GetItemString(main_dict, "trace_unhandled");
+ if (handler && PyCallable_Check(handler)) {
+
+ retval = PyObject_CallObject(handler, t);
+ if (retval == NULL)
+ handler_call_die("trace_unhandled");
+ }
+ Py_DECREF(dict);
+ }
+
+ Py_DECREF(t);
+}
+
+static int run_start_sub(void)
+{
+ PyObject *handler, *retval;
+ int err = 0;
+
+ main_module = PyImport_AddModule("__main__");
+ if (main_module == NULL)
+ return -1;
+ Py_INCREF(main_module);
+
+ main_dict = PyModule_GetDict(main_module);
+ if (main_dict == NULL) {
+ err = -1;
+ goto error;
+ }
+ Py_INCREF(main_dict);
+
+ handler = PyDict_GetItemString(main_dict, "trace_begin");
+ if (handler == NULL || !PyCallable_Check(handler))
+ goto out;
+
+ retval = PyObject_CallObject(handler, NULL);
+ if (retval == NULL)
+ handler_call_die("trace_begin");
+
+ Py_DECREF(retval);
+ return err;
+error:
+ Py_XDECREF(main_dict);
+ Py_XDECREF(main_module);
+out:
+ return err;
+}
+
+/*
+ * Start trace script
+ */
+static int python_start_script(const char *script, int argc, const char **argv)
+{
+ const char **command_line;
+ char buf[PATH_MAX];
+ int i, err = 0;
+ FILE *fp;
+
+ command_line = malloc((argc + 1) * sizeof(const char *));
+ command_line[0] = script;
+ for (i = 1; i < argc + 1; i++)
+ command_line[i] = argv[i - 1];
+
+ Py_Initialize();
+
+ initperf_trace_context();
+
+ PySys_SetArgv(argc + 1, (char **)command_line);
+
+ fp = fopen(script, "r");
+ if (!fp) {
+ sprintf(buf, "Can't open python script \"%s\"", script);
+ perror(buf);
+ err = -1;
+ goto error;
+ }
+
+ err = PyRun_SimpleFile(fp, script);
+ if (err) {
+ fprintf(stderr, "Error running python script %s\n", script);
+ goto error;
+ }
+
+ err = run_start_sub();
+ if (err) {
+ fprintf(stderr, "Error starting python script %s\n", script);
+ goto error;
+ }
+
+ free(command_line);
+
+ return err;
+error:
+ Py_Finalize();
+ free(command_line);
+
+ return err;
+}
+
+/*
+ * Stop trace script
+ */
+static int python_stop_script(void)
+{
+ PyObject *handler, *retval;
+ int err = 0;
+
+ handler = PyDict_GetItemString(main_dict, "trace_end");
+ if (handler == NULL || !PyCallable_Check(handler))
+ goto out;
+
+ retval = PyObject_CallObject(handler, NULL);
+ if (retval == NULL)
+ handler_call_die("trace_end");
+ else
+ Py_DECREF(retval);
+out:
+ Py_XDECREF(main_dict);
+ Py_XDECREF(main_module);
+ Py_Finalize();
+
+ return err;
+}
+
+static int python_generate_script(const char *outfile)
+{
+ struct event *event = NULL;
+ struct format_field *f;
+ char fname[PATH_MAX];
+ int not_first, count;
+ FILE *ofp;
+
+ sprintf(fname, "%s.py", outfile);
+ ofp = fopen(fname, "w");
+ if (ofp == NULL) {
+ fprintf(stderr, "couldn't open %s\n", fname);
+ return -1;
+ }
+ fprintf(ofp, "# perf trace event handlers, "
+ "generated by perf trace -g python\n");
+
+ fprintf(ofp, "# Licensed under the terms of the GNU GPL"
+ " License version 2\n\n");
+
+ fprintf(ofp, "# The common_* event handler fields are the most useful "
+ "fields common to\n");
+
+ fprintf(ofp, "# all events. They don't necessarily correspond to "
+ "the 'common_*' fields\n");
+
+ fprintf(ofp, "# in the format files. Those fields not available as "
+ "handler params can\n");
+
+ fprintf(ofp, "# be retrieved using Python functions of the form "
+ "common_*(context).\n");
+
+ fprintf(ofp, "# See the perf-trace-python Documentation for the list "
+ "of available functions.\n\n");
+
+ fprintf(ofp, "import os\n");
+ fprintf(ofp, "import sys\n\n");
+
+ fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n");
+ fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n");
+ fprintf(ofp, "\nfrom perf_trace_context import *\n");
+ fprintf(ofp, "from Core import *\n\n\n");
+
+ fprintf(ofp, "def trace_begin():\n");
+ fprintf(ofp, "\tprint \"in trace_begin\"\n\n");
+
+ fprintf(ofp, "def trace_end():\n");
+ fprintf(ofp, "\tprint \"in trace_end\"\n\n");
+
+ while ((event = trace_find_next_event(event))) {
+ fprintf(ofp, "def %s__%s(", event->system, event->name);
+ fprintf(ofp, "event_name, ");
+ fprintf(ofp, "context, ");
+ fprintf(ofp, "common_cpu,\n");
+ fprintf(ofp, "\tcommon_secs, ");
+ fprintf(ofp, "common_nsecs, ");
+ fprintf(ofp, "common_pid, ");
+ fprintf(ofp, "common_comm,\n\t");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+ if (++count % 5 == 0)
+ fprintf(ofp, "\n\t");
+
+ fprintf(ofp, "%s", f->name);
+ }
+ fprintf(ofp, "):\n");
+
+ fprintf(ofp, "\t\tprint_header(event_name, common_cpu, "
+ "common_secs, common_nsecs,\n\t\t\t"
+ "common_pid, common_comm)\n\n");
+
+ fprintf(ofp, "\t\tprint \"");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+ if (count && count % 3 == 0) {
+ fprintf(ofp, "\" \\\n\t\t\"");
+ }
+ count++;
+
+ fprintf(ofp, "%s=", f->name);
+ if (f->flags & FIELD_IS_STRING ||
+ f->flags & FIELD_IS_FLAG ||
+ f->flags & FIELD_IS_SYMBOLIC)
+ fprintf(ofp, "%%s");
+ else if (f->flags & FIELD_IS_SIGNED)
+ fprintf(ofp, "%%d");
+ else
+ fprintf(ofp, "%%u");
+ }
+
+ fprintf(ofp, "\\n\" %% \\\n\t\t(");
+
+ not_first = 0;
+ count = 0;
+
+ for (f = event->format.fields; f; f = f->next) {
+ if (not_first++)
+ fprintf(ofp, ", ");
+
+ if (++count % 5 == 0)
+ fprintf(ofp, "\n\t\t");
+
+ if (f->flags & FIELD_IS_FLAG) {
+ if ((count - 1) % 5 != 0) {
+ fprintf(ofp, "\n\t\t");
+ count = 4;
+ }
+ fprintf(ofp, "flag_str(\"");
+ fprintf(ofp, "%s__%s\", ", event->system,
+ event->name);
+ fprintf(ofp, "\"%s\", %s)", f->name,
+ f->name);
+ } else if (f->flags & FIELD_IS_SYMBOLIC) {
+ if ((count - 1) % 5 != 0) {
+ fprintf(ofp, "\n\t\t");
+ count = 4;
+ }
+ fprintf(ofp, "symbol_str(\"");
+ fprintf(ofp, "%s__%s\", ", event->system,
+ event->name);
+ fprintf(ofp, "\"%s\", %s)", f->name,
+ f->name);
+ } else
+ fprintf(ofp, "%s", f->name);
+ }
+
+ fprintf(ofp, "),\n\n");
+ }
+
+ fprintf(ofp, "def trace_unhandled(event_name, context, "
+ "event_fields_dict):\n");
+
+ fprintf(ofp, "\t\tprint ' '.join(['%%s=%%s'%%(k,str(v))"
+ "for k,v in sorted(event_fields_dict.items())])\n\n");
+
+ fprintf(ofp, "def print_header("
+ "event_name, cpu, secs, nsecs, pid, comm):\n"
+ "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
+ "(event_name, cpu, secs, nsecs, pid, comm),\n");
+
+ fclose(ofp);
+
+ fprintf(stderr, "generated Python script: %s\n", fname);
+
+ return 0;
+}
+
+struct scripting_ops python_scripting_ops = {
+ .name = "Python",
+ .start_script = python_start_script,
+ .stop_script = python_stop_script,
+ .process_event = python_process_event,
+ .generate_script = python_generate_script,
+};
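
For reference, both scripting engines export the same struct scripting_ops vtable (as declared in the trace-event header both files include), so a caller only ever deals with start_script/process_event/stop_script. A minimal sketch of that call sequence follows; the wrapper function and the raw-sample loop are illustrative only, not part of the patch:

/* sketch: drive either scripting engine through its ops vtable */
static int replay_through_script(struct scripting_ops *ops, const char *script,
				 int argc, const char **argv)
{
	if (ops->start_script(script, argc, argv))	/* loads the script, runs trace_begin() */
		return -1;

	/*
	 * For every PERF_RECORD_SAMPLE raw payload the tool would call:
	 *	ops->process_event(cpu, raw_data, raw_size, sample_time, comm);
	 */

	return ops->stop_script();			/* runs trace_end(), tears the interpreter down */
}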
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
new file mode 100644
index 00000000..fa9d652c
--- /dev/null
+++ b/tools/perf/util/session.c
@@ -0,0 +1,931 @@
+#define _FILE_OFFSET_BITS 64
+
+#include <linux/kernel.h>
+
+#include <byteswap.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+
+#include "session.h"
+#include "sort.h"
+#include "util.h"
+
+static int perf_session__open(struct perf_session *self, bool force)
+{
+ struct stat input_stat;
+
+ if (!strcmp(self->filename, "-")) {
+ self->fd_pipe = true;
+ self->fd = STDIN_FILENO;
+
+ if (perf_header__read(self, self->fd) < 0)
+ pr_err("incompatible file format");
+
+ return 0;
+ }
+
+ self->fd = open(self->filename, O_RDONLY);
+ if (self->fd < 0) {
+ int err = errno;
+
+ pr_err("failed to open %s: %s", self->filename, strerror(err));
+ if (err == ENOENT && !strcmp(self->filename, "perf.data"))
+ pr_err(" (try 'perf record' first)");
+ pr_err("\n");
+ return -errno;
+ }
+
+ if (fstat(self->fd, &input_stat) < 0)
+ goto out_close;
+
+ if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
+ pr_err("file %s not owned by current user or root\n",
+ self->filename);
+ goto out_close;
+ }
+
+ if (!input_stat.st_size) {
+ pr_info("zero-sized file (%s), nothing to do!\n",
+ self->filename);
+ goto out_close;
+ }
+
+ if (perf_header__read(self, self->fd) < 0) {
+ pr_err("incompatible file format");
+ goto out_close;
+ }
+
+ self->size = input_stat.st_size;
+ return 0;
+
+out_close:
+ close(self->fd);
+ self->fd = -1;
+ return -1;
+}
+
+void perf_session__update_sample_type(struct perf_session *self)
+{
+ self->sample_type = perf_header__sample_type(&self->header);
+}
+
+int perf_session__create_kernel_maps(struct perf_session *self)
+{
+ int ret = machine__create_kernel_maps(&self->host_machine);
+
+ if (ret >= 0)
+ ret = machines__create_guest_kernel_maps(&self->machines);
+ return ret;
+}
+
+static void perf_session__destroy_kernel_maps(struct perf_session *self)
+{
+ machine__destroy_kernel_maps(&self->host_machine);
+ machines__destroy_guest_kernel_maps(&self->machines);
+}
+
+struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
+{
+ size_t len = filename ? strlen(filename) + 1 : 0;
+ struct perf_session *self = zalloc(sizeof(*self) + len);
+
+ if (self == NULL)
+ goto out;
+
+ if (perf_header__init(&self->header) < 0)
+ goto out_free;
+
+ memcpy(self->filename, filename, len);
+ self->threads = RB_ROOT;
+ INIT_LIST_HEAD(&self->dead_threads);
+ self->hists_tree = RB_ROOT;
+ self->last_match = NULL;
+ self->mmap_window = 32;
+ self->machines = RB_ROOT;
+ self->repipe = repipe;
+ INIT_LIST_HEAD(&self->ordered_samples.samples_head);
+ machine__init(&self->host_machine, "", HOST_KERNEL_ID);
+
+ if (mode == O_RDONLY) {
+ if (perf_session__open(self, force) < 0)
+ goto out_delete;
+ } else if (mode == O_WRONLY) {
+ /*
+ * In O_RDONLY mode this will be performed when reading the
+ * kernel MMAP event, in event__process_mmap().
+ */
+ if (perf_session__create_kernel_maps(self) < 0)
+ goto out_delete;
+ }
+
+ perf_session__update_sample_type(self);
+out:
+ return self;
+out_free:
+ free(self);
+ return NULL;
+out_delete:
+ perf_session__delete(self);
+ return NULL;
+}
+
+static void perf_session__delete_dead_threads(struct perf_session *self)
+{
+ struct thread *n, *t;
+
+ list_for_each_entry_safe(t, n, &self->dead_threads, node) {
+ list_del(&t->node);
+ thread__delete(t);
+ }
+}
+
+static void perf_session__delete_threads(struct perf_session *self)
+{
+ struct rb_node *nd = rb_first(&self->threads);
+
+ while (nd) {
+ struct thread *t = rb_entry(nd, struct thread, rb_node);
+
+ rb_erase(&t->rb_node, &self->threads);
+ nd = rb_next(nd);
+ thread__delete(t);
+ }
+}
+
+void perf_session__delete(struct perf_session *self)
+{
+ perf_header__exit(&self->header);
+ perf_session__destroy_kernel_maps(self);
+ perf_session__delete_dead_threads(self);
+ perf_session__delete_threads(self);
+ machine__exit(&self->host_machine);
+ close(self->fd);
+ free(self);
+}
+
+void perf_session__remove_thread(struct perf_session *self, struct thread *th)
+{
+ self->last_match = NULL;
+ rb_erase(&th->rb_node, &self->threads);
+ /*
+ * We may have references to this thread, for instance in some hist_entry
+ * instances, so just move them to a separate list.
+ */
+ list_add_tail(&th->node, &self->dead_threads);
+}
+
+static bool symbol__match_parent_regex(struct symbol *sym)
+{
+ if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
+ return 1;
+
+ return 0;
+}
+
+struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent)
+{
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ unsigned int i;
+ struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));
+
+ if (!syms)
+ return NULL;
+
+ for (i = 0; i < chain->nr; i++) {
+ u64 ip = chain->ips[i];
+ struct addr_location al;
+
+ if (ip >= PERF_CONTEXT_MAX) {
+ switch (ip) {
+ case PERF_CONTEXT_HV:
+ cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
+ case PERF_CONTEXT_KERNEL:
+ cpumode = PERF_RECORD_MISC_KERNEL; break;
+ case PERF_CONTEXT_USER:
+ cpumode = PERF_RECORD_MISC_USER; break;
+ default:
+ break;
+ }
+ continue;
+ }
+
+ al.filtered = false;
+ thread__find_addr_location(thread, self, cpumode,
+ MAP__FUNCTION, thread->pid, ip, &al, NULL);
+ if (al.sym != NULL) {
+ if (sort__has_parent && !*parent &&
+ symbol__match_parent_regex(al.sym))
+ *parent = al.sym;
+ if (!symbol_conf.use_callchain)
+ break;
+ syms[i].map = al.map;
+ syms[i].sym = al.sym;
+ }
+ }
+
+ return syms;
+}
+
+static int process_event_stub(event_t *event __used,
+ struct perf_session *session __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_finished_round_stub(event_t *event __used,
+ struct perf_session *session __used,
+ struct perf_event_ops *ops __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static int process_finished_round(event_t *event,
+ struct perf_session *session,
+ struct perf_event_ops *ops);
+
+static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
+{
+ if (handler->sample == NULL)
+ handler->sample = process_event_stub;
+ if (handler->mmap == NULL)
+ handler->mmap = process_event_stub;
+ if (handler->comm == NULL)
+ handler->comm = process_event_stub;
+ if (handler->fork == NULL)
+ handler->fork = process_event_stub;
+ if (handler->exit == NULL)
+ handler->exit = process_event_stub;
+ if (handler->lost == NULL)
+ handler->lost = process_event_stub;
+ if (handler->read == NULL)
+ handler->read = process_event_stub;
+ if (handler->throttle == NULL)
+ handler->throttle = process_event_stub;
+ if (handler->unthrottle == NULL)
+ handler->unthrottle = process_event_stub;
+ if (handler->attr == NULL)
+ handler->attr = process_event_stub;
+ if (handler->event_type == NULL)
+ handler->event_type = process_event_stub;
+ if (handler->tracing_data == NULL)
+ handler->tracing_data = process_event_stub;
+ if (handler->build_id == NULL)
+ handler->build_id = process_event_stub;
+ if (handler->finished_round == NULL) {
+ if (handler->ordered_samples)
+ handler->finished_round = process_finished_round;
+ else
+ handler->finished_round = process_finished_round_stub;
+ }
+}
+
+void mem_bswap_64(void *src, int byte_size)
+{
+ u64 *m = src;
+
+ while (byte_size > 0) {
+ *m = bswap_64(*m);
+ byte_size -= sizeof(u64);
+ ++m;
+ }
+}
+
+static void event__all64_swap(event_t *self)
+{
+ struct perf_event_header *hdr = &self->header;
+ mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
+}
+
+static void event__comm_swap(event_t *self)
+{
+ self->comm.pid = bswap_32(self->comm.pid);
+ self->comm.tid = bswap_32(self->comm.tid);
+}
+
+static void event__mmap_swap(event_t *self)
+{
+ self->mmap.pid = bswap_32(self->mmap.pid);
+ self->mmap.tid = bswap_32(self->mmap.tid);
+ self->mmap.start = bswap_64(self->mmap.start);
+ self->mmap.len = bswap_64(self->mmap.len);
+ self->mmap.pgoff = bswap_64(self->mmap.pgoff);
+}
+
+static void event__task_swap(event_t *self)
+{
+ self->fork.pid = bswap_32(self->fork.pid);
+ self->fork.tid = bswap_32(self->fork.tid);
+ self->fork.ppid = bswap_32(self->fork.ppid);
+ self->fork.ptid = bswap_32(self->fork.ptid);
+ self->fork.time = bswap_64(self->fork.time);
+}
+
+static void event__read_swap(event_t *self)
+{
+ self->read.pid = bswap_32(self->read.pid);
+ self->read.tid = bswap_32(self->read.tid);
+ self->read.value = bswap_64(self->read.value);
+ self->read.time_enabled = bswap_64(self->read.time_enabled);
+ self->read.time_running = bswap_64(self->read.time_running);
+ self->read.id = bswap_64(self->read.id);
+}
+
+static void event__attr_swap(event_t *self)
+{
+ size_t size;
+
+ self->attr.attr.type = bswap_32(self->attr.attr.type);
+ self->attr.attr.size = bswap_32(self->attr.attr.size);
+ self->attr.attr.config = bswap_64(self->attr.attr.config);
+ self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
+ self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
+ self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
+ self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
+ self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
+ self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
+ self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);
+
+ size = self->header.size;
+ size -= (void *)&self->attr.id - (void *)self;
+ mem_bswap_64(self->attr.id, size);
+}
+
+static void event__event_type_swap(event_t *self)
+{
+ self->event_type.event_type.event_id =
+ bswap_64(self->event_type.event_type.event_id);
+}
+
+static void event__tracing_data_swap(event_t *self)
+{
+ self->tracing_data.size = bswap_32(self->tracing_data.size);
+}
+
+typedef void (*event__swap_op)(event_t *self);
+
+static event__swap_op event__swap_ops[] = {
+ [PERF_RECORD_MMAP] = event__mmap_swap,
+ [PERF_RECORD_COMM] = event__comm_swap,
+ [PERF_RECORD_FORK] = event__task_swap,
+ [PERF_RECORD_EXIT] = event__task_swap,
+ [PERF_RECORD_LOST] = event__all64_swap,
+ [PERF_RECORD_READ] = event__read_swap,
+ [PERF_RECORD_SAMPLE] = event__all64_swap,
+ [PERF_RECORD_HEADER_ATTR] = event__attr_swap,
+ [PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
+ [PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
+ [PERF_RECORD_HEADER_BUILD_ID] = NULL,
+ [PERF_RECORD_HEADER_MAX] = NULL,
+};
+
+struct sample_queue {
+ u64 timestamp;
+ struct sample_event *event;
+ struct list_head list;
+};
+
+static void flush_sample_queue(struct perf_session *s,
+ struct perf_event_ops *ops)
+{
+ struct list_head *head = &s->ordered_samples.samples_head;
+ u64 limit = s->ordered_samples.next_flush;
+ struct sample_queue *tmp, *iter;
+
+ if (!ops->ordered_samples || !limit)
+ return;
+
+ list_for_each_entry_safe(iter, tmp, head, list) {
+ if (iter->timestamp > limit)
+ return;
+
+ if (iter == s->ordered_samples.last_inserted)
+ s->ordered_samples.last_inserted = NULL;
+
+ ops->sample((event_t *)iter->event, s);
+
+ s->ordered_samples.last_flush = iter->timestamp;
+ list_del(&iter->list);
+ free(iter->event);
+ free(iter);
+ }
+}
+
+/*
+ * When perf record finishes a pass over all buffers, it records this pseudo
+ * event.
+ * We record the max timestamp t found in pass n.
+ * Assuming these timestamps are monotonic across cpus, we know that if
+ * a buffer still has events with timestamps below t, they will all be
+ * available and then read in pass n + 1.
+ * Hence when we start to read pass n + 2, we can safely flush all
+ * events with timestamps below t.
+ *
+ * ============ PASS n =================
+ * CPU 0 | CPU 1
+ * |
+ * cnt1 timestamps | cnt2 timestamps
+ * 1 | 2
+ * 2 | 3
+ * - | 4 <--- max recorded
+ *
+ * ============ PASS n + 1 ==============
+ * CPU 0 | CPU 1
+ * |
+ * cnt1 timestamps | cnt2 timestamps
+ * 3 | 5
+ * 4 | 6
+ * 5 | 7 <---- max recorded
+ *
+ * Flush all events below timestamp 4
+ *
+ * ============ PASS n + 2 ==============
+ * CPU 0 | CPU 1
+ * |
+ * cnt1 timestamps | cnt2 timestamps
+ * 6 | 8
+ * 7 | 9
+ * - | 10
+ *
+ * Flush all events below timestamp 7
+ * etc...
+ */
+static int process_finished_round(event_t *event __used,
+ struct perf_session *session,
+ struct perf_event_ops *ops)
+{
+ flush_sample_queue(session, ops);
+ session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;
+
+ return 0;
+}
+
+static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
+{
+ struct sample_queue *iter;
+
+ list_for_each_entry_reverse(iter, head, list) {
+ if (iter->timestamp < new->timestamp) {
+ list_add(&new->list, &iter->list);
+ return;
+ }
+ }
+
+ list_add(&new->list, head);
+}
+
+static void __queue_sample_before(struct sample_queue *new,
+ struct sample_queue *iter,
+ struct list_head *head)
+{
+ list_for_each_entry_continue_reverse(iter, head, list) {
+ if (iter->timestamp < new->timestamp) {
+ list_add(&new->list, &iter->list);
+ return;
+ }
+ }
+
+ list_add(&new->list, head);
+}
+
+static void __queue_sample_after(struct sample_queue *new,
+ struct sample_queue *iter,
+ struct list_head *head)
+{
+ list_for_each_entry_continue(iter, head, list) {
+ if (iter->timestamp > new->timestamp) {
+ list_add_tail(&new->list, &iter->list);
+ return;
+ }
+ }
+ list_add_tail(&new->list, head);
+}
+
+/* The queue is ordered by time */
+static void __queue_sample_event(struct sample_queue *new,
+ struct perf_session *s)
+{
+ struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
+ struct list_head *head = &s->ordered_samples.samples_head;
+
+
+ if (!last_inserted) {
+ __queue_sample_end(new, head);
+ return;
+ }
+
+ /*
+ * Most of the time the current event has a timestamp
+ * very close to the last event inserted, unless we just switched
+	 * to another event buffer. Sorting based on a list and on the
+	 * last inserted event, which is close to the current one, is
+	 * probably more efficient than an rbtree-based sort.
+ */
+ if (last_inserted->timestamp >= new->timestamp)
+ __queue_sample_before(new, last_inserted, head);
+ else
+ __queue_sample_after(new, last_inserted, head);
+}
+
+static int queue_sample_event(event_t *event, struct sample_data *data,
+ struct perf_session *s)
+{
+ u64 timestamp = data->time;
+ struct sample_queue *new;
+
+
+ if (timestamp < s->ordered_samples.last_flush) {
+ printf("Warning: Timestamp below last timeslice flush\n");
+ return -EINVAL;
+ }
+
+ new = malloc(sizeof(*new));
+ if (!new)
+ return -ENOMEM;
+
+ new->timestamp = timestamp;
+
+ new->event = malloc(event->header.size);
+ if (!new->event) {
+ free(new);
+ return -ENOMEM;
+ }
+
+ memcpy(new->event, event, event->header.size);
+
+ __queue_sample_event(new, s);
+ s->ordered_samples.last_inserted = new;
+
+ if (new->timestamp > s->ordered_samples.max_timestamp)
+ s->ordered_samples.max_timestamp = new->timestamp;
+
+ return 0;
+}
+
+static int perf_session__process_sample(event_t *event, struct perf_session *s,
+ struct perf_event_ops *ops)
+{
+ struct sample_data data;
+
+ if (!ops->ordered_samples)
+ return ops->sample(event, s);
+
+ bzero(&data, sizeof(struct sample_data));
+ event__parse_sample(event, s->sample_type, &data);
+
+ queue_sample_event(event, &data, s);
+
+ return 0;
+}
+
+static int perf_session__process_event(struct perf_session *self,
+ event_t *event,
+ struct perf_event_ops *ops,
+ u64 offset, u64 head)
+{
+ trace_event(event);
+
+ if (event->header.type < PERF_RECORD_HEADER_MAX) {
+ dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
+ offset + head, event->header.size,
+ event__name[event->header.type]);
+ hists__inc_nr_events(&self->hists, event->header.type);
+ }
+
+ if (self->header.needs_swap && event__swap_ops[event->header.type])
+ event__swap_ops[event->header.type](event);
+
+ switch (event->header.type) {
+ case PERF_RECORD_SAMPLE:
+ return perf_session__process_sample(event, self, ops);
+ case PERF_RECORD_MMAP:
+ return ops->mmap(event, self);
+ case PERF_RECORD_COMM:
+ return ops->comm(event, self);
+ case PERF_RECORD_FORK:
+ return ops->fork(event, self);
+ case PERF_RECORD_EXIT:
+ return ops->exit(event, self);
+ case PERF_RECORD_LOST:
+ return ops->lost(event, self);
+ case PERF_RECORD_READ:
+ return ops->read(event, self);
+ case PERF_RECORD_THROTTLE:
+ return ops->throttle(event, self);
+ case PERF_RECORD_UNTHROTTLE:
+ return ops->unthrottle(event, self);
+ case PERF_RECORD_HEADER_ATTR:
+ return ops->attr(event, self);
+ case PERF_RECORD_HEADER_EVENT_TYPE:
+ return ops->event_type(event, self);
+ case PERF_RECORD_HEADER_TRACING_DATA:
+ /* setup for reading amidst mmap */
+ lseek(self->fd, offset + head, SEEK_SET);
+ return ops->tracing_data(event, self);
+ case PERF_RECORD_HEADER_BUILD_ID:
+ return ops->build_id(event, self);
+ case PERF_RECORD_FINISHED_ROUND:
+ return ops->finished_round(event, self, ops);
+ default:
+ ++self->hists.stats.nr_unknown_events;
+ return -1;
+ }
+}
+
+void perf_event_header__bswap(struct perf_event_header *self)
+{
+ self->type = bswap_32(self->type);
+ self->misc = bswap_16(self->misc);
+ self->size = bswap_16(self->size);
+}
+
+static struct thread *perf_session__register_idle_thread(struct perf_session *self)
+{
+ struct thread *thread = perf_session__findnew(self, 0);
+
+ if (thread == NULL || thread__set_comm(thread, "swapper")) {
+ pr_err("problem inserting idle task.\n");
+ thread = NULL;
+ }
+
+ return thread;
+}
+
+int do_read(int fd, void *buf, size_t size)
+{
+ void *buf_start = buf;
+
+ while (size) {
+ int ret = read(fd, buf, size);
+
+ if (ret <= 0)
+ return ret;
+
+ size -= ret;
+ buf += ret;
+ }
+
+ return buf - buf_start;
+}
+
+#define session_done() (*(volatile int *)(&session_done))
+volatile int session_done;
+
+static int __perf_session__process_pipe_events(struct perf_session *self,
+ struct perf_event_ops *ops)
+{
+ event_t event;
+ uint32_t size;
+ int skip = 0;
+ u64 head;
+ int err;
+ void *p;
+
+ perf_event_ops__fill_defaults(ops);
+
+ head = 0;
+more:
+ err = do_read(self->fd, &event, sizeof(struct perf_event_header));
+ if (err <= 0) {
+ if (err == 0)
+ goto done;
+
+ pr_err("failed to read event header\n");
+ goto out_err;
+ }
+
+ if (self->header.needs_swap)
+ perf_event_header__bswap(&event.header);
+
+ size = event.header.size;
+ if (size == 0)
+ size = 8;
+
+ p = &event;
+ p += sizeof(struct perf_event_header);
+
+ if (size - sizeof(struct perf_event_header)) {
+ err = do_read(self->fd, p,
+ size - sizeof(struct perf_event_header));
+ if (err <= 0) {
+ if (err == 0) {
+ pr_err("unexpected end of event stream\n");
+ goto done;
+ }
+
+ pr_err("failed to read event data\n");
+ goto out_err;
+ }
+ }
+
+ if (size == 0 ||
+ (skip = perf_session__process_event(self, &event, ops,
+ 0, head)) < 0) {
+ dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+ head, event.header.size, event.header.type);
+ /*
+ * assume we lost track of the stream, check alignment, and
+		 * increment a single u64 in the hope of catching on again 'soon'.
+ */
+ if (unlikely(head & 7))
+ head &= ~7ULL;
+
+ size = 8;
+ }
+
+ head += size;
+
+ dump_printf("\n%#Lx [%#x]: event: %d\n",
+ head, event.header.size, event.header.type);
+
+ if (skip > 0)
+ head += skip;
+
+ if (!session_done())
+ goto more;
+done:
+ err = 0;
+out_err:
+ return err;
+}
+
+int __perf_session__process_events(struct perf_session *self,
+ u64 data_offset, u64 data_size,
+ u64 file_size, struct perf_event_ops *ops)
+{
+ int err, mmap_prot, mmap_flags;
+ u64 head, shift;
+ u64 offset = 0;
+ size_t page_size;
+ event_t *event;
+ uint32_t size;
+ char *buf;
+ struct ui_progress *progress = ui_progress__new("Processing events...",
+ self->size);
+ if (progress == NULL)
+ return -1;
+
+ perf_event_ops__fill_defaults(ops);
+
+ page_size = sysconf(_SC_PAGESIZE);
+
+ head = data_offset;
+ shift = page_size * (head / page_size);
+ offset += shift;
+ head -= shift;
+
+ mmap_prot = PROT_READ;
+ mmap_flags = MAP_SHARED;
+
+ if (self->header.needs_swap) {
+ mmap_prot |= PROT_WRITE;
+ mmap_flags = MAP_PRIVATE;
+ }
+remap:
+ buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
+ mmap_flags, self->fd, offset);
+ if (buf == MAP_FAILED) {
+ pr_err("failed to mmap file\n");
+ err = -errno;
+ goto out_err;
+ }
+
+more:
+ event = (event_t *)(buf + head);
+ ui_progress__update(progress, offset);
+
+ if (self->header.needs_swap)
+ perf_event_header__bswap(&event->header);
+ size = event->header.size;
+ if (size == 0)
+ size = 8;
+
+ if (head + event->header.size >= page_size * self->mmap_window) {
+ int munmap_ret;
+
+ shift = page_size * (head / page_size);
+
+ munmap_ret = munmap(buf, page_size * self->mmap_window);
+ assert(munmap_ret == 0);
+
+ offset += shift;
+ head -= shift;
+ goto remap;
+ }
+
+ size = event->header.size;
+
+ dump_printf("\n%#Lx [%#x]: event: %d\n",
+ offset + head, event->header.size, event->header.type);
+
+ if (size == 0 ||
+ perf_session__process_event(self, event, ops, offset, head) < 0) {
+ dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+ offset + head, event->header.size,
+ event->header.type);
+ /*
+ * assume we lost track of the stream, check alignment, and
+		 * increment a single u64 in the hope of catching on again 'soon'.
+ */
+ if (unlikely(head & 7))
+ head &= ~7ULL;
+
+ size = 8;
+ }
+
+ head += size;
+
+ if (offset + head >= data_offset + data_size)
+ goto done;
+
+ if (offset + head < file_size)
+ goto more;
+done:
+ err = 0;
+ /* do the final flush for ordered samples */
+ self->ordered_samples.next_flush = ULLONG_MAX;
+ flush_sample_queue(self, ops);
+out_err:
+ ui_progress__delete(progress);
+ return err;
+}
+
+int perf_session__process_events(struct perf_session *self,
+ struct perf_event_ops *ops)
+{
+ int err;
+
+ if (perf_session__register_idle_thread(self) == NULL)
+ return -ENOMEM;
+
+ if (!self->fd_pipe)
+ err = __perf_session__process_events(self,
+ self->header.data_offset,
+ self->header.data_size,
+ self->size, ops);
+ else
+ err = __perf_session__process_pipe_events(self, ops);
+
+ return err;
+}
+
+bool perf_session__has_traces(struct perf_session *self, const char *msg)
+{
+ if (!(self->sample_type & PERF_SAMPLE_RAW)) {
+ pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
+ return false;
+ }
+
+ return true;
+}
+
+int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
+ const char *symbol_name,
+ u64 addr)
+{
+ char *bracket;
+ enum map_type i;
+ struct ref_reloc_sym *ref;
+
+ ref = zalloc(sizeof(struct ref_reloc_sym));
+ if (ref == NULL)
+ return -ENOMEM;
+
+ ref->name = strdup(symbol_name);
+ if (ref->name == NULL) {
+ free(ref);
+ return -ENOMEM;
+ }
+
+ bracket = strchr(ref->name, ']');
+ if (bracket)
+ *bracket = '\0';
+
+ ref->addr = addr;
+
+ for (i = 0; i < MAP__NR_TYPES; ++i) {
+ struct kmap *kmap = map__kmap(maps[i]);
+ kmap->ref_reloc_sym = ref;
+ }
+
+ return 0;
+}
+
+size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
+{
+ return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
+ __dsos__fprintf(&self->host_machine.user_dsos, fp) +
+ machines__fprintf_dsos(&self->machines, fp);
+}
+
+size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
+ bool with_hits)
+{
+ size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
+ return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
+}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
new file mode 100644
index 00000000..9fa0fc2a
--- /dev/null
+++ b/tools/perf/util/session.h
@@ -0,0 +1,145 @@
+#ifndef __PERF_SESSION_H
+#define __PERF_SESSION_H
+
+#include "hist.h"
+#include "event.h"
+#include "header.h"
+#include "symbol.h"
+#include "thread.h"
+#include <linux/rbtree.h>
+#include "../../../include/linux/perf_event.h"
+
+struct sample_queue;
+struct ip_callchain;
+struct thread;
+
+struct ordered_samples {
+ u64 last_flush;
+ u64 next_flush;
+ u64 max_timestamp;
+ struct list_head samples_head;
+ struct sample_queue *last_inserted;
+};
+
+struct perf_session {
+ struct perf_header header;
+ unsigned long size;
+ unsigned long mmap_window;
+ struct rb_root threads;
+ struct list_head dead_threads;
+ struct thread *last_match;
+ struct machine host_machine;
+ struct rb_root machines;
+ struct rb_root hists_tree;
+ /*
+ * FIXME: should point to the first entry in hists_tree and
+	 * be a hists instance. Right now it's only 'report'
+	 * that uses ->hists_tree while all the rest use
+ * ->hists.
+ */
+ struct hists hists;
+ u64 sample_type;
+ int fd;
+ bool fd_pipe;
+ bool repipe;
+ int cwdlen;
+ char *cwd;
+ struct ordered_samples ordered_samples;
+ char filename[0];
+};
+
+struct perf_event_ops;
+
+typedef int (*event_op)(event_t *self, struct perf_session *session);
+typedef int (*event_op2)(event_t *self, struct perf_session *session,
+ struct perf_event_ops *ops);
+
+struct perf_event_ops {
+ event_op sample,
+ mmap,
+ comm,
+ fork,
+ exit,
+ lost,
+ read,
+ throttle,
+ unthrottle,
+ attr,
+ event_type,
+ tracing_data,
+ build_id;
+ event_op2 finished_round;
+ bool ordered_samples;
+};
+
+struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe);
+void perf_session__delete(struct perf_session *self);
+
+void perf_event_header__bswap(struct perf_event_header *self);
+
+int __perf_session__process_events(struct perf_session *self,
+ u64 data_offset, u64 data_size, u64 size,
+ struct perf_event_ops *ops);
+int perf_session__process_events(struct perf_session *self,
+ struct perf_event_ops *event_ops);
+
+struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent);
+
+bool perf_session__has_traces(struct perf_session *self, const char *msg);
+
+int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
+ const char *symbol_name,
+ u64 addr);
+
+void mem_bswap_64(void *src, int byte_size);
+
+int perf_session__create_kernel_maps(struct perf_session *self);
+
+int do_read(int fd, void *buf, size_t size);
+void perf_session__update_sample_type(struct perf_session *self);
+void perf_session__remove_thread(struct perf_session *self, struct thread *th);
+
+static inline
+struct machine *perf_session__find_host_machine(struct perf_session *self)
+{
+ return &self->host_machine;
+}
+
+static inline
+struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid)
+{
+ if (pid == HOST_KERNEL_ID)
+ return &self->host_machine;
+ return machines__find(&self->machines, pid);
+}
+
+static inline
+struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid)
+{
+ if (pid == HOST_KERNEL_ID)
+ return &self->host_machine;
+ return machines__findnew(&self->machines, pid);
+}
+
+static inline
+void perf_session__process_machines(struct perf_session *self,
+ machine__process_t process)
+{
+ process(&self->host_machine, self);
+ return machines__process(&self->machines, process, self);
+}
+
+size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
+
+size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
+ FILE *fp, bool with_hits);
+
+static inline
+size_t perf_session__fprintf_nr_events(struct perf_session *self, FILE *fp)
+{
+ return hists__fprintf_nr_events(&self->hists, fp);
+}
+#endif /* __PERF_SESSION_H */
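
To make the session API above concrete, here is a minimal sketch of a consumer: it fills a perf_event_ops with one sample callback, asks for time-ordered delivery, and processes a perf.data file. The example_* names are invented for the illustration; the __used annotation follows the stubs in session.c, and every call uses the declarations shown above. Callbacks left NULL are filled in with process_event_stub by perf_event_ops__fill_defaults(), so a tool only sets what it cares about.

#include <fcntl.h>
#include <errno.h>
#include "session.h"

static int example_sample(event_t *event __used, struct perf_session *s __used)
{
	/* decode the sample payload here */
	return 0;
}

static struct perf_event_ops example_ops = {
	.sample		 = example_sample,
	/* queue samples by timestamp, flush them on PERF_RECORD_FINISHED_ROUND */
	.ordered_samples = true,
};

static int example_read_perf_data(void)
{
	struct perf_session *session;
	int err;

	session = perf_session__new("perf.data", O_RDONLY, false, false);
	if (session == NULL)
		return -ENOMEM;

	err = perf_session__process_events(session, &example_ops);

	perf_session__delete(session);
	return err;
}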
diff --git a/tools/perf/util/sigchain.c b/tools/perf/util/sigchain.c
new file mode 100644
index 00000000..ba785e9b
--- /dev/null
+++ b/tools/perf/util/sigchain.c
@@ -0,0 +1,52 @@
+#include "sigchain.h"
+#include "cache.h"
+
+#define SIGCHAIN_MAX_SIGNALS 32
+
+struct sigchain_signal {
+ sigchain_fun *old;
+ int n;
+ int alloc;
+};
+static struct sigchain_signal signals[SIGCHAIN_MAX_SIGNALS];
+
+static void check_signum(int sig)
+{
+ if (sig < 1 || sig >= SIGCHAIN_MAX_SIGNALS)
+ die("BUG: signal out of range: %d", sig);
+}
+
+static int sigchain_push(int sig, sigchain_fun f)
+{
+ struct sigchain_signal *s = signals + sig;
+ check_signum(sig);
+
+ ALLOC_GROW(s->old, s->n + 1, s->alloc);
+ s->old[s->n] = signal(sig, f);
+ if (s->old[s->n] == SIG_ERR)
+ return -1;
+ s->n++;
+ return 0;
+}
+
+int sigchain_pop(int sig)
+{
+ struct sigchain_signal *s = signals + sig;
+ check_signum(sig);
+ if (s->n < 1)
+ return 0;
+
+ if (signal(sig, s->old[s->n - 1]) == SIG_ERR)
+ return -1;
+ s->n--;
+ return 0;
+}
+
+void sigchain_push_common(sigchain_fun f)
+{
+ sigchain_push(SIGINT, f);
+ sigchain_push(SIGHUP, f);
+ sigchain_push(SIGTERM, f);
+ sigchain_push(SIGQUIT, f);
+ sigchain_push(SIGPIPE, f);
+}
diff --git a/tools/perf/util/sigchain.h b/tools/perf/util/sigchain.h
new file mode 100644
index 00000000..959d64eb
--- /dev/null
+++ b/tools/perf/util/sigchain.h
@@ -0,0 +1,10 @@
+#ifndef __PERF_SIGCHAIN_H
+#define __PERF_SIGCHAIN_H
+
+typedef void (*sigchain_fun)(int);
+
+int sigchain_pop(int sig);
+
+void sigchain_push_common(sigchain_fun f);
+
+#endif /* __PERF_SIGCHAIN_H */
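
The usual pattern for these helpers is a cleanup handler that pops itself and re-raises, so the handler saved by sigchain_push_common() (or the default action) still runs afterwards. A hedged sketch, with the handler name invented for the example:

#include <signal.h>
#include "sigchain.h"

static void example_sig_cleanup(int sig)
{
	/* remove temporary files, restore the terminal, etc. */

	sigchain_pop(sig);	/* restore the previously installed handler */
	raise(sig);		/* ... and let it (or the default action) run */
}

/* during tool setup:  sigchain_push_common(example_sig_cleanup); */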
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
new file mode 100644
index 00000000..b62a553c
--- /dev/null
+++ b/tools/perf/util/sort.c
@@ -0,0 +1,345 @@
+#include "sort.h"
+#include "hist.h"
+
+regex_t parent_regex;
+const char default_parent_pattern[] = "^sys_|^do_page_fault";
+const char *parent_pattern = default_parent_pattern;
+const char default_sort_order[] = "comm,dso,symbol";
+const char *sort_order = default_sort_order;
+int sort__need_collapse = 0;
+int sort__has_parent = 0;
+
+enum sort_type sort__first_dimension;
+
+char *field_sep;
+
+LIST_HEAD(hist_entry__sort_list);
+
+static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width);
+static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width);
+static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width);
+static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width);
+static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width);
+static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width);
+
+struct sort_entry sort_thread = {
+ .se_header = "Command: Pid",
+ .se_cmp = sort__thread_cmp,
+ .se_snprintf = hist_entry__thread_snprintf,
+ .se_width_idx = HISTC_THREAD,
+};
+
+struct sort_entry sort_comm = {
+ .se_header = "Command",
+ .se_cmp = sort__comm_cmp,
+ .se_collapse = sort__comm_collapse,
+ .se_snprintf = hist_entry__comm_snprintf,
+ .se_width_idx = HISTC_COMM,
+};
+
+struct sort_entry sort_dso = {
+ .se_header = "Shared Object",
+ .se_cmp = sort__dso_cmp,
+ .se_snprintf = hist_entry__dso_snprintf,
+ .se_width_idx = HISTC_DSO,
+};
+
+struct sort_entry sort_sym = {
+ .se_header = "Symbol",
+ .se_cmp = sort__sym_cmp,
+ .se_snprintf = hist_entry__sym_snprintf,
+ .se_width_idx = HISTC_SYMBOL,
+};
+
+struct sort_entry sort_parent = {
+ .se_header = "Parent symbol",
+ .se_cmp = sort__parent_cmp,
+ .se_snprintf = hist_entry__parent_snprintf,
+ .se_width_idx = HISTC_PARENT,
+};
+
+struct sort_entry sort_cpu = {
+ .se_header = "CPU",
+ .se_cmp = sort__cpu_cmp,
+ .se_snprintf = hist_entry__cpu_snprintf,
+ .se_width_idx = HISTC_CPU,
+};
+
+struct sort_dimension {
+ const char *name;
+ struct sort_entry *entry;
+ int taken;
+};
+
+static struct sort_dimension sort_dimensions[] = {
+ { .name = "pid", .entry = &sort_thread, },
+ { .name = "comm", .entry = &sort_comm, },
+ { .name = "dso", .entry = &sort_dso, },
+ { .name = "symbol", .entry = &sort_sym, },
+ { .name = "parent", .entry = &sort_parent, },
+ { .name = "cpu", .entry = &sort_cpu, },
+};
+
+int64_t cmp_null(void *l, void *r)
+{
+ if (!l && !r)
+ return 0;
+ else if (!l)
+ return -1;
+ else
+ return 1;
+}
+
+/* --sort pid */
+
+int64_t
+sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->thread->pid - left->thread->pid;
+}
+
+static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
+{
+ int n;
+ va_list ap;
+
+ va_start(ap, fmt);
+ n = vsnprintf(bf, size, fmt, ap);
+ if (field_sep && n > 0) {
+ char *sep = bf;
+
+ while (1) {
+ sep = strchr(sep, *field_sep);
+ if (sep == NULL)
+ break;
+ *sep = '.';
+ }
+ }
+ va_end(ap);
+ return n;
+}
+
+static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%*s:%5d", width,
+ self->thread->comm ?: "", self->thread->pid);
+}
+
+static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%*s", width, self->thread->comm);
+}
+
+/* --sort dso */
+
+int64_t
+sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct dso *dso_l = left->ms.map ? left->ms.map->dso : NULL;
+ struct dso *dso_r = right->ms.map ? right->ms.map->dso : NULL;
+ const char *dso_name_l, *dso_name_r;
+
+ if (!dso_l || !dso_r)
+ return cmp_null(dso_l, dso_r);
+
+ if (verbose) {
+ dso_name_l = dso_l->long_name;
+ dso_name_r = dso_r->long_name;
+ } else {
+ dso_name_l = dso_l->short_name;
+ dso_name_r = dso_r->short_name;
+ }
+
+ return strcmp(dso_name_l, dso_name_r);
+}
+
+static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ if (self->ms.map && self->ms.map->dso) {
+ const char *dso_name = !verbose ? self->ms.map->dso->short_name :
+ self->ms.map->dso->long_name;
+ return repsep_snprintf(bf, size, "%-*s", width, dso_name);
+ }
+
+ return repsep_snprintf(bf, size, "%*Lx", width, self->ip);
+}
+
+/* --sort symbol */
+
+int64_t
+sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ u64 ip_l, ip_r;
+
+ if (left->ms.sym == right->ms.sym)
+ return 0;
+
+ ip_l = left->ms.sym ? left->ms.sym->start : left->ip;
+ ip_r = right->ms.sym ? right->ms.sym->start : right->ip;
+
+ return (int64_t)(ip_r - ip_l);
+}
+
+static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width __used)
+{
+ size_t ret = 0;
+
+ if (verbose) {
+ char o = self->ms.map ? dso__symtab_origin(self->ms.map->dso) : '!';
+ ret += repsep_snprintf(bf, size, "%*Lx %c ",
+ BITS_PER_LONG / 4, self->ip, o);
+ }
+
+ ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", self->level);
+ if (self->ms.sym)
+ ret += repsep_snprintf(bf + ret, size - ret, "%s",
+ self->ms.sym->name);
+ else
+ ret += repsep_snprintf(bf + ret, size - ret, "%*Lx",
+ BITS_PER_LONG / 4, self->ip);
+
+ return ret;
+}
+
+/* --sort comm */
+
+int64_t
+sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->thread->pid - left->thread->pid;
+}
+
+int64_t
+sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
+{
+ char *comm_l = left->thread->comm;
+ char *comm_r = right->thread->comm;
+
+ if (!comm_l || !comm_r)
+ return cmp_null(comm_l, comm_r);
+
+ return strcmp(comm_l, comm_r);
+}
+
+/* --sort parent */
+
+int64_t
+sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct symbol *sym_l = left->parent;
+ struct symbol *sym_r = right->parent;
+
+ if (!sym_l || !sym_r)
+ return cmp_null(sym_l, sym_r);
+
+ return strcmp(sym_l->name, sym_r->name);
+}
+
+static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*s", width,
+ self->parent ? self->parent->name : "[other]");
+}
+
+/* --sort cpu */
+
+int64_t
+sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->cpu - left->cpu;
+}
+
+static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*d", width, self->cpu);
+}
+
+int sort_dimension__add(const char *tok)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
+ struct sort_dimension *sd = &sort_dimensions[i];
+
+ if (sd->taken)
+ continue;
+
+ if (strncasecmp(tok, sd->name, strlen(tok)))
+ continue;
+
+ if (sd->entry->se_collapse)
+ sort__need_collapse = 1;
+
+ if (sd->entry == &sort_parent) {
+ int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
+ if (ret) {
+ char err[BUFSIZ];
+
+ regerror(ret, &parent_regex, err, sizeof(err));
+ pr_err("Invalid regex: %s\n%s", parent_pattern, err);
+ return -EINVAL;
+ }
+ sort__has_parent = 1;
+ }
+
+ if (list_empty(&hist_entry__sort_list)) {
+ if (!strcmp(sd->name, "pid"))
+ sort__first_dimension = SORT_PID;
+ else if (!strcmp(sd->name, "comm"))
+ sort__first_dimension = SORT_COMM;
+ else if (!strcmp(sd->name, "dso"))
+ sort__first_dimension = SORT_DSO;
+ else if (!strcmp(sd->name, "symbol"))
+ sort__first_dimension = SORT_SYM;
+ else if (!strcmp(sd->name, "parent"))
+ sort__first_dimension = SORT_PARENT;
+ else if (!strcmp(sd->name, "cpu"))
+ sort__first_dimension = SORT_CPU;
+ }
+
+ list_add_tail(&sd->entry->list, &hist_entry__sort_list);
+ sd->taken = 1;
+
+ return 0;
+ }
+
+ return -ESRCH;
+}
+
+void setup_sorting(const char * const usagestr[], const struct option *opts)
+{
+ char *tmp, *tok, *str = strdup(sort_order);
+
+ for (tok = strtok_r(str, ", ", &tmp);
+ tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ if (sort_dimension__add(tok) < 0) {
+ error("Unknown --sort key: `%s'", tok);
+ usage_with_options(usagestr, opts);
+ }
+ }
+
+ free(str);
+}
+
+void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
+ const char *list_name, FILE *fp)
+{
+ if (list && strlist__nr_entries(list) == 1) {
+ if (fp != NULL)
+ fprintf(fp, "# %s: %s\n", list_name,
+ strlist__entry(list, 0)->s);
+ self->elide = true;
+ }
+}
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
new file mode 100644
index 00000000..46e531d0
--- /dev/null
+++ b/tools/perf/util/sort.h
@@ -0,0 +1,124 @@
+#ifndef __PERF_SORT_H
+#define __PERF_SORT_H
+#include "../builtin.h"
+
+#include "util.h"
+
+#include "color.h"
+#include <linux/list.h>
+#include "cache.h"
+#include <linux/rbtree.h>
+#include "symbol.h"
+#include "string.h"
+#include "callchain.h"
+#include "strlist.h"
+#include "values.h"
+
+#include "../perf.h"
+#include "debug.h"
+#include "header.h"
+
+#include "parse-options.h"
+#include "parse-events.h"
+
+#include "thread.h"
+#include "sort.h"
+
+extern regex_t parent_regex;
+extern const char *sort_order;
+extern const char default_parent_pattern[];
+extern const char *parent_pattern;
+extern const char default_sort_order[];
+extern int sort__need_collapse;
+extern int sort__has_parent;
+extern char *field_sep;
+extern struct sort_entry sort_comm;
+extern struct sort_entry sort_dso;
+extern struct sort_entry sort_sym;
+extern struct sort_entry sort_parent;
+extern enum sort_type sort__first_dimension;
+
+/**
+ * struct hist_entry - histogram entry
+ *
+ * @row_offset - offset from the first callchain expanded to appear on screen
+ * @nr_rows - rows expanded in callchain, recalculated on folding/unfolding
+ */
+struct hist_entry {
+ struct rb_node rb_node;
+ u64 period;
+ u64 period_sys;
+ u64 period_us;
+ u64 period_guest_sys;
+ u64 period_guest_us;
+ struct map_symbol ms;
+ struct thread *thread;
+ u64 ip;
+ s32 cpu;
+ u32 nr_events;
+
+ /* XXX These two should move to some tree widget lib */
+ u16 row_offset;
+ u16 nr_rows;
+
+ bool init_have_children;
+ char level;
+ u8 filtered;
+ struct symbol *parent;
+ union {
+ unsigned long position;
+ struct hist_entry *pair;
+ struct rb_root sorted_chain;
+ };
+ struct callchain_node callchain[0];
+};
+
+enum sort_type {
+ SORT_PID,
+ SORT_COMM,
+ SORT_DSO,
+ SORT_SYM,
+ SORT_PARENT,
+ SORT_CPU,
+};
+
+/*
+ * configurable sorting bits
+ */
+
+struct sort_entry {
+ struct list_head list;
+
+ const char *se_header;
+
+ int64_t (*se_cmp)(struct hist_entry *, struct hist_entry *);
+ int64_t (*se_collapse)(struct hist_entry *, struct hist_entry *);
+ int (*se_snprintf)(struct hist_entry *self, char *bf, size_t size,
+ unsigned int width);
+ u8 se_width_idx;
+ bool elide;
+};
+
+extern struct sort_entry sort_thread;
+extern struct list_head hist_entry__sort_list;
+
+void setup_sorting(const char * const usagestr[], const struct option *opts);
+
+extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int);
+extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int);
+extern size_t sort__dso_print(FILE *, struct hist_entry *, unsigned int);
+extern size_t sort__sym_print(FILE *, struct hist_entry *, unsigned int __used);
+extern int64_t cmp_null(void *, void *);
+extern int64_t sort__thread_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__comm_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__comm_collapse(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__dso_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *);
+extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *);
+int64_t sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right);
+extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int);
+extern int sort_dimension__add(const char *);
+void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
+ const char *list_name, FILE *fp);
+
+#endif /* __PERF_SORT_H */
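
A short sketch of how a builtin is expected to wire this up, using only the declarations above (the usage strings, option table and function name are placeholders):

#include "sort.h"

static const char * const example_usage[] = {
	"perf example [<options>]",
	NULL
};

static void example_setup_sorting(const struct option *options)
{
	/* any comma/space separated subset of: pid, comm, dso, symbol, parent, cpu */
	sort_order = "comm,dso,symbol";

	/*
	 * setup_sorting() tokenizes sort_order, appends each matching
	 * sort_entry to hist_entry__sort_list via sort_dimension__add(),
	 * and sets sort__first_dimension / sort__need_collapse as needed.
	 */
	setup_sorting(example_usage, options);
}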
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
new file mode 100644
index 00000000..92e06851
--- /dev/null
+++ b/tools/perf/util/strbuf.c
@@ -0,0 +1,133 @@
+#include "cache.h"
+
+int prefixcmp(const char *str, const char *prefix)
+{
+ for (; ; str++, prefix++)
+ if (!*prefix)
+ return 0;
+ else if (*str != *prefix)
+ return (unsigned char)*prefix - (unsigned char)*str;
+}
+
+/*
+ * Used as the default ->buf value, so that people can always assume
+ * buf is non-NULL and ->buf is NUL-terminated even for a freshly
+ * initialized strbuf.
+ */
+char strbuf_slopbuf[1];
+
+void strbuf_init(struct strbuf *sb, ssize_t hint)
+{
+ sb->alloc = sb->len = 0;
+ sb->buf = strbuf_slopbuf;
+ if (hint)
+ strbuf_grow(sb, hint);
+}
+
+void strbuf_release(struct strbuf *sb)
+{
+ if (sb->alloc) {
+ free(sb->buf);
+ strbuf_init(sb, 0);
+ }
+}
+
+char *strbuf_detach(struct strbuf *sb, size_t *sz)
+{
+ char *res = sb->alloc ? sb->buf : NULL;
+ if (sz)
+ *sz = sb->len;
+ strbuf_init(sb, 0);
+ return res;
+}
+
+void strbuf_grow(struct strbuf *sb, size_t extra)
+{
+ if (sb->len + extra + 1 <= sb->len)
+ die("you want to use way too much memory");
+ if (!sb->alloc)
+ sb->buf = NULL;
+ ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc);
+}
+
+static void strbuf_splice(struct strbuf *sb, size_t pos, size_t len,
+ const void *data, size_t dlen)
+{
+ if (pos + len < pos)
+ die("you want to use way too much memory");
+ if (pos > sb->len)
+ die("`pos' is too far after the end of the buffer");
+ if (pos + len > sb->len)
+ die("`pos + len' is too far after the end of the buffer");
+
+ if (dlen >= len)
+ strbuf_grow(sb, dlen - len);
+ memmove(sb->buf + pos + dlen,
+ sb->buf + pos + len,
+ sb->len - pos - len);
+ memcpy(sb->buf + pos, data, dlen);
+ strbuf_setlen(sb, sb->len + dlen - len);
+}
+
+void strbuf_remove(struct strbuf *sb, size_t pos, size_t len)
+{
+ strbuf_splice(sb, pos, len, NULL, 0);
+}
+
+void strbuf_add(struct strbuf *sb, const void *data, size_t len)
+{
+ strbuf_grow(sb, len);
+ memcpy(sb->buf + sb->len, data, len);
+ strbuf_setlen(sb, sb->len + len);
+}
+
+void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
+{
+ int len;
+ va_list ap;
+
+ if (!strbuf_avail(sb))
+ strbuf_grow(sb, 64);
+ va_start(ap, fmt);
+ len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+ va_end(ap);
+ if (len < 0)
+ die("your vsnprintf is broken");
+ if (len > strbuf_avail(sb)) {
+ strbuf_grow(sb, len);
+ va_start(ap, fmt);
+ len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+ va_end(ap);
+ if (len > strbuf_avail(sb)) {
+ die("this should not happen, your snprintf is broken");
+ }
+ }
+ strbuf_setlen(sb, sb->len + len);
+}
+
+ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
+{
+ size_t oldlen = sb->len;
+ size_t oldalloc = sb->alloc;
+
+ strbuf_grow(sb, hint ? hint : 8192);
+ for (;;) {
+ ssize_t cnt;
+
+ cnt = read(fd, sb->buf + sb->len, sb->alloc - sb->len - 1);
+ if (cnt < 0) {
+ if (oldalloc == 0)
+ strbuf_release(sb);
+ else
+ strbuf_setlen(sb, oldlen);
+ return -1;
+ }
+ if (!cnt)
+ break;
+ sb->len += cnt;
+ strbuf_grow(sb, 8192);
+ }
+
+ sb->buf[sb->len] = '\0';
+ return sb->len - oldlen;
+}
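Taken together, the routines above give a growable, always NUL-terminated byte buffer. A minimal usage sketch, assuming the usual perf include paths (build_probe_arg() is a hypothetical helper, not part of this patch):

#include "strbuf.h"

static char *build_probe_arg(const char *event, int nargs)
{
        struct strbuf sb = STRBUF_INIT;

        strbuf_addstr(&sb, event);
        strbuf_addch(&sb, ' ');
        strbuf_addf(&sb, "nargs=%d", nargs);

        /* strbuf_detach() hands ownership of the NUL-terminated buffer to
         * the caller and re-initializes sb, so no strbuf_release() is needed */
        return strbuf_detach(&sb, NULL);
}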
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
new file mode 100644
index 00000000..436ac319
--- /dev/null
+++ b/tools/perf/util/strbuf.h
@@ -0,0 +1,92 @@
+#ifndef __PERF_STRBUF_H
+#define __PERF_STRBUF_H
+
+/*
+ * Strbufs can be used in many ways: as a byte array, or to store
+ * arbitrarily long, overflow-safe strings.
+ *
+ * Strbufs have some invariants that are very important to keep in mind:
+ *
+ * 1. the ->buf member is always malloc-ed, hence strbuf's can be used to
+ * build complex strings/buffers whose final size isn't easily known.
+ *
+ * It is NOT legal to copy the ->buf pointer away.
+ * `strbuf_detach' is the operation that detaches a buffer from its shell
+ * while keeping the shell valid with respect to its invariants.
+ *
+ * 2. the ->buf member is a byte array that has at least ->len + 1 bytes
+ * allocated. The extra byte is used to store a '\0', allowing the ->buf
+ * member to be a valid C-string. Every strbuf function ensures this
+ * invariant is preserved.
+ *
+ * Note that it is OK to "play" with the buffer directly if you work it
+ * that way:
+ *
+ * strbuf_grow(sb, SOME_SIZE);
+ * ... Here, the memory array starting at sb->buf, and of length
+ * ... strbuf_avail(sb) is all yours, and you are sure that
+ * ... strbuf_avail(sb) is at least SOME_SIZE.
+ * strbuf_setlen(sb, sb->len + SOME_OTHER_SIZE);
+ *
+ * Of course, SOME_OTHER_SIZE must be smaller than or equal to strbuf_avail(sb).
+ *
+ * Doing so is safe, though if it has to be done in many places, adding the
+ * missing API to the strbuf module is the way to go.
+ *
+ * XXX: do _not_ assume that the area that is yours is of size ->alloc - 1
+ * even if it's true in the current implementation. alloc is somewhat of a
+ * "private" member that should not be messed with.
+ */
+
+#include <assert.h>
+
+extern char strbuf_slopbuf[];
+struct strbuf {
+ size_t alloc;
+ size_t len;
+ char *buf;
+};
+
+#define STRBUF_INIT { 0, 0, strbuf_slopbuf }
+
+/*----- strbuf life cycle -----*/
+extern void strbuf_init(struct strbuf *buf, ssize_t hint);
+extern void strbuf_release(struct strbuf *);
+extern char *strbuf_detach(struct strbuf *, size_t *);
+
+/*----- strbuf size related -----*/
+static inline ssize_t strbuf_avail(const struct strbuf *sb) {
+ return sb->alloc ? sb->alloc - sb->len - 1 : 0;
+}
+
+extern void strbuf_grow(struct strbuf *, size_t);
+
+static inline void strbuf_setlen(struct strbuf *sb, size_t len) {
+ if (!sb->alloc)
+ strbuf_grow(sb, 0);
+ assert(len < sb->alloc);
+ sb->len = len;
+ sb->buf[len] = '\0';
+}
+
+/*----- add data in your buffer -----*/
+static inline void strbuf_addch(struct strbuf *sb, int c) {
+ strbuf_grow(sb, 1);
+ sb->buf[sb->len++] = c;
+ sb->buf[sb->len] = '\0';
+}
+
+extern void strbuf_remove(struct strbuf *, size_t pos, size_t len);
+
+extern void strbuf_add(struct strbuf *, const void *, size_t);
+static inline void strbuf_addstr(struct strbuf *sb, const char *s) {
+ strbuf_add(sb, s, strlen(s));
+}
+
+__attribute__((format(printf,2,3)))
+extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...);
+
+/* XXX: if read fails, any partial read is undone */
+extern ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint);
+
+#endif /* __PERF_STRBUF_H */
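The "play with the buffer directly" pattern described in the comment above boils down to: reserve room, let a libc call write into the spare area, then commit the new length. A sketch of that pattern under those invariants (the helper name is illustrative; <stdio.h> and the strbuf declarations are assumed to be in scope, and the +1 covers the terminating NUL slot invariant 2 always reserves):

static void strbuf_add_octal(struct strbuf *sb, unsigned int mode)
{
        int n;

        strbuf_grow(sb, 16);                    /* at least 16 spare bytes */
        n = snprintf(sb->buf + sb->len, strbuf_avail(sb) + 1, "%o", mode);
        strbuf_setlen(sb, sb->len + n);         /* commit, keeps the '\0' */
}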
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
new file mode 100644
index 00000000..0409fc7c
--- /dev/null
+++ b/tools/perf/util/string.c
@@ -0,0 +1,296 @@
+#include "util.h"
+#include "string.h"
+
+#define K 1024LL
+/*
+ * perf_atoll()
+ * Parse (\d+)(b|B|kb|KB|mb|MB|gb|GB|tb|TB) (e.g. "256MB")
+ * and return its numeric value
+ */
+s64 perf_atoll(const char *str)
+{
+ unsigned int i;
+ s64 length = -1, unit = 1;
+
+ if (!isdigit(str[0]))
+ goto out_err;
+
+ for (i = 1; i < strlen(str); i++) {
+ switch (str[i]) {
+ case 'B':
+ case 'b':
+ break;
+ case 'K':
+ if (str[i + 1] != 'B')
+ goto out_err;
+ else
+ goto kilo;
+ case 'k':
+ if (str[i + 1] != 'b')
+ goto out_err;
+kilo:
+ unit = K;
+ break;
+ case 'M':
+ if (str[i + 1] != 'B')
+ goto out_err;
+ else
+ goto mega;
+ case 'm':
+ if (str[i + 1] != 'b')
+ goto out_err;
+mega:
+ unit = K * K;
+ break;
+ case 'G':
+ if (str[i + 1] != 'B')
+ goto out_err;
+ else
+ goto giga;
+ case 'g':
+ if (str[i + 1] != 'b')
+ goto out_err;
+giga:
+ unit = K * K * K;
+ break;
+ case 'T':
+ if (str[i + 1] != 'B')
+ goto out_err;
+ else
+ goto tera;
+ case 't':
+ if (str[i + 1] != 'b')
+ goto out_err;
+tera:
+ unit = K * K * K * K;
+ break;
+ case '\0': /* bare number, no unit suffix */
+ unit = 1;
+ break;
+ default:
+ if (!isdigit(str[i]))
+ goto out_err;
+ break;
+ }
+ }
+
+ length = atoll(str) * unit;
+ goto out;
+
+out_err:
+ length = -1;
+out:
+ return length;
+}
+
+/*
+ * Helper function for splitting a string into an argv-like array.
+ * Originally copied from lib/argv_split.c.
+ */
+static const char *skip_sep(const char *cp)
+{
+ while (*cp && isspace(*cp))
+ cp++;
+
+ return cp;
+}
+
+static const char *skip_arg(const char *cp)
+{
+ while (*cp && !isspace(*cp))
+ cp++;
+
+ return cp;
+}
+
+static int count_argc(const char *str)
+{
+ int count = 0;
+
+ while (*str) {
+ str = skip_sep(str);
+ if (*str) {
+ count++;
+ str = skip_arg(str);
+ }
+ }
+
+ return count;
+}
+
+/**
+ * argv_free - free an argv
+ * @argv: the argument vector to be freed
+ *
+ * Frees an argv and the strings it points to.
+ */
+void argv_free(char **argv)
+{
+ char **p;
+ for (p = argv; *p; p++)
+ free(*p);
+
+ free(argv);
+}
+
+/**
+ * argv_split - split a string at whitespace, returning an argv
+ * @str: the string to be split
+ * @argcp: returned argument count
+ *
+ * Returns an array of pointers to strings which are split out from
+ * @str. This is performed by strictly splitting on white-space; no
+ * quote processing is performed. Multiple whitespace characters are
+ * considered to be a single argument separator. The returned array
+ * is always NULL-terminated. Returns NULL on memory allocation
+ * failure.
+ */
+char **argv_split(const char *str, int *argcp)
+{
+ int argc = count_argc(str);
+ char **argv = zalloc(sizeof(*argv) * (argc+1));
+ char **argvp;
+
+ if (argv == NULL)
+ goto out;
+
+ if (argcp)
+ *argcp = argc;
+
+ argvp = argv;
+
+ while (*str) {
+ str = skip_sep(str);
+
+ if (*str) {
+ const char *p = str;
+ char *t;
+
+ str = skip_arg(str);
+
+ t = strndup(p, str-p);
+ if (t == NULL)
+ goto fail;
+ *argvp++ = t;
+ }
+ }
+ *argvp = NULL;
+
+out:
+ return argv;
+
+fail:
+ argv_free(argv);
+ return NULL;
+}
+
+/* Character class matching */
+static bool __match_charclass(const char *pat, char c, const char **npat)
+{
+ bool complement = false, ret = true;
+
+ if (*pat == '!') {
+ complement = true;
+ pat++;
+ }
+ if (*pat++ == c) /* First character is special */
+ goto end;
+
+ while (*pat && *pat != ']') { /* Matching */
+ if (*pat == '-' && *(pat + 1) != ']') { /* Range */
+ if (*(pat - 1) <= c && c <= *(pat + 1))
+ goto end;
+ if (*(pat - 1) > *(pat + 1))
+ goto error;
+ pat += 2;
+ } else if (*pat++ == c)
+ goto end;
+ }
+ if (!*pat)
+ goto error;
+ ret = false;
+
+end:
+ while (*pat && *pat != ']') /* Searching closing */
+ pat++;
+ if (!*pat)
+ goto error;
+ *npat = pat + 1;
+ return complement ? !ret : ret;
+
+error:
+ return false;
+}
+
+/* Glob/lazy pattern matching */
+static bool __match_glob(const char *str, const char *pat, bool ignore_space)
+{
+ while (*str && *pat && *pat != '*') {
+ if (ignore_space) {
+ /* Ignore spaces for lazy matching */
+ if (isspace(*str)) {
+ str++;
+ continue;
+ }
+ if (isspace(*pat)) {
+ pat++;
+ continue;
+ }
+ }
+ if (*pat == '?') { /* Matches any single character */
+ str++;
+ pat++;
+ continue;
+ } else if (*pat == '[') /* Character classes/Ranges */
+ if (__match_charclass(pat + 1, *str, &pat)) {
+ str++;
+ continue;
+ } else
+ return false;
+ else if (*pat == '\\') /* Escaped char match as normal char */
+ pat++;
+ if (*str++ != *pat++)
+ return false;
+ }
+ /* Check wild card */
+ if (*pat == '*') {
+ while (*pat == '*')
+ pat++;
+ if (!*pat) /* Tail wild card matches all */
+ return true;
+ while (*str)
+ if (strglobmatch(str++, pat))
+ return true;
+ }
+ return !*str && !*pat;
+}
+
+/**
+ * strglobmatch - glob expression pattern matching
+ * @str: the target string to match
+ * @pat: the pattern string to match
+ *
+ * This returns true if @str matches @pat. @pat can include wildcards
+ * ('*', '?') and character classes ([CHARS]; complementation and ranges are
+ * also supported). The escape character ('\') lets special characters be
+ * matched as normal characters.
+ *
+ * Note: if @pat syntax is broken, this always returns false.
+ */
+bool strglobmatch(const char *str, const char *pat)
+{
+ return __match_glob(str, pat, false);
+}
+
+/**
+ * strlazymatch - matching pattern strings lazily with glob pattern
+ * @str: the target string to match
+ * @pat: the pattern string to match
+ *
+ * This is similar to strglobmatch, except this ignores spaces in
+ * the target string.
+ */
+bool strlazymatch(const char *str, const char *pat)
+{
+ return __match_glob(str, pat, true);
+}
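A few illustrative calls showing the expected behaviour of the helpers above (a standalone sketch, not part of the patch; it assumes perf's util "string.h" is on the include path):

#include <assert.h>
#include "string.h"

static void string_helpers_demo(void)
{
        int argc;
        char **argv = argv_split("perf  record -a", &argc);

        if (argv) {
                assert(argc == 3);      /* runs of whitespace collapse */
                argv_free(argv);
        }

        assert(perf_atoll("256MB") == 256LL * 1024 * 1024);
        assert(strglobmatch("sys_open", "sys_*"));      /* tail wildcard */
        assert(!strglobmatch("sys_open", "sys_?"));     /* '?' is one char */
        assert(strlazymatch("foo bar", "foobar"));      /* spaces ignored */
}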
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
new file mode 100644
index 00000000..6783a204
--- /dev/null
+++ b/tools/perf/util/strlist.c
@@ -0,0 +1,200 @@
+/*
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include "strlist.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static struct str_node *str_node__new(const char *s, bool dupstr)
+{
+ struct str_node *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ if (dupstr) {
+ s = strdup(s);
+ if (s == NULL)
+ goto out_delete;
+ }
+ self->s = s;
+ }
+
+ return self;
+
+out_delete:
+ free(self);
+ return NULL;
+}
+
+static void str_node__delete(struct str_node *self, bool dupstr)
+{
+ if (dupstr)
+ free((void *)self->s);
+ free(self);
+}
+
+int strlist__add(struct strlist *self, const char *new_entry)
+{
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+ struct str_node *sn;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, new_entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ sn = str_node__new(new_entry, self->dupstr);
+ if (sn == NULL)
+ return -ENOMEM;
+
+ rb_link_node(&sn->rb_node, parent, p);
+ rb_insert_color(&sn->rb_node, &self->entries);
+ ++self->nr_entries;
+
+ return 0;
+}
+
+int strlist__load(struct strlist *self, const char *filename)
+{
+ char entry[1024];
+ int err;
+ FILE *fp = fopen(filename, "r");
+
+ if (fp == NULL)
+ return errno;
+
+ while (fgets(entry, sizeof(entry), fp) != NULL) {
+ const size_t len = strlen(entry);
+
+ if (len == 0)
+ continue;
+ entry[len - 1] = '\0';
+
+ err = strlist__add(self, entry);
+ if (err != 0)
+ goto out;
+ }
+
+ err = 0;
+out:
+ fclose(fp);
+ return err;
+}
+
+void strlist__remove(struct strlist *self, struct str_node *sn)
+{
+ rb_erase(&sn->rb_node, &self->entries);
+ str_node__delete(sn, self->dupstr);
+}
+
+struct str_node *strlist__find(struct strlist *self, const char *entry)
+{
+ struct rb_node **p = &self->entries.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ struct str_node *sn;
+ int rc;
+
+ parent = *p;
+ sn = rb_entry(parent, struct str_node, rb_node);
+ rc = strcmp(sn->s, entry);
+
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return sn;
+ }
+
+ return NULL;
+}
+
+static int strlist__parse_list_entry(struct strlist *self, const char *s)
+{
+ if (strncmp(s, "file://", 7) == 0)
+ return strlist__load(self, s + 7);
+
+ return strlist__add(self, s);
+}
+
+int strlist__parse_list(struct strlist *self, const char *s)
+{
+ char *sep;
+ int err;
+
+ while ((sep = strchr(s, ',')) != NULL) {
+ *sep = '\0';
+ err = strlist__parse_list_entry(self, s);
+ *sep = ',';
+ if (err != 0)
+ return err;
+ s = sep + 1;
+ }
+
+ return *s ? strlist__parse_list_entry(self, s) : 0;
+}
+
+struct strlist *strlist__new(bool dupstr, const char *slist)
+{
+ struct strlist *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ self->entries = RB_ROOT;
+ self->dupstr = dupstr;
+ self->nr_entries = 0;
+ if (slist && strlist__parse_list(self, slist) != 0)
+ goto out_error;
+ }
+
+ return self;
+out_error:
+ free(self);
+ return NULL;
+}
+
+void strlist__delete(struct strlist *self)
+{
+ if (self != NULL) {
+ struct str_node *pos;
+ struct rb_node *next = rb_first(&self->entries);
+
+ while (next) {
+ pos = rb_entry(next, struct str_node, rb_node);
+ next = rb_next(&pos->rb_node);
+ strlist__remove(self, pos);
+ }
+ self->entries = RB_ROOT;
+ free(self);
+ }
+}
+
+struct str_node *strlist__entry(const struct strlist *self, unsigned int idx)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
+ struct str_node *pos = rb_entry(nd, struct str_node, rb_node);
+
+ if (!idx--)
+ return pos;
+ }
+
+ return NULL;
+}
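strlist__new() takes a comma-separated list, and strlist__parse_list_entry() treats a "file://" entry as a file to load, one entry per line. A small sketch (the file name below is hypothetical):

#include "strlist.h"

static struct strlist *build_symbol_filter(void)
{
        /* two literal entries plus the contents of /tmp/syms.txt */
        return strlist__new(true, "sys_open,sys_close,file:///tmp/syms.txt");
}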
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
new file mode 100644
index 00000000..3ba83900
--- /dev/null
+++ b/tools/perf/util/strlist.h
@@ -0,0 +1,78 @@
+#ifndef __PERF_STRLIST_H
+#define __PERF_STRLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+struct str_node {
+ struct rb_node rb_node;
+ const char *s;
+};
+
+struct strlist {
+ struct rb_root entries;
+ unsigned int nr_entries;
+ bool dupstr;
+};
+
+struct strlist *strlist__new(bool dupstr, const char *slist);
+void strlist__delete(struct strlist *self);
+
+void strlist__remove(struct strlist *self, struct str_node *sn);
+int strlist__load(struct strlist *self, const char *filename);
+int strlist__add(struct strlist *self, const char *str);
+
+struct str_node *strlist__entry(const struct strlist *self, unsigned int idx);
+struct str_node *strlist__find(struct strlist *self, const char *entry);
+
+static inline bool strlist__has_entry(struct strlist *self, const char *entry)
+{
+ return strlist__find(self, entry) != NULL;
+}
+
+static inline bool strlist__empty(const struct strlist *self)
+{
+ return self->nr_entries == 0;
+}
+
+static inline unsigned int strlist__nr_entries(const struct strlist *self)
+{
+ return self->nr_entries;
+}
+
+/* For strlist iteration */
+static inline struct str_node *strlist__first(struct strlist *self)
+{
+ struct rb_node *rn = rb_first(&self->entries);
+ return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
+}
+static inline struct str_node *strlist__next(struct str_node *sn)
+{
+ struct rb_node *rn;
+ if (!sn)
+ return NULL;
+ rn = rb_next(&sn->rb_node);
+ return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
+}
+
+/**
+ * strlist__for_each - iterate over a strlist
+ * @pos: the &struct str_node to use as a loop cursor.
+ * @self: the &struct strlist to iterate over.
+ */
+#define strlist__for_each(pos, self) \
+ for (pos = strlist__first(self); pos; pos = strlist__next(pos))
+
+/**
+ * strlist__for_each_safe - iterate over a strlist safe against removal of
+ * str_node
+ * @pos: the &struct str_node to use as a loop cursor.
+ * @n: another &struct str_node to use as temporary storage.
+ * @self: the &struct strlist to iterate over.
+ */
+#define strlist__for_each_safe(pos, n, self) \
+ for (pos = strlist__first(self), n = strlist__next(pos); pos;\
+ pos = n, n = strlist__next(n))
+
+int strlist__parse_list(struct strlist *self, const char *s);
+#endif /* __PERF_STRLIST_H */
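Walking a list with the iterator declared above might look like this (dump_strlist() is a hypothetical helper; <stdio.h> is assumed to be included):

static void dump_strlist(struct strlist *slist)
{
        struct str_node *pos;

        if (strlist__empty(slist))
                return;

        strlist__for_each(pos, slist)
                printf("%s\n", pos->s);
}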
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
new file mode 100644
index 00000000..b3637db0
--- /dev/null
+++ b/tools/perf/util/svghelper.c
@@ -0,0 +1,500 @@
+/*
+ * svghelper.c - helper functions for outputting svg
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * Authors:
+ * Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "svghelper.h"
+
+static u64 first_time, last_time;
+static u64 turbo_frequency, max_freq;
+
+
+#define SLOT_MULT 30.0
+#define SLOT_HEIGHT 25.0
+
+int svg_page_width = 1000;
+
+#define MIN_TEXT_SIZE 0.01
+
+static u64 total_height;
+static FILE *svgfile;
+
+static double cpu2slot(int cpu)
+{
+ return 2 * cpu + 1;
+}
+
+static double cpu2y(int cpu)
+{
+ return cpu2slot(cpu) * SLOT_MULT;
+}
+
+static double time2pixels(u64 time)
+{
+ double X;
+
+ X = 1.0 * svg_page_width * (time - first_time) / (last_time - first_time);
+ return X;
+}
+
+/*
+ * Round text sizes so that the svg viewer only needs a discrete
+ * number of renderings of the font
+ */
+static double round_text_size(double size)
+{
+ int loop = 100;
+ double target = 10.0;
+
+ if (size >= 10.0)
+ return size;
+ while (loop--) {
+ if (size >= target)
+ return target;
+ target = target / 2.0;
+ }
+ return size;
+}
+
+void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
+{
+ int new_width;
+
+ svgfile = fopen(filename, "w");
+ if (!svgfile) {
+ fprintf(stderr, "Cannot open %s for output\n", filename);
+ return;
+ }
+ first_time = start;
+ first_time = first_time / 100000000 * 100000000;
+ last_time = end;
+
+ /*
+ * if the recording is short, we default to a width of 1000, but
+ * for longer recordings we want at least 200 units of width per second
+ */
+ new_width = (last_time - first_time) / 5000000;
+
+ if (new_width > svg_page_width)
+ svg_page_width = new_width;
+
+ total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT;
+ fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n");
+ fprintf(svgfile, "<svg width=\"%i\" height=\"%llu\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
+
+ fprintf(svgfile, "<defs>\n <style type=\"text/css\">\n <![CDATA[\n");
+
+ fprintf(svgfile, " rect { stroke-width: 1; }\n");
+ fprintf(svgfile, " rect.process { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:1; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.sample { fill:rgb( 0, 0,255); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.blocked { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.waiting { fill:rgb(224,214, 0); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.WAITING { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
+ fprintf(svgfile, " rect.cpu { fill:rgb(192,192,192); fill-opacity:0.2; stroke-width:0.5; stroke:rgb(128,128,128); } \n");
+ fprintf(svgfile, " rect.pstate { fill:rgb(128,128,128); fill-opacity:0.8; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c1 { fill:rgb(255,214,214); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c2 { fill:rgb(255,172,172); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c3 { fill:rgb(255,130,130); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c4 { fill:rgb(255, 88, 88); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c5 { fill:rgb(255, 44, 44); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " rect.c6 { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; } \n");
+ fprintf(svgfile, " line.pstate { stroke:rgb(255,255, 0); stroke-opacity:0.8; stroke-width:2; } \n");
+
+ fprintf(svgfile, " ]]>\n </style>\n</defs>\n");
+}
+
+void svg_box(int Yslot, u64 start, u64 end, const char *type)
+{
+ if (!svgfile)
+ return;
+
+ fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"%s\"/>\n",
+ time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, type);
+}
+
+void svg_sample(int Yslot, int cpu, u64 start, u64 end)
+{
+ double text_size;
+ if (!svgfile)
+ return;
+
+ fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"sample\"/>\n",
+ time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT);
+
+ text_size = (time2pixels(end)-time2pixels(start));
+ if (cpu > 9)
+ text_size = text_size/2;
+ if (text_size > 1.25)
+ text_size = 1.25;
+ text_size = round_text_size(text_size);
+
+ if (text_size > MIN_TEXT_SIZE)
+ fprintf(svgfile, "<text x=\"%1.8f\" y=\"%1.8f\" font-size=\"%1.8fpt\">%i</text>\n",
+ time2pixels(start), Yslot * SLOT_MULT + SLOT_HEIGHT - 1, text_size, cpu + 1);
+
+}
+
+static char *time_to_string(u64 duration)
+{
+ static char text[80];
+
+ text[0] = 0;
+
+ if (duration < 1000) /* less than 1 usec */
+ return text;
+
+ if (duration < 1000 * 1000) { /* less than 1 msec */
+ sprintf(text, "%4.1f us", duration / 1000.0);
+ return text;
+ }
+ sprintf(text, "%4.1f ms", duration / 1000.0 / 1000);
+
+ return text;
+}
+
+void svg_waiting(int Yslot, u64 start, u64 end)
+{
+ char *text;
+ const char *style;
+ double font_size;
+
+ if (!svgfile)
+ return;
+
+ style = "waiting";
+
+ if (end-start > 10 * 1000000) /* 10 msec */
+ style = "WAITING";
+
+ text = time_to_string(end-start);
+
+ font_size = 1.0 * (time2pixels(end)-time2pixels(start));
+
+ if (font_size > 3)
+ font_size = 3;
+
+ font_size = round_text_size(font_size);
+
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT);
+ fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+ time2pixels(end)-time2pixels(start), SLOT_HEIGHT, style);
+ if (font_size > MIN_TEXT_SIZE)
+ fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%1.8fpt\"> %s</text>\n",
+ font_size, text);
+ fprintf(svgfile, "</g>\n");
+}
+
+static char *cpu_model(void)
+{
+ static char cpu_m[255];
+ char buf[256];
+ FILE *file;
+
+ cpu_m[0] = 0;
+ /* CPU type */
+ file = fopen("/proc/cpuinfo", "r");
+ if (file) {
+ while (fgets(buf, 255, file)) {
+ if (strstr(buf, "model name")) {
+ strncpy(cpu_m, &buf[13], 255);
+ break;
+ }
+ }
+ fclose(file);
+ }
+
+ /* CPU max frequency */
+ file = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", "r");
+ if (file) {
+ while (fgets(buf, 255, file)) {
+ unsigned int freq;
+ freq = strtoull(buf, NULL, 10);
+ if (freq > max_freq)
+ max_freq = freq;
+ }
+ fclose(file);
+ }
+ return cpu_m;
+}
+
+void svg_cpu_box(int cpu, u64 __max_freq, u64 __turbo_freq)
+{
+ char cpu_string[80];
+ if (!svgfile)
+ return;
+
+ max_freq = __max_freq;
+ turbo_frequency = __turbo_freq;
+
+ fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"cpu\"/>\n",
+ time2pixels(first_time),
+ time2pixels(last_time)-time2pixels(first_time),
+ cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
+
+ sprintf(cpu_string, "CPU %i", (int)cpu+1);
+ fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
+ 10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string);
+
+ fprintf(svgfile, "<text transform=\"translate(%4.8f,%4.8f)\" font-size=\"1.25pt\">%s</text>\n",
+ 10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model());
+}
+
+void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name)
+{
+ double width;
+
+ if (!svgfile)
+ return;
+
+
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), cpu2y(cpu));
+ fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+ time2pixels(end)-time2pixels(start), SLOT_MULT+SLOT_HEIGHT, type);
+ width = time2pixels(end)-time2pixels(start);
+ if (width > 6)
+ width = 6;
+
+ width = round_text_size(width);
+
+ if (width > MIN_TEXT_SIZE)
+ fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%3.8fpt\">%s</text>\n",
+ width, name);
+
+ fprintf(svgfile, "</g>\n");
+}
+
+void svg_cstate(int cpu, u64 start, u64 end, int type)
+{
+ double width;
+ char style[128];
+
+ if (!svgfile)
+ return;
+
+
+ if (type > 6)
+ type = 6;
+ sprintf(style, "c%i", type);
+
+ fprintf(svgfile, "<rect class=\"%s\" x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\"/>\n",
+ style,
+ time2pixels(start), time2pixels(end)-time2pixels(start),
+ cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
+
+ width = (time2pixels(end)-time2pixels(start))/2.0;
+ if (width > 6)
+ width = 6;
+
+ width = round_text_size(width);
+
+ if (width > MIN_TEXT_SIZE)
+ fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"%3.8fpt\">C%i</text>\n",
+ time2pixels(start), cpu2y(cpu)+width, width, type);
+}
+
+static char *HzToHuman(unsigned long hz)
+{
+ static char buffer[1024];
+ unsigned long long Hz;
+
+ memset(buffer, 0, 1024);
+
+ Hz = hz;
+
+ /* default: just put the number in */
+ sprintf(buffer, "%9lli", Hz);
+
+ if (Hz > 1000)
+ sprintf(buffer, " %6lli Mhz", (Hz+500)/1000);
+
+ if (Hz > 1500000)
+ sprintf(buffer, " %6.2f Ghz", (Hz+5000.0)/1000000);
+
+ if (Hz == turbo_frequency)
+ sprintf(buffer, "Turbo");
+
+ return buffer;
+}
+
+void svg_pstate(int cpu, u64 start, u64 end, u64 freq)
+{
+ double height = 0;
+
+ if (!svgfile)
+ return;
+
+ if (max_freq)
+ height = freq * 1.0 / max_freq * (SLOT_HEIGHT + SLOT_MULT);
+ height = 1 + cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - height;
+ fprintf(svgfile, "<line x1=\"%4.8f\" x2=\"%4.8f\" y1=\"%4.1f\" y2=\"%4.1f\" class=\"pstate\"/>\n",
+ time2pixels(start), time2pixels(end), height, height);
+ fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"0.25pt\">%s</text>\n",
+ time2pixels(start), height+0.9, HzToHuman(freq));
+
+}
+
+
+void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2)
+{
+ double height;
+
+ if (!svgfile)
+ return;
+
+
+ if (row1 < row2) {
+ if (row1) {
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
+ if (desc2)
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
+ time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_HEIGHT/48, desc2);
+ }
+ if (row2) {
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row2 * SLOT_MULT);
+ if (desc1)
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
+ time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, desc1);
+ }
+ } else {
+ if (row2) {
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
+ if (desc1)
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
+ time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/48, desc1);
+ }
+ if (row1) {
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row1 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row1 * SLOT_MULT);
+ if (desc2)
+ fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
+ time2pixels(start), row1 * SLOT_MULT - SLOT_HEIGHT/32, desc2);
+ }
+ }
+ height = row1 * SLOT_MULT;
+ if (row2 > row1)
+ height += SLOT_HEIGHT;
+ if (row1)
+ fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
+ time2pixels(start), height);
+}
+
+void svg_wakeline(u64 start, int row1, int row2)
+{
+ double height;
+
+ if (!svgfile)
+ return;
+
+
+ if (row1 < row2)
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT);
+ else
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+ time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT);
+
+ height = row1 * SLOT_MULT;
+ if (row2 > row1)
+ height += SLOT_HEIGHT;
+ fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
+ time2pixels(start), height);
+}
+
+void svg_interrupt(u64 start, int row)
+{
+ if (!svgfile)
+ return;
+
+ fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
+ time2pixels(start), row * SLOT_MULT);
+ fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
+ time2pixels(start), row * SLOT_MULT + SLOT_HEIGHT);
+}
+
+void svg_text(int Yslot, u64 start, const char *text)
+{
+ if (!svgfile)
+ return;
+
+ fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
+ time2pixels(start), Yslot * SLOT_MULT+SLOT_HEIGHT/2, text);
+}
+
+static void svg_legenda_box(int X, const char *text, const char *style)
+{
+ double boxsize;
+ boxsize = SLOT_HEIGHT / 2;
+
+ fprintf(svgfile, "<rect x=\"%i\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+ X, boxsize, boxsize, style);
+ fprintf(svgfile, "<text transform=\"translate(%4.8f, %4.8f)\" font-size=\"%4.8fpt\">%s</text>\n",
+ X + boxsize + 5, boxsize, 0.8 * boxsize, text);
+}
+
+void svg_legenda(void)
+{
+ if (!svgfile)
+ return;
+
+ svg_legenda_box(0, "Running", "sample");
+ svg_legenda_box(100, "Idle","rect.c1");
+ svg_legenda_box(200, "Deeper Idle", "rect.c3");
+ svg_legenda_box(350, "Deepest Idle", "rect.c6");
+ svg_legenda_box(550, "Sleeping", "process2");
+ svg_legenda_box(650, "Waiting for cpu", "waiting");
+ svg_legenda_box(800, "Blocked on IO", "blocked");
+}
+
+void svg_time_grid(void)
+{
+ u64 i;
+
+ if (!svgfile)
+ return;
+
+ i = first_time;
+ while (i < last_time) {
+ int color = 220;
+ double thickness = 0.075;
+ if ((i % 100000000) == 0) {
+ thickness = 0.5;
+ color = 192;
+ }
+ if ((i % 1000000000) == 0) {
+ thickness = 2.0;
+ color = 128;
+ }
+
+ fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%llu\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
+ time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness);
+
+ i += 10000000;
+ }
+}
+
+void svg_close(void)
+{
+ if (svgfile) {
+ fprintf(svgfile, "</svg>\n");
+ fclose(svgfile);
+ svgfile = NULL;
+ }
+}
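The intended calling sequence is: open_svg() once, then one svg_cpu_box()/svg_process()/svg_sample()/svg_waiting() call per box to draw, and finally the grid, legend and svg_close(). A compressed sketch with made-up nanosecond timestamps and frequency values (not part of the patch):

#include "svghelper.h"

static void write_minimal_chart(void)
{
        open_svg("output.svg", 1 /* cpus */, 4 /* rows */,
                 0ULL, 2000000000ULL);                  /* 0s .. 2s */

        svg_cpu_box(0, 3000000, 2800000);               /* illustrative freqs */
        svg_process(0, 100000000ULL, 900000000ULL, "process", "perf");
        svg_sample(0, 0, 100000000ULL, 350000000ULL);   /* running on CPU 0 */
        svg_waiting(0, 350000000ULL, 500000000ULL);     /* waiting for CPU */

        svg_time_grid();
        svg_legenda();
        svg_close();
}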
diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h
new file mode 100644
index 00000000..e0781989
--- /dev/null
+++ b/tools/perf/util/svghelper.h
@@ -0,0 +1,28 @@
+#ifndef __PERF_SVGHELPER_H
+#define __PERF_SVGHELPER_H
+
+#include "types.h"
+
+extern void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end);
+extern void svg_box(int Yslot, u64 start, u64 end, const char *type);
+extern void svg_sample(int Yslot, int cpu, u64 start, u64 end);
+extern void svg_waiting(int Yslot, u64 start, u64 end);
+extern void svg_cpu_box(int cpu, u64 max_frequency, u64 turbo_frequency);
+
+
+extern void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name);
+extern void svg_cstate(int cpu, u64 start, u64 end, int type);
+extern void svg_pstate(int cpu, u64 start, u64 end, u64 freq);
+
+
+extern void svg_time_grid(void);
+extern void svg_legenda(void);
+extern void svg_wakeline(u64 start, int row1, int row2);
+extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2);
+extern void svg_interrupt(u64 start, int row);
+extern void svg_text(int Yslot, u64 start, const char *text);
+extern void svg_close(void);
+
+extern int svg_page_width;
+
+#endif /* __PERF_SVGHELPER_H */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
new file mode 100644
index 00000000..b2f5ae97
--- /dev/null
+++ b/tools/perf/util/symbol.c
@@ -0,0 +1,2459 @@
+#define _GNU_SOURCE
+#include <ctype.h>
+#include <dirent.h>
+#include <errno.h>
+#include <libgen.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/param.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include "build-id.h"
+#include "debug.h"
+#include "symbol.h"
+#include "strlist.h"
+
+#include <libelf.h>
+#include <gelf.h>
+#include <elf.h>
+#include <limits.h>
+#include <sys/utsname.h>
+
+#ifndef NT_GNU_BUILD_ID
+#define NT_GNU_BUILD_ID 3
+#endif
+
+static bool dso__build_id_equal(const struct dso *self, u8 *build_id);
+static int elf_read_build_id(Elf *elf, void *bf, size_t size);
+static void dsos__add(struct list_head *head, struct dso *dso);
+static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
+static int dso__load_kernel_sym(struct dso *self, struct map *map,
+ symbol_filter_t filter);
+static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
+ symbol_filter_t filter);
+static int vmlinux_path__nr_entries;
+static char **vmlinux_path;
+
+struct symbol_conf symbol_conf = {
+ .exclude_other = true,
+ .use_modules = true,
+ .try_vmlinux_path = true,
+};
+
+int dso__name_len(const struct dso *self)
+{
+ if (verbose)
+ return self->long_name_len;
+
+ return self->short_name_len;
+}
+
+bool dso__loaded(const struct dso *self, enum map_type type)
+{
+ return self->loaded & (1 << type);
+}
+
+bool dso__sorted_by_name(const struct dso *self, enum map_type type)
+{
+ return self->sorted_by_name & (1 << type);
+}
+
+static void dso__set_sorted_by_name(struct dso *self, enum map_type type)
+{
+ self->sorted_by_name |= (1 << type);
+}
+
+bool symbol_type__is_a(char symbol_type, enum map_type map_type)
+{
+ switch (map_type) {
+ case MAP__FUNCTION:
+ return symbol_type == 'T' || symbol_type == 'W';
+ case MAP__VARIABLE:
+ return symbol_type == 'D' || symbol_type == 'd';
+ default:
+ return false;
+ }
+}
+
+static void symbols__fixup_end(struct rb_root *self)
+{
+ struct rb_node *nd, *prevnd = rb_first(self);
+ struct symbol *curr, *prev;
+
+ if (prevnd == NULL)
+ return;
+
+ curr = rb_entry(prevnd, struct symbol, rb_node);
+
+ for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
+ prev = curr;
+ curr = rb_entry(nd, struct symbol, rb_node);
+
+ if (prev->end == prev->start)
+ prev->end = curr->start - 1;
+ }
+
+ /* Last entry */
+ if (curr->end == curr->start)
+ curr->end = roundup(curr->start, 4096);
+}
+
+static void __map_groups__fixup_end(struct map_groups *self, enum map_type type)
+{
+ struct map *prev, *curr;
+ struct rb_node *nd, *prevnd = rb_first(&self->maps[type]);
+
+ if (prevnd == NULL)
+ return;
+
+ curr = rb_entry(prevnd, struct map, rb_node);
+
+ for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
+ prev = curr;
+ curr = rb_entry(nd, struct map, rb_node);
+ prev->end = curr->start - 1;
+ }
+
+ /*
+ * We still don't have the actual symbols, so guess the
+ * last map's final address.
+ */
+ curr->end = ~0UL;
+}
+
+static void map_groups__fixup_end(struct map_groups *self)
+{
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ __map_groups__fixup_end(self, i);
+}
+
+static struct symbol *symbol__new(u64 start, u64 len, u8 binding,
+ const char *name)
+{
+ size_t namelen = strlen(name) + 1;
+ struct symbol *self = calloc(1, (symbol_conf.priv_size +
+ sizeof(*self) + namelen));
+ if (self == NULL)
+ return NULL;
+
+ if (symbol_conf.priv_size)
+ self = ((void *)self) + symbol_conf.priv_size;
+
+ self->start = start;
+ self->end = len ? start + len - 1 : start;
+ self->binding = binding;
+ self->namelen = namelen - 1;
+
+ pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
+
+ memcpy(self->name, name, namelen);
+
+ return self;
+}
+
+void symbol__delete(struct symbol *self)
+{
+ free(((void *)self) - symbol_conf.priv_size);
+}
+
+static size_t symbol__fprintf(struct symbol *self, FILE *fp)
+{
+ return fprintf(fp, " %llx-%llx %c %s\n",
+ self->start, self->end,
+ self->binding == STB_GLOBAL ? 'g' :
+ self->binding == STB_LOCAL ? 'l' : 'w',
+ self->name);
+}
+
+void dso__set_long_name(struct dso *self, char *name)
+{
+ if (name == NULL)
+ return;
+ self->long_name = name;
+ self->long_name_len = strlen(name);
+}
+
+static void dso__set_short_name(struct dso *self, const char *name)
+{
+ if (name == NULL)
+ return;
+ self->short_name = name;
+ self->short_name_len = strlen(name);
+}
+
+static void dso__set_basename(struct dso *self)
+{
+ dso__set_short_name(self, basename(self->long_name));
+}
+
+struct dso *dso__new(const char *name)
+{
+ struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1);
+
+ if (self != NULL) {
+ int i;
+ strcpy(self->name, name);
+ dso__set_long_name(self, self->name);
+ dso__set_short_name(self, self->name);
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ self->symbols[i] = self->symbol_names[i] = RB_ROOT;
+ self->slen_calculated = 0;
+ self->origin = DSO__ORIG_NOT_FOUND;
+ self->loaded = 0;
+ self->sorted_by_name = 0;
+ self->has_build_id = 0;
+ self->kernel = DSO_TYPE_USER;
+ INIT_LIST_HEAD(&self->node);
+ }
+
+ return self;
+}
+
+static void symbols__delete(struct rb_root *self)
+{
+ struct symbol *pos;
+ struct rb_node *next = rb_first(self);
+
+ while (next) {
+ pos = rb_entry(next, struct symbol, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, self);
+ symbol__delete(pos);
+ }
+}
+
+void dso__delete(struct dso *self)
+{
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ symbols__delete(&self->symbols[i]);
+ if (self->sname_alloc)
+ free((char *)self->short_name);
+ if (self->lname_alloc)
+ free(self->long_name);
+ free(self);
+}
+
+void dso__set_build_id(struct dso *self, void *build_id)
+{
+ memcpy(self->build_id, build_id, sizeof(self->build_id));
+ self->has_build_id = 1;
+}
+
+static void symbols__insert(struct rb_root *self, struct symbol *sym)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ const u64 ip = sym->start;
+ struct symbol *s;
+
+ while (*p != NULL) {
+ parent = *p;
+ s = rb_entry(parent, struct symbol, rb_node);
+ if (ip < s->start)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&sym->rb_node, parent, p);
+ rb_insert_color(&sym->rb_node, self);
+}
+
+static struct symbol *symbols__find(struct rb_root *self, u64 ip)
+{
+ struct rb_node *n;
+
+ if (self == NULL)
+ return NULL;
+
+ n = self->rb_node;
+
+ while (n) {
+ struct symbol *s = rb_entry(n, struct symbol, rb_node);
+
+ if (ip < s->start)
+ n = n->rb_left;
+ else if (ip > s->end)
+ n = n->rb_right;
+ else
+ return s;
+ }
+
+ return NULL;
+}
+
+struct symbol_name_rb_node {
+ struct rb_node rb_node;
+ struct symbol sym;
+};
+
+static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s;
+
+ while (*p != NULL) {
+ parent = *p;
+ s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
+ if (strcmp(sym->name, s->sym.name) < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&symn->rb_node, parent, p);
+ rb_insert_color(&symn->rb_node, self);
+}
+
+static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(source); nd; nd = rb_next(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+ symbols__insert_by_name(self, pos);
+ }
+}
+
+static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name)
+{
+ struct rb_node *n;
+
+ if (self == NULL)
+ return NULL;
+
+ n = self->rb_node;
+
+ while (n) {
+ struct symbol_name_rb_node *s;
+ int cmp;
+
+ s = rb_entry(n, struct symbol_name_rb_node, rb_node);
+ cmp = strcmp(name, s->sym.name);
+
+ if (cmp < 0)
+ n = n->rb_left;
+ else if (cmp > 0)
+ n = n->rb_right;
+ else
+ return &s->sym;
+ }
+
+ return NULL;
+}
+
+struct symbol *dso__find_symbol(struct dso *self,
+ enum map_type type, u64 addr)
+{
+ return symbols__find(&self->symbols[type], addr);
+}
+
+struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
+ const char *name)
+{
+ return symbols__find_by_name(&self->symbol_names[type], name);
+}
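Once a DSO's symbols are loaded (and, for the by-name variant, dso__sort_by_name() has been called to populate the name tree), the lookups above are plain rbtree searches. A usage sketch; the dso pointer and address are assumed to come from the caller, and printf plus the symbol/dso definitions are assumed to be in scope:

static void resolve_examples(struct dso *dso, u64 addr)
{
        struct symbol *sym = dso__find_symbol(dso, MAP__FUNCTION, addr);

        if (sym)
                printf("%#Lx is in %s\n", addr, sym->name);

        sym = dso__find_symbol_by_name(dso, MAP__FUNCTION, "schedule");
        if (sym)
                printf("schedule spans %#Lx-%#Lx\n", sym->start, sym->end);
}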
+
+void dso__sort_by_name(struct dso *self, enum map_type type)
+{
+ dso__set_sorted_by_name(self, type);
+ return symbols__sort_by_name(&self->symbol_names[type],
+ &self->symbols[type]);
+}
+
+int build_id__sprintf(const u8 *self, int len, char *bf)
+{
+ char *bid = bf;
+ const u8 *raw = self;
+ int i;
+
+ for (i = 0; i < len; ++i) {
+ sprintf(bid, "%02x", *raw);
+ ++raw;
+ bid += 2;
+ }
+
+ return raw - self;
+}
+
+size_t dso__fprintf_buildid(struct dso *self, FILE *fp)
+{
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id);
+ return fprintf(fp, "%s", sbuild_id);
+}
+
+size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp)
+{
+ struct rb_node *nd;
+ size_t ret = fprintf(fp, "dso: %s (", self->short_name);
+
+ if (self->short_name != self->long_name)
+ ret += fprintf(fp, "%s, ", self->long_name);
+ ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
+ self->loaded ? "" : "NOT ");
+ ret += dso__fprintf_buildid(self, fp);
+ ret += fprintf(fp, ")\n");
+ for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+ ret += symbol__fprintf(pos, fp);
+ }
+
+ return ret;
+}
+
+int kallsyms__parse(const char *filename, void *arg,
+ int (*process_symbol)(void *arg, const char *name,
+ char type, u64 start))
+{
+ char *line = NULL;
+ size_t n;
+ int err = 0;
+ FILE *file = fopen(filename, "r");
+
+ if (file == NULL)
+ goto out_failure;
+
+ while (!feof(file)) {
+ u64 start;
+ int line_len, len;
+ char symbol_type;
+ char *symbol_name;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0 || !line)
+ break;
+
+ line[--line_len] = '\0'; /* \n */
+
+ len = hex2u64(line, &start);
+
+ len++;
+ if (len + 2 >= line_len)
+ continue;
+
+ symbol_type = toupper(line[len]);
+ symbol_name = line + len + 2;
+
+ err = process_symbol(arg, symbol_name, symbol_type, start);
+ if (err)
+ break;
+ }
+
+ free(line);
+ fclose(file);
+ return err;
+
+out_failure:
+ return -1;
+}
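kallsyms__parse() only tokenizes each line into (name, type, start) and defers all policy to the callback. A trivial consumer that counts function symbols could look like this (illustrative only; __used is the annotation this patch already uses for unused parameters):

static int count_function_symbol(void *arg, const char *name __used,
                                 char type, u64 start __used)
{
        int *nr = arg;

        if (symbol_type__is_a(type, MAP__FUNCTION))
                (*nr)++;
        return 0;
}

static int nr_kallsyms_functions(void)
{
        int nr = 0;

        if (kallsyms__parse("/proc/kallsyms", &nr, count_function_symbol) < 0)
                return -1;
        return nr;
}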
+
+struct process_kallsyms_args {
+ struct map *map;
+ struct dso *dso;
+};
+
+static u8 kallsyms2elf_type(char type)
+{
+ if (type == 'W')
+ return STB_WEAK;
+
+ return isupper(type) ? STB_GLOBAL : STB_LOCAL;
+}
+
+static int map__process_kallsym_symbol(void *arg, const char *name,
+ char type, u64 start)
+{
+ struct symbol *sym;
+ struct process_kallsyms_args *a = arg;
+ struct rb_root *root = &a->dso->symbols[a->map->type];
+
+ if (!symbol_type__is_a(type, a->map->type))
+ return 0;
+
+ /*
+ * Will fix up the end later, when we have all symbols sorted.
+ */
+ sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
+
+ if (sym == NULL)
+ return -ENOMEM;
+ /*
+ * We will pass the symbols to the filter later, in
+ * dso__split_kallsyms, when we have split the maps per module
+ */
+ symbols__insert(root, sym);
+
+ return 0;
+}
+
+/*
+ * Loads the function entries in /proc/kallsyms into kernel_map->dso,
+ * so that in the next step we can set the symbol ->end addresses and then
+ * call dso__split_kallsyms.
+ */
+static int dso__load_all_kallsyms(struct dso *self, const char *filename,
+ struct map *map)
+{
+ struct process_kallsyms_args args = { .map = map, .dso = self, };
+ return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
+}
+
+/*
+ * Split the symbols into maps, making sure there are no overlaps, i.e. the
+ * kernel range is broken into several maps, named [kernel].N, as we don't have
+ * the original ELF section names that vmlinux has.
+ */
+static int dso__split_kallsyms(struct dso *self, struct map *map,
+ symbol_filter_t filter)
+{
+ struct map_groups *kmaps = map__kmap(map)->kmaps;
+ struct machine *machine = kmaps->machine;
+ struct map *curr_map = map;
+ struct symbol *pos;
+ int count = 0;
+ struct rb_root *root = &self->symbols[map->type];
+ struct rb_node *next = rb_first(root);
+ int kernel_range = 0;
+
+ while (next) {
+ char *module;
+
+ pos = rb_entry(next, struct symbol, rb_node);
+ next = rb_next(&pos->rb_node);
+
+ module = strchr(pos->name, '\t');
+ if (module) {
+ if (!symbol_conf.use_modules)
+ goto discard_symbol;
+
+ *module++ = '\0';
+
+ if (strcmp(curr_map->dso->short_name, module)) {
+ if (curr_map != map &&
+ self->kernel == DSO_TYPE_GUEST_KERNEL &&
+ machine__is_default_guest(machine)) {
+ /*
+ * We assume all symbols of a module are
+ * contiguous in kallsyms, so curr_map
+ * points to a module and all its
+ * symbols are in its kmap. Mark it as
+ * loaded.
+ */
+ dso__set_loaded(curr_map->dso,
+ curr_map->type);
+ }
+
+ curr_map = map_groups__find_by_name(kmaps,
+ map->type, module);
+ if (curr_map == NULL) {
+ pr_debug("%s/proc/{kallsyms,modules} "
+ "inconsistency while looking "
+ "for \"%s\" module!\n",
+ machine->root_dir, module);
+ curr_map = map;
+ goto discard_symbol;
+ }
+
+ if (curr_map->dso->loaded &&
+ !machine__is_default_guest(machine))
+ goto discard_symbol;
+ }
+ /*
+ * So that we look just like we get from .ko files,
+ * i.e. not prelinked, relative to map->start.
+ */
+ pos->start = curr_map->map_ip(curr_map, pos->start);
+ pos->end = curr_map->map_ip(curr_map, pos->end);
+ } else if (curr_map != map) {
+ char dso_name[PATH_MAX];
+ struct dso *dso;
+
+ if (self->kernel == DSO_TYPE_GUEST_KERNEL)
+ snprintf(dso_name, sizeof(dso_name),
+ "[guest.kernel].%d",
+ kernel_range++);
+ else
+ snprintf(dso_name, sizeof(dso_name),
+ "[kernel].%d",
+ kernel_range++);
+
+ dso = dso__new(dso_name);
+ if (dso == NULL)
+ return -1;
+
+ dso->kernel = self->kernel;
+
+ curr_map = map__new2(pos->start, dso, map->type);
+ if (curr_map == NULL) {
+ dso__delete(dso);
+ return -1;
+ }
+
+ curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
+ map_groups__insert(kmaps, curr_map);
+ ++kernel_range;
+ }
+
+ if (filter && filter(curr_map, pos)) {
+discard_symbol: rb_erase(&pos->rb_node, root);
+ symbol__delete(pos);
+ } else {
+ if (curr_map != map) {
+ rb_erase(&pos->rb_node, root);
+ symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
+ }
+ count++;
+ }
+ }
+
+ if (curr_map != map &&
+ self->kernel == DSO_TYPE_GUEST_KERNEL &&
+ machine__is_default_guest(kmaps->machine)) {
+ dso__set_loaded(curr_map->dso, curr_map->type);
+ }
+
+ return count;
+}
+
+int dso__load_kallsyms(struct dso *self, const char *filename,
+ struct map *map, symbol_filter_t filter)
+{
+ if (dso__load_all_kallsyms(self, filename, map) < 0)
+ return -1;
+
+ symbols__fixup_end(&self->symbols[map->type]);
+ if (self->kernel == DSO_TYPE_GUEST_KERNEL)
+ self->origin = DSO__ORIG_GUEST_KERNEL;
+ else
+ self->origin = DSO__ORIG_KERNEL;
+
+ return dso__split_kallsyms(self, map, filter);
+}
+
+static int dso__load_perf_map(struct dso *self, struct map *map,
+ symbol_filter_t filter)
+{
+ char *line = NULL;
+ size_t n;
+ FILE *file;
+ int nr_syms = 0;
+
+ file = fopen(self->long_name, "r");
+ if (file == NULL)
+ goto out_failure;
+
+ while (!feof(file)) {
+ u64 start, size;
+ struct symbol *sym;
+ int line_len, len;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0)
+ break;
+
+ if (!line)
+ goto out_failure;
+
+ line[--line_len] = '\0'; /* \n */
+
+ len = hex2u64(line, &start);
+
+ len++;
+ if (len + 2 >= line_len)
+ continue;
+
+ len += hex2u64(line + len, &size);
+
+ len++;
+ if (len + 2 >= line_len)
+ continue;
+
+ sym = symbol__new(start, size, STB_GLOBAL, line + len);
+
+ if (sym == NULL)
+ goto out_delete_line;
+
+ if (filter && filter(map, sym))
+ symbol__delete(sym);
+ else {
+ symbols__insert(&self->symbols[map->type], sym);
+ nr_syms++;
+ }
+ }
+
+ free(line);
+ fclose(file);
+
+ return nr_syms;
+
+out_delete_line:
+ free(line);
+out_failure:
+ return -1;
+}
+
+/**
+ * elf_symtab__for_each_symbol - iterate through all the symbols
+ *
+ * @syms: Elf_Data of the symbol table to iterate
+ * @nr_syms: number of symbols in @syms
+ * @idx: uint32_t iteration index
+ * @sym: GElf_Sym iterator
+ */
+#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
+ for (idx = 0, gelf_getsym(syms, idx, &sym);\
+ idx < nr_syms; \
+ idx++, gelf_getsym(syms, idx, &sym))
+
+static inline uint8_t elf_sym__type(const GElf_Sym *sym)
+{
+ return GELF_ST_TYPE(sym->st_info);
+}
+
+static inline int elf_sym__is_function(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_FUNC &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF;
+}
+
+static inline bool elf_sym__is_object(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_OBJECT &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF;
+}
+
+static inline int elf_sym__is_label(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_NOTYPE &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF &&
+ sym->st_shndx != SHN_ABS;
+}
+
+static inline const char *elf_sec__name(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return secstrs->d_buf + shdr->sh_name;
+}
+
+static inline int elf_sec__is_text(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
+}
+
+static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
+}
+
+static inline const char *elf_sym__name(const GElf_Sym *sym,
+ const Elf_Data *symstrs)
+{
+ return symstrs->d_buf + sym->st_name;
+}
+
+static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
+ GElf_Shdr *shp, const char *name,
+ size_t *idx)
+{
+ Elf_Scn *sec = NULL;
+ size_t cnt = 1;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ char *str;
+
+ gelf_getshdr(sec, shp);
+ str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
+ if (!strcmp(name, str)) {
+ if (idx)
+ *idx = cnt;
+ break;
+ }
+ ++cnt;
+ }
+
+ return sec;
+}
+
+#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
+ for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
+ idx < nr_entries; \
+ ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))
+
+#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
+ for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
+ idx < nr_entries; \
+ ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))
+
+/*
+ * We need to check if we have a .dynsym, so that we can handle the
+ * .plt, synthesizing its symbols, which aren't in the symtabs (be it
+ * .dynsym or .symtab).
+ * Always look at the original dso, not at debuginfo packages, which
+ * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
+ */
+static int dso__synthesize_plt_symbols(struct dso *self, struct map *map,
+ symbol_filter_t filter)
+{
+ uint32_t nr_rel_entries, idx;
+ GElf_Sym sym;
+ u64 plt_offset;
+ GElf_Shdr shdr_plt;
+ struct symbol *f;
+ GElf_Shdr shdr_rel_plt, shdr_dynsym;
+ Elf_Data *reldata, *syms, *symstrs;
+ Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
+ size_t dynsym_idx;
+ GElf_Ehdr ehdr;
+ char sympltname[1024];
+ Elf *elf;
+ int nr = 0, symidx, fd, err = 0;
+
+ fd = open(self->long_name, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ goto out_close;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ goto out_elf_end;
+
+ scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym,
+ ".dynsym", &dynsym_idx);
+ if (scn_dynsym == NULL)
+ goto out_elf_end;
+
+ scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
+ ".rela.plt", NULL);
+ if (scn_plt_rel == NULL) {
+ scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
+ ".rel.plt", NULL);
+ if (scn_plt_rel == NULL)
+ goto out_elf_end;
+ }
+
+ err = -1;
+
+ if (shdr_rel_plt.sh_link != dynsym_idx)
+ goto out_elf_end;
+
+ if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
+ goto out_elf_end;
+
+ /*
+ * Fetch the relocation section to find the indexes into the GOT
+ * and the symbols in the .dynsym they refer to.
+ */
+ reldata = elf_getdata(scn_plt_rel, NULL);
+ if (reldata == NULL)
+ goto out_elf_end;
+
+ syms = elf_getdata(scn_dynsym, NULL);
+ if (syms == NULL)
+ goto out_elf_end;
+
+ scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
+ if (scn_symstrs == NULL)
+ goto out_elf_end;
+
+ symstrs = elf_getdata(scn_symstrs, NULL);
+ if (symstrs == NULL)
+ goto out_elf_end;
+
+ nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
+ plt_offset = shdr_plt.sh_offset;
+
+ if (shdr_rel_plt.sh_type == SHT_RELA) {
+ GElf_Rela pos_mem, *pos;
+
+ elf_section__for_each_rela(reldata, pos, pos_mem, idx,
+ nr_rel_entries) {
+ symidx = GELF_R_SYM(pos->r_info);
+ plt_offset += shdr_plt.sh_entsize;
+ gelf_getsym(syms, symidx, &sym);
+ snprintf(sympltname, sizeof(sympltname),
+ "%s@plt", elf_sym__name(&sym, symstrs));
+
+ f = symbol__new(plt_offset, shdr_plt.sh_entsize,
+ STB_GLOBAL, sympltname);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&self->symbols[map->type], f);
+ ++nr;
+ }
+ }
+ } else if (shdr_rel_plt.sh_type == SHT_REL) {
+ GElf_Rel pos_mem, *pos;
+ elf_section__for_each_rel(reldata, pos, pos_mem, idx,
+ nr_rel_entries) {
+ symidx = GELF_R_SYM(pos->r_info);
+ plt_offset += shdr_plt.sh_entsize;
+ gelf_getsym(syms, symidx, &sym);
+ snprintf(sympltname, sizeof(sympltname),
+ "%s@plt", elf_sym__name(&sym, symstrs));
+
+ f = symbol__new(plt_offset, shdr_plt.sh_entsize,
+ STB_GLOBAL, sympltname);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&self->symbols[map->type], f);
+ ++nr;
+ }
+ }
+ }
+
+ err = 0;
+out_elf_end:
+ elf_end(elf);
+out_close:
+ close(fd);
+
+ if (err == 0)
+ return nr;
+out:
+ pr_debug("%s: problems reading %s PLT info.\n",
+ __func__, self->long_name);
+ return 0;
+}
+
+static bool elf_sym__is_a(GElf_Sym *self, enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sym__is_function(self);
+ case MAP__VARIABLE:
+ return elf_sym__is_object(self);
+ default:
+ return false;
+ }
+}
+
+static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sec__is_text(self, secstrs);
+ case MAP__VARIABLE:
+ return elf_sec__is_data(self, secstrs);
+ default:
+ return false;
+ }
+}
+
+static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
+{
+ Elf_Scn *sec = NULL;
+ GElf_Shdr shdr;
+ size_t cnt = 1;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ gelf_getshdr(sec, &shdr);
+
+ if ((addr >= shdr.sh_addr) &&
+ (addr < (shdr.sh_addr + shdr.sh_size)))
+ return cnt;
+
+ ++cnt;
+ }
+
+ return -1;
+}
+
+static int dso__load_sym(struct dso *self, struct map *map, const char *name,
+ int fd, symbol_filter_t filter, int kmodule,
+ int want_symtab)
+{
+ struct kmap *kmap = self->kernel ? map__kmap(map) : NULL;
+ struct map *curr_map = map;
+ struct dso *curr_dso = self;
+ Elf_Data *symstrs, *secstrs;
+ uint32_t nr_syms;
+ int err = -1;
+ uint32_t idx;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr, opdshdr;
+ Elf_Data *syms, *opddata = NULL;
+ GElf_Sym sym;
+ Elf_Scn *sec, *sec_strndx, *opdsec;
+ Elf *elf;
+ int nr = 0;
+ size_t opdidx = 0;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL) {
+ pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
+ goto out_close;
+ }
+
+ if (gelf_getehdr(elf, &ehdr) == NULL) {
+ pr_debug("%s: cannot get elf header.\n", __func__);
+ goto out_elf_end;
+ }
+
+ /* Always reject images with a mismatched build-id: */
+ if (self->has_build_id) {
+ u8 build_id[BUILD_ID_SIZE];
+
+ if (elf_read_build_id(elf, build_id,
+ BUILD_ID_SIZE) != BUILD_ID_SIZE)
+ goto out_elf_end;
+
+ if (!dso__build_id_equal(self, build_id))
+ goto out_elf_end;
+ }
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL);
+ if (sec == NULL) {
+ if (want_symtab)
+ goto out_elf_end;
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL);
+ if (sec == NULL)
+ goto out_elf_end;
+ }
+
+ opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx);
+ if (opdsec)
+ opddata = elf_rawdata(opdsec, NULL);
+
+ syms = elf_getdata(sec, NULL);
+ if (syms == NULL)
+ goto out_elf_end;
+
+ sec = elf_getscn(elf, shdr.sh_link);
+ if (sec == NULL)
+ goto out_elf_end;
+
+ symstrs = elf_getdata(sec, NULL);
+ if (symstrs == NULL)
+ goto out_elf_end;
+
+ sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
+ if (sec_strndx == NULL)
+ goto out_elf_end;
+
+ secstrs = elf_getdata(sec_strndx, NULL);
+ if (secstrs == NULL)
+ goto out_elf_end;
+
+ nr_syms = shdr.sh_size / shdr.sh_entsize;
+
+ memset(&sym, 0, sizeof(sym));
+ if (self->kernel == DSO_TYPE_USER) {
+ self->adjust_symbols = (ehdr.e_type == ET_EXEC ||
+ elf_section_by_name(elf, &ehdr, &shdr,
+ ".gnu.prelink_undo",
+ NULL) != NULL);
+ } else self->adjust_symbols = 0;
+
+ elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
+ struct symbol *f;
+ const char *elf_name = elf_sym__name(&sym, symstrs);
+ char *demangled = NULL;
+ int is_label = elf_sym__is_label(&sym);
+ const char *section_name;
+
+ if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
+ strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
+ kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+
+ if (!is_label && !elf_sym__is_a(&sym, map->type))
+ continue;
+
+ /* Reject ARM ELF "mapping symbols": these aren't unique and
+ * don't identify functions, so they will only confuse the
+ * profile output: */
+ if (ehdr.e_machine == EM_ARM) {
+ if (!strcmp(elf_name, "$a") ||
+ !strcmp(elf_name, "$d") ||
+ !strcmp(elf_name, "$t"))
+ continue;
+ }
+
+ if (opdsec && sym.st_shndx == opdidx) {
+ u32 offset = sym.st_value - opdshdr.sh_addr;
+ u64 *opd = opddata->d_buf + offset;
+ sym.st_value = *opd;
+ sym.st_shndx = elf_addr_to_index(elf, sym.st_value);
+ }
+
+ sec = elf_getscn(elf, sym.st_shndx);
+ if (!sec)
+ goto out_elf_end;
+
+ gelf_getshdr(sec, &shdr);
+
+ if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
+ continue;
+
+ section_name = elf_sec__name(&shdr, secstrs);
+
+ if (self->kernel != DSO_TYPE_USER || kmodule) {
+ char dso_name[PATH_MAX];
+
+ if (strcmp(section_name,
+ (curr_dso->short_name +
+ self->short_name_len)) == 0)
+ goto new_symbol;
+
+ if (strcmp(section_name, ".text") == 0) {
+ curr_map = map;
+ curr_dso = self;
+ goto new_symbol;
+ }
+
+ snprintf(dso_name, sizeof(dso_name),
+ "%s%s", self->short_name, section_name);
+
+ curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
+ if (curr_map == NULL) {
+ u64 start = sym.st_value;
+
+ if (kmodule)
+ start += map->start + shdr.sh_offset;
+
+ curr_dso = dso__new(dso_name);
+ if (curr_dso == NULL)
+ goto out_elf_end;
+ curr_dso->kernel = self->kernel;
+ curr_map = map__new2(start, curr_dso,
+ map->type);
+ if (curr_map == NULL) {
+ dso__delete(curr_dso);
+ goto out_elf_end;
+ }
+ curr_map->map_ip = identity__map_ip;
+ curr_map->unmap_ip = identity__map_ip;
+ curr_dso->origin = self->origin;
+ map_groups__insert(kmap->kmaps, curr_map);
+ dsos__add(&self->node, curr_dso);
+ dso__set_loaded(curr_dso, map->type);
+ } else
+ curr_dso = curr_map->dso;
+
+ goto new_symbol;
+ }
+
+ if (curr_dso->adjust_symbols) {
+ pr_debug4("%s: adjusting symbol: st_value: %#Lx "
+ "sh_addr: %#Lx sh_offset: %#Lx\n", __func__,
+ (u64)sym.st_value, (u64)shdr.sh_addr,
+ (u64)shdr.sh_offset);
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ }
+ /*
+ * We need to figure out if the object was created from C++ sources;
+ * DWARF DW_compile_unit has this, but we don't always have access
+ * to it...
+ */
+ demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
+ if (demangled != NULL)
+ elf_name = demangled;
+new_symbol:
+ f = symbol__new(sym.st_value, sym.st_size,
+ GELF_ST_BIND(sym.st_info), elf_name);
+ free(demangled);
+ if (!f)
+ goto out_elf_end;
+
+ if (filter && filter(curr_map, f))
+ symbol__delete(f);
+ else {
+ symbols__insert(&curr_dso->symbols[curr_map->type], f);
+ nr++;
+ }
+ }
+
+ /*
+ * For misannotated, zeroed, ASM function sizes.
+ */
+ if (nr > 0) {
+ symbols__fixup_end(&self->symbols[map->type]);
+ if (kmap) {
+ /*
+ * We need to fixup this here too because we create new
+ * maps here, for things like vsyscall sections.
+ */
+ __map_groups__fixup_end(kmap->kmaps, map->type);
+ }
+ }
+ err = nr;
+out_elf_end:
+ elf_end(elf);
+out_close:
+ return err;
+}
+
+static bool dso__build_id_equal(const struct dso *self, u8 *build_id)
+{
+ return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0;
+}
+
+bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
+{
+ bool have_build_id = false;
+ struct dso *pos;
+
+ list_for_each_entry(pos, head, node) {
+ if (with_hits && !pos->hit)
+ continue;
+ if (pos->has_build_id) {
+ have_build_id = true;
+ continue;
+ }
+ if (filename__read_build_id(pos->long_name, pos->build_id,
+ sizeof(pos->build_id)) > 0) {
+ have_build_id = true;
+ pos->has_build_id = true;
+ }
+ }
+
+ return have_build_id;
+}
+
+/*
+ * Align offset to 4 bytes as needed for note name and descriptor data.
+ */
+#define NOTE_ALIGN(n) (((n) + 3) & -4U)
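+/* e.g. NOTE_ALIGN(5) == 8, NOTE_ALIGN(8) == 8: rounds n up to a multiple of 4 */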
+
+static int elf_read_build_id(Elf *elf, void *bf, size_t size)
+{
+ int err = -1;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ Elf_Data *data;
+ Elf_Scn *sec;
+ Elf_Kind ek;
+ void *ptr;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ ek = elf_kind(elf);
+ if (ek != ELF_K_ELF)
+ goto out;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL) {
+ pr_err("%s: cannot get elf header.\n", __func__);
+ goto out;
+ }
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".note.gnu.build-id", NULL);
+ if (sec == NULL) {
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".notes", NULL);
+ if (sec == NULL)
+ goto out;
+ }
+
+ data = elf_getdata(sec, NULL);
+ if (data == NULL)
+ goto out;
+
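+ /*
+ * Walk the notes: each entry is a GElf_Nhdr followed by the name and
+ * then the descriptor, both padded to 4-byte boundaries; stop at the
+ * GNU build-id note.
+ */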
+ ptr = data->d_buf;
+ while (ptr < (data->d_buf + data->d_size)) {
+ GElf_Nhdr *nhdr = ptr;
+ int namesz = NOTE_ALIGN(nhdr->n_namesz),
+ descsz = NOTE_ALIGN(nhdr->n_descsz);
+ const char *name;
+
+ ptr += sizeof(*nhdr);
+ name = ptr;
+ ptr += namesz;
+ if (nhdr->n_type == NT_GNU_BUILD_ID &&
+ nhdr->n_namesz == sizeof("GNU")) {
+ if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
+ memcpy(bf, ptr, BUILD_ID_SIZE);
+ err = BUILD_ID_SIZE;
+ break;
+ }
+ }
+ ptr += descsz;
+ }
+
+out:
+ return err;
+}
+
+int filename__read_build_id(const char *filename, void *bf, size_t size)
+{
+ int fd, err = -1;
+ Elf *elf;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL) {
+ pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
+ goto out_close;
+ }
+
+ err = elf_read_build_id(elf, bf, size);
+
+ elf_end(elf);
+out_close:
+ close(fd);
+out:
+ return err;
+}
+
+int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
+{
+ int fd, err = -1;
+
+ if (size < BUILD_ID_SIZE)
+ goto out;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ while (1) {
+ char bf[BUFSIZ];
+ GElf_Nhdr nhdr;
+ int namesz, descsz;
+
+ if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
+ break;
+
+ namesz = NOTE_ALIGN(nhdr.n_namesz);
+ descsz = NOTE_ALIGN(nhdr.n_descsz);
+ if (nhdr.n_type == NT_GNU_BUILD_ID &&
+ nhdr.n_namesz == sizeof("GNU")) {
+ if (read(fd, bf, namesz) != namesz)
+ break;
+ if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
+ if (read(fd, build_id,
+ BUILD_ID_SIZE) == BUILD_ID_SIZE) {
+ err = 0;
+ break;
+ }
+ } else if (read(fd, bf, descsz) != descsz)
+ break;
+ } else {
+ int n = namesz + descsz;
+ if (read(fd, bf, n) != n)
+ break;
+ }
+ }
+ close(fd);
+out:
+ return err;
+}
+
+char dso__symtab_origin(const struct dso *self)
+{
+ static const char origin[] = {
+ [DSO__ORIG_KERNEL] = 'k',
+ [DSO__ORIG_JAVA_JIT] = 'j',
+ [DSO__ORIG_BUILD_ID_CACHE] = 'B',
+ [DSO__ORIG_FEDORA] = 'f',
+ [DSO__ORIG_UBUNTU] = 'u',
+ [DSO__ORIG_BUILDID] = 'b',
+ [DSO__ORIG_DSO] = 'd',
+ [DSO__ORIG_KMODULE] = 'K',
+ [DSO__ORIG_GUEST_KERNEL] = 'g',
+ [DSO__ORIG_GUEST_KMODULE] = 'G',
+ };
+
+ if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND)
+ return '!';
+ return origin[self->origin];
+}
+
+int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
+{
+ int size = PATH_MAX;
+ char *name;
+ int ret = -1;
+ int fd;
+ struct machine *machine;
+ const char *root_dir;
+ int want_symtab;
+
+ dso__set_loaded(self, map->type);
+
+ if (self->kernel == DSO_TYPE_KERNEL)
+ return dso__load_kernel_sym(self, map, filter);
+ else if (self->kernel == DSO_TYPE_GUEST_KERNEL)
+ return dso__load_guest_kernel_sym(self, map, filter);
+
+ if (map->groups && map->groups->machine)
+ machine = map->groups->machine;
+ else
+ machine = NULL;
+
+ name = malloc(size);
+ if (!name)
+ return -1;
+
+ self->adjust_symbols = 0;
+
+ if (strncmp(self->name, "/tmp/perf-", 10) == 0) {
+ ret = dso__load_perf_map(self, map, filter);
+ self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT :
+ DSO__ORIG_NOT_FOUND;
+ return ret;
+ }
+
+ /* Iterate over candidate debug images.
+ * On the first pass, only load images if they have a full symtab.
+ * Failing that, do a second pass where .dynsym is accepted as well.
+ */
+ for (self->origin = DSO__ORIG_BUILD_ID_CACHE, want_symtab = 1;
+ self->origin != DSO__ORIG_NOT_FOUND;
+ self->origin++) {
+ switch (self->origin) {
+ case DSO__ORIG_BUILD_ID_CACHE:
+ if (dso__build_id_filename(self, name, size) == NULL)
+ continue;
+ break;
+ case DSO__ORIG_FEDORA:
+ snprintf(name, size, "/usr/lib/debug%s.debug",
+ self->long_name);
+ break;
+ case DSO__ORIG_UBUNTU:
+ snprintf(name, size, "/usr/lib/debug%s",
+ self->long_name);
+ break;
+ case DSO__ORIG_BUILDID: {
+ char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+
+ if (!self->has_build_id)
+ continue;
+
+ build_id__sprintf(self->build_id,
+ sizeof(self->build_id),
+ build_id_hex);
+ snprintf(name, size,
+ "/usr/lib/debug/.build-id/%.2s/%s.debug",
+ build_id_hex, build_id_hex + 2);
+ }
+ break;
+ case DSO__ORIG_DSO:
+ snprintf(name, size, "%s", self->long_name);
+ break;
+ case DSO__ORIG_GUEST_KMODULE:
+ if (map->groups && map->groups->machine)
+ root_dir = map->groups->machine->root_dir;
+ else
+ root_dir = "";
+ snprintf(name, size, "%s%s", root_dir, self->long_name);
+ break;
+
+ default:
+ /*
+ * If we wanted a full symtab but no image had one,
+ * relax our requirements and repeat the search.
+ */
+ if (want_symtab) {
+ want_symtab = 0;
+ self->origin = DSO__ORIG_BUILD_ID_CACHE;
+ } else
+ continue;
+ }
+
+ /* Name is now the name of the next image to try */
+ fd = open(name, O_RDONLY);
+ if (fd < 0)
+ continue;
+
+ ret = dso__load_sym(self, map, name, fd, filter, 0,
+ want_symtab);
+ close(fd);
+
+ /*
+ * Some people seem to have debuginfo files _WITHOUT_ debug
+ * info!?!?
+ */
+ if (!ret)
+ continue;
+
+ if (ret > 0) {
+ int nr_plt = dso__synthesize_plt_symbols(self, map, filter);
+ if (nr_plt > 0)
+ ret += nr_plt;
+ break;
+ }
+ }
+
+ free(name);
+ if (ret < 0 && strstr(self->name, " (deleted)") != NULL)
+ return 0;
+ return ret;
+}
+
+struct map *map_groups__find_by_name(struct map_groups *self,
+ enum map_type type, const char *name)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
+ struct map *map = rb_entry(nd, struct map, rb_node);
+
+ if (map->dso && strcmp(map->dso->short_name, name) == 0)
+ return map;
+ }
+
+ return NULL;
+}
+
+static int dso__kernel_module_get_build_id(struct dso *self,
+ const char *root_dir)
+{
+ char filename[PATH_MAX];
+ /*
+ * kernel module short names are of the form "[module]" and
+ * we need just "module" here.
+ */
+ const char *name = self->short_name + 1;
+
+ snprintf(filename, sizeof(filename),
+ "%s/sys/module/%.*s/notes/.note.gnu.build-id",
+ root_dir, (int)strlen(name) - 1, name);
+
+ if (sysfs__read_build_id(filename, self->build_id,
+ sizeof(self->build_id)) == 0)
+ self->has_build_id = true;
+
+ return 0;
+}
+
+static int map_groups__set_modules_path_dir(struct map_groups *self,
+ const char *dir_name)
+{
+ struct dirent *dent;
+ DIR *dir = opendir(dir_name);
+ int ret = 0;
+
+ if (!dir) {
+ pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
+ return -1;
+ }
+
+ while ((dent = readdir(dir)) != NULL) {
+ char path[PATH_MAX];
+ struct stat st;
+
+ /* sshfs might return bad dent->d_type, so we have to stat */
+ sprintf(path, "%s/%s", dir_name, dent->d_name);
+ if (stat(path, &st))
+ continue;
+
+ if (S_ISDIR(st.st_mode)) {
+ if (!strcmp(dent->d_name, ".") ||
+ !strcmp(dent->d_name, ".."))
+ continue;
+
+ snprintf(path, sizeof(path), "%s/%s",
+ dir_name, dent->d_name);
+ ret = map_groups__set_modules_path_dir(self, path);
+ if (ret < 0)
+ goto out;
+ } else {
+ char *dot = strrchr(dent->d_name, '.'),
+ dso_name[PATH_MAX];
+ struct map *map;
+ char *long_name;
+
+ if (dot == NULL || strcmp(dot, ".ko"))
+ continue;
+ snprintf(dso_name, sizeof(dso_name), "[%.*s]",
+ (int)(dot - dent->d_name), dent->d_name);
+
+ strxfrchar(dso_name, '-', '_');
+ map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name);
+ if (map == NULL)
+ continue;
+
+ snprintf(path, sizeof(path), "%s/%s",
+ dir_name, dent->d_name);
+
+ long_name = strdup(path);
+ if (long_name == NULL) {
+ ret = -1;
+ goto out;
+ }
+ dso__set_long_name(map->dso, long_name);
+ map->dso->lname_alloc = 1;
+ dso__kernel_module_get_build_id(map->dso, "");
+ }
+ }
+
+out:
+ closedir(dir);
+ return ret;
+}
+
+static char *get_kernel_version(const char *root_dir)
+{
+ char version[PATH_MAX];
+ FILE *file;
+ char *name, *tmp;
+ const char *prefix = "Linux version ";
+
+ sprintf(version, "%s/proc/version", root_dir);
+ file = fopen(version, "r");
+ if (!file)
+ return NULL;
+
+ version[0] = '\0';
+ tmp = fgets(version, sizeof(version), file);
+ fclose(file);
+
+ name = strstr(version, prefix);
+ if (!name)
+ return NULL;
+ name += strlen(prefix);
+ tmp = strchr(name, ' ');
+ if (tmp)
+ *tmp = '\0';
+
+ return strdup(name);
+}
+
+static int machine__set_modules_path(struct machine *self)
+{
+ char *version;
+ char modules_path[PATH_MAX];
+
+ version = get_kernel_version(self->root_dir);
+ if (!version)
+ return -1;
+
+ snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
+ self->root_dir, version);
+ free(version);
+
+ return map_groups__set_modules_path_dir(&self->kmaps, modules_path);
+}
+
+/*
+ * Constructor variant for modules (where we know from /proc/modules where
+ * they are loaded) and for vmlinux, where we only know where it starts
+ * and ends after all the symbols have been loaded.
+ */
+static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
+{
+ struct map *self = calloc(1, (sizeof(*self) +
+ (dso->kernel ? sizeof(struct kmap) : 0)));
+ if (self != NULL) {
+ /*
+ * ->end will be filled after we load all the symbols
+ */
+ map__init(self, type, start, 0, 0, dso);
+ }
+
+ return self;
+}
+
+struct map *machine__new_module(struct machine *self, u64 start,
+ const char *filename)
+{
+ struct map *map;
+ struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename);
+
+ if (dso == NULL)
+ return NULL;
+
+ map = map__new2(start, dso, MAP__FUNCTION);
+ if (map == NULL)
+ return NULL;
+
+ if (machine__is_host(self))
+ dso->origin = DSO__ORIG_KMODULE;
+ else
+ dso->origin = DSO__ORIG_GUEST_KMODULE;
+ map_groups__insert(&self->kmaps, map);
+ return map;
+}
+
+static int machine__create_modules(struct machine *self)
+{
+ char *line = NULL;
+ size_t n;
+ FILE *file;
+ struct map *map;
+ const char *modules;
+ char path[PATH_MAX];
+
+ if (machine__is_default_guest(self))
+ modules = symbol_conf.default_guest_modules;
+ else {
+ sprintf(path, "%s/proc/modules", self->root_dir);
+ modules = path;
+ }
+
+ file = fopen(modules, "r");
+ if (file == NULL)
+ return -1;
+
+ while (!feof(file)) {
+ char name[PATH_MAX];
+ u64 start;
+ char *sep;
+ int line_len;
+
+ line_len = getline(&line, &n, file);
+ if (line_len < 0)
+ break;
+
+ if (!line)
+ goto out_failure;
+
+ line[--line_len] = '\0'; /* \n */
+
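+ /*
+ * /proc/modules lines end with the module's load address (e.g.
+ * "... Live 0xffffffffa0010000"), so the last 'x' in the line
+ * precedes the hex address we want.
+ */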
+ sep = strrchr(line, 'x');
+ if (sep == NULL)
+ continue;
+
+ hex2u64(sep + 1, &start);
+
+ sep = strchr(line, ' ');
+ if (sep == NULL)
+ continue;
+
+ *sep = '\0';
+
+ snprintf(name, sizeof(name), "[%s]", line);
+ map = machine__new_module(self, start, name);
+ if (map == NULL)
+ goto out_delete_line;
+ dso__kernel_module_get_build_id(map->dso, self->root_dir);
+ }
+
+ free(line);
+ fclose(file);
+
+ return machine__set_modules_path(self);
+
+out_delete_line:
+ free(line);
+out_failure:
+ return -1;
+}
+
+static int dso__load_vmlinux(struct dso *self, struct map *map,
+ const char *vmlinux, symbol_filter_t filter)
+{
+ int err = -1, fd;
+
+ fd = open(vmlinux, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ dso__set_loaded(self, map->type);
+ err = dso__load_sym(self, map, vmlinux, fd, filter, 0, 0);
+ close(fd);
+
+ if (err > 0)
+ pr_debug("Using %s for symbols\n", vmlinux);
+
+ return err;
+}
+
+int dso__load_vmlinux_path(struct dso *self, struct map *map,
+ symbol_filter_t filter)
+{
+ int i, err = 0;
+ char *filename;
+
+ pr_debug("Looking at the vmlinux_path (%d entries long)\n",
+ vmlinux_path__nr_entries + 1);
+
+ filename = dso__build_id_filename(self, NULL, 0);
+ if (filename != NULL) {
+ err = dso__load_vmlinux(self, map, filename, filter);
+ if (err > 0) {
+ dso__set_long_name(self, filename);
+ goto out;
+ }
+ free(filename);
+ }
+
+ for (i = 0; i < vmlinux_path__nr_entries; ++i) {
+ err = dso__load_vmlinux(self, map, vmlinux_path[i], filter);
+ if (err > 0) {
+ dso__set_long_name(self, strdup(vmlinux_path[i]));
+ break;
+ }
+ }
+out:
+ return err;
+}
+
+static int dso__load_kernel_sym(struct dso *self, struct map *map,
+ symbol_filter_t filter)
+{
+ int err;
+ const char *kallsyms_filename = NULL;
+ char *kallsyms_allocated_filename = NULL;
+ /*
+ * Step 1: if the user specified a vmlinux filename, use it and only
+ * it, reporting errors to the user if it cannot be used.
+ *
+ * For instance, when analysing an ARM perf.data file _without_ a
+ * build-id, or if the user specifies the wrong path to the right
+ * vmlinux file, obviously we can't fall back to another vmlinux (an
+ * x86_64 one, on the machine where the analysis is being performed,
+ * say), or worse, /proc/kallsyms.
+ *
+ * If the specified file _has_ a build-id and there is a build-id
+ * section in the perf.data file, we will still do the expected
+ * validation in dso__load_vmlinux and will bail out if they don't
+ * match.
+ */
+ if (symbol_conf.vmlinux_name != NULL) {
+ err = dso__load_vmlinux(self, map,
+ symbol_conf.vmlinux_name, filter);
+ if (err > 0) {
+ dso__set_long_name(self,
+ strdup(symbol_conf.vmlinux_name));
+ goto out_fixup;
+ }
+ return err;
+ }
+
+ if (vmlinux_path != NULL) {
+ err = dso__load_vmlinux_path(self, map, filter);
+ if (err > 0)
+ goto out_fixup;
+ }
+
+ /*
+ * If the kernel DSO was created while processing the build-id header
+ * table, we have a build-id, so check whether it matches the running
+ * kernel's and use it if it does.
+ */
+ if (self->has_build_id) {
+ u8 kallsyms_build_id[BUILD_ID_SIZE];
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
+ sizeof(kallsyms_build_id)) == 0) {
+ if (dso__build_id_equal(self, kallsyms_build_id)) {
+ kallsyms_filename = "/proc/kallsyms";
+ goto do_kallsyms;
+ }
+ }
+ /*
+ * Now look if we have it on the build-id cache in
+ * $HOME/.debug/[kernel.kallsyms].
+ */
+ build_id__sprintf(self->build_id, sizeof(self->build_id),
+ sbuild_id);
+
+ if (asprintf(&kallsyms_allocated_filename,
+ "%s/.debug/[kernel.kallsyms]/%s",
+ getenv("HOME"), sbuild_id) == -1) {
+ pr_err("Not enough memory for kallsyms file lookup\n");
+ return -1;
+ }
+
+ kallsyms_filename = kallsyms_allocated_filename;
+
+ if (access(kallsyms_filename, F_OK)) {
+ pr_err("No kallsyms or vmlinux with build-id %s "
+ "was found\n", sbuild_id);
+ free(kallsyms_allocated_filename);
+ return -1;
+ }
+ } else {
+ /*
+ * Last resort, if we don't have a build-id and couldn't find
+ * any vmlinux file, try the running kernel kallsyms table.
+ */
+ kallsyms_filename = "/proc/kallsyms";
+ }
+
+do_kallsyms:
+ err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
+ if (err > 0)
+ pr_debug("Using %s for symbols\n", kallsyms_filename);
+ free(kallsyms_allocated_filename);
+
+ if (err > 0) {
+out_fixup:
+ if (kallsyms_filename != NULL)
+ dso__set_long_name(self, strdup("[kernel.kallsyms]"));
+ map__fixup_start(map);
+ map__fixup_end(map);
+ }
+
+ return err;
+}
+
+static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
+ symbol_filter_t filter)
+{
+ int err;
+ const char *kallsyms_filename = NULL;
+ struct machine *machine;
+ char path[PATH_MAX];
+
+ if (!map->groups) {
+ pr_debug("Guest kernel map has no pointer to its map groups\n");
+ return -1;
+ }
+ machine = map->groups->machine;
+
+ if (machine__is_default_guest(machine)) {
+ /*
+ * If the user specified a vmlinux filename, use it and only it,
+ * reporting errors to the user if it cannot be used.
+ * Otherwise use the guest kallsyms file supplied on the command line.
+ */
+ if (symbol_conf.default_guest_vmlinux_name != NULL) {
+ err = dso__load_vmlinux(self, map,
+ symbol_conf.default_guest_vmlinux_name, filter);
+ goto out_try_fixup;
+ }
+
+ kallsyms_filename = symbol_conf.default_guest_kallsyms;
+ if (!kallsyms_filename)
+ return -1;
+ } else {
+ sprintf(path, "%s/proc/kallsyms", machine->root_dir);
+ kallsyms_filename = path;
+ }
+
+ err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
+ if (err > 0)
+ pr_debug("Using %s for symbols\n", kallsyms_filename);
+
+out_try_fixup:
+ if (err > 0) {
+ if (kallsyms_filename != NULL) {
+ machine__mmap_name(machine, path, sizeof(path));
+ dso__set_long_name(self, strdup(path));
+ }
+ map__fixup_start(map);
+ map__fixup_end(map);
+ }
+
+ return err;
+}
+
+static void dsos__add(struct list_head *head, struct dso *dso)
+{
+ list_add_tail(&dso->node, head);
+}
+
+static struct dso *dsos__find(struct list_head *head, const char *name)
+{
+ struct dso *pos;
+
+ list_for_each_entry(pos, head, node)
+ if (strcmp(pos->long_name, name) == 0)
+ return pos;
+ return NULL;
+}
+
+struct dso *__dsos__findnew(struct list_head *head, const char *name)
+{
+ struct dso *dso = dsos__find(head, name);
+
+ if (!dso) {
+ dso = dso__new(name);
+ if (dso != NULL) {
+ dsos__add(head, dso);
+ dso__set_basename(dso);
+ }
+ }
+
+ return dso;
+}
+
+size_t __dsos__fprintf(struct list_head *head, FILE *fp)
+{
+ struct dso *pos;
+ size_t ret = 0;
+
+ list_for_each_entry(pos, head, node) {
+ int i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ ret += dso__fprintf(pos, i, fp);
+ }
+
+ return ret;
+}
+
+size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp)
+{
+ struct rb_node *nd;
+ size_t ret = 0;
+
+ for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret += __dsos__fprintf(&pos->kernel_dsos, fp);
+ ret += __dsos__fprintf(&pos->user_dsos, fp);
+ }
+
+ return ret;
+}
+
+static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
+ bool with_hits)
+{
+ struct dso *pos;
+ size_t ret = 0;
+
+ list_for_each_entry(pos, head, node) {
+ if (with_hits && !pos->hit)
+ continue;
+ ret += dso__fprintf_buildid(pos, fp);
+ ret += fprintf(fp, " %s\n", pos->long_name);
+ }
+ return ret;
+}
+
+size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits)
+{
+ return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) +
+ __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits);
+}
+
+size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits)
+{
+ struct rb_node *nd;
+ size_t ret = 0;
+
+ for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+ ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
+ }
+ return ret;
+}
+
+struct dso *dso__new_kernel(const char *name)
+{
+ struct dso *self = dso__new(name ?: "[kernel.kallsyms]");
+
+ if (self != NULL) {
+ dso__set_short_name(self, "[kernel]");
+ self->kernel = DSO_TYPE_KERNEL;
+ }
+
+ return self;
+}
+
+static struct dso *dso__new_guest_kernel(struct machine *machine,
+ const char *name)
+{
+ char bf[PATH_MAX];
+ struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf)));
+
+ if (self != NULL) {
+ dso__set_short_name(self, "[guest.kernel]");
+ self->kernel = DSO_TYPE_GUEST_KERNEL;
+ }
+
+ return self;
+}
+
+void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine)
+{
+ char path[PATH_MAX];
+
+ if (machine__is_default_guest(machine))
+ return;
+ sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
+ if (sysfs__read_build_id(path, self->build_id,
+ sizeof(self->build_id)) == 0)
+ self->has_build_id = true;
+}
+
+static struct dso *machine__create_kernel(struct machine *self)
+{
+ const char *vmlinux_name = NULL;
+ struct dso *kernel;
+
+ if (machine__is_host(self)) {
+ vmlinux_name = symbol_conf.vmlinux_name;
+ kernel = dso__new_kernel(vmlinux_name);
+ } else {
+ if (machine__is_default_guest(self))
+ vmlinux_name = symbol_conf.default_guest_vmlinux_name;
+ kernel = dso__new_guest_kernel(self, vmlinux_name);
+ }
+
+ if (kernel != NULL) {
+ dso__read_running_kernel_build_id(kernel, self);
+ dsos__add(&self->kernel_dsos, kernel);
+ }
+ return kernel;
+}
+
+int __machine__create_kernel_maps(struct machine *self, struct dso *kernel)
+{
+ enum map_type type;
+
+ for (type = 0; type < MAP__NR_TYPES; ++type) {
+ struct kmap *kmap;
+
+ self->vmlinux_maps[type] = map__new2(0, kernel, type);
+ if (self->vmlinux_maps[type] == NULL)
+ return -1;
+
+ self->vmlinux_maps[type]->map_ip =
+ self->vmlinux_maps[type]->unmap_ip = identity__map_ip;
+
+ kmap = map__kmap(self->vmlinux_maps[type]);
+ kmap->kmaps = &self->kmaps;
+ map_groups__insert(&self->kmaps, self->vmlinux_maps[type]);
+ }
+
+ return 0;
+}
+
+void machine__destroy_kernel_maps(struct machine *self)
+{
+ enum map_type type;
+
+ for (type = 0; type < MAP__NR_TYPES; ++type) {
+ struct kmap *kmap;
+
+ if (self->vmlinux_maps[type] == NULL)
+ continue;
+
+ kmap = map__kmap(self->vmlinux_maps[type]);
+ map_groups__remove(&self->kmaps, self->vmlinux_maps[type]);
+ if (kmap->ref_reloc_sym) {
+ /*
+ * ref_reloc_sym is shared among all maps, so free just
+ * on one of them.
+ */
+ if (type == MAP__FUNCTION) {
+ free((char *)kmap->ref_reloc_sym->name);
+ kmap->ref_reloc_sym->name = NULL;
+ free(kmap->ref_reloc_sym);
+ }
+ kmap->ref_reloc_sym = NULL;
+ }
+
+ map__delete(self->vmlinux_maps[type]);
+ self->vmlinux_maps[type] = NULL;
+ }
+}
+
+int machine__create_kernel_maps(struct machine *self)
+{
+ struct dso *kernel = machine__create_kernel(self);
+
+ if (kernel == NULL ||
+ __machine__create_kernel_maps(self, kernel) < 0)
+ return -1;
+
+ if (symbol_conf.use_modules && machine__create_modules(self) < 0)
+ pr_debug("Problems creating module maps, continuing anyway...\n");
+ /*
+ * Now that we have all the maps created, just set the ->end of them:
+ */
+ map_groups__fixup_end(&self->kmaps);
+ return 0;
+}
+
+static void vmlinux_path__exit(void)
+{
+ while (--vmlinux_path__nr_entries >= 0) {
+ free(vmlinux_path[vmlinux_path__nr_entries]);
+ vmlinux_path[vmlinux_path__nr_entries] = NULL;
+ }
+
+ free(vmlinux_path);
+ vmlinux_path = NULL;
+}
+
+static int vmlinux_path__init(void)
+{
+ struct utsname uts;
+ char bf[PATH_MAX];
+
+ if (uname(&uts) < 0)
+ return -1;
+
+ vmlinux_path = malloc(sizeof(char *) * 5);
+ if (vmlinux_path == NULL)
+ return -1;
+
+ vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ goto out_fail;
+ ++vmlinux_path__nr_entries;
+ vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ goto out_fail;
+ ++vmlinux_path__nr_entries;
+ snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
+ vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ goto out_fail;
+ ++vmlinux_path__nr_entries;
+ snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release);
+ vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ goto out_fail;
+ ++vmlinux_path__nr_entries;
+ snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
+ uts.release);
+ vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
+ if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
+ goto out_fail;
+ ++vmlinux_path__nr_entries;
+
+ return 0;
+
+out_fail:
+ vmlinux_path__exit();
+ return -1;
+}
+
+size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp)
+{
+ int i;
+ size_t printed = 0;
+ struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso;
+
+ if (kdso->has_build_id) {
+ char filename[PATH_MAX];
+ if (dso__build_id_filename(kdso, filename, sizeof(filename)))
+ printed += fprintf(fp, "[0] %s\n", filename);
+ }
+
+ for (i = 0; i < vmlinux_path__nr_entries; ++i)
+ printed += fprintf(fp, "[%d] %s\n",
+ i + kdso->has_build_id, vmlinux_path[i]);
+
+ return printed;
+}
+
+static int setup_list(struct strlist **list, const char *list_str,
+ const char *list_name)
+{
+ if (list_str == NULL)
+ return 0;
+
+ *list = strlist__new(true, list_str);
+ if (!*list) {
+ pr_err("problems parsing %s list\n", list_name);
+ return -1;
+ }
+ return 0;
+}
+
+int symbol__init(void)
+{
+ if (symbol_conf.initialized)
+ return 0;
+
+ elf_version(EV_CURRENT);
+ if (symbol_conf.sort_by_name)
+ symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
+ sizeof(struct symbol));
+
+ if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0)
+ return -1;
+
+ if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
+ pr_err("'.' is the only invalid --field-separator argument\n");
+ return -1;
+ }
+
+ if (setup_list(&symbol_conf.dso_list,
+ symbol_conf.dso_list_str, "dso") < 0)
+ return -1;
+
+ if (setup_list(&symbol_conf.comm_list,
+ symbol_conf.comm_list_str, "comm") < 0)
+ goto out_free_dso_list;
+
+ if (setup_list(&symbol_conf.sym_list,
+ symbol_conf.sym_list_str, "symbol") < 0)
+ goto out_free_comm_list;
+
+ symbol_conf.initialized = true;
+ return 0;
+
+out_free_dso_list:
+ strlist__delete(symbol_conf.dso_list);
+out_free_comm_list:
+ strlist__delete(symbol_conf.comm_list);
+ return -1;
+}
+
+void symbol__exit(void)
+{
+ if (!symbol_conf.initialized)
+ return;
+ strlist__delete(symbol_conf.sym_list);
+ strlist__delete(symbol_conf.dso_list);
+ strlist__delete(symbol_conf.comm_list);
+ vmlinux_path__exit();
+ symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
+ symbol_conf.initialized = false;
+}
+
+int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
+{
+ struct machine *machine = machines__findnew(self, pid);
+
+ if (machine == NULL)
+ return -1;
+
+ return machine__create_kernel_maps(machine);
+}
+
+static int hex(char ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ if ((ch >= 'A') && (ch <= 'F'))
+ return ch - 'A' + 10;
+ return -1;
+}
+
+/*
+ * While we find nice hex chars, build a long_val.
+ * Return number of chars processed.
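+ * E.g. hex2u64("3fff rest", &v) sets v to 0x3fff and returns 4.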
+ */
+int hex2u64(const char *ptr, u64 *long_val)
+{
+ const char *p = ptr;
+ *long_val = 0;
+
+ while (*p) {
+ const int hex_val = hex(*p);
+
+ if (hex_val < 0)
+ break;
+
+ *long_val = (*long_val << 4) | hex_val;
+ p++;
+ }
+
+ return p - ptr;
+}
+
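+/*
+ * Replace every 'from' character in s with 'to', in place; used e.g. to
+ * turn '-' into '_' in module DSO names such as "[i2c-core]".
+ */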
+char *strxfrchar(char *s, char from, char to)
+{
+ char *p = s;
+
+ while ((p = strchr(p, from)) != NULL)
+ *p++ = to;
+
+ return s;
+}
+
+int machines__create_guest_kernel_maps(struct rb_root *self)
+{
+ int ret = 0;
+ struct dirent **namelist = NULL;
+ int i, items = 0;
+ char path[PATH_MAX];
+ pid_t pid;
+
+ if (symbol_conf.default_guest_vmlinux_name ||
+ symbol_conf.default_guest_modules ||
+ symbol_conf.default_guest_kallsyms) {
+ machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID);
+ }
+
+ if (symbol_conf.guestmount) {
+ items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
+ if (items <= 0)
+ return -ENOENT;
+ for (i = 0; i < items; i++) {
+ if (!isdigit(namelist[i]->d_name[0])) {
+ /* Filter out . and .. */
+ continue;
+ }
+ pid = atoi(namelist[i]->d_name);
+ sprintf(path, "%s/%s/proc/kallsyms",
+ symbol_conf.guestmount,
+ namelist[i]->d_name);
+ ret = access(path, R_OK);
+ if (ret) {
+ pr_debug("Can't access file %s\n", path);
+ goto failure;
+ }
+ machines__create_kernel_maps(self, pid);
+ }
+failure:
+ free(namelist);
+ }
+
+ return ret;
+}
+
+void machines__destroy_guest_kernel_maps(struct rb_root *self)
+{
+ struct rb_node *next = rb_first(self);
+
+ while (next) {
+ struct machine *pos = rb_entry(next, struct machine, rb_node);
+
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, self);
+ machine__delete(pos);
+ }
+}
+
+int machine__load_kallsyms(struct machine *self, const char *filename,
+ enum map_type type, symbol_filter_t filter)
+{
+ struct map *map = self->vmlinux_maps[type];
+ int ret = dso__load_kallsyms(map->dso, filename, map, filter);
+
+ if (ret > 0) {
+ dso__set_loaded(map->dso, type);
+ /*
+ * Since /proc/kallsyms will have multiple sections for the
+ * kernel, with modules between them, fix up the end of all
+ * sections.
+ */
+ __map_groups__fixup_end(&self->kmaps, type);
+ }
+
+ return ret;
+}
+
+int machine__load_vmlinux_path(struct machine *self, enum map_type type,
+ symbol_filter_t filter)
+{
+ struct map *map = self->vmlinux_maps[type];
+ int ret = dso__load_vmlinux_path(map->dso, map, filter);
+
+ if (ret > 0) {
+ dso__set_loaded(map->dso, type);
+ map__reloc_vmlinux(map);
+ }
+
+ return ret;
+}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
new file mode 100644
index 00000000..ea95c275
--- /dev/null
+++ b/tools/perf/util/symbol.h
@@ -0,0 +1,231 @@
+#ifndef __PERF_SYMBOL
+#define __PERF_SYMBOL 1
+
+#include <linux/types.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include "map.h"
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <stdio.h>
+
+#ifdef HAVE_CPLUS_DEMANGLE
+extern char *cplus_demangle(const char *, int);
+
+static inline char *bfd_demangle(void __used *v, const char *c, int i)
+{
+ return cplus_demangle(c, i);
+}
+#else
+#ifdef NO_DEMANGLE
+static inline char *bfd_demangle(void __used *v, const char __used *c,
+ int __used i)
+{
+ return NULL;
+}
+#else
+#include <bfd.h>
+#endif
+#endif
+
+int hex2u64(const char *ptr, u64 *val);
+char *strxfrchar(char *s, char from, char to);
+
+/*
+ * libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
+ * for newer versions we can use mmap to reduce memory usage:
+ */
+#ifdef LIBELF_NO_MMAP
+# define PERF_ELF_C_READ_MMAP ELF_C_READ
+#else
+# define PERF_ELF_C_READ_MMAP ELF_C_READ_MMAP
+#endif
+
+#ifndef DMGL_PARAMS
+#define DMGL_PARAMS (1 << 0) /* Include function args */
+#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
+#endif
+
+#define BUILD_ID_SIZE 20
+
+struct symbol {
+ struct rb_node rb_node;
+ u64 start;
+ u64 end;
+ u16 namelen;
+ u8 binding;
+ char name[0];
+};
+
+void symbol__delete(struct symbol *self);
+
+struct strlist;
+
+struct symbol_conf {
+ unsigned short priv_size;
+ bool try_vmlinux_path,
+ use_modules,
+ sort_by_name,
+ show_nr_samples,
+ use_callchain,
+ exclude_other,
+ show_cpu_utilization,
+ initialized;
+ const char *vmlinux_name,
+ *source_prefix,
+ *field_sep;
+ const char *default_guest_vmlinux_name,
+ *default_guest_kallsyms,
+ *default_guest_modules;
+ const char *guestmount;
+ const char *dso_list_str,
+ *comm_list_str,
+ *sym_list_str,
+ *col_width_list_str;
+ struct strlist *dso_list,
+ *comm_list,
+ *sym_list;
+};
+
+extern struct symbol_conf symbol_conf;
+
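+/*
+ * Per-symbol private data is assumed to be allocated immediately before
+ * the struct symbol itself, hence the negative offset here.
+ */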
+static inline void *symbol__priv(struct symbol *self)
+{
+ return ((void *)self) - symbol_conf.priv_size;
+}
+
+struct ref_reloc_sym {
+ const char *name;
+ u64 addr;
+ u64 unrelocated_addr;
+};
+
+struct map_symbol {
+ struct map *map;
+ struct symbol *sym;
+ bool unfolded;
+ bool has_children;
+};
+
+struct addr_location {
+ struct thread *thread;
+ struct map *map;
+ struct symbol *sym;
+ u64 addr;
+ char level;
+ bool filtered;
+ u8 cpumode;
+ s32 cpu;
+};
+
+enum dso_kernel_type {
+ DSO_TYPE_USER = 0,
+ DSO_TYPE_KERNEL,
+ DSO_TYPE_GUEST_KERNEL
+};
+
+struct dso {
+ struct list_head node;
+ struct rb_root symbols[MAP__NR_TYPES];
+ struct rb_root symbol_names[MAP__NR_TYPES];
+ enum dso_kernel_type kernel;
+ u8 adjust_symbols:1;
+ u8 slen_calculated:1;
+ u8 has_build_id:1;
+ u8 hit:1;
+ u8 annotate_warned:1;
+ u8 sname_alloc:1;
+ u8 lname_alloc:1;
+ unsigned char origin;
+ u8 sorted_by_name;
+ u8 loaded;
+ u8 build_id[BUILD_ID_SIZE];
+ const char *short_name;
+ char *long_name;
+ u16 long_name_len;
+ u16 short_name_len;
+ char name[0];
+};
+
+struct dso *dso__new(const char *name);
+struct dso *dso__new_kernel(const char *name);
+void dso__delete(struct dso *self);
+
+int dso__name_len(const struct dso *self);
+
+bool dso__loaded(const struct dso *self, enum map_type type);
+bool dso__sorted_by_name(const struct dso *self, enum map_type type);
+
+static inline void dso__set_loaded(struct dso *self, enum map_type type)
+{
+ self->loaded |= (1 << type);
+}
+
+void dso__sort_by_name(struct dso *self, enum map_type type);
+
+struct dso *__dsos__findnew(struct list_head *head, const char *name);
+
+int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
+int dso__load_vmlinux_path(struct dso *self, struct map *map,
+ symbol_filter_t filter);
+int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map,
+ symbol_filter_t filter);
+int machine__load_kallsyms(struct machine *self, const char *filename,
+ enum map_type type, symbol_filter_t filter);
+int machine__load_vmlinux_path(struct machine *self, enum map_type type,
+ symbol_filter_t filter);
+
+size_t __dsos__fprintf(struct list_head *head, FILE *fp);
+
+size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits);
+size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp);
+size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits);
+
+size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
+size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
+
+enum dso_origin {
+ DSO__ORIG_KERNEL = 0,
+ DSO__ORIG_GUEST_KERNEL,
+ DSO__ORIG_JAVA_JIT,
+ DSO__ORIG_BUILD_ID_CACHE,
+ DSO__ORIG_FEDORA,
+ DSO__ORIG_UBUNTU,
+ DSO__ORIG_BUILDID,
+ DSO__ORIG_DSO,
+ DSO__ORIG_GUEST_KMODULE,
+ DSO__ORIG_KMODULE,
+ DSO__ORIG_NOT_FOUND,
+};
+
+char dso__symtab_origin(const struct dso *self);
+void dso__set_long_name(struct dso *self, char *name);
+void dso__set_build_id(struct dso *self, void *build_id);
+void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine);
+struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr);
+struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
+ const char *name);
+
+int filename__read_build_id(const char *filename, void *bf, size_t size);
+int sysfs__read_build_id(const char *filename, void *bf, size_t size);
+bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
+int build_id__sprintf(const u8 *self, int len, char *bf);
+int kallsyms__parse(const char *filename, void *arg,
+ int (*process_symbol)(void *arg, const char *name,
+ char type, u64 start));
+
+void machine__destroy_kernel_maps(struct machine *self);
+int __machine__create_kernel_maps(struct machine *self, struct dso *kernel);
+int machine__create_kernel_maps(struct machine *self);
+
+int machines__create_kernel_maps(struct rb_root *self, pid_t pid);
+int machines__create_guest_kernel_maps(struct rb_root *self);
+void machines__destroy_guest_kernel_maps(struct rb_root *self);
+
+int symbol__init(void);
+void symbol__exit(void);
+bool symbol_type__is_a(char symbol_type, enum map_type map_type);
+
+size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp);
+
+#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
new file mode 100644
index 00000000..8c72d888
--- /dev/null
+++ b/tools/perf/util/thread.c
@@ -0,0 +1,180 @@
+#include "../perf.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "session.h"
+#include "thread.h"
+#include "util.h"
+#include "debug.h"
+
+/* Skip entries starting with '.', i.e. the "." and ".." directories */
+static int filter(const struct dirent *dir)
+{
+ if (dir->d_name[0] == '.')
+ return 0;
+ else
+ return 1;
+}
+
+int find_all_tid(int pid, pid_t ** all_tid)
+{
+ char name[256];
+ int items;
+ struct dirent **namelist = NULL;
+ int ret = 0;
+ int i;
+
+ sprintf(name, "/proc/%d/task", pid);
+ items = scandir(name, &namelist, filter, NULL);
+ if (items <= 0)
+ return -ENOENT;
+ *all_tid = malloc(sizeof(pid_t) * items);
+ if (!*all_tid) {
+ ret = -ENOMEM;
+ goto failure;
+ }
+
+ for (i = 0; i < items; i++)
+ (*all_tid)[i] = atoi(namelist[i]->d_name);
+
+ ret = items;
+
+failure:
+ for (i=0; i<items; i++)
+ free(namelist[i]);
+ free(namelist);
+
+ return ret;
+}
+
+static struct thread *thread__new(pid_t pid)
+{
+ struct thread *self = zalloc(sizeof(*self));
+
+ if (self != NULL) {
+ map_groups__init(&self->mg);
+ self->pid = pid;
+ self->comm = malloc(32);
+ if (self->comm)
+ snprintf(self->comm, 32, ":%d", self->pid);
+ }
+
+ return self;
+}
+
+void thread__delete(struct thread *self)
+{
+ map_groups__exit(&self->mg);
+ free(self->comm);
+ free(self);
+}
+
+int thread__set_comm(struct thread *self, const char *comm)
+{
+ int err;
+
+ if (self->comm)
+ free(self->comm);
+ self->comm = strdup(comm);
+ err = self->comm == NULL ? -ENOMEM : 0;
+ if (!err) {
+ self->comm_set = true;
+ map_groups__flush(&self->mg);
+ }
+ return err;
+}
+
+int thread__comm_len(struct thread *self)
+{
+ if (!self->comm_len) {
+ if (!self->comm)
+ return 0;
+ self->comm_len = strlen(self->comm);
+ }
+
+ return self->comm_len;
+}
+
+static size_t thread__fprintf(struct thread *self, FILE *fp)
+{
+ return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
+ map_groups__fprintf(&self->mg, verbose, fp);
+}
+
+struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
+{
+ struct rb_node **p = &self->threads.rb_node;
+ struct rb_node *parent = NULL;
+ struct thread *th;
+
+ /*
+ * Front-end cache - PID lookups come in blocks,
+ * so most of the time we don't have to look up
+ * the full rbtree:
+ */
+ if (self->last_match && self->last_match->pid == pid)
+ return self->last_match;
+
+ while (*p != NULL) {
+ parent = *p;
+ th = rb_entry(parent, struct thread, rb_node);
+
+ if (th->pid == pid) {
+ self->last_match = th;
+ return th;
+ }
+
+ if (pid < th->pid)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ th = thread__new(pid);
+ if (th != NULL) {
+ rb_link_node(&th->rb_node, parent, p);
+ rb_insert_color(&th->rb_node, &self->threads);
+ self->last_match = th;
+ }
+
+ return th;
+}
+
+void thread__insert_map(struct thread *self, struct map *map)
+{
+ map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
+ map_groups__insert(&self->mg, map);
+}
+
+int thread__fork(struct thread *self, struct thread *parent)
+{
+ int i;
+
+ if (parent->comm_set) {
+ if (self->comm)
+ free(self->comm);
+ self->comm = strdup(parent->comm);
+ if (!self->comm)
+ return -ENOMEM;
+ self->comm_set = true;
+ }
+
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
+ return -ENOMEM;
+ return 0;
+}
+
+size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
+{
+ size_t ret = 0;
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
+ struct thread *pos = rb_entry(nd, struct thread, rb_node);
+
+ ret += thread__fprintf(pos, fp);
+ }
+
+ return ret;
+}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
new file mode 100644
index 00000000..688500ff
--- /dev/null
+++ b/tools/perf/util/thread.h
@@ -0,0 +1,49 @@
+#ifndef __PERF_THREAD_H
+#define __PERF_THREAD_H
+
+#include <linux/rbtree.h>
+#include <unistd.h>
+#include "symbol.h"
+
+struct thread {
+ union {
+ struct rb_node rb_node;
+ struct list_head node;
+ };
+ struct map_groups mg;
+ pid_t pid;
+ char shortname[3];
+ bool comm_set;
+ char *comm;
+ int comm_len;
+};
+
+struct perf_session;
+
+void thread__delete(struct thread *self);
+
+int find_all_tid(int pid, pid_t ** all_tid);
+int thread__set_comm(struct thread *self, const char *comm);
+int thread__comm_len(struct thread *self);
+struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
+void thread__insert_map(struct thread *self, struct map *map);
+int thread__fork(struct thread *self, struct thread *parent);
+size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
+
+static inline struct map *thread__find_map(struct thread *self,
+ enum map_type type, u64 addr)
+{
+ return self ? map_groups__find(&self->mg, type, addr) : NULL;
+}
+
+void thread__find_addr_map(struct thread *self,
+ struct perf_session *session, u8 cpumode,
+ enum map_type type, pid_t pid, u64 addr,
+ struct addr_location *al);
+
+void thread__find_addr_location(struct thread *self,
+ struct perf_session *session, u8 cpumode,
+ enum map_type type, pid_t pid, u64 addr,
+ struct addr_location *al,
+ symbol_filter_t filter);
+#endif /* __PERF_THREAD_H */
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
new file mode 100644
index 00000000..b1572601
--- /dev/null
+++ b/tools/perf/util/trace-event-info.c
@@ -0,0 +1,563 @@
+/*
+ * Copyright (C) 2008,2009, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define _GNU_SOURCE
+#include <dirent.h>
+#include <mntent.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <linux/kernel.h>
+
+#include "../perf.h"
+#include "trace-event.h"
+#include "debugfs.h"
+
+#define VERSION "0.5"
+
+#define _STR(x) #x
+#define STR(x) _STR(x)
+#define MAX_PATH 256
+
+#define TRACE_CTRL "tracing_on"
+#define TRACE "trace"
+#define AVAILABLE "available_tracers"
+#define CURRENT "current_tracer"
+#define ITER_CTRL "trace_options"
+#define MAX_LATENCY "tracing_max_latency"
+
+unsigned int page_size;
+
+static const char *output_file = "trace.info";
+static int output_fd;
+
+struct event_list {
+ struct event_list *next;
+ const char *event;
+};
+
+struct events {
+ struct events *sibling;
+ struct events *children;
+ struct events *next;
+ char *name;
+};
+
+
+
+static void die(const char *fmt, ...)
+{
+ va_list ap;
+ int ret = errno;
+
+ if (errno)
+ perror("trace-cmd");
+ else
+ ret = -1;
+
+ va_start(ap, fmt);
+ fprintf(stderr, " ");
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+
+ fprintf(stderr, "\n");
+ exit(ret);
+}
+
+void *malloc_or_die(unsigned int size)
+{
+ void *data;
+
+ data = malloc(size);
+ if (!data)
+ die("malloc");
+ return data;
+}
+
+static const char *find_debugfs(void)
+{
+ const char *path = debugfs_mount(NULL);
+
+ if (!path)
+ die("Your kernel does not support the debugfs filesystem");
+
+ return path;
+}
+
+/*
+ * Finds the path to the debugfs/tracing
+ * Allocates the string and stores it.
+ */
+static const char *find_tracing_dir(void)
+{
+ static char *tracing;
+ static int tracing_found;
+ const char *debugfs;
+
+ if (tracing_found)
+ return tracing;
+
+ debugfs = find_debugfs();
+
+ tracing = malloc_or_die(strlen(debugfs) + 9);
+
+ sprintf(tracing, "%s/tracing", debugfs);
+
+ tracing_found = 1;
+ return tracing;
+}
+
+static char *get_tracing_file(const char *name)
+{
+ const char *tracing;
+ char *file;
+
+ tracing = find_tracing_dir();
+ if (!tracing)
+ return NULL;
+
+ file = malloc_or_die(strlen(tracing) + strlen(name) + 2);
+
+ sprintf(file, "%s/%s", tracing, name);
+ return file;
+}
+
+static void put_tracing_file(char *file)
+{
+ free(file);
+}
+
+static ssize_t calc_data_size;
+
+static ssize_t write_or_die(const void *buf, size_t len)
+{
+ int ret;
+
+ if (calc_data_size) {
+ calc_data_size += len;
+ return len;
+ }
+
+ ret = write(output_fd, buf, len);
+ if (ret < 0)
+ die("writing to '%s'", output_file);
+
+ return ret;
+}
+
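+/*
+ * Runtime endianness check: on a big-endian host the bytes
+ * 0x01 0x02 0x03 0x04 read back as the 32-bit value 0x01020304.
+ */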
+int bigendian(void)
+{
+ unsigned char str[] = { 0x1, 0x2, 0x3, 0x4, 0x0, 0x0, 0x0, 0x0};
+ unsigned int *ptr;
+
+ ptr = (unsigned int *)(void *)str;
+ return *ptr == 0x01020304;
+}
+
+static unsigned long long copy_file_fd(int fd)
+{
+ unsigned long long size = 0;
+ char buf[BUFSIZ];
+ int r;
+
+ do {
+ r = read(fd, buf, BUFSIZ);
+ if (r > 0) {
+ size += r;
+ write_or_die(buf, r);
+ }
+ } while (r > 0);
+
+ return size;
+}
+
+static unsigned long long copy_file(const char *file)
+{
+ unsigned long long size = 0;
+ int fd;
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0)
+ die("Can't read '%s'", file);
+ size = copy_file_fd(fd);
+ close(fd);
+
+ return size;
+}
+
+static unsigned long get_size_fd(int fd)
+{
+ unsigned long long size = 0;
+ char buf[BUFSIZ];
+ int r;
+
+ do {
+ r = read(fd, buf, BUFSIZ);
+ if (r > 0)
+ size += r;
+ } while (r > 0);
+
+ lseek(fd, 0, SEEK_SET);
+
+ return size;
+}
+
+static unsigned long get_size(const char *file)
+{
+ unsigned long long size = 0;
+ int fd;
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0)
+ die("Can't read '%s'", file);
+ size = get_size_fd(fd);
+ close(fd);
+
+ return size;
+}
+
+static void read_header_files(void)
+{
+ unsigned long long size, check_size;
+ char *path;
+ int fd;
+
+ path = get_tracing_file("events/header_page");
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ die("can't read '%s'", path);
+
+ /* unfortunately, you cannot stat debugfs files for their size */
+ size = get_size_fd(fd);
+
+ write_or_die("header_page", 12);
+ write_or_die(&size, 8);
+ check_size = copy_file_fd(fd);
+ close(fd);
+
+ if (size != check_size)
+ die("wrong size for '%s' size=%lld read=%lld",
+ path, size, check_size);
+ put_tracing_file(path);
+
+ path = get_tracing_file("events/header_event");
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ die("can't read '%s'", path);
+
+ size = get_size_fd(fd);
+
+ write_or_die("header_event", 13);
+ write_or_die(&size, 8);
+ check_size = copy_file_fd(fd);
+ if (size != check_size)
+ die("wrong size for '%s'", path);
+ put_tracing_file(path);
+ close(fd);
+}
+
+static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
+{
+ while (tps) {
+ if (!strcmp(sys, tps->name))
+ return true;
+ tps = tps->next;
+ }
+
+ return false;
+}
+
+static void copy_event_system(const char *sys, struct tracepoint_path *tps)
+{
+ unsigned long long size, check_size;
+ struct dirent *dent;
+ struct stat st;
+ char *format;
+ DIR *dir;
+ int count = 0;
+ int ret;
+
+ dir = opendir(sys);
+ if (!dir)
+ die("can't read directory '%s'", sys);
+
+ while ((dent = readdir(dir))) {
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ !name_in_tp_list(dent->d_name, tps))
+ continue;
+ format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10);
+ sprintf(format, "%s/%s/format", sys, dent->d_name);
+ ret = stat(format, &st);
+ free(format);
+ if (ret < 0)
+ continue;
+ count++;
+ }
+
+ write_or_die(&count, 4);
+
+ rewinddir(dir);
+ while ((dent = readdir(dir))) {
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ !name_in_tp_list(dent->d_name, tps))
+ continue;
+ format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10);
+ sprintf(format, "%s/%s/format", sys, dent->d_name);
+ ret = stat(format, &st);
+
+ if (ret >= 0) {
+ /* unfortunately, you cannot stat debugfs files for their size */
+ size = get_size(format);
+ write_or_die(&size, 8);
+ check_size = copy_file(format);
+ if (size != check_size)
+ die("error in size of file '%s'", format);
+ }
+
+ free(format);
+ }
+ closedir(dir);
+}
+
+static void read_ftrace_files(struct tracepoint_path *tps)
+{
+ char *path;
+
+ path = get_tracing_file("events/ftrace");
+
+ copy_event_system(path, tps);
+
+ put_tracing_file(path);
+}
+
+static bool system_in_tp_list(char *sys, struct tracepoint_path *tps)
+{
+ while (tps) {
+ if (!strcmp(sys, tps->system))
+ return true;
+ tps = tps->next;
+ }
+
+ return false;
+}
+
+static void read_event_files(struct tracepoint_path *tps)
+{
+ struct dirent *dent;
+ struct stat st;
+ char *path;
+ char *sys;
+ DIR *dir;
+ int count = 0;
+ int ret;
+
+ path = get_tracing_file("events");
+
+ dir = opendir(path);
+ if (!dir)
+ die("can't read directory '%s'", path);
+
+ while ((dent = readdir(dir))) {
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ strcmp(dent->d_name, "ftrace") == 0 ||
+ !system_in_tp_list(dent->d_name, tps))
+ continue;
+ count++;
+ }
+
+ write_or_die(&count, 4);
+
+ rewinddir(dir);
+ while ((dent = readdir(dir))) {
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
+ strcmp(dent->d_name, "..") == 0 ||
+ strcmp(dent->d_name, "ftrace") == 0 ||
+ !system_in_tp_list(dent->d_name, tps))
+ continue;
+ sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2);
+ sprintf(sys, "%s/%s", path, dent->d_name);
+ ret = stat(sys, &st);
+ if (ret >= 0) {
+ write_or_die(dent->d_name, strlen(dent->d_name) + 1);
+ copy_event_system(sys, tps);
+ }
+ free(sys);
+ }
+
+ closedir(dir);
+ put_tracing_file(path);
+}
+
+static void read_proc_kallsyms(void)
+{
+ unsigned int size, check_size;
+ const char *path = "/proc/kallsyms";
+ struct stat st;
+ int ret;
+
+ ret = stat(path, &st);
+ if (ret < 0) {
+ /* not found */
+ size = 0;
+ write_or_die(&size, 4);
+ return;
+ }
+ size = get_size(path);
+ write_or_die(&size, 4);
+ check_size = copy_file(path);
+ if (size != check_size)
+ die("error in size of file '%s'", path);
+
+}
+
+static void read_ftrace_printk(void)
+{
+ unsigned int size, check_size;
+ char *path;
+ struct stat st;
+ int ret;
+
+ path = get_tracing_file("printk_formats");
+ ret = stat(path, &st);
+ if (ret < 0) {
+ /* not found */
+ size = 0;
+ write_or_die(&size, 4);
+ goto out;
+ }
+ size = get_size(path);
+ write_or_die(&size, 4);
+ check_size = copy_file(path);
+ if (size != check_size)
+ die("error in size of file '%s'", path);
+out:
+ put_tracing_file(path);
+}
+
+static struct tracepoint_path *
+get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events)
+{
+ struct tracepoint_path path, *ppath = &path;
+ int i, nr_tracepoints = 0;
+
+ for (i = 0; i < nb_events; i++) {
+ if (pattrs[i].type != PERF_TYPE_TRACEPOINT)
+ continue;
+ ++nr_tracepoints;
+ ppath->next = tracepoint_id_to_path(pattrs[i].config);
+ if (!ppath->next)
+ die("%s\n", "No memory to allocate tracepoints list");
+ ppath = ppath->next;
+ }
+
+ return nr_tracepoints > 0 ? path.next : NULL;
+}
+
+bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events)
+{
+ int i;
+
+ for (i = 0; i < nb_events; i++)
+ if (pattrs[i].type == PERF_TYPE_TRACEPOINT)
+ return true;
+
+ return false;
+}
+
+int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
+{
+ char buf[BUFSIZ];
+ struct tracepoint_path *tps = get_tracepoints_path(pattrs, nb_events);
+
+ /*
+ * What? No tracepoints? No sense writing anything here, bail out.
+ */
+ if (tps == NULL)
+ return -1;
+
+ output_fd = fd;
+
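+ /* magic header: 23 = 0x17, 8 = 0x08, 68 = 0x44 ('D'), then "tracing" */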
+ buf[0] = 23;
+ buf[1] = 8;
+ buf[2] = 68;
+ memcpy(buf + 3, "tracing", 7);
+
+ write_or_die(buf, 10);
+
+ write_or_die(VERSION, strlen(VERSION) + 1);
+
+ /* save endian */
+ if (bigendian())
+ buf[0] = 1;
+ else
+ buf[0] = 0;
+
+ write_or_die(buf, 1);
+
+ /* save size of long */
+ buf[0] = sizeof(long);
+ write_or_die(buf, 1);
+
+ /* save page_size */
+ page_size = sysconf(_SC_PAGESIZE);
+ write_or_die(&page_size, 4);
+
+ read_header_files();
+ read_ftrace_files(tps);
+ read_event_files(tps);
+ read_proc_kallsyms();
+ read_ftrace_printk();
+
+ return 0;
+}
+
+ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs,
+ int nb_events)
+{
+ ssize_t size;
+ int err = 0;
+
+ calc_data_size = 1;
+ err = read_tracing_data(fd, pattrs, nb_events);
+ size = calc_data_size - 1;
+ calc_data_size = 0;
+
+ if (err < 0)
+ return err;
+
+ return size;
+}
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
new file mode 100644
index 00000000..73a02223
--- /dev/null
+++ b/tools/perf/util/trace-event-parse.c
@@ -0,0 +1,3233 @@
+/*
+ * Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The parts for function graph printing was taken and modified from the
+ * Linux Kernel that were written by Frederic Weisbecker.
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+#undef _GNU_SOURCE
+#include "../perf.h"
+#include "util.h"
+#include "trace-event.h"
+
+int header_page_ts_offset;
+int header_page_ts_size;
+int header_page_size_offset;
+int header_page_size_size;
+int header_page_overwrite_offset;
+int header_page_overwrite_size;
+int header_page_data_offset;
+int header_page_data_size;
+
+bool latency_format;
+
+static char *input_buf;
+static unsigned long long input_buf_ptr;
+static unsigned long long input_buf_siz;
+
+static int cpus;
+static int long_size;
+static int is_flag_field;
+static int is_symbolic_field;
+
+static struct format_field *
+find_any_field(struct event *event, const char *name);
+
+static void init_input_buf(char *buf, unsigned long long size)
+{
+ input_buf = buf;
+ input_buf_siz = size;
+ input_buf_ptr = 0;
+}
+
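+/* pid -> comm mapping, filled by parse_cmdlines() from "pid comm" lines */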
+struct cmdline {
+ char *comm;
+ int pid;
+};
+
+static struct cmdline *cmdlines;
+static int cmdline_count;
+
+static int cmdline_cmp(const void *a, const void *b)
+{
+ const struct cmdline *ca = a;
+ const struct cmdline *cb = b;
+
+ if (ca->pid < cb->pid)
+ return -1;
+ if (ca->pid > cb->pid)
+ return 1;
+
+ return 0;
+}
+
+void parse_cmdlines(char *file, int size __unused)
+{
+ struct cmdline_list {
+ struct cmdline_list *next;
+ char *comm;
+ int pid;
+ } *list = NULL, *item;
+ char *line;
+ char *next = NULL;
+ int i;
+
+ line = strtok_r(file, "\n", &next);
+ while (line) {
+ item = malloc_or_die(sizeof(*item));
+ sscanf(line, "%d %as", &item->pid,
+ (float *)(void *)&item->comm); /* workaround gcc warning */
+ item->next = list;
+ list = item;
+ line = strtok_r(NULL, "\n", &next);
+ cmdline_count++;
+ }
+
+ cmdlines = malloc_or_die(sizeof(*cmdlines) * cmdline_count);
+
+ i = 0;
+ while (list) {
+ cmdlines[i].pid = list->pid;
+ cmdlines[i].comm = list->comm;
+ i++;
+ item = list;
+ list = list->next;
+ free(item);
+ }
+
+ qsort(cmdlines, cmdline_count, sizeof(*cmdlines), cmdline_cmp);
+}
+
+static struct func_map {
+ unsigned long long addr;
+ char *func;
+ char *mod;
+} *func_list;
+static unsigned int func_count;
+
+static int func_cmp(const void *a, const void *b)
+{
+ const struct func_map *fa = a;
+ const struct func_map *fb = b;
+
+ if (fa->addr < fb->addr)
+ return -1;
+ if (fa->addr > fb->addr)
+ return 1;
+
+ return 0;
+}
+
+void parse_proc_kallsyms(char *file, unsigned int size __unused)
+{
+ struct func_list {
+ struct func_list *next;
+ unsigned long long addr;
+ char *func;
+ char *mod;
+ } *list = NULL, *item;
+ char *line;
+ char *next = NULL;
+ char *addr_str;
+ char ch;
+ int ret;
+ int i;
+
+ line = strtok_r(file, "\n", &next);
+ while (line) {
+ item = malloc_or_die(sizeof(*item));
+ item->mod = NULL;
+ ret = sscanf(line, "%as %c %as\t[%as",
+ (float *)(void *)&addr_str, /* workaround gcc warning */
+ &ch,
+ (float *)(void *)&item->func,
+ (float *)(void *)&item->mod);
+ item->addr = strtoull(addr_str, NULL, 16);
+ free(addr_str);
+
+ /* truncate the extra ']' */
+ if (item->mod)
+ item->mod[strlen(item->mod) - 1] = 0;
+
+
+ item->next = list;
+ list = item;
+ line = strtok_r(NULL, "\n", &next);
+ func_count++;
+ }
+
+ func_list = malloc_or_die(sizeof(*func_list) * (func_count + 1));
+
+ i = 0;
+ while (list) {
+ func_list[i].func = list->func;
+ func_list[i].addr = list->addr;
+ func_list[i].mod = list->mod;
+ i++;
+ item = list;
+ list = list->next;
+ free(item);
+ }
+
+ qsort(func_list, func_count, sizeof(*func_list), func_cmp);
+
+ /*
+ * Add a special record at the end.
+ */
+ func_list[func_count].func = NULL;
+ func_list[func_count].addr = 0;
+ func_list[func_count].mod = NULL;
+}
+
+/*
+ * We are searching for a record in between, not an exact
+ * match.
+ */
+static int func_bcmp(const void *a, const void *b)
+{
+ const struct func_map *fa = a;
+ const struct func_map *fb = b;
+
+ if ((fa->addr == fb->addr) ||
+
+ (fa->addr > fb->addr &&
+ fa->addr < (fb+1)->addr))
+ return 0;
+
+ if (fa->addr < fb->addr)
+ return -1;
+
+ return 1;
+}
+
+static struct func_map *find_func(unsigned long long addr)
+{
+ struct func_map *func;
+ struct func_map key;
+
+ key.addr = addr;
+
+ func = bsearch(&key, func_list, func_count, sizeof(*func_list),
+ func_bcmp);
+
+ return func;
+}
+
+void print_funcs(void)
+{
+ int i;
+
+ for (i = 0; i < (int)func_count; i++) {
+ printf("%016llx %s",
+ func_list[i].addr,
+ func_list[i].func);
+ if (func_list[i].mod)
+ printf(" [%s]\n", func_list[i].mod);
+ else
+ printf("\n");
+ }
+}
+
+static struct printk_map {
+ unsigned long long addr;
+ char *printk;
+} *printk_list;
+static unsigned int printk_count;
+
+static int printk_cmp(const void *a, const void *b)
+{
+ const struct func_map *fa = a;
+ const struct func_map *fb = b;
+
+ if (fa->addr < fb->addr)
+ return -1;
+ if (fa->addr > fb->addr)
+ return 1;
+
+ return 0;
+}
+
+static struct printk_map *find_printk(unsigned long long addr)
+{
+ struct printk_map *printk;
+ struct printk_map key;
+
+ key.addr = addr;
+
+ printk = bsearch(&key, printk_list, printk_count, sizeof(*printk_list),
+ printk_cmp);
+
+ return printk;
+}
+
+void parse_ftrace_printk(char *file, unsigned int size __unused)
+{
+ struct printk_list {
+ struct printk_list *next;
+ unsigned long long addr;
+ char *printk;
+ } *list = NULL, *item;
+ char *line;
+ char *next = NULL;
+ char *addr_str;
+ int i;
+
+ line = strtok_r(file, "\n", &next);
+ while (line) {
+ addr_str = strsep(&line, ":");
+ if (!line) {
+ warning("error parsing print strings");
+ break;
+ }
+ item = malloc_or_die(sizeof(*item));
+ item->addr = strtoull(addr_str, NULL, 16);
+ /* fmt still has a space, skip it */
+ item->printk = strdup(line+1);
+ item->next = list;
+ list = item;
+ line = strtok_r(NULL, "\n", &next);
+ printk_count++;
+ }
+
+ printk_list = malloc_or_die(sizeof(*printk_list) * printk_count + 1);
+
+ i = 0;
+ while (list) {
+ printk_list[i].printk = list->printk;
+ printk_list[i].addr = list->addr;
+ i++;
+ item = list;
+ list = list->next;
+ free(item);
+ }
+
+ qsort(printk_list, printk_count, sizeof(*printk_list), printk_cmp);
+}
+
+void print_printk(void)
+{
+ int i;
+
+ for (i = 0; i < (int)printk_count; i++) {
+ printf("%016llx %s\n",
+ printk_list[i].addr,
+ printk_list[i].printk);
+ }
+}
+
+static struct event *alloc_event(void)
+{
+ struct event *event;
+
+ event = malloc_or_die(sizeof(*event));
+ memset(event, 0, sizeof(*event));
+
+ return event;
+}
+
+enum event_type {
+ EVENT_ERROR,
+ EVENT_NONE,
+ EVENT_SPACE,
+ EVENT_NEWLINE,
+ EVENT_OP,
+ EVENT_DELIM,
+ EVENT_ITEM,
+ EVENT_DQUOTE,
+ EVENT_SQUOTE,
+};
+
+static struct event *event_list;
+
+static void add_event(struct event *event)
+{
+ event->next = event_list;
+ event_list = event;
+}
+
+static int event_item_type(enum event_type type)
+{
+ switch (type) {
+ case EVENT_ITEM ... EVENT_SQUOTE:
+ return 1;
+ case EVENT_ERROR ... EVENT_DELIM:
+ default:
+ return 0;
+ }
+}
+
+static void free_arg(struct print_arg *arg)
+{
+ if (!arg)
+ return;
+
+ switch (arg->type) {
+ case PRINT_ATOM:
+ if (arg->atom.atom)
+ free(arg->atom.atom);
+ break;
+ case PRINT_NULL:
+ case PRINT_FIELD ... PRINT_OP:
+ default:
+ /* todo */
+ break;
+ }
+
+ free(arg);
+}
+
+static enum event_type get_type(int ch)
+{
+ if (ch == '\n')
+ return EVENT_NEWLINE;
+ if (isspace(ch))
+ return EVENT_SPACE;
+ if (isalnum(ch) || ch == '_')
+ return EVENT_ITEM;
+ if (ch == '\'')
+ return EVENT_SQUOTE;
+ if (ch == '"')
+ return EVENT_DQUOTE;
+ if (!isprint(ch))
+ return EVENT_NONE;
+ if (ch == '(' || ch == ')' || ch == ',')
+ return EVENT_DELIM;
+
+ return EVENT_OP;
+}
+
+static int __read_char(void)
+{
+ if (input_buf_ptr >= input_buf_siz)
+ return -1;
+
+ return input_buf[input_buf_ptr++];
+}
+
+static int __peek_char(void)
+{
+ if (input_buf_ptr >= input_buf_siz)
+ return -1;
+
+ return input_buf[input_buf_ptr];
+}
+
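+/*
+ * Read the next token from the input buffer.  The first character
+ * decides the token type (see get_type()); multi-character operators
+ * and quoted strings are accumulated before being returned in *tok.
+ */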
+static enum event_type __read_token(char **tok)
+{
+ char buf[BUFSIZ];
+ int ch, last_ch, quote_ch, next_ch;
+ int i = 0;
+ int tok_size = 0;
+ enum event_type type;
+
+ *tok = NULL;
+
+
+ ch = __read_char();
+ if (ch < 0)
+ return EVENT_NONE;
+
+ type = get_type(ch);
+ if (type == EVENT_NONE)
+ return type;
+
+ buf[i++] = ch;
+
+ switch (type) {
+ case EVENT_NEWLINE:
+ case EVENT_DELIM:
+ *tok = malloc_or_die(2);
+ (*tok)[0] = ch;
+ (*tok)[1] = 0;
+ return type;
+
+ case EVENT_OP:
+ switch (ch) {
+ case '-':
+ next_ch = __peek_char();
+ if (next_ch == '>') {
+ buf[i++] = __read_char();
+ break;
+ }
+ /* fall through */
+ case '+':
+ case '|':
+ case '&':
+ case '>':
+ case '<':
+ last_ch = ch;
+ ch = __peek_char();
+ if (ch != last_ch)
+ goto test_equal;
+ buf[i++] = __read_char();
+ switch (last_ch) {
+ case '>':
+ case '<':
+ goto test_equal;
+ default:
+ break;
+ }
+ break;
+ case '!':
+ case '=':
+ goto test_equal;
+ default: /* other single-character ops need no look-ahead */
+ break;
+ }
+ buf[i] = 0;
+ *tok = strdup(buf);
+ return type;
+
+ test_equal:
+ ch = __peek_char();
+ if (ch == '=')
+ buf[i++] = __read_char();
+ break;
+
+ case EVENT_DQUOTE:
+ case EVENT_SQUOTE:
+ /* don't keep quotes */
+ i--;
+ quote_ch = ch;
+ last_ch = 0;
+ do {
+ if (i == (BUFSIZ - 1)) {
+ buf[i] = 0;
+ if (*tok) {
+ *tok = realloc(*tok, tok_size + BUFSIZ);
+ if (!*tok)
+ return EVENT_NONE;
+ strcat(*tok, buf);
+ } else
+ *tok = strdup(buf);
+
+ if (!*tok)
+ return EVENT_NONE;
+ tok_size += BUFSIZ;
+ i = 0;
+ }
+ last_ch = ch;
+ ch = __read_char();
+ buf[i++] = ch;
+ /* the '\' '\' will cancel itself */
+ if (ch == '\\' && last_ch == '\\')
+ last_ch = 0;
+ } while (ch != quote_ch || last_ch == '\\');
+ /* remove the last quote */
+ i--;
+ goto out;
+
+ case EVENT_ERROR ... EVENT_SPACE:
+ case EVENT_ITEM:
+ default:
+ break;
+ }
+
+ while (get_type(__peek_char()) == type) {
+ if (i == (BUFSIZ - 1)) {
+ buf[i] = 0;
+ if (*tok) {
+ *tok = realloc(*tok, tok_size + BUFSIZ);
+ if (!*tok)
+ return EVENT_NONE;
+ strcat(*tok, buf);
+ } else
+ *tok = strdup(buf);
+
+ if (!*tok)
+ return EVENT_NONE;
+ tok_size += BUFSIZ;
+ i = 0;
+ }
+ ch = __read_char();
+ buf[i++] = ch;
+ }
+
+ out:
+ buf[i] = 0;
+ if (*tok) {
+ *tok = realloc(*tok, tok_size + i);
+ if (!*tok)
+ return EVENT_NONE;
+ strcat(*tok, buf);
+ } else
+ *tok = strdup(buf);
+ if (!*tok)
+ return EVENT_NONE;
+
+ return type;
+}
+
+static void free_token(char *tok)
+{
+ if (tok)
+ free(tok);
+}
+
+static enum event_type read_token(char **tok)
+{
+ enum event_type type;
+
+ for (;;) {
+ type = __read_token(tok);
+ if (type != EVENT_SPACE)
+ return type;
+
+ free_token(*tok);
+ }
+
+ /* not reached */
+ return EVENT_NONE;
+}
+
+/* no newline */
+static enum event_type read_token_item(char **tok)
+{
+ enum event_type type;
+
+ for (;;) {
+ type = __read_token(tok);
+ if (type != EVENT_SPACE && type != EVENT_NEWLINE)
+ return type;
+
+ free_token(*tok);
+ }
+
+ /* not reached */
+ return EVENT_NONE;
+}
+
+static int test_type(enum event_type type, enum event_type expect)
+{
+ if (type != expect) {
+ warning("Error: expected type %d but read %d",
+ expect, type);
+ return -1;
+ }
+ return 0;
+}
+
+static int __test_type_token(enum event_type type, char *token,
+ enum event_type expect, const char *expect_tok,
+ bool warn)
+{
+ if (type != expect) {
+ if (warn)
+ warning("Error: expected type %d but read %d",
+ expect, type);
+ return -1;
+ }
+
+ if (strcmp(token, expect_tok) != 0) {
+ if (warn)
+ warning("Error: expected '%s' but read '%s'",
+ expect_tok, token);
+ return -1;
+ }
+ return 0;
+}
+
+static int test_type_token(enum event_type type, char *token,
+ enum event_type expect, const char *expect_tok)
+{
+ return __test_type_token(type, token, expect, expect_tok, true);
+}
+
+static int __read_expect_type(enum event_type expect, char **tok, int newline_ok)
+{
+ enum event_type type;
+
+ if (newline_ok)
+ type = read_token(tok);
+ else
+ type = read_token_item(tok);
+ return test_type(type, expect);
+}
+
+static int read_expect_type(enum event_type expect, char **tok)
+{
+ return __read_expect_type(expect, tok, 1);
+}
+
+static int __read_expected(enum event_type expect, const char *str,
+ int newline_ok, bool warn)
+{
+ enum event_type type;
+ char *token;
+ int ret;
+
+ if (newline_ok)
+ type = read_token(&token);
+ else
+ type = read_token_item(&token);
+
+ ret = __test_type_token(type, token, expect, str, warn);
+
+ free_token(token);
+
+ return ret;
+}
+
+static int read_expected(enum event_type expect, const char *str)
+{
+ return __read_expected(expect, str, 1, true);
+}
+
+static int read_expected_item(enum event_type expect, const char *str)
+{
+ return __read_expected(expect, str, 0, true);
+}
+
+static char *event_read_name(void)
+{
+ char *token;
+
+ if (read_expected(EVENT_ITEM, "name") < 0)
+ return NULL;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ return NULL;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+
+ return token;
+
+ fail:
+ free_token(token);
+ return NULL;
+}
+
+static int event_read_id(void)
+{
+ char *token;
+ int id;
+
+ if (read_expected_item(EVENT_ITEM, "ID") < 0)
+ return -1;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+
+ id = strtoul(token, NULL, 0);
+ free_token(token);
+ return id;
+
+ fail:
+ free_token(token);
+ return -1;
+}
+
+static int field_is_string(struct format_field *field)
+{
+ if ((field->flags & FIELD_IS_ARRAY) &&
+ (strstr(field->type, "char") || strstr(field->type, "u8") ||
+ strstr(field->type, "s8")))
+ return 1;
+
+ return 0;
+}
+
+static int field_is_dynamic(struct format_field *field)
+{
+ if (!strncmp(field->type, "__data_loc", 10))
+ return 1;
+
+ return 0;
+}
+
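+/*
+ * Parse the "field:" lines of an event format file: type, name and
+ * optional array brackets, followed by offset, size and (on newer
+ * kernels) the signed attribute.  Returns the number of fields read.
+ */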
+static int event_read_fields(struct event *event, struct format_field **fields)
+{
+ struct format_field *field = NULL;
+ enum event_type type;
+ char *token;
+ char *last_token;
+ int count = 0;
+
+ do {
+ type = read_token(&token);
+ if (type == EVENT_NEWLINE) {
+ free_token(token);
+ return count;
+ }
+
+ count++;
+
+ if (test_type_token(type, token, EVENT_ITEM, "field"))
+ goto fail;
+ free_token(token);
+
+ type = read_token(&token);
+ /*
+ * The ftrace fields may still use the "special" name.
+ * Just ignore it.
+ */
+ if (event->flags & EVENT_FL_ISFTRACE &&
+ type == EVENT_ITEM && strcmp(token, "special") == 0) {
+ free_token(token);
+ type = read_token(&token);
+ }
+
+ if (test_type_token(type, token, EVENT_OP, ":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+
+ last_token = token;
+
+ field = malloc_or_die(sizeof(*field));
+ memset(field, 0, sizeof(*field));
+
+ /* read the rest of the type */
+ for (;;) {
+ type = read_token(&token);
+ if (type == EVENT_ITEM ||
+ (type == EVENT_OP && strcmp(token, "*") == 0) ||
+ /*
+ * Some of the ftrace fields are broken and have
+ * an illegal "." in them.
+ */
+ (event->flags & EVENT_FL_ISFTRACE &&
+ type == EVENT_OP && strcmp(token, ".") == 0)) {
+
+ if (strcmp(token, "*") == 0)
+ field->flags |= FIELD_IS_POINTER;
+
+ if (field->type) {
+ field->type = realloc(field->type,
+ strlen(field->type) +
+ strlen(last_token) + 2);
+ strcat(field->type, " ");
+ strcat(field->type, last_token);
+ } else
+ field->type = last_token;
+ last_token = token;
+ continue;
+ }
+
+ break;
+ }
+
+ if (!field->type) {
+ die("no type found");
+ goto fail;
+ }
+ field->name = last_token;
+
+ if (test_type(type, EVENT_OP))
+ goto fail;
+
+ if (strcmp(token, "[") == 0) {
+ enum event_type last_type = type;
+ char *brackets = token;
+ int len;
+
+ field->flags |= FIELD_IS_ARRAY;
+
+ type = read_token(&token);
+ while (strcmp(token, "]") != 0) {
+ if (last_type == EVENT_ITEM &&
+ type == EVENT_ITEM)
+ len = 2;
+ else
+ len = 1;
+ last_type = type;
+
+ brackets = realloc(brackets,
+ strlen(brackets) +
+ strlen(token) + len);
+ if (len == 2)
+ strcat(brackets, " ");
+ strcat(brackets, token);
+ free_token(token);
+ type = read_token(&token);
+ if (type == EVENT_NONE) {
+ die("failed to find token");
+ goto fail;
+ }
+ }
+
+ free_token(token);
+
+ brackets = realloc(brackets, strlen(brackets) + 2);
+ strcat(brackets, "]");
+
+ /* add brackets to type */
+
+ type = read_token(&token);
+ /*
+ * If the next token is not an OP, then it is of
+ * the format: type [] item;
+ */
+ if (type == EVENT_ITEM) {
+ field->type = realloc(field->type,
+ strlen(field->type) +
+ strlen(field->name) +
+ strlen(brackets) + 2);
+ strcat(field->type, " ");
+ strcat(field->type, field->name);
+ free_token(field->name);
+ strcat(field->type, brackets);
+ field->name = token;
+ type = read_token(&token);
+ } else {
+ field->type = realloc(field->type,
+ strlen(field->type) +
+ strlen(brackets) + 1);
+ strcat(field->type, brackets);
+ }
+ free(brackets);
+ }
+
+ if (field_is_string(field)) {
+ field->flags |= FIELD_IS_STRING;
+ if (field_is_dynamic(field))
+ field->flags |= FIELD_IS_DYNAMIC;
+ }
+
+ if (test_type_token(type, token, EVENT_OP, ";"))
+ goto fail;
+ free_token(token);
+
+ if (read_expected(EVENT_ITEM, "offset") < 0)
+ goto fail_expect;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_ITEM, &token))
+ goto fail;
+ field->offset = strtoul(token, NULL, 0);
+ free_token(token);
+
+ if (read_expected(EVENT_OP, ";") < 0)
+ goto fail_expect;
+
+ if (read_expected(EVENT_ITEM, "size") < 0)
+ goto fail_expect;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_ITEM, &token))
+ goto fail;
+ field->size = strtoul(token, NULL, 0);
+ free_token(token);
+
+ if (read_expected(EVENT_OP, ";") < 0)
+ goto fail_expect;
+
+ type = read_token(&token);
+ if (type != EVENT_NEWLINE) {
+ /* newer versions of the kernel have a "signed" type */
+ if (test_type_token(type, token, EVENT_ITEM, "signed"))
+ goto fail;
+
+ free_token(token);
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_ITEM, &token))
+ goto fail;
+
+ if (strtoul(token, NULL, 0))
+ field->flags |= FIELD_IS_SIGNED;
+
+ free_token(token);
+ if (read_expected(EVENT_OP, ";") < 0)
+ goto fail_expect;
+
+ if (read_expect_type(EVENT_NEWLINE, &token))
+ goto fail;
+ }
+
+ free_token(token);
+
+ *fields = field;
+ fields = &field->next;
+
+ } while (1);
+
+ return 0;
+
+fail:
+ free_token(token);
+fail_expect:
+ if (field)
+ free(field);
+ return -1;
+}
+
+static int event_read_format(struct event *event)
+{
+ char *token;
+ int ret;
+
+ if (read_expected_item(EVENT_ITEM, "format") < 0)
+ return -1;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_NEWLINE, &token))
+ goto fail;
+ free_token(token);
+
+ ret = event_read_fields(event, &event->format.common_fields);
+ if (ret < 0)
+ return ret;
+ event->format.nr_common = ret;
+
+ ret = event_read_fields(event, &event->format.fields);
+ if (ret < 0)
+ return ret;
+ event->format.nr_fields = ret;
+
+ return 0;
+
+ fail:
+ free_token(token);
+ return -1;
+}
+
+enum event_type
+process_arg_token(struct event *event, struct print_arg *arg,
+ char **tok, enum event_type type);
+
+static enum event_type
+process_arg(struct event *event, struct print_arg *arg, char **tok)
+{
+ enum event_type type;
+ char *token;
+
+ type = read_token(&token);
+ *tok = token;
+
+ return process_arg_token(event, arg, tok, type);
+}
+
+static enum event_type
+process_cond(struct event *event, struct print_arg *top, char **tok)
+{
+ struct print_arg *arg, *left, *right;
+ enum event_type type;
+ char *token = NULL;
+
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+
+ left = malloc_or_die(sizeof(*left));
+
+ right = malloc_or_die(sizeof(*right));
+
+ arg->type = PRINT_OP;
+ arg->op.left = left;
+ arg->op.right = right;
+
+ *tok = NULL;
+ type = process_arg(event, left, &token);
+ if (test_type_token(type, token, EVENT_OP, ":"))
+ goto out_free;
+
+ arg->op.op = token;
+
+ type = process_arg(event, right, &token);
+
+ top->op.right = arg;
+
+ *tok = token;
+ return type;
+
+out_free:
+ free_token(*tok);
+ free(right);
+ free(left);
+ free_arg(arg);
+ return EVENT_ERROR;
+}
+
+static enum event_type
+process_array(struct event *event, struct print_arg *top, char **tok)
+{
+ struct print_arg *arg;
+ enum event_type type;
+ char *token = NULL;
+
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+
+ *tok = NULL;
+ type = process_arg(event, arg, &token);
+ if (test_type_token(type, token, EVENT_OP, "]"))
+ goto out_free;
+
+ top->op.right = arg;
+
+ free_token(token);
+ type = read_token_item(&token);
+ *tok = token;
+
+ return type;
+
+out_free:
+ free_token(*tok);
+ free_arg(arg);
+ return EVENT_ERROR;
+}
+
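+/*
+ * Return the C-like precedence of an operator: a lower number binds
+ * more tightly (so '*' at 6 beats '+' at 7, and '?' is weakest at 16).
+ */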
+static int get_op_prio(char *op)
+{
+ if (!op[1]) {
+ switch (op[0]) {
+ case '*':
+ case '/':
+ case '%':
+ return 6;
+ case '+':
+ case '-':
+ return 7;
+ /* '>>' and '<<' are 8 */
+ case '<':
+ case '>':
+ return 9;
+ /* '==' and '!=' are 10 */
+ case '&':
+ return 11;
+ case '^':
+ return 12;
+ case '|':
+ return 13;
+ case '?':
+ return 16;
+ default:
+ die("unknown op '%c'", op[0]);
+ return -1;
+ }
+ } else {
+ if (strcmp(op, "++") == 0 ||
+ strcmp(op, "--") == 0) {
+ return 3;
+ } else if (strcmp(op, ">>") == 0 ||
+ strcmp(op, "<<") == 0) {
+ return 8;
+ } else if (strcmp(op, ">=") == 0 ||
+ strcmp(op, "<=") == 0) {
+ return 9;
+ } else if (strcmp(op, "==") == 0 ||
+ strcmp(op, "!=") == 0) {
+ return 10;
+ } else if (strcmp(op, "&&") == 0) {
+ return 14;
+ } else if (strcmp(op, "||") == 0) {
+ return 15;
+ } else {
+ die("unknown op '%s'", op);
+ return -1;
+ }
+ }
+}
+
+static void set_op_prio(struct print_arg *arg)
+{
+
+ /* single ops are the greatest */
+ if (!arg->op.left || arg->op.left->type == PRINT_NULL) {
+ arg->op.prio = 0;
+ return;
+ }
+
+ arg->op.prio = get_op_prio(arg->op.op);
+}
+
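+/*
+ * Build a print_arg operator node from the op in *tok: for binary ops
+ * the argument parsed so far becomes the left child and the right
+ * child is parsed next; operator priorities decide how chained ops
+ * are nested.
+ */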
+static enum event_type
+process_op(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *left, *right = NULL;
+ enum event_type type;
+ char *token;
+
+ /* the op is passed in via tok */
+ token = *tok;
+
+ if (arg->type == PRINT_OP && !arg->op.left) {
+ /* handle single op */
+ if (token[1]) {
+ die("bad op token %s", token);
+ return EVENT_ERROR;
+ }
+ switch (token[0]) {
+ case '!':
+ case '+':
+ case '-':
+ break;
+ default:
+ die("bad op token %s", token);
+ return EVENT_ERROR;
+ }
+
+ /* make an empty left */
+ left = malloc_or_die(sizeof(*left));
+ left->type = PRINT_NULL;
+ arg->op.left = left;
+
+ right = malloc_or_die(sizeof(*right));
+ arg->op.right = right;
+
+ type = process_arg(event, right, tok);
+
+ } else if (strcmp(token, "?") == 0) {
+
+ left = malloc_or_die(sizeof(*left));
+ /* copy the top arg to the left */
+ *left = *arg;
+
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = left;
+ arg->op.prio = 0;
+
+ type = process_cond(event, arg, tok);
+
+ } else if (strcmp(token, ">>") == 0 ||
+ strcmp(token, "<<") == 0 ||
+ strcmp(token, "&") == 0 ||
+ strcmp(token, "|") == 0 ||
+ strcmp(token, "&&") == 0 ||
+ strcmp(token, "||") == 0 ||
+ strcmp(token, "-") == 0 ||
+ strcmp(token, "+") == 0 ||
+ strcmp(token, "*") == 0 ||
+ strcmp(token, "^") == 0 ||
+ strcmp(token, "/") == 0 ||
+ strcmp(token, "<") == 0 ||
+ strcmp(token, ">") == 0 ||
+ strcmp(token, "==") == 0 ||
+ strcmp(token, "!=") == 0) {
+
+ left = malloc_or_die(sizeof(*left));
+
+ /* copy the top arg to the left */
+ *left = *arg;
+
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = left;
+
+ set_op_prio(arg);
+
+ right = malloc_or_die(sizeof(*right));
+
+ type = read_token_item(&token);
+ *tok = token;
+
+ /* could just be a type pointer */
+ if ((strcmp(arg->op.op, "*") == 0) &&
+ type == EVENT_DELIM && (strcmp(token, ")") == 0)) {
+ if (left->type != PRINT_ATOM)
+ die("bad pointer type");
+ left->atom.atom = realloc(left->atom.atom,
+ strlen(left->atom.atom) + 3);
+ strcat(left->atom.atom, " *");
+ *arg = *left;
+ free(arg);
+
+ return type;
+ }
+
+ type = process_arg_token(event, right, tok, type);
+
+ arg->op.right = right;
+
+ } else if (strcmp(token, "[") == 0) {
+
+ left = malloc_or_die(sizeof(*left));
+ *left = *arg;
+
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = left;
+
+ arg->op.prio = 0;
+ type = process_array(event, arg, tok);
+
+ } else {
+ warning("unknown op '%s'", token);
+ event->flags |= EVENT_FL_FAILED;
+ /* the arg is now the left side */
+ return EVENT_NONE;
+ }
+
+ if (type == EVENT_OP) {
+ int prio;
+
+ /* higher prios need to be closer to the root */
+ prio = get_op_prio(*tok);
+
+ if (prio > arg->op.prio)
+ return process_op(event, arg, tok);
+
+ return process_op(event, right, tok);
+ }
+
+ return type;
+}
+
+static enum event_type
+process_entry(struct event *event __unused, struct print_arg *arg,
+ char **tok)
+{
+ enum event_type type;
+ char *field;
+ char *token;
+
+ if (read_expected(EVENT_OP, "->") < 0)
+ return EVENT_ERROR;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+ field = token;
+
+ arg->type = PRINT_FIELD;
+ arg->field.name = field;
+
+ if (is_flag_field) {
+ arg->field.field = find_any_field(event, arg->field.name);
+ arg->field.field->flags |= FIELD_IS_FLAG;
+ is_flag_field = 0;
+ } else if (is_symbolic_field) {
+ arg->field.field = find_any_field(event, arg->field.name);
+ arg->field.field->flags |= FIELD_IS_SYMBOLIC;
+ is_symbolic_field = 0;
+ }
+
+ type = read_token(&token);
+ *tok = token;
+
+ return type;
+
+fail:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+static char *arg_eval(struct print_arg *arg);
+
+static long long arg_num_eval(struct print_arg *arg)
+{
+ long long left, right;
+ long long val = 0;
+
+ switch (arg->type) {
+ case PRINT_ATOM:
+ val = strtoll(arg->atom.atom, NULL, 0);
+ break;
+ case PRINT_TYPE:
+ val = arg_num_eval(arg->typecast.item);
+ break;
+ case PRINT_OP:
+ switch (arg->op.op[0]) {
+ case '|':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ if (arg->op.op[1])
+ val = left || right;
+ else
+ val = left | right;
+ break;
+ case '&':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ if (arg->op.op[1])
+ val = left && right;
+ else
+ val = left & right;
+ break;
+ case '<':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left < right;
+ break;
+ case '<':
+ val = left << right;
+ break;
+ case '=':
+ val = left <= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '>':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left > right;
+ break;
+ case '>':
+ val = left >> right;
+ break;
+ case '=':
+ val = left >= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '=':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+
+ if (arg->op.op[1] != '=')
+ die("unknown op '%s'", arg->op.op);
+
+ val = left == right;
+ break;
+ case '!':
+ left = arg_num_eval(arg->op.left);
+ right = arg_num_eval(arg->op.right);
+
+ switch (arg->op.op[1]) {
+ case '=':
+ val = left != right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+
+ case PRINT_NULL:
+ case PRINT_FIELD ... PRINT_SYMBOL:
+ case PRINT_STRING:
+ default:
+ die("invalid eval type %d", arg->type);
+
+ }
+ return val;
+}
+
+static char *arg_eval(struct print_arg *arg)
+{
+ long long val;
+ static char buf[20];
+
+ switch (arg->type) {
+ case PRINT_ATOM:
+ return arg->atom.atom;
+ case PRINT_TYPE:
+ return arg_eval(arg->typecast.item);
+ case PRINT_OP:
+ val = arg_num_eval(arg);
+ sprintf(buf, "%lld", val);
+ return buf;
+
+ case PRINT_NULL:
+ case PRINT_FIELD ... PRINT_SYMBOL:
+ case PRINT_STRING:
+ default:
+ die("invalid eval type %d", arg->type);
+ break;
+ }
+
+ return NULL;
+}
+
+static enum event_type
+process_fields(struct event *event, struct print_flag_sym **list, char **tok)
+{
+ enum event_type type;
+ struct print_arg *arg = NULL;
+ struct print_flag_sym *field;
+ char *token = NULL;
+ char *value;
+
+ do {
+ free_token(token);
+ type = read_token_item(&token);
+ if (test_type_token(type, token, EVENT_OP, "{"))
+ break;
+
+ arg = malloc_or_die(sizeof(*arg));
+
+ free_token(token);
+ type = process_arg(event, arg, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
+ field = malloc_or_die(sizeof(*field));
+ memset(field, 0, sizeof(*field));
+
+ value = arg_eval(arg);
+ field->value = strdup(value);
+
+ free_token(token);
+ type = process_arg(event, arg, &token);
+ if (test_type_token(type, token, EVENT_OP, "}"))
+ goto out_free;
+
+ value = arg_eval(arg);
+ field->str = strdup(value);
+ free_arg(arg);
+ arg = NULL;
+
+ *list = field;
+ list = &field->next;
+
+ free_token(token);
+ type = read_token_item(&token);
+ } while (type == EVENT_DELIM && strcmp(token, ",") == 0);
+
+ *tok = token;
+ return type;
+
+out_free:
+ free_arg(arg);
+ free_token(token);
+
+ return EVENT_ERROR;
+}
+
+static enum event_type
+process_flags(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *field;
+ enum event_type type;
+ char *token;
+
+ memset(arg, 0, sizeof(*arg));
+ arg->type = PRINT_FLAGS;
+
+ if (read_expected_item(EVENT_DELIM, "(") < 0)
+ return EVENT_ERROR;
+
+ field = malloc_or_die(sizeof(*field));
+
+ type = process_arg(event, field, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
+ arg->flags.field = field;
+
+ type = read_token_item(&token);
+ if (event_item_type(type)) {
+ arg->flags.delim = token;
+ type = read_token_item(&token);
+ }
+
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
+ type = process_fields(event, &arg->flags.flags, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ")"))
+ goto out_free;
+
+ free_token(token);
+ type = read_token_item(tok);
+ return type;
+
+out_free:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+static enum event_type
+process_symbols(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *field;
+ enum event_type type;
+ char *token;
+
+ memset(arg, 0, sizeof(*arg));
+ arg->type = PRINT_SYMBOL;
+
+ if (read_expected_item(EVENT_DELIM, "(") < 0)
+ return EVENT_ERROR;
+
+ field = malloc_or_die(sizeof(*field));
+
+ type = process_arg(event, field, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
+ arg->symbol.field = field;
+
+ type = process_fields(event, &arg->symbol.symbols, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ")"))
+ goto out_free;
+
+ free_token(token);
+ type = read_token_item(tok);
+ return type;
+
+out_free:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+static enum event_type
+process_paren(struct event *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *item_arg;
+ enum event_type type;
+ char *token;
+
+ type = process_arg(event, arg, &token);
+
+ if (type == EVENT_ERROR)
+ return EVENT_ERROR;
+
+ if (type == EVENT_OP)
+ type = process_op(event, arg, &token);
+
+ if (type == EVENT_ERROR)
+ return EVENT_ERROR;
+
+ if (test_type_token(type, token, EVENT_DELIM, ")")) {
+ free_token(token);
+ return EVENT_ERROR;
+ }
+
+ free_token(token);
+ type = read_token_item(&token);
+
+ /*
+ * If the next token is an item or another open paren, then
+ * this was a typecast.
+ */
+ if (event_item_type(type) ||
+ (type == EVENT_DELIM && strcmp(token, "(") == 0)) {
+
+ /* make this a typecast and continue */
+
+ /* previous must be an atom */
+ if (arg->type != PRINT_ATOM)
+ die("previous needed to be PRINT_ATOM");
+
+ item_arg = malloc_or_die(sizeof(*item_arg));
+
+ arg->type = PRINT_TYPE;
+ arg->typecast.type = arg->atom.atom;
+ arg->typecast.item = item_arg;
+ type = process_arg_token(event, item_arg, &token, type);
+
+ }
+
+ *tok = token;
+ return type;
+}
+
+
+static enum event_type
+process_str(struct event *event __unused, struct print_arg *arg, char **tok)
+{
+ enum event_type type;
+ char *token;
+
+ if (read_expected(EVENT_DELIM, "(") < 0)
+ return EVENT_ERROR;
+
+ if (read_expect_type(EVENT_ITEM, &token) < 0)
+ goto fail;
+
+ arg->type = PRINT_STRING;
+ arg->string.string = token;
+ arg->string.offset = -1;
+
+ if (read_expected(EVENT_DELIM, ")") < 0)
+ return EVENT_ERROR;
+
+ type = read_token(&token);
+ *tok = token;
+
+ return type;
+fail:
+ free_token(token);
+ return EVENT_ERROR;
+}
+
+enum event_type
+process_arg_token(struct event *event, struct print_arg *arg,
+ char **tok, enum event_type type)
+{
+ char *token;
+ char *atom;
+
+ token = *tok;
+
+ switch (type) {
+ case EVENT_ITEM:
+ if (strcmp(token, "REC") == 0) {
+ free_token(token);
+ type = process_entry(event, arg, &token);
+ } else if (strcmp(token, "__print_flags") == 0) {
+ free_token(token);
+ is_flag_field = 1;
+ type = process_flags(event, arg, &token);
+ } else if (strcmp(token, "__print_symbolic") == 0) {
+ free_token(token);
+ is_symbolic_field = 1;
+ type = process_symbols(event, arg, &token);
+ } else if (strcmp(token, "__get_str") == 0) {
+ free_token(token);
+ type = process_str(event, arg, &token);
+ } else {
+ atom = token;
+ /* test the next token */
+ type = read_token_item(&token);
+
+ /* atoms can be more than one token long */
+ while (type == EVENT_ITEM) {
+ atom = realloc(atom, strlen(atom) + strlen(token) + 2);
+ strcat(atom, " ");
+ strcat(atom, token);
+ free_token(token);
+ type = read_token_item(&token);
+ }
+
+ /* todo, test for function */
+
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = atom;
+ }
+ break;
+ case EVENT_DQUOTE:
+ case EVENT_SQUOTE:
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = token;
+ type = read_token_item(&token);
+ break;
+ case EVENT_DELIM:
+ if (strcmp(token, "(") == 0) {
+ free_token(token);
+ type = process_paren(event, arg, &token);
+ break;
+ }
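+ /* fall through */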
+ case EVENT_OP:
+ /* handle single ops */
+ arg->type = PRINT_OP;
+ arg->op.op = token;
+ arg->op.left = NULL;
+ type = process_op(event, arg, &token);
+
+ break;
+
+ case EVENT_ERROR ... EVENT_NEWLINE:
+ default:
+ die("unexpected type %d", type);
+ }
+ *tok = token;
+
+ return type;
+}
+
+static int event_read_print_args(struct event *event, struct print_arg **list)
+{
+ enum event_type type = EVENT_ERROR;
+ struct print_arg *arg;
+ char *token;
+ int args = 0;
+
+ do {
+ if (type == EVENT_NEWLINE) {
+ free_token(token);
+ type = read_token_item(&token);
+ continue;
+ }
+
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+
+ type = process_arg(event, arg, &token);
+
+ if (type == EVENT_ERROR) {
+ free_arg(arg);
+ return -1;
+ }
+
+ *list = arg;
+ args++;
+
+ if (type == EVENT_OP) {
+ type = process_op(event, arg, &token);
+ list = &arg->next;
+ continue;
+ }
+
+ if (type == EVENT_DELIM && strcmp(token, ",") == 0) {
+ free_token(token);
+ *list = arg;
+ list = &arg->next;
+ continue;
+ }
+ break;
+ } while (type != EVENT_NONE);
+
+ if (type != EVENT_NONE)
+ free_token(token);
+
+ return args;
+}
+
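+/*
+ * Parse the 'print fmt: "..."' line of the format file.  Adjacent
+ * quoted strings are concatenated, then the argument list that
+ * follows the format string is parsed into print_arg structures.
+ */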
+static int event_read_print(struct event *event)
+{
+ enum event_type type;
+ char *token;
+ int ret;
+
+ if (read_expected_item(EVENT_ITEM, "print") < 0)
+ return -1;
+
+ if (read_expected(EVENT_ITEM, "fmt") < 0)
+ return -1;
+
+ if (read_expected(EVENT_OP, ":") < 0)
+ return -1;
+
+ if (read_expect_type(EVENT_DQUOTE, &token) < 0)
+ goto fail;
+
+ concat:
+ event->print_fmt.format = token;
+ event->print_fmt.args = NULL;
+
+ /* ok to have no arg */
+ type = read_token_item(&token);
+
+ if (type == EVENT_NONE)
+ return 0;
+
+ /* Handle concatenation of print lines */
+ if (type == EVENT_DQUOTE) {
+ char *cat;
+
+ cat = malloc_or_die(strlen(event->print_fmt.format) +
+ strlen(token) + 1);
+ strcpy(cat, event->print_fmt.format);
+ strcat(cat, token);
+ free_token(token);
+ free_token(event->print_fmt.format);
+ event->print_fmt.format = NULL;
+ token = cat;
+ goto concat;
+ }
+
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto fail;
+
+ free_token(token);
+
+ ret = event_read_print_args(event, &event->print_fmt.args);
+ if (ret < 0)
+ return -1;
+
+ return ret;
+
+ fail:
+ free_token(token);
+ return -1;
+}
+
+static struct format_field *
+find_common_field(struct event *event, const char *name)
+{
+ struct format_field *format;
+
+ for (format = event->format.common_fields;
+ format; format = format->next) {
+ if (strcmp(format->name, name) == 0)
+ break;
+ }
+
+ return format;
+}
+
+static struct format_field *
+find_field(struct event *event, const char *name)
+{
+ struct format_field *format;
+
+ for (format = event->format.fields;
+ format; format = format->next) {
+ if (strcmp(format->name, name) == 0)
+ break;
+ }
+
+ return format;
+}
+
+static struct format_field *
+find_any_field(struct event *event, const char *name)
+{
+ struct format_field *format;
+
+ format = find_common_field(event, name);
+ if (format)
+ return format;
+ return find_field(event, name);
+}
+
+unsigned long long read_size(void *ptr, int size)
+{
+ switch (size) {
+ case 1:
+ return *(unsigned char *)ptr;
+ case 2:
+ return data2host2(ptr);
+ case 4:
+ return data2host4(ptr);
+ case 8:
+ return data2host8(ptr);
+ default:
+ /* BUG! */
+ return 0;
+ }
+}
+
+unsigned long long
+raw_field_value(struct event *event, const char *name, void *data)
+{
+ struct format_field *field;
+
+ field = find_any_field(event, name);
+ if (!field)
+ return 0ULL;
+
+ return read_size(data + field->offset, field->size);
+}
+
+void *raw_field_ptr(struct event *event, const char *name, void *data)
+{
+ struct format_field *field;
+
+ field = find_any_field(event, name);
+ if (!field)
+ return NULL;
+
+ if (field->flags & FIELD_IS_DYNAMIC) {
+ int offset;
+
+ offset = *(int *)(data + field->offset);
+ offset &= 0xffff;
+
+ return data + offset;
+ }
+
+ return data + field->offset;
+}
+
+static int get_common_info(const char *type, int *offset, int *size)
+{
+ struct event *event;
+ struct format_field *field;
+
+ /*
+ * All events should have the same common elements.
+ * Pick any event to find where the type is.
+ */
+ if (!event_list)
+ die("no event_list!");
+
+ event = event_list;
+ field = find_common_field(event, type);
+ if (!field)
+ die("field '%s' not found", type);
+
+ *offset = field->offset;
+ *size = field->size;
+
+ return 0;
+}
+
+static int __parse_common(void *data, int *size, int *offset,
+ const char *name)
+{
+ int ret;
+
+ if (!*size) {
+ ret = get_common_info(name, offset, size);
+ if (ret < 0)
+ return ret;
+ }
+ return read_size(data + *offset, *size);
+}
+
+int trace_parse_common_type(void *data)
+{
+ static int type_offset;
+ static int type_size;
+
+ return __parse_common(data, &type_size, &type_offset,
+ "common_type");
+}
+
+int trace_parse_common_pid(void *data)
+{
+ static int pid_offset;
+ static int pid_size;
+
+ return __parse_common(data, &pid_size, &pid_offset,
+ "common_pid");
+}
+
+int parse_common_pc(void *data)
+{
+ static int pc_offset;
+ static int pc_size;
+
+ return __parse_common(data, &pc_size, &pc_offset,
+ "common_preempt_count");
+}
+
+int parse_common_flags(void *data)
+{
+ static int flags_offset;
+ static int flags_size;
+
+ return __parse_common(data, &flags_size, &flags_offset,
+ "common_flags");
+}
+
+int parse_common_lock_depth(void *data)
+{
+ static int ld_offset;
+ static int ld_size;
+ int ret;
+
+ ret = __parse_common(data, &ld_size, &ld_offset,
+ "common_lock_depth");
+ if (ret < 0)
+ return -1;
+
+ return ret;
+}
+
+struct event *trace_find_event(int id)
+{
+ struct event *event;
+
+ for (event = event_list; event; event = event->next) {
+ if (event->id == id)
+ break;
+ }
+ return event;
+}
+
+struct event *trace_find_next_event(struct event *event)
+{
+ if (!event)
+ return event_list;
+
+ return event->next;
+}
+
+static unsigned long long eval_num_arg(void *data, int size,
+ struct event *event, struct print_arg *arg)
+{
+ unsigned long long val = 0;
+ unsigned long long left, right;
+ struct print_arg *larg;
+
+ switch (arg->type) {
+ case PRINT_NULL:
+ /* ?? */
+ return 0;
+ case PRINT_ATOM:
+ return strtoull(arg->atom.atom, NULL, 0);
+ case PRINT_FIELD:
+ if (!arg->field.field) {
+ arg->field.field = find_any_field(event, arg->field.name);
+ if (!arg->field.field)
+ die("field %s not found", arg->field.name);
+ }
+ /* must be a number */
+ val = read_size(data + arg->field.field->offset,
+ arg->field.field->size);
+ break;
+ case PRINT_FLAGS:
+ case PRINT_SYMBOL:
+ break;
+ case PRINT_TYPE:
+ return eval_num_arg(data, size, event, arg->typecast.item);
+ case PRINT_STRING:
+ return 0;
+ break;
+ case PRINT_OP:
+ if (strcmp(arg->op.op, "[") == 0) {
+ /*
+ * Arrays are special, since we don't want
+ * to read the arg as is.
+ */
+ if (arg->op.left->type != PRINT_FIELD)
+ goto default_op; /* oops, all bets off */
+ larg = arg->op.left;
+ if (!larg->field.field) {
+ larg->field.field =
+ find_any_field(event, larg->field.name);
+ if (!larg->field.field)
+ die("field %s not found", larg->field.name);
+ }
+ right = eval_num_arg(data, size, event, arg->op.right);
+ val = read_size(data + larg->field.field->offset +
+ right * long_size, long_size);
+ break;
+ }
+ default_op:
+ left = eval_num_arg(data, size, event, arg->op.left);
+ right = eval_num_arg(data, size, event, arg->op.right);
+ switch (arg->op.op[0]) {
+ case '|':
+ if (arg->op.op[1])
+ val = left || right;
+ else
+ val = left | right;
+ break;
+ case '&':
+ if (arg->op.op[1])
+ val = left && right;
+ else
+ val = left & right;
+ break;
+ case '<':
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left < right;
+ break;
+ case '<':
+ val = left << right;
+ break;
+ case '=':
+ val = left <= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '>':
+ switch (arg->op.op[1]) {
+ case 0:
+ val = left > right;
+ break;
+ case '>':
+ val = left >> right;
+ break;
+ case '=':
+ val = left >= right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ case '=':
+ if (arg->op.op[1] != '=')
+ die("unknown op '%s'", arg->op.op);
+ val = left == right;
+ break;
+ case '-':
+ val = left - right;
+ break;
+ case '+':
+ val = left + right;
+ break;
+ default:
+ die("unknown op '%s'", arg->op.op);
+ }
+ break;
+ default: /* not sure what to do there */
+ return 0;
+ }
+ return val;
+}
+
+struct flag {
+ const char *name;
+ unsigned long long value;
+};
+
+static const struct flag flags[] = {
+ { "HI_SOFTIRQ", 0 },
+ { "TIMER_SOFTIRQ", 1 },
+ { "NET_TX_SOFTIRQ", 2 },
+ { "NET_RX_SOFTIRQ", 3 },
+ { "BLOCK_SOFTIRQ", 4 },
+ { "BLOCK_IOPOLL_SOFTIRQ", 5 },
+ { "TASKLET_SOFTIRQ", 6 },
+ { "SCHED_SOFTIRQ", 7 },
+ { "HRTIMER_SOFTIRQ", 8 },
+ { "RCU_SOFTIRQ", 9 },
+
+ { "HRTIMER_NORESTART", 0 },
+ { "HRTIMER_RESTART", 1 },
+};
+
+unsigned long long eval_flag(const char *flag)
+{
+ int i;
+
+ /*
+ * Some flags in the format files do not get converted.
+ * If the flag is not numeric, see if it is something that
+ * we already know about.
+ */
+ if (isdigit(flag[0]))
+ return strtoull(flag, NULL, 0);
+
+ for (i = 0; i < (int)(sizeof(flags)/sizeof(flags[0])); i++)
+ if (strcmp(flags[i].name, flag) == 0)
+ return flags[i].value;
+
+ return 0;
+}
+
+static void print_str_arg(void *data, int size,
+ struct event *event, struct print_arg *arg)
+{
+ struct print_flag_sym *flag;
+ unsigned long long val, fval;
+ char *str;
+ int print;
+
+ switch (arg->type) {
+ case PRINT_NULL:
+ /* ?? */
+ return;
+ case PRINT_ATOM:
+ printf("%s", arg->atom.atom);
+ return;
+ case PRINT_FIELD:
+ if (!arg->field.field) {
+ arg->field.field = find_any_field(event, arg->field.name);
+ if (!arg->field.field)
+ die("field %s not found", arg->field.name);
+ }
+ str = malloc_or_die(arg->field.field->size + 1);
+ memcpy(str, data + arg->field.field->offset,
+ arg->field.field->size);
+ str[arg->field.field->size] = 0;
+ printf("%s", str);
+ free(str);
+ break;
+ case PRINT_FLAGS:
+ val = eval_num_arg(data, size, event, arg->flags.field);
+ print = 0;
+ for (flag = arg->flags.flags; flag; flag = flag->next) {
+ fval = eval_flag(flag->value);
+ if (!val && !fval) {
+ printf("%s", flag->str);
+ break;
+ }
+ if (fval && (val & fval) == fval) {
+ if (print && arg->flags.delim)
+ printf("%s", arg->flags.delim);
+ printf("%s", flag->str);
+ print = 1;
+ val &= ~fval;
+ }
+ }
+ break;
+ case PRINT_SYMBOL:
+ val = eval_num_arg(data, size, event, arg->symbol.field);
+ for (flag = arg->symbol.symbols; flag; flag = flag->next) {
+ fval = eval_flag(flag->value);
+ if (val == fval) {
+ printf("%s", flag->str);
+ break;
+ }
+ }
+ break;
+
+ case PRINT_TYPE:
+ break;
+ case PRINT_STRING: {
+ int str_offset;
+
+ if (arg->string.offset == -1) {
+ struct format_field *f;
+
+ f = find_any_field(event, arg->string.string);
+ arg->string.offset = f->offset;
+ }
+ str_offset = *(int *)(data + arg->string.offset);
+ str_offset &= 0xffff;
+ printf("%s", ((char *)data) + str_offset);
+ break;
+ }
+ case PRINT_OP:
+ /*
+ * The only op for string should be ? :
+ */
+ if (arg->op.op[0] != '?')
+ return;
+ val = eval_num_arg(data, size, event, arg->op.left);
+ if (val)
+ print_str_arg(data, size, event, arg->op.right->op.left);
+ else
+ print_str_arg(data, size, event, arg->op.right->op.right);
+ break;
+ default:
+ /* well... */
+ break;
+ }
+}
+
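+/*
+ * Rebuild the argument list of a trace_bprintk() event: the first arg
+ * is the instruction pointer, the rest are pulled out of the "buf"
+ * field by walking the conversions in the format string.
+ */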
+static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event *event)
+{
+ static struct format_field *field, *ip_field;
+ struct print_arg *args, *arg, **next;
+ unsigned long long ip, val;
+ char *ptr;
+ void *bptr;
+
+ if (!field) {
+ field = find_field(event, "buf");
+ if (!field)
+ die("can't find buffer field for binary printk");
+ ip_field = find_field(event, "ip");
+ if (!ip_field)
+ die("can't find ip field for binary printk");
+ }
+
+ ip = read_size(data + ip_field->offset, ip_field->size);
+
+ /*
+ * The first arg is the IP pointer.
+ */
+ args = malloc_or_die(sizeof(*args));
+ arg = args;
+ arg->next = NULL;
+ next = &arg->next;
+
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = malloc_or_die(32);
+ sprintf(arg->atom.atom, "%lld", ip);
+
+ /* skip the first "%pf : " */
+ for (ptr = fmt + 6, bptr = data + field->offset;
+ bptr < data + size && *ptr; ptr++) {
+ int ls = 0;
+
+ if (*ptr == '%') {
+ process_again:
+ ptr++;
+ switch (*ptr) {
+ case '%':
+ break;
+ case 'l':
+ ls++;
+ goto process_again;
+ case 'L':
+ ls = 2;
+ goto process_again;
+ case '0' ... '9':
+ goto process_again;
+ case 'p':
+ ls = 1;
+ /* fall through */
+ case 'd':
+ case 'u':
+ case 'x':
+ case 'i':
+ /* the pointers are always 4 bytes aligned */
+ bptr = (void *)(((unsigned long)bptr + 3) &
+ ~3);
+ switch (ls) {
+ case 0:
+ case 1:
+ ls = long_size;
+ break;
+ case 2:
+ ls = 8;
+ default:
+ break;
+ }
+ val = read_size(bptr, ls);
+ bptr += ls;
+ arg = malloc_or_die(sizeof(*arg));
+ arg->next = NULL;
+ arg->type = PRINT_ATOM;
+ arg->atom.atom = malloc_or_die(32);
+ sprintf(arg->atom.atom, "%lld", val);
+ *next = arg;
+ next = &arg->next;
+ break;
+ case 's':
+ arg = malloc_or_die(sizeof(*arg));
+ arg->next = NULL;
+ arg->type = PRINT_STRING;
+ arg->string.string = strdup(bptr);
+ bptr += strlen(bptr) + 1;
+ *next = arg;
+ next = &arg->next;
+ default:
+ break;
+ }
+ }
+ }
+
+ return args;
+}
+
+static void free_args(struct print_arg *args)
+{
+ struct print_arg *next;
+
+ while (args) {
+ next = args->next;
+
+ if (args->type == PRINT_ATOM)
+ free(args->atom.atom);
+ else
+ free(args->string.string);
+ free(args);
+ args = next;
+ }
+}
+
+static char *get_bprint_format(void *data, int size __unused, struct event *event)
+{
+ unsigned long long addr;
+ static struct format_field *field;
+ struct printk_map *printk;
+ char *format;
+ char *p;
+
+ if (!field) {
+ field = find_field(event, "fmt");
+ if (!field)
+ die("can't find format field for binary printk");
+ printf("field->offset = %d size=%d\n", field->offset, field->size);
+ }
+
+ addr = read_size(data + field->offset, field->size);
+
+ printk = find_printk(addr);
+ if (!printk) {
+ format = malloc_or_die(45);
+ sprintf(format, "%%pf : (NO FORMAT FOUND at %llx)\n",
+ addr);
+ return format;
+ }
+
+ p = printk->printk;
+ /* Remove any quotes. */
+ if (*p == '"')
+ p++;
+ format = malloc_or_die(strlen(p) + 10);
+ sprintf(format, "%s : %s", "%pf", p);
+ /* remove ending quotes and new line since we will add one too */
+ p = format + strlen(format) - 1;
+ if (*p == '"')
+ *p = 0;
+
+ p -= 2;
+ if (strcmp(p, "\\n") == 0)
+ *p = 0;
+
+ return format;
+}
+
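+/*
+ * Print the event according to its print format: handle backslash
+ * escapes and %-conversions, consuming one print_arg per conversion.
+ */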
+static void pretty_print(void *data, int size, struct event *event)
+{
+ struct print_fmt *print_fmt = &event->print_fmt;
+ struct print_arg *arg = print_fmt->args;
+ struct print_arg *args = NULL;
+ const char *ptr = print_fmt->format;
+ unsigned long long val;
+ struct func_map *func;
+ const char *saveptr;
+ char *bprint_fmt = NULL;
+ char format[32];
+ int show_func;
+ int len;
+ int ls;
+
+ if (event->flags & EVENT_FL_ISFUNC)
+ ptr = " %pF <-- %pF";
+
+ if (event->flags & EVENT_FL_ISBPRINT) {
+ bprint_fmt = get_bprint_format(data, size, event);
+ args = make_bprint_args(bprint_fmt, data, size, event);
+ arg = args;
+ ptr = bprint_fmt;
+ }
+
+ for (; *ptr; ptr++) {
+ ls = 0;
+ if (*ptr == '\\') {
+ ptr++;
+ switch (*ptr) {
+ case 'n':
+ printf("\n");
+ break;
+ case 't':
+ printf("\t");
+ break;
+ case 'r':
+ printf("\r");
+ break;
+ case '\\':
+ printf("\\");
+ break;
+ default:
+ printf("%c", *ptr);
+ break;
+ }
+
+ } else if (*ptr == '%') {
+ saveptr = ptr;
+ show_func = 0;
+ cont_process:
+ ptr++;
+ switch (*ptr) {
+ case '%':
+ printf("%%");
+ break;
+ case 'l':
+ ls++;
+ goto cont_process;
+ case 'L':
+ ls = 2;
+ goto cont_process;
+ case 'z':
+ case 'Z':
+ case '0' ... '9':
+ goto cont_process;
+ case 'p':
+ if (long_size == 4)
+ ls = 1;
+ else
+ ls = 2;
+
+ if (*(ptr+1) == 'F' ||
+ *(ptr+1) == 'f') {
+ ptr++;
+ show_func = *ptr;
+ }
+
+ /* fall through */
+ case 'd':
+ case 'i':
+ case 'x':
+ case 'X':
+ case 'u':
+ if (!arg)
+ die("no argument match");
+
+ len = ((unsigned long)ptr + 1) -
+ (unsigned long)saveptr;
+
+ /* should never happen */
+ if (len > 32)
+ die("bad format!");
+
+ memcpy(format, saveptr, len);
+ format[len] = 0;
+
+ val = eval_num_arg(data, size, event, arg);
+ arg = arg->next;
+
+ if (show_func) {
+ func = find_func(val);
+ if (func) {
+ printf("%s", func->func);
+ if (show_func == 'F')
+ printf("+0x%llx",
+ val - func->addr);
+ break;
+ }
+ }
+ switch (ls) {
+ case 0:
+ printf(format, (int)val);
+ break;
+ case 1:
+ printf(format, (long)val);
+ break;
+ case 2:
+ printf(format, (long long)val);
+ break;
+ default:
+ die("bad count (%d)", ls);
+ }
+ break;
+ case 's':
+ if (!arg)
+ die("no matching argument");
+
+ print_str_arg(data, size, event, arg);
+ arg = arg->next;
+ break;
+ default:
+ printf(">%c<", *ptr);
+
+ }
+ } else
+ printf("%c", *ptr);
+ }
+
+ if (args) {
+ free_args(args);
+ free(bprint_fmt);
+ }
+}
+
+static inline int log10_cpu(int nb)
+{
+ if (nb / 100)
+ return 3;
+ if (nb / 10)
+ return 2;
+ return 1;
+}
+
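+/*
+ * Print the latency-format columns: irqs-off, need-resched,
+ * hard/soft irq context, preempt count and lock depth.
+ */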
+static void print_lat_fmt(void *data, int size __unused)
+{
+ unsigned int lat_flags;
+ unsigned int pc;
+ int lock_depth;
+ int hardirq;
+ int softirq;
+
+ lat_flags = parse_common_flags(data);
+ pc = parse_common_pc(data);
+ lock_depth = parse_common_lock_depth(data);
+
+ hardirq = lat_flags & TRACE_FLAG_HARDIRQ;
+ softirq = lat_flags & TRACE_FLAG_SOFTIRQ;
+
+ printf("%c%c%c",
+ (lat_flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+ (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
+ 'X' : '.',
+ (lat_flags & TRACE_FLAG_NEED_RESCHED) ?
+ 'N' : '.',
+ (hardirq && softirq) ? 'H' :
+ hardirq ? 'h' : softirq ? 's' : '.');
+
+ if (pc)
+ printf("%x", pc);
+ else
+ printf(".");
+
+ if (lock_depth < 0)
+ printf(".");
+ else
+ printf("%d", lock_depth);
+}
+
+/* taken from Linux, written by Frederic Weisbecker */
+static void print_graph_cpu(int cpu)
+{
+ int i;
+ int log10_this = log10_cpu(cpu);
+ int log10_all = log10_cpu(cpus);
+
+
+ /*
+ * Start with a space character - to make it stand out
+ * to the right a bit when trace output is pasted into
+ * email:
+ */
+ printf(" ");
+
+ /*
+ * Tricky - we space the CPU field according to the max
+ * number of online CPUs. On a 2-cpu system it would take
+ * a maximum of 1 digit - on a 128 cpu system it would
+ * take up to 3 digits:
+ */
+ for (i = 0; i < log10_all - log10_this; i++)
+ printf(" ");
+
+ printf("%d) ", cpu);
+}
+
+#define TRACE_GRAPH_PROCINFO_LENGTH 14
+#define TRACE_GRAPH_INDENT 2
+
+static void print_graph_proc(int pid, const char *comm)
+{
+ /* sign + log10(MAX_INT) + '\0' */
+ char pid_str[11];
+ int spaces = 0;
+ int len;
+ int i;
+
+ sprintf(pid_str, "%d", pid);
+
+ /* 1 stands for the "-" character */
+ len = strlen(comm) + strlen(pid_str) + 1;
+
+ if (len < TRACE_GRAPH_PROCINFO_LENGTH)
+ spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
+
+ /* First spaces to align center */
+ for (i = 0; i < spaces / 2; i++)
+ printf(" ");
+
+ printf("%s-%s", comm, pid_str);
+
+ /* Last spaces to align center */
+ for (i = 0; i < spaces - (spaces / 2); i++)
+ printf(" ");
+}
+
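+/*
+ * If the next record is the funcgraph return of the same function on
+ * the same pid, consume it and return it so the entry can be printed
+ * as a leaf ("func();"); otherwise return NULL.
+ */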
+static struct record *
+get_return_for_leaf(int cpu, int cur_pid, unsigned long long cur_func,
+ struct record *next)
+{
+ struct format_field *field;
+ struct event *event;
+ unsigned long val;
+ int type;
+ int pid;
+
+ type = trace_parse_common_type(next->data);
+ event = trace_find_event(type);
+ if (!event)
+ return NULL;
+
+ if (!(event->flags & EVENT_FL_ISFUNCRET))
+ return NULL;
+
+ pid = trace_parse_common_pid(next->data);
+ field = find_field(event, "func");
+ if (!field)
+ die("function return does not have field func");
+
+ val = read_size(next->data + field->offset, field->size);
+
+ if (cur_pid != pid || cur_func != val)
+ return NULL;
+
+ /* this is a leaf, now advance the iterator */
+ return trace_read_data(cpu);
+}
+
+/* Signal an overhead of time execution to the output */
+static void print_graph_overhead(unsigned long long duration)
+{
+ /* Non nested entry or return */
+ if (duration == ~0ULL)
+ return (void)printf(" ");
+
+ /* Duration exceeded 100 usecs */
+ if (duration > 100000ULL)
+ return (void)printf("! ");
+
+ /* Duration exceeded 10 usecs */
+ if (duration > 10000ULL)
+ return (void)printf("+ ");
+
+ printf(" ");
+}
+
+static void print_graph_duration(unsigned long long duration)
+{
+ unsigned long usecs = duration / 1000;
+ unsigned long nsecs_rem = duration % 1000;
+ /* log10(ULONG_MAX) + '\0' */
+ char msecs_str[21];
+ char nsecs_str[5];
+ int len;
+ int i;
+
+ sprintf(msecs_str, "%lu", usecs);
+
+ /* Print msecs */
+ len = printf("%lu", usecs);
+
+ /* Print nsecs (we don't want to exceed 7 digits) */
+ if (len < 7) {
+ snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
+ len += printf(".%s", nsecs_str);
+ }
+
+ printf(" us ");
+
+ /* Print remaining spaces to fit the row's width */
+ for (i = len; i < 7; i++)
+ printf(" ");
+
+ printf("| ");
+}
+
+static void
+print_graph_entry_leaf(struct event *event, void *data, struct record *ret_rec)
+{
+ unsigned long long rettime, calltime;
+ unsigned long long duration, depth;
+ unsigned long long val;
+ struct format_field *field;
+ struct func_map *func;
+ struct event *ret_event;
+ int type;
+ int i;
+
+ type = trace_parse_common_type(ret_rec->data);
+ ret_event = trace_find_event(type);
+
+ field = find_field(ret_event, "rettime");
+ if (!field)
+ die("can't find rettime in return graph");
+ rettime = read_size(ret_rec->data + field->offset, field->size);
+
+ field = find_field(ret_event, "calltime");
+ if (!field)
+ die("can't find rettime in return graph");
+ calltime = read_size(ret_rec->data + field->offset, field->size);
+
+ duration = rettime - calltime;
+
+ /* Overhead */
+ print_graph_overhead(duration);
+
+ /* Duration */
+ print_graph_duration(duration);
+
+ field = find_field(event, "depth");
+ if (!field)
+ die("can't find depth in entry graph");
+ depth = read_size(data + field->offset, field->size);
+
+ /* Function */
+ for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
+ printf(" ");
+
+ field = find_field(event, "func");
+ if (!field)
+ die("can't find func in entry graph");
+ val = read_size(data + field->offset, field->size);
+ func = find_func(val);
+
+ if (func)
+ printf("%s();", func->func);
+ else
+ printf("%llx();", val);
+}
+
+static void print_graph_nested(struct event *event, void *data)
+{
+ struct format_field *field;
+ unsigned long long depth;
+ unsigned long long val;
+ struct func_map *func;
+ int i;
+
+ /* No overhead */
+ print_graph_overhead(-1);
+
+ /* No time */
+ printf(" | ");
+
+ field = find_field(event, "depth");
+ if (!field)
+ die("can't find depth in entry graph");
+ depth = read_size(data + field->offset, field->size);
+
+ /* Function */
+ for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
+ printf(" ");
+
+ field = find_field(event, "func");
+ if (!field)
+ die("can't find func in entry graph");
+ val = read_size(data + field->offset, field->size);
+ func = find_func(val);
+
+ if (func)
+ printf("%s() {", func->func);
+ else
+ printf("%llx() {", val);
+}
+
+static void
+pretty_print_func_ent(void *data, int size, struct event *event,
+ int cpu, int pid, const char *comm,
+ unsigned long secs, unsigned long usecs)
+{
+ struct format_field *field;
+ struct record *rec;
+ void *copy_data;
+ unsigned long val;
+
+ printf("%5lu.%06lu | ", secs, usecs);
+
+ print_graph_cpu(cpu);
+ print_graph_proc(pid, comm);
+
+ printf(" | ");
+
+ if (latency_format) {
+ print_lat_fmt(data, size);
+ printf(" | ");
+ }
+
+ field = find_field(event, "func");
+ if (!field)
+ die("function entry does not have func field");
+
+ val = read_size(data + field->offset, field->size);
+
+ /*
+ * peek_data may unmap the data pointer. Copy it first.
+ */
+ copy_data = malloc_or_die(size);
+ memcpy(copy_data, data, size);
+ data = copy_data;
+
+ rec = trace_peek_data(cpu);
+ if (rec) {
+ rec = get_return_for_leaf(cpu, pid, val, rec);
+ if (rec) {
+ print_graph_entry_leaf(event, data, rec);
+ goto out_free;
+ }
+ }
+ print_graph_nested(event, data);
+out_free:
+ free(data);
+}
+
+static void
+pretty_print_func_ret(void *data, int size, struct event *event,
+ int cpu, int pid, const char *comm,
+ unsigned long secs, unsigned long usecs)
+{
+ unsigned long long rettime, calltime;
+ unsigned long long duration, depth;
+ struct format_field *field;
+ int i;
+
+ printf("%5lu.%06lu | ", secs, usecs);
+
+ print_graph_cpu(cpu);
+ print_graph_proc(pid, comm);
+
+ printf(" | ");
+
+ if (latency_format) {
+ print_lat_fmt(data, size);
+ printf(" | ");
+ }
+
+ field = find_field(event, "rettime");
+ if (!field)
+ die("can't find rettime in return graph");
+ rettime = read_size(data + field->offset, field->size);
+
+ field = find_field(event, "calltime");
+ if (!field)
+ die("can't find calltime in return graph");
+ calltime = read_size(data + field->offset, field->size);
+
+ duration = rettime - calltime;
+
+ /* Overhead */
+ print_graph_overhead(duration);
+
+ /* Duration */
+ print_graph_duration(duration);
+
+ field = find_field(event, "depth");
+ if (!field)
+ die("can't find depth in entry graph");
+ depth = read_size(data + field->offset, field->size);
+
+ /* Function */
+ for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
+ printf(" ");
+
+ printf("}");
+}
+
+static void
+pretty_print_func_graph(void *data, int size, struct event *event,
+ int cpu, int pid, const char *comm,
+ unsigned long secs, unsigned long usecs)
+{
+ if (event->flags & EVENT_FL_ISFUNCENT)
+ pretty_print_func_ent(data, size, event,
+ cpu, pid, comm, secs, usecs);
+ else if (event->flags & EVENT_FL_ISFUNCRET)
+ pretty_print_func_ret(data, size, event,
+ cpu, pid, comm, secs, usecs);
+ printf("\n");
+}
+
+void print_event(int cpu, void *data, int size, unsigned long long nsecs,
+ char *comm)
+{
+ struct event *event;
+ unsigned long secs;
+ unsigned long usecs;
+ int type;
+ int pid;
+
+ secs = nsecs / NSECS_PER_SEC;
+ nsecs -= secs * NSECS_PER_SEC;
+ usecs = nsecs / NSECS_PER_USEC;
+
+ type = trace_parse_common_type(data);
+
+ event = trace_find_event(type);
+ if (!event) {
+ warning("ug! no event found for type %d", type);
+ return;
+ }
+
+ pid = trace_parse_common_pid(data);
+
+ if (event->flags & (EVENT_FL_ISFUNCENT | EVENT_FL_ISFUNCRET))
+ return pretty_print_func_graph(data, size, event, cpu,
+ pid, comm, secs, usecs);
+
+ if (latency_format) {
+ printf("%8.8s-%-5d %3d",
+ comm, pid, cpu);
+ print_lat_fmt(data, size);
+ } else
+ printf("%16s-%-5d [%03d]", comm, pid, cpu);
+
+ printf(" %5lu.%06lu: %s: ", secs, usecs, event->name);
+
+ if (event->flags & EVENT_FL_FAILED) {
+ printf("EVENT '%s' FAILED TO PARSE\n",
+ event->name);
+ return;
+ }
+
+ pretty_print(data, size, event);
+ printf("\n");
+}
+
+static void print_fields(struct print_flag_sym *field)
+{
+ printf("{ %s, %s }", field->value, field->str);
+ if (field->next) {
+ printf(", ");
+ print_fields(field->next);
+ }
+}
+
+static void print_args(struct print_arg *args)
+{
+ int print_paren = 1;
+
+ switch (args->type) {
+ case PRINT_NULL:
+ printf("null");
+ break;
+ case PRINT_ATOM:
+ printf("%s", args->atom.atom);
+ break;
+ case PRINT_FIELD:
+ printf("REC->%s", args->field.name);
+ break;
+ case PRINT_FLAGS:
+ printf("__print_flags(");
+ print_args(args->flags.field);
+ printf(", %s, ", args->flags.delim);
+ print_fields(args->flags.flags);
+ printf(")");
+ break;
+ case PRINT_SYMBOL:
+ printf("__print_symbolic(");
+ print_args(args->symbol.field);
+ printf(", ");
+ print_fields(args->symbol.symbols);
+ printf(")");
+ break;
+ case PRINT_STRING:
+ printf("__get_str(%s)", args->string.string);
+ break;
+ case PRINT_TYPE:
+ printf("(%s)", args->typecast.type);
+ print_args(args->typecast.item);
+ break;
+ case PRINT_OP:
+ if (strcmp(args->op.op, ":") == 0)
+ print_paren = 0;
+ if (print_paren)
+ printf("(");
+ print_args(args->op.left);
+ printf(" %s ", args->op.op);
+ print_args(args->op.right);
+ if (print_paren)
+ printf(")");
+ break;
+ default:
+ /* we should warn... */
+ return;
+ }
+ if (args->next) {
+ printf("\n");
+ print_args(args->next);
+ }
+}
+
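+/*
+ * Parse the format file of a single ftrace event and register it,
+ * flagging the special function/funcgraph/bprint events.
+ */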
+int parse_ftrace_file(char *buf, unsigned long size)
+{
+ struct format_field *field;
+ struct print_arg *arg, **list;
+ struct event *event;
+ int ret;
+
+ init_input_buf(buf, size);
+
+ event = alloc_event();
+ if (!event)
+ return -ENOMEM;
+
+ event->flags |= EVENT_FL_ISFTRACE;
+
+ event->name = event_read_name();
+ if (!event->name)
+ die("failed to read ftrace event name");
+
+ if (strcmp(event->name, "function") == 0)
+ event->flags |= EVENT_FL_ISFUNC;
+
+ else if (strcmp(event->name, "funcgraph_entry") == 0)
+ event->flags |= EVENT_FL_ISFUNCENT;
+
+ else if (strcmp(event->name, "funcgraph_exit") == 0)
+ event->flags |= EVENT_FL_ISFUNCRET;
+
+ else if (strcmp(event->name, "bprint") == 0)
+ event->flags |= EVENT_FL_ISBPRINT;
+
+ event->id = event_read_id();
+ if (event->id < 0)
+ die("failed to read ftrace event id");
+
+ add_event(event);
+
+ ret = event_read_format(event);
+ if (ret < 0)
+ die("failed to read ftrace event format");
+
+ ret = event_read_print(event);
+ if (ret < 0)
+ die("failed to read ftrace event print fmt");
+
+ /* Newer ftrace formats supply their own print arguments */
+ if (ret > 0)
+ return 0;
+ /*
+ * Older ftrace formats carry no print arguments of their own:
+ * the format fields themselves are the arguments, so set them up here.
+ */
+ list = &event->print_fmt.args;
+ for (field = event->format.fields; field; field = field->next) {
+ arg = malloc_or_die(sizeof(*arg));
+ memset(arg, 0, sizeof(*arg));
+ *list = arg;
+ list = &arg->next;
+ arg->type = PRINT_FIELD;
+ arg->field.name = field->name;
+ arg->field.field = field;
+ }
+ return 0;
+}
+
+int parse_event_file(char *buf, unsigned long size, char *sys)
+{
+ struct event *event;
+ int ret;
+
+ init_input_buf(buf, size);
+
+ event = alloc_event();
+ if (!event)
+ return -ENOMEM;
+
+ event->name = event_read_name();
+ if (!event->name)
+ die("failed to read event name");
+
+ event->id = event_read_id();
+ if (event->id < 0)
+ die("failed to read event id");
+
+ ret = event_read_format(event);
+ if (ret < 0) {
+ warning("failed to read event format for %s", event->name);
+ goto event_failed;
+ }
+
+ ret = event_read_print(event);
+ if (ret < 0) {
+ warning("failed to read event print fmt for %s", event->name);
+ goto event_failed;
+ }
+
+ event->system = strdup(sys);
+
+#define PRINT_ARGS 0
+ if (PRINT_ARGS && event->print_fmt.args)
+ print_args(event->print_fmt.args);
+
+ add_event(event);
+ return 0;
+
+ event_failed:
+ event->flags |= EVENT_FL_FAILED;
+ /* still add it even if it failed */
+ add_event(event);
+ return -1;
+}
+
+void parse_set_info(int nr_cpus, int long_sz)
+{
+ cpus = nr_cpus;
+ long_size = long_sz;
+}
+
+int common_pc(struct scripting_context *context)
+{
+ return parse_common_pc(context->event_data);
+}
+
+int common_flags(struct scripting_context *context)
+{
+ return parse_common_flags(context->event_data);
+}
+
+int common_lock_depth(struct scripting_context *context)
+{
+ return parse_common_lock_depth(context->event_data);
+}
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
new file mode 100644
index 00000000..f55cc3a7
--- /dev/null
+++ b/tools/perf/util/trace-event-read.c
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#define _FILE_OFFSET_BITS 64
+
+#include <dirent.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <errno.h>
+
+#include "../perf.h"
+#include "util.h"
+#include "trace-event.h"
+
+static int input_fd;
+
+static int read_page;
+
+int file_bigendian;
+int host_bigendian;
+static int long_size;
+
+static unsigned long page_size;
+
+static ssize_t calc_data_size;
+static bool repipe;
+
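+/*
+ * Read exactly @size bytes from @fd, copying them to stdout when
+ * repiping. Returns @size on success, -1 on error or EOF.
+ */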
+static int do_read(int fd, void *buf, int size)
+{
+ int rsize = size;
+
+ while (size) {
+ int ret = read(fd, buf, size);
+
+ if (ret <= 0)
+ return -1;
+
+ if (repipe) {
+ int retw = write(STDOUT_FILENO, buf, ret);
+
+ if (retw <= 0 || retw != ret)
+ die("repiping input file");
+ }
+
+ size -= ret;
+ buf += ret;
+ }
+
+ return rsize;
+}
+
+static int read_or_die(void *data, int size)
+{
+ int r;
+
+ r = do_read(input_fd, data, size);
+ if (r <= 0)
+ die("reading input file (size expected=%d received=%d)",
+ size, r);
+
+ if (calc_data_size)
+ calc_data_size += r;
+
+ return r;
+}
+
+/* If it fails, the next read will report it */
+static void skip(int size)
+{
+ char buf[BUFSIZ];
+ int r;
+
+ while (size) {
+ r = size > BUFSIZ ? BUFSIZ : size;
+ read_or_die(buf, r);
+ size -= r;
+ }
+}
+
+static unsigned int read4(void)
+{
+ unsigned int data;
+
+ read_or_die(&data, 4);
+ return __data2host4(data);
+}
+
+static unsigned long long read8(void)
+{
+ unsigned long long data;
+
+ read_or_die(&data, 8);
+ return __data2host8(data);
+}
+
+static char *read_string(void)
+{
+ char buf[BUFSIZ];
+ char *str = NULL;
+ int size = 0;
+ off_t r;
+ char c;
+
+ for (;;) {
+ r = read(input_fd, &c, 1);
+ if (r < 0)
+ die("reading input file");
+
+ if (!r)
+ die("no data");
+
+ if (repipe) {
+ int retw = write(STDOUT_FILENO, &c, 1);
+
+ if (retw <= 0 || retw != r)
+ die("repiping input file string");
+ }
+
+ buf[size++] = c;
+
+ if (!c)
+ break;
+ }
+
+ if (calc_data_size)
+ calc_data_size += size;
+
+ str = malloc_or_die(size);
+ memcpy(str, buf, size);
+
+ return str;
+}
+
+static void read_proc_kallsyms(void)
+{
+ unsigned int size;
+ char *buf;
+
+ size = read4();
+ if (!size)
+ return;
+
+ buf = malloc_or_die(size + 1);
+ read_or_die(buf, size);
+ buf[size] = '\0';
+
+ parse_proc_kallsyms(buf, size);
+
+ free(buf);
+}
+
+static void read_ftrace_printk(void)
+{
+ unsigned int size;
+ char *buf;
+
+ size = read4();
+ if (!size)
+ return;
+
+ buf = malloc_or_die(size);
+ read_or_die(buf, size);
+
+ parse_ftrace_printk(buf, size);
+
+ free(buf);
+}
+
+static void read_header_files(void)
+{
+ unsigned long long size;
+ char *header_event;
+ char buf[BUFSIZ];
+
+ read_or_die(buf, 12);
+
+ if (memcmp(buf, "header_page", 12) != 0)
+ die("did not read header page");
+
+ size = read8();
+ skip(size);
+
+ /*
+ * The size field in the page header is of type long; use its size
+ * for long_size, since it reflects the kernel's word size.
+ */
+ long_size = header_page_size_size;
+
+ read_or_die(buf, 13);
+ if (memcmp(buf, "header_event", 13) != 0)
+ die("did not read header event");
+
+ size = read8();
+ header_event = malloc_or_die(size);
+ read_or_die(header_event, size);
+ free(header_event);
+}
+
+static void read_ftrace_file(unsigned long long size)
+{
+ char *buf;
+
+ buf = malloc_or_die(size);
+ read_or_die(buf, size);
+ parse_ftrace_file(buf, size);
+ free(buf);
+}
+
+static void read_event_file(char *sys, unsigned long long size)
+{
+ char *buf;
+
+ buf = malloc_or_die(size);
+ read_or_die(buf, size);
+ parse_event_file(buf, size, sys);
+ free(buf);
+}
+
+static void read_ftrace_files(void)
+{
+ unsigned long long size;
+ int count;
+ int i;
+
+ count = read4();
+
+ for (i = 0; i < count; i++) {
+ size = read8();
+ read_ftrace_file(size);
+ }
+}
+
+static void read_event_files(void)
+{
+ unsigned long long size;
+ char *sys;
+ int systems;
+ int count;
+ int i, x;
+
+ systems = read4();
+
+ for (i = 0; i < systems; i++) {
+ sys = read_string();
+
+ count = read4();
+ for (x = 0; x < count; x++) {
+ size = read8();
+ read_event_file(sys, size);
+ }
+ }
+}
+
+struct cpu_data {
+ unsigned long long offset;
+ unsigned long long size;
+ unsigned long long timestamp;
+ struct record *next;
+ char *page;
+ int cpu;
+ int index;
+ int page_size;
+};
+
+static struct cpu_data *cpu_data;
+
+static void update_cpu_data_index(int cpu)
+{
+ cpu_data[cpu].offset += page_size;
+ cpu_data[cpu].size -= page_size;
+ cpu_data[cpu].index = 0;
+}
+
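+/*
+ * Drop the current page for @cpu and advance to the next one, either
+ * by reading it into the existing buffer (read_page mode) or by
+ * mmapping the next chunk of the trace file.
+ */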
+static void get_next_page(int cpu)
+{
+ off_t save_seek;
+ off_t ret;
+
+ if (!cpu_data[cpu].page)
+ return;
+
+ if (read_page) {
+ if (cpu_data[cpu].size <= page_size) {
+ free(cpu_data[cpu].page);
+ cpu_data[cpu].page = NULL;
+ return;
+ }
+
+ update_cpu_data_index(cpu);
+
+ /* other parts of the code may expect the pointer to not move */
+ save_seek = lseek(input_fd, 0, SEEK_CUR);
+
+ ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET);
+ if (ret == (off_t)-1)
+ die("failed to lseek");
+ ret = read(input_fd, cpu_data[cpu].page, page_size);
+ if (ret < 0)
+ die("failed to read page");
+
+ /* reset the file pointer back */
+ lseek(input_fd, save_seek, SEEK_SET);
+
+ return;
+ }
+
+ munmap(cpu_data[cpu].page, page_size);
+ cpu_data[cpu].page = NULL;
+
+ if (cpu_data[cpu].size <= page_size)
+ return;
+
+ update_cpu_data_index(cpu);
+
+ cpu_data[cpu].page = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE,
+ input_fd, cpu_data[cpu].offset);
+ if (cpu_data[cpu].page == MAP_FAILED)
+ die("failed to mmap cpu %d at offset 0x%llx",
+ cpu, cpu_data[cpu].offset);
+}
+
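+/*
+ * The ring buffer event header packs a 5-bit type_len and a 27-bit
+ * timestamp delta into one 32-bit word; extract them according to the
+ * endianness of the recorded file.
+ */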
+static unsigned int type_len4host(unsigned int type_len_ts)
+{
+ if (file_bigendian)
+ return (type_len_ts >> 27) & ((1 << 5) - 1);
+ else
+ return type_len_ts & ((1 << 5) - 1);
+}
+
+static unsigned int ts4host(unsigned int type_len_ts)
+{
+ if (file_bigendian)
+ return type_len_ts & ((1 << 27) - 1);
+ else
+ return type_len_ts >> 5;
+}
+
+static int calc_index(void *ptr, int cpu)
+{
+ return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
+}
+
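+/*
+ * Return the next record for @cpu without consuming it; the result is
+ * cached in cpu_data[cpu].next so a later trace_read_data() picks it up.
+ */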
+struct record *trace_peek_data(int cpu)
+{
+ struct record *data;
+ void *page = cpu_data[cpu].page;
+ int idx = cpu_data[cpu].index;
+ void *ptr = page + idx;
+ unsigned long long extend;
+ unsigned int type_len_ts;
+ unsigned int type_len;
+ unsigned int delta;
+ unsigned int length = 0;
+
+ if (cpu_data[cpu].next)
+ return cpu_data[cpu].next;
+
+ if (!page)
+ return NULL;
+
+ if (!idx) {
+ /* FIXME: handle header page */
+ if (header_page_ts_size != 8)
+ die("expected a long long type for timestamp");
+ cpu_data[cpu].timestamp = data2host8(ptr);
+ ptr += 8;
+ switch (header_page_size_size) {
+ case 4:
+ cpu_data[cpu].page_size = data2host4(ptr);
+ ptr += 4;
+ break;
+ case 8:
+ cpu_data[cpu].page_size = data2host8(ptr);
+ ptr += 8;
+ break;
+ default:
+ die("bad long size");
+ }
+ ptr = cpu_data[cpu].page + header_page_data_offset;
+ }
+
+read_again:
+ idx = calc_index(ptr, cpu);
+
+ if (idx >= cpu_data[cpu].page_size) {
+ get_next_page(cpu);
+ return trace_peek_data(cpu);
+ }
+
+ type_len_ts = data2host4(ptr);
+ ptr += 4;
+
+ type_len = type_len4host(type_len_ts);
+ delta = ts4host(type_len_ts);
+
+ switch (type_len) {
+ case RINGBUF_TYPE_PADDING:
+ if (!delta)
+ die("error, hit unexpected end of page");
+ length = data2host4(ptr);
+ ptr += 4;
+ length *= 4;
+ ptr += length;
+ goto read_again;
+
+ case RINGBUF_TYPE_TIME_EXTEND:
+ extend = data2host4(ptr);
+ ptr += 4;
+ extend <<= TS_SHIFT;
+ extend += delta;
+ cpu_data[cpu].timestamp += extend;
+ goto read_again;
+
+ case RINGBUF_TYPE_TIME_STAMP:
+ ptr += 12;
+ break;
+ case 0:
+ length = data2host4(ptr);
+ ptr += 4;
+ die("here! length=%d", length);
+ break;
+ default:
+ length = type_len * 4;
+ break;
+ }
+
+ cpu_data[cpu].timestamp += delta;
+
+ data = malloc_or_die(sizeof(*data));
+ memset(data, 0, sizeof(*data));
+
+ data->ts = cpu_data[cpu].timestamp;
+ data->size = length;
+ data->data = ptr;
+ ptr += length;
+
+ cpu_data[cpu].index = calc_index(ptr, cpu);
+ cpu_data[cpu].next = data;
+
+ return data;
+}
+
+struct record *trace_read_data(int cpu)
+{
+ struct record *data;
+
+ data = trace_peek_data(cpu);
+ cpu_data[cpu].next = NULL;
+
+ return data;
+}
+
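+/*
+ * Read the tracing metadata from the input stream: magic, "tracing"
+ * tag, version, endianness, long size and page size, followed by the
+ * event format files, kallsyms and ftrace printk formats.
+ */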
+ssize_t trace_report(int fd, bool __repipe)
+{
+ char buf[BUFSIZ];
+ char test[] = { 23, 8, 68 };
+ char *version;
+ int show_version = 0;
+ int show_funcs = 0;
+ int show_printk = 0;
+ ssize_t size;
+
+ calc_data_size = 1;
+ repipe = __repipe;
+
+ input_fd = fd;
+
+ read_or_die(buf, 3);
+ if (memcmp(buf, test, 3) != 0)
+ die("no trace data in the file");
+
+ read_or_die(buf, 7);
+ if (memcmp(buf, "tracing", 7) != 0)
+ die("not a trace file (missing 'tracing' tag)");
+
+ version = read_string();
+ if (show_version)
+ printf("version = %s\n", version);
+ free(version);
+
+ read_or_die(buf, 1);
+ file_bigendian = buf[0];
+ host_bigendian = bigendian();
+
+ read_or_die(buf, 1);
+ long_size = buf[0];
+
+ page_size = read4();
+
+ read_header_files();
+
+ read_ftrace_files();
+ read_event_files();
+ read_proc_kallsyms();
+ read_ftrace_printk();
+
+ size = calc_data_size - 1;
+ calc_data_size = 0;
+ repipe = false;
+
+ if (show_funcs) {
+ print_funcs();
+ return size;
+ }
+ if (show_printk) {
+ print_printk();
+ return size;
+ }
+
+ return size;
+}
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
new file mode 100644
index 00000000..f7af2fca
--- /dev/null
+++ b/tools/perf/util/trace-event-scripting.c
@@ -0,0 +1,167 @@
+/*
+ * trace-event-scripting. Scripting engine common and initialization code.
+ *
+ * Copyright (C) 2009-2010 Tom Zanussi <tzanussi@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+#include "../perf.h"
+#include "util.h"
+#include "trace-event.h"
+
+struct scripting_context *scripting_context;
+
+static int stop_script_unsupported(void)
+{
+ return 0;
+}
+
+static void process_event_unsupported(int cpu __unused,
+ void *data __unused,
+ int size __unused,
+ unsigned long long nsecs __unused,
+ char *comm __unused)
+{
+}
+
+static void print_python_unsupported_msg(void)
+{
+ fprintf(stderr, "Python scripting not supported."
+ " Install libpython and rebuild perf to enable it.\n"
+ "For example:\n # apt-get install python-dev (ubuntu)"
+ "\n # yum install python-devel (Fedora)"
+ "\n etc.\n");
+}
+
+static int python_start_script_unsupported(const char *script __unused,
+ int argc __unused,
+ const char **argv __unused)
+{
+ print_python_unsupported_msg();
+
+ return -1;
+}
+
+static int python_generate_script_unsupported(const char *outfile __unused)
+{
+ print_python_unsupported_msg();
+
+ return -1;
+}
+
+struct scripting_ops python_scripting_unsupported_ops = {
+ .name = "Python",
+ .start_script = python_start_script_unsupported,
+ .stop_script = stop_script_unsupported,
+ .process_event = process_event_unsupported,
+ .generate_script = python_generate_script_unsupported,
+};
+
+static void register_python_scripting(struct scripting_ops *scripting_ops)
+{
+ int err;
+ err = script_spec_register("Python", scripting_ops);
+ if (err)
+ die("error registering Python script extension");
+
+ err = script_spec_register("py", scripting_ops);
+ if (err)
+ die("error registering py script extension");
+
+ scripting_context = malloc(sizeof(struct scripting_context));
+}
+
+#ifdef NO_LIBPYTHON
+void setup_python_scripting(void)
+{
+ register_python_scripting(&python_scripting_unsupported_ops);
+}
+#else
+extern struct scripting_ops python_scripting_ops;
+
+void setup_python_scripting(void)
+{
+ register_python_scripting(&python_scripting_ops);
+}
+#endif
+
+static void print_perl_unsupported_msg(void)
+{
+ fprintf(stderr, "Perl scripting not supported."
+ " Install libperl and rebuild perf to enable it.\n"
+ "For example:\n # apt-get install libperl-dev (ubuntu)"
+ "\n # yum install 'perl(ExtUtils::Embed)' (Fedora)"
+ "\n etc.\n");
+}
+
+static int perl_start_script_unsupported(const char *script __unused,
+ int argc __unused,
+ const char **argv __unused)
+{
+ print_perl_unsupported_msg();
+
+ return -1;
+}
+
+static int perl_generate_script_unsupported(const char *outfile __unused)
+{
+ print_perl_unsupported_msg();
+
+ return -1;
+}
+
+struct scripting_ops perl_scripting_unsupported_ops = {
+ .name = "Perl",
+ .start_script = perl_start_script_unsupported,
+ .stop_script = stop_script_unsupported,
+ .process_event = process_event_unsupported,
+ .generate_script = perl_generate_script_unsupported,
+};
+
+static void register_perl_scripting(struct scripting_ops *scripting_ops)
+{
+ int err;
+ err = script_spec_register("Perl", scripting_ops);
+ if (err)
+ die("error registering Perl script extension");
+
+ err = script_spec_register("pl", scripting_ops);
+ if (err)
+ die("error registering pl script extension");
+
+ scripting_context = malloc(sizeof(struct scripting_context));
+}
+
+#ifdef NO_LIBPERL
+void setup_perl_scripting(void)
+{
+ register_perl_scripting(&perl_scripting_unsupported_ops);
+}
+#else
+extern struct scripting_ops perl_scripting_ops;
+
+void setup_perl_scripting(void)
+{
+ register_perl_scripting(&perl_scripting_ops);
+}
+#endif
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
new file mode 100644
index 00000000..b3e86b1e
--- /dev/null
+++ b/tools/perf/util/trace-event.h
@@ -0,0 +1,300 @@
+#ifndef __PERF_TRACE_EVENTS_H
+#define __PERF_TRACE_EVENTS_H
+
+#include <stdbool.h>
+#include "parse-events.h"
+
+#define __unused __attribute__((unused))
+
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (page_size - 1)
+#endif
+
+enum {
+ RINGBUF_TYPE_PADDING = 29,
+ RINGBUF_TYPE_TIME_EXTEND = 30,
+ RINGBUF_TYPE_TIME_STAMP = 31,
+};
+
+#ifndef TS_SHIFT
+#define TS_SHIFT 27
+#endif
+
+#define NSECS_PER_SEC 1000000000ULL
+#define NSECS_PER_USEC 1000ULL
+
+enum format_flags {
+ FIELD_IS_ARRAY = 1,
+ FIELD_IS_POINTER = 2,
+ FIELD_IS_SIGNED = 4,
+ FIELD_IS_STRING = 8,
+ FIELD_IS_DYNAMIC = 16,
+ FIELD_IS_FLAG = 32,
+ FIELD_IS_SYMBOLIC = 64,
+};
+
+struct format_field {
+ struct format_field *next;
+ char *type;
+ char *name;
+ int offset;
+ int size;
+ unsigned long flags;
+};
+
+struct format {
+ int nr_common;
+ int nr_fields;
+ struct format_field *common_fields;
+ struct format_field *fields;
+};
+
+struct print_arg_atom {
+ char *atom;
+};
+
+struct print_arg_string {
+ char *string;
+ int offset;
+};
+
+struct print_arg_field {
+ char *name;
+ struct format_field *field;
+};
+
+struct print_flag_sym {
+ struct print_flag_sym *next;
+ char *value;
+ char *str;
+};
+
+struct print_arg_typecast {
+ char *type;
+ struct print_arg *item;
+};
+
+struct print_arg_flags {
+ struct print_arg *field;
+ char *delim;
+ struct print_flag_sym *flags;
+};
+
+struct print_arg_symbol {
+ struct print_arg *field;
+ struct print_flag_sym *symbols;
+};
+
+struct print_arg;
+
+struct print_arg_op {
+ char *op;
+ int prio;
+ struct print_arg *left;
+ struct print_arg *right;
+};
+
+struct print_arg_func {
+ char *name;
+ struct print_arg *args;
+};
+
+enum print_arg_type {
+ PRINT_NULL,
+ PRINT_ATOM,
+ PRINT_FIELD,
+ PRINT_FLAGS,
+ PRINT_SYMBOL,
+ PRINT_TYPE,
+ PRINT_STRING,
+ PRINT_OP,
+};
+
+struct print_arg {
+ struct print_arg *next;
+ enum print_arg_type type;
+ union {
+ struct print_arg_atom atom;
+ struct print_arg_field field;
+ struct print_arg_typecast typecast;
+ struct print_arg_flags flags;
+ struct print_arg_symbol symbol;
+ struct print_arg_func func;
+ struct print_arg_string string;
+ struct print_arg_op op;
+ };
+};
+
+struct print_fmt {
+ char *format;
+ struct print_arg *args;
+};
+
+struct event {
+ struct event *next;
+ char *name;
+ int id;
+ int flags;
+ struct format format;
+ struct print_fmt print_fmt;
+ char *system;
+};
+
+enum {
+ EVENT_FL_ISFTRACE = 0x01,
+ EVENT_FL_ISPRINT = 0x02,
+ EVENT_FL_ISBPRINT = 0x04,
+ EVENT_FL_ISFUNC = 0x08,
+ EVENT_FL_ISFUNCENT = 0x10,
+ EVENT_FL_ISFUNCRET = 0x20,
+
+ EVENT_FL_FAILED = 0x80000000
+};
+
+struct record {
+ unsigned long long ts;
+ int size;
+ void *data;
+};
+
+struct record *trace_peek_data(int cpu);
+struct record *trace_read_data(int cpu);
+
+void parse_set_info(int nr_cpus, int long_sz);
+
+ssize_t trace_report(int fd, bool repipe);
+
+void *malloc_or_die(unsigned int size);
+
+void parse_cmdlines(char *file, int size);
+void parse_proc_kallsyms(char *file, unsigned int size);
+void parse_ftrace_printk(char *file, unsigned int size);
+
+void print_funcs(void);
+void print_printk(void);
+
+int parse_ftrace_file(char *buf, unsigned long size);
+int parse_event_file(char *buf, unsigned long size, char *sys);
+void print_event(int cpu, void *data, int size, unsigned long long nsecs,
+ char *comm);
+
+extern int file_bigendian;
+extern int host_bigendian;
+
+int bigendian(void);
+
+static inline unsigned short __data2host2(unsigned short data)
+{
+ unsigned short swap;
+
+ if (host_bigendian == file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 8) |
+ ((data & (0xffULL << 8)) >> 8);
+
+ return swap;
+}
+
+static inline unsigned int __data2host4(unsigned int data)
+{
+ unsigned int swap;
+
+ if (host_bigendian == file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 24) |
+ ((data & (0xffULL << 8)) << 8) |
+ ((data & (0xffULL << 16)) >> 8) |
+ ((data & (0xffULL << 24)) >> 24);
+
+ return swap;
+}
+
+static inline unsigned long long __data2host8(unsigned long long data)
+{
+ unsigned long long swap;
+
+ if (host_bigendian == file_bigendian)
+ return data;
+
+ swap = ((data & 0xffULL) << 56) |
+ ((data & (0xffULL << 8)) << 40) |
+ ((data & (0xffULL << 16)) << 24) |
+ ((data & (0xffULL << 24)) << 8) |
+ ((data & (0xffULL << 32)) >> 8) |
+ ((data & (0xffULL << 40)) >> 24) |
+ ((data & (0xffULL << 48)) >> 40) |
+ ((data & (0xffULL << 56)) >> 56);
+
+ return swap;
+}
+
+#define data2host2(ptr) __data2host2(*(unsigned short *)ptr)
+#define data2host4(ptr) __data2host4(*(unsigned int *)ptr)
+#define data2host8(ptr) ({ \
+ unsigned long long __val; \
+ \
+ memcpy(&__val, (ptr), sizeof(unsigned long long)); \
+ __data2host8(__val); \
+})
+
+extern int header_page_ts_offset;
+extern int header_page_ts_size;
+extern int header_page_size_offset;
+extern int header_page_size_size;
+extern int header_page_data_offset;
+extern int header_page_data_size;
+
+extern bool latency_format;
+
+int trace_parse_common_type(void *data);
+int trace_parse_common_pid(void *data);
+int parse_common_pc(void *data);
+int parse_common_flags(void *data);
+int parse_common_lock_depth(void *data);
+struct event *trace_find_event(int id);
+struct event *trace_find_next_event(struct event *event);
+unsigned long long read_size(void *ptr, int size);
+unsigned long long
+raw_field_value(struct event *event, const char *name, void *data);
+void *raw_field_ptr(struct event *event, const char *name, void *data);
+unsigned long long eval_flag(const char *flag);
+
+int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events);
+ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs,
+ int nb_events);
+
+/* taken from kernel/trace/trace.h */
+enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+ TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
+ TRACE_FLAG_NEED_RESCHED = 0x04,
+ TRACE_FLAG_HARDIRQ = 0x08,
+ TRACE_FLAG_SOFTIRQ = 0x10,
+};
+
+struct scripting_ops {
+ const char *name;
+ int (*start_script) (const char *script, int argc, const char **argv);
+ int (*stop_script) (void);
+ void (*process_event) (int cpu, void *data, int size,
+ unsigned long long nsecs, char *comm);
+ int (*generate_script) (const char *outfile);
+};
+
+int script_spec_register(const char *spec, struct scripting_ops *ops);
+
+void setup_perl_scripting(void);
+void setup_python_scripting(void);
+
+struct scripting_context {
+ void *event_data;
+};
+
+int common_pc(struct scripting_context *context);
+int common_flags(struct scripting_context *context);
+int common_lock_depth(struct scripting_context *context);
+
+#endif /* __PERF_TRACE_EVENTS_H */
diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h
new file mode 100644
index 00000000..7d6b8331
--- /dev/null
+++ b/tools/perf/util/types.h
@@ -0,0 +1,17 @@
+#ifndef __PERF_TYPES_H
+#define __PERF_TYPES_H
+
+/*
+ * We define u64 as unsigned long long for every architecture
+ * so that we can print it with %Lx without getting warnings.
+ */
+typedef unsigned long long u64;
+typedef signed long long s64;
+typedef unsigned int u32;
+typedef signed int s32;
+typedef unsigned short u16;
+typedef signed short s16;
+typedef unsigned char u8;
+typedef signed char s8;
+
+#endif /* __PERF_TYPES_H */
diff --git a/tools/perf/util/ui/browser.c b/tools/perf/util/ui/browser.c
new file mode 100644
index 00000000..66f2d583
--- /dev/null
+++ b/tools/perf/util/ui/browser.c
@@ -0,0 +1,329 @@
+#define _GNU_SOURCE
+#include <stdio.h>
+#undef _GNU_SOURCE
+/*
+ * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks
+ * the build if it isn't defined. Use the equivalent one that glibc
+ * has on features.h.
+ */
+#include <features.h>
+#ifndef HAVE_LONG_LONG
+#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
+#endif
+#include <slang.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <stdlib.h>
+#include <sys/ttydefaults.h>
+#include "browser.h"
+#include "helpline.h"
+#include "../color.h"
+#include "../util.h"
+
+#if SLANG_VERSION < 20104
+#define sltt_set_color(obj, name, fg, bg) \
+ SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg)
+#else
+#define sltt_set_color SLtt_set_color
+#endif
+
+newtComponent newt_form__new(void);
+
+int ui_browser__percent_color(double percent, bool current)
+{
+ if (current)
+ return HE_COLORSET_SELECTED;
+ if (percent >= MIN_RED)
+ return HE_COLORSET_TOP;
+ if (percent >= MIN_GREEN)
+ return HE_COLORSET_MEDIUM;
+ return HE_COLORSET_NORMAL;
+}
+
+void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence)
+{
+ struct list_head *head = self->entries;
+ struct list_head *pos;
+
+ switch (whence) {
+ case SEEK_SET:
+ pos = head->next;
+ break;
+ case SEEK_CUR:
+ pos = self->top;
+ break;
+ case SEEK_END:
+ pos = head->prev;
+ break;
+ default:
+ return;
+ }
+
+ if (offset > 0) {
+ while (offset-- != 0)
+ pos = pos->next;
+ } else {
+ while (offset++ != 0)
+ pos = pos->prev;
+ }
+
+ self->top = pos;
+}
+
+void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence)
+{
+ struct rb_root *root = self->entries;
+ struct rb_node *nd;
+
+ switch (whence) {
+ case SEEK_SET:
+ nd = rb_first(root);
+ break;
+ case SEEK_CUR:
+ nd = self->top;
+ break;
+ case SEEK_END:
+ nd = rb_last(root);
+ break;
+ default:
+ return;
+ }
+
+ if (offset > 0) {
+ while (offset-- != 0)
+ nd = rb_next(nd);
+ } else {
+ while (offset++ != 0)
+ nd = rb_prev(nd);
+ }
+
+ self->top = nd;
+}
+
+unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
+{
+ struct rb_node *nd;
+ int row = 0;
+
+ if (self->top == NULL)
+ self->top = rb_first(self->entries);
+
+ nd = self->top;
+
+ while (nd != NULL) {
+ SLsmg_gotorc(self->y + row, self->x);
+ self->write(self, nd, row);
+ if (++row == self->height)
+ break;
+ nd = rb_next(nd);
+ }
+
+ return row;
+}
+
+bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row)
+{
+ return self->top_idx + row == self->index;
+}
+
+void ui_browser__refresh_dimensions(struct ui_browser *self)
+{
+ int cols, rows;
+ newtGetScreenSize(&cols, &rows);
+
+ if (self->width > cols - 4)
+ self->width = cols - 4;
+ self->height = rows - 5;
+ if (self->height > self->nr_entries)
+ self->height = self->nr_entries;
+ self->y = (rows - self->height) / 2;
+ self->x = (cols - self->width) / 2;
+}
+
+void ui_browser__reset_index(struct ui_browser *self)
+{
+ self->index = self->top_idx = 0;
+ self->seek(self, 0, SEEK_SET);
+}
+
+int ui_browser__show(struct ui_browser *self, const char *title,
+ const char *helpline, ...)
+{
+ va_list ap;
+
+ if (self->form != NULL) {
+ newtFormDestroy(self->form);
+ newtPopWindow();
+ }
+ ui_browser__refresh_dimensions(self);
+ newtCenteredWindow(self->width, self->height, title);
+ self->form = newt_form__new();
+ if (self->form == NULL)
+ return -1;
+
+ self->sb = newtVerticalScrollbar(self->width, 0, self->height,
+ HE_COLORSET_NORMAL,
+ HE_COLORSET_SELECTED);
+ if (self->sb == NULL)
+ return -1;
+
+ newtFormAddHotKey(self->form, NEWT_KEY_UP);
+ newtFormAddHotKey(self->form, NEWT_KEY_DOWN);
+ newtFormAddHotKey(self->form, NEWT_KEY_PGUP);
+ newtFormAddHotKey(self->form, NEWT_KEY_PGDN);
+ newtFormAddHotKey(self->form, NEWT_KEY_HOME);
+ newtFormAddHotKey(self->form, NEWT_KEY_END);
+ newtFormAddHotKey(self->form, ' ');
+ newtFormAddComponent(self->form, self->sb);
+
+ va_start(ap, helpline);
+ ui_helpline__vpush(helpline, ap);
+ va_end(ap);
+ return 0;
+}
+
+void ui_browser__hide(struct ui_browser *self)
+{
+ newtFormDestroy(self->form);
+ newtPopWindow();
+ self->form = NULL;
+ ui_helpline__pop();
+}
+
+int ui_browser__refresh(struct ui_browser *self)
+{
+ int row;
+
+ newtScrollbarSet(self->sb, self->index, self->nr_entries - 1);
+ row = self->refresh(self);
+ SLsmg_set_color(HE_COLORSET_NORMAL);
+ SLsmg_fill_region(self->y + row, self->x,
+ self->height - row, self->width, ' ');
+
+ return 0;
+}
+
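+/*
+ * Main browser loop: run the newt form and translate navigation keys
+ * (arrows, page up/down, home/end) into index/seek updates, returning
+ * any other hotkey to the caller.
+ */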
+int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es)
+{
+ if (ui_browser__refresh(self) < 0)
+ return -1;
+
+ while (1) {
+ off_t offset;
+
+ newtFormRun(self->form, es);
+
+ if (es->reason != NEWT_EXIT_HOTKEY)
+ break;
+ if (is_exit_key(es->u.key))
+ return es->u.key;
+ switch (es->u.key) {
+ case NEWT_KEY_DOWN:
+ if (self->index == self->nr_entries - 1)
+ break;
+ ++self->index;
+ if (self->index == self->top_idx + self->height) {
+ ++self->top_idx;
+ self->seek(self, +1, SEEK_CUR);
+ }
+ break;
+ case NEWT_KEY_UP:
+ if (self->index == 0)
+ break;
+ --self->index;
+ if (self->index < self->top_idx) {
+ --self->top_idx;
+ self->seek(self, -1, SEEK_CUR);
+ }
+ break;
+ case NEWT_KEY_PGDN:
+ case ' ':
+ if (self->top_idx + self->height > self->nr_entries - 1)
+ break;
+
+ offset = self->height;
+ if (self->index + offset > self->nr_entries - 1)
+ offset = self->nr_entries - 1 - self->index;
+ self->index += offset;
+ self->top_idx += offset;
+ self->seek(self, +offset, SEEK_CUR);
+ break;
+ case NEWT_KEY_PGUP:
+ if (self->top_idx == 0)
+ break;
+
+ if (self->top_idx < self->height)
+ offset = self->top_idx;
+ else
+ offset = self->height;
+
+ self->index -= offset;
+ self->top_idx -= offset;
+ self->seek(self, -offset, SEEK_CUR);
+ break;
+ case NEWT_KEY_HOME:
+ ui_browser__reset_index(self);
+ break;
+ case NEWT_KEY_END:
+ offset = self->height - 1;
+ if (offset >= self->nr_entries)
+ offset = self->nr_entries - 1;
+
+ self->index = self->nr_entries - 1;
+ self->top_idx = self->index - offset;
+ self->seek(self, -offset, SEEK_END);
+ break;
+ default:
+ return es->u.key;
+ }
+ if (ui_browser__refresh(self) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+unsigned int ui_browser__list_head_refresh(struct ui_browser *self)
+{
+ struct list_head *pos;
+ struct list_head *head = self->entries;
+ int row = 0;
+
+ if (self->top == NULL || self->top == self->entries)
+ self->top = head->next;
+
+ pos = self->top;
+
+ list_for_each_from(pos, head) {
+ SLsmg_gotorc(self->y + row, self->x);
+ self->write(self, pos, row);
+ if (++row == self->height)
+ break;
+ }
+
+ return row;
+}
+
+static struct newtPercentTreeColors {
+ const char *topColorFg, *topColorBg;
+ const char *mediumColorFg, *mediumColorBg;
+ const char *normalColorFg, *normalColorBg;
+ const char *selColorFg, *selColorBg;
+ const char *codeColorFg, *codeColorBg;
+} defaultPercentTreeColors = {
+ "red", "lightgray",
+ "green", "lightgray",
+ "black", "lightgray",
+ "lightgray", "magenta",
+ "blue", "lightgray",
+};
+
+void ui_browser__init(void)
+{
+ struct newtPercentTreeColors *c = &defaultPercentTreeColors;
+
+ sltt_set_color(HE_COLORSET_TOP, NULL, c->topColorFg, c->topColorBg);
+ sltt_set_color(HE_COLORSET_MEDIUM, NULL, c->mediumColorFg, c->mediumColorBg);
+ sltt_set_color(HE_COLORSET_NORMAL, NULL, c->normalColorFg, c->normalColorBg);
+ sltt_set_color(HE_COLORSET_SELECTED, NULL, c->selColorFg, c->selColorBg);
+ sltt_set_color(HE_COLORSET_CODE, NULL, c->codeColorFg, c->codeColorBg);
+}
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/util/ui/browser.h
new file mode 100644
index 00000000..0b9f8292
--- /dev/null
+++ b/tools/perf/util/ui/browser.h
@@ -0,0 +1,46 @@
+#ifndef _PERF_UI_BROWSER_H_
+#define _PERF_UI_BROWSER_H_ 1
+
+#include <stdbool.h>
+#include <newt.h>
+#include <sys/types.h>
+#include "../types.h"
+
+#define HE_COLORSET_TOP 50
+#define HE_COLORSET_MEDIUM 51
+#define HE_COLORSET_NORMAL 52
+#define HE_COLORSET_SELECTED 53
+#define HE_COLORSET_CODE 54
+
+struct ui_browser {
+ newtComponent form, sb;
+ u64 index, top_idx;
+ void *top, *entries;
+ u16 y, x, width, height;
+ void *priv;
+ unsigned int (*refresh)(struct ui_browser *self);
+ void (*write)(struct ui_browser *self, void *entry, int row);
+ void (*seek)(struct ui_browser *self, off_t offset, int whence);
+ u32 nr_entries;
+};
+
+
+int ui_browser__percent_color(double percent, bool current);
+bool ui_browser__is_current_entry(struct ui_browser *self, unsigned row);
+void ui_browser__refresh_dimensions(struct ui_browser *self);
+void ui_browser__reset_index(struct ui_browser *self);
+
+int ui_browser__show(struct ui_browser *self, const char *title,
+ const char *helpline, ...);
+void ui_browser__hide(struct ui_browser *self);
+int ui_browser__refresh(struct ui_browser *self);
+int ui_browser__run(struct ui_browser *self, struct newtExitStruct *es);
+
+void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence);
+unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self);
+
+void ui_browser__list_head_seek(struct ui_browser *self, off_t offset, int whence);
+unsigned int ui_browser__list_head_refresh(struct ui_browser *self);
+
+void ui_browser__init(void);
+#endif /* _PERF_UI_BROWSER_H_ */
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
new file mode 100644
index 00000000..a90273e6
--- /dev/null
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -0,0 +1,241 @@
+#include "../browser.h"
+#include "../helpline.h"
+#include "../libslang.h"
+#include "../../hist.h"
+#include "../../sort.h"
+#include "../../symbol.h"
+
+static void ui__error_window(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ newtWinMessagev((char *)"Error", (char *)"Ok", (char *)fmt, ap);
+ va_end(ap);
+}
+
+struct annotate_browser {
+ struct ui_browser b;
+ struct rb_root entries;
+ struct rb_node *curr_hot;
+};
+
+struct objdump_line_rb_node {
+ struct rb_node rb_node;
+ double percent;
+ u32 idx;
+};
+
+static inline
+struct objdump_line_rb_node *objdump_line__rb(struct objdump_line *self)
+{
+ return (struct objdump_line_rb_node *)(self + 1);
+}
+
+static void annotate_browser__write(struct ui_browser *self, void *entry, int row)
+{
+ struct objdump_line *ol = rb_entry(entry, struct objdump_line, node);
+ bool current_entry = ui_browser__is_current_entry(self, row);
+ int width = self->width;
+
+ if (ol->offset != -1) {
+ struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
+ int color = ui_browser__percent_color(olrb->percent, current_entry);
+ SLsmg_set_color(color);
+ slsmg_printf(" %7.2f ", olrb->percent);
+ if (!current_entry)
+ SLsmg_set_color(HE_COLORSET_CODE);
+ } else {
+ int color = ui_browser__percent_color(0, current_entry);
+ SLsmg_set_color(color);
+ slsmg_write_nstring(" ", 9);
+ }
+
+ SLsmg_write_char(':');
+ slsmg_write_nstring(" ", 8);
+ if (!*ol->line)
+ slsmg_write_nstring(" ", width - 18);
+ else
+ slsmg_write_nstring(ol->line, width - 18);
+}
+
+static double objdump_line__calc_percent(struct objdump_line *self,
+ struct list_head *head,
+ struct symbol *sym)
+{
+ double percent = 0.0;
+
+ if (self->offset != -1) {
+ int len = sym->end - sym->start;
+ unsigned int hits = 0;
+ struct sym_priv *priv = symbol__priv(sym);
+ struct sym_ext *sym_ext = priv->ext;
+ struct sym_hist *h = priv->hist;
+ s64 offset = self->offset;
+ struct objdump_line *next = objdump__get_next_ip_line(head, self);
+
+
+ while (offset < (s64)len &&
+ (next == NULL || offset < next->offset)) {
+ if (sym_ext) {
+ percent += sym_ext[offset].percent;
+ } else
+ hits += h->ip[offset];
+
+ ++offset;
+ }
+
+ if (sym_ext == NULL && h->sum)
+ percent = 100.0 * hits / h->sum;
+ }
+
+ return percent;
+}
+
+static void objdump__insert_line(struct rb_root *self,
+ struct objdump_line_rb_node *line)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ struct objdump_line_rb_node *l;
+
+ while (*p != NULL) {
+ parent = *p;
+ l = rb_entry(parent, struct objdump_line_rb_node, rb_node);
+ if (line->percent < l->percent)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&line->rb_node, parent, p);
+ rb_insert_color(&line->rb_node, self);
+}
+
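+/*
+ * Scroll the annotate browser so that @nd (a hot line) lands roughly
+ * in the middle of the screen and remember it as the current hot entry.
+ */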
+static void annotate_browser__set_top(struct annotate_browser *self,
+ struct rb_node *nd)
+{
+ struct objdump_line_rb_node *rbpos;
+ struct objdump_line *pos;
+ unsigned back;
+
+ ui_browser__refresh_dimensions(&self->b);
+ back = self->b.height / 2;
+ rbpos = rb_entry(nd, struct objdump_line_rb_node, rb_node);
+ pos = ((struct objdump_line *)rbpos) - 1;
+ self->b.top_idx = self->b.index = rbpos->idx;
+
+ while (self->b.top_idx != 0 && back != 0) {
+ pos = list_entry(pos->node.prev, struct objdump_line, node);
+
+ --self->b.top_idx;
+ --back;
+ }
+
+ self->b.top = pos;
+ self->curr_hot = nd;
+}
+
+static int annotate_browser__run(struct annotate_browser *self,
+ struct newtExitStruct *es)
+{
+ struct rb_node *nd;
+ struct hist_entry *he = self->b.priv;
+
+ if (ui_browser__show(&self->b, he->ms.sym->name,
+ "<- or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
+ return -1;
+
+ newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
+ newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
+
+ nd = self->curr_hot;
+ if (nd) {
+ newtFormAddHotKey(self->b.form, NEWT_KEY_TAB);
+ newtFormAddHotKey(self->b.form, NEWT_KEY_UNTAB);
+ }
+
+ while (1) {
+ ui_browser__run(&self->b, es);
+
+ if (es->reason != NEWT_EXIT_HOTKEY)
+ break;
+
+ switch (es->u.key) {
+ case NEWT_KEY_TAB:
+ nd = rb_prev(nd);
+ if (nd == NULL)
+ nd = rb_last(&self->entries);
+ annotate_browser__set_top(self, nd);
+ break;
+ case NEWT_KEY_UNTAB:
+ nd = rb_next(nd);
+ if (nd == NULL)
+ nd = rb_first(&self->entries);
+ annotate_browser__set_top(self, nd);
+ break;
+ default:
+ goto out;
+ }
+ }
+out:
+ ui_browser__hide(&self->b);
+ return es->u.key;
+}
+
+int hist_entry__tui_annotate(struct hist_entry *self)
+{
+ struct newtExitStruct es;
+ struct objdump_line *pos, *n;
+ struct objdump_line_rb_node *rbpos;
+ LIST_HEAD(head);
+ struct annotate_browser browser = {
+ .b = {
+ .entries = &head,
+ .refresh = ui_browser__list_head_refresh,
+ .seek = ui_browser__list_head_seek,
+ .write = annotate_browser__write,
+ .priv = self,
+ },
+ };
+ int ret;
+
+ if (self->ms.sym == NULL)
+ return -1;
+
+ if (self->ms.map->dso->annotate_warned)
+ return -1;
+
+ if (hist_entry__annotate(self, &head, sizeof(*rbpos)) < 0) {
+ ui__error_window(ui_helpline__last_msg);
+ return -1;
+ }
+
+ ui_helpline__push("Press <- or ESC to exit");
+
+ list_for_each_entry(pos, &head, node) {
+ size_t line_len = strlen(pos->line);
+ if (browser.b.width < line_len)
+ browser.b.width = line_len;
+ rbpos = objdump_line__rb(pos);
+ rbpos->idx = browser.b.nr_entries++;
+ rbpos->percent = objdump_line__calc_percent(pos, &head, self->ms.sym);
+ if (rbpos->percent < 0.01)
+ continue;
+ objdump__insert_line(&browser.entries, rbpos);
+ }
+
+ /*
+ * Position the browser at the hottest line.
+ */
+ browser.curr_hot = rb_last(&browser.entries);
+ if (browser.curr_hot)
+ annotate_browser__set_top(&browser, browser.curr_hot);
+
+ browser.b.width += 18; /* Percentage */
+ ret = annotate_browser__run(&browser, &es);
+ list_for_each_entry_safe(pos, n, &head, node) {
+ list_del(&pos->node);
+ objdump_line__free(pos);
+ }
+ return ret;
+}
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
new file mode 100644
index 00000000..6866aa4c
--- /dev/null
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -0,0 +1,948 @@
+#define _GNU_SOURCE
+#include <stdio.h>
+#undef _GNU_SOURCE
+#include "../libslang.h"
+#include <stdlib.h>
+#include <string.h>
+#include <newt.h>
+#include <linux/rbtree.h>
+
+#include "../../hist.h"
+#include "../../pstack.h"
+#include "../../sort.h"
+#include "../../util.h"
+
+#include "../browser.h"
+#include "../helpline.h"
+#include "../util.h"
+#include "map.h"
+
+struct hist_browser {
+ struct ui_browser b;
+ struct hists *hists;
+ struct hist_entry *he_selection;
+ struct map_symbol *selection;
+};
+
+static void hist_browser__refresh_dimensions(struct hist_browser *self)
+{
+ /* 3 == +/- toggle symbol before actual hist_entry rendering */
+ self->b.width = 3 + (hists__sort_list_width(self->hists) +
+ sizeof("[k]"));
+}
+
+static void hist_browser__reset(struct hist_browser *self)
+{
+ self->b.nr_entries = self->hists->nr_entries;
+ hist_browser__refresh_dimensions(self);
+ ui_browser__reset_index(&self->b);
+}
+
+static char tree__folded_sign(bool unfolded)
+{
+ return unfolded ? '-' : '+';
+}
+
+static char map_symbol__folded(const struct map_symbol *self)
+{
+ return self->has_children ? tree__folded_sign(self->unfolded) : ' ';
+}
+
+static char hist_entry__folded(const struct hist_entry *self)
+{
+ return map_symbol__folded(&self->ms);
+}
+
+static char callchain_list__folded(const struct callchain_list *self)
+{
+ return map_symbol__folded(&self->ms);
+}
+
+static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
+{
+ int n = 0;
+ struct rb_node *nd;
+
+ for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
+ struct callchain_list *chain;
+ char folded_sign = ' '; /* No children */
+
+ list_for_each_entry(chain, &child->val, list) {
+ ++n;
+ /* We need this because we may not have children */
+ folded_sign = callchain_list__folded(chain);
+ if (folded_sign == '+')
+ break;
+ }
+
+ if (folded_sign == '-') /* Have children and they're unfolded */
+ n += callchain_node__count_rows_rb_tree(child);
+ }
+
+ return n;
+}
+
+static int callchain_node__count_rows(struct callchain_node *node)
+{
+ struct callchain_list *chain;
+ bool unfolded = false;
+ int n = 0;
+
+ list_for_each_entry(chain, &node->val, list) {
+ ++n;
+ unfolded = chain->ms.unfolded;
+ }
+
+ if (unfolded)
+ n += callchain_node__count_rows_rb_tree(node);
+
+ return n;
+}
+
+static int callchain__count_rows(struct rb_root *chain)
+{
+ struct rb_node *nd;
+ int n = 0;
+
+ for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
+ struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+ n += callchain_node__count_rows(node);
+ }
+
+ return n;
+}
+
+static bool map_symbol__toggle_fold(struct map_symbol *self)
+{
+ if (!self->has_children)
+ return false;
+
+ self->unfolded = !self->unfolded;
+ return true;
+}
+
+static void callchain_node__init_have_children_rb_tree(struct callchain_node *self)
+{
+ struct rb_node *nd = rb_first(&self->rb_root);
+
+ for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
+ struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
+ struct callchain_list *chain;
+ int first = true;
+
+ list_for_each_entry(chain, &child->val, list) {
+ if (first) {
+ first = false;
+ chain->ms.has_children = chain->list.next != &child->val ||
+ rb_first(&child->rb_root) != NULL;
+ } else
+ chain->ms.has_children = chain->list.next == &child->val &&
+ rb_first(&child->rb_root) != NULL;
+ }
+
+ callchain_node__init_have_children_rb_tree(child);
+ }
+}
+
+static void callchain_node__init_have_children(struct callchain_node *self)
+{
+ struct callchain_list *chain;
+
+ list_for_each_entry(chain, &self->val, list)
+ chain->ms.has_children = rb_first(&self->rb_root) != NULL;
+
+ callchain_node__init_have_children_rb_tree(self);
+}
+
+static void callchain__init_have_children(struct rb_root *self)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(self); nd; nd = rb_next(nd)) {
+ struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+ callchain_node__init_have_children(node);
+ }
+}
+
+static void hist_entry__init_have_children(struct hist_entry *self)
+{
+ if (!self->init_have_children) {
+ callchain__init_have_children(&self->sorted_chain);
+ self->init_have_children = true;
+ }
+}
+
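+/*
+ * Fold or unfold the callchain of the selected entry and adjust the
+ * total number of browser rows accordingly.
+ */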
+static bool hist_browser__toggle_fold(struct hist_browser *self)
+{
+ if (map_symbol__toggle_fold(self->selection)) {
+ struct hist_entry *he = self->he_selection;
+
+ hist_entry__init_have_children(he);
+ self->hists->nr_entries -= he->nr_rows;
+
+ if (he->ms.unfolded)
+ he->nr_rows = callchain__count_rows(&he->sorted_chain);
+ else
+ he->nr_rows = 0;
+ self->hists->nr_entries += he->nr_rows;
+ self->b.nr_entries = self->hists->nr_entries;
+
+ return true;
+ }
+
+ /* If it doesn't have children, no toggling performed */
+ return false;
+}
+
+static int hist_browser__run(struct hist_browser *self, const char *title,
+ struct newtExitStruct *es)
+{
+ char str[256], unit;
+ unsigned long nr_events = self->hists->stats.nr_events[PERF_RECORD_SAMPLE];
+
+ self->b.entries = &self->hists->entries;
+ self->b.nr_entries = self->hists->nr_entries;
+
+ hist_browser__refresh_dimensions(self);
+
+ nr_events = convert_unit(nr_events, &unit);
+ snprintf(str, sizeof(str), "Events: %lu%c ",
+ nr_events, unit);
+ newtDrawRootText(0, 0, str);
+
+ if (ui_browser__show(&self->b, title,
+ "Press '?' for help on key bindings") < 0)
+ return -1;
+
+ newtFormAddHotKey(self->b.form, 'a');
+ newtFormAddHotKey(self->b.form, '?');
+ newtFormAddHotKey(self->b.form, 'h');
+ newtFormAddHotKey(self->b.form, 'd');
+ newtFormAddHotKey(self->b.form, 'D');
+ newtFormAddHotKey(self->b.form, 't');
+
+ newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
+ newtFormAddHotKey(self->b.form, NEWT_KEY_RIGHT);
+ newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER);
+
+ while (1) {
+ ui_browser__run(&self->b, es);
+
+ if (es->reason != NEWT_EXIT_HOTKEY)
+ break;
+ switch (es->u.key) {
+ case 'D': { /* Debug */
+ static int seq;
+ struct hist_entry *h = rb_entry(self->b.top,
+ struct hist_entry, rb_node);
+ ui_helpline__pop();
+ ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
+ seq++, self->b.nr_entries,
+ self->hists->nr_entries,
+ self->b.height,
+ self->b.index,
+ self->b.top_idx,
+ h->row_offset, h->nr_rows);
+ }
+ continue;
+ case NEWT_KEY_ENTER:
+ if (hist_browser__toggle_fold(self))
+ break;
+ /* fall thru */
+ default:
+ return 0;
+ }
+ }
+
+ ui_browser__hide(&self->b);
+ return 0;
+}
+
+static char *callchain_list__sym_name(struct callchain_list *self,
+ char *bf, size_t bfsize)
+{
+ if (self->ms.sym)
+ return self->ms.sym->name;
+
+ snprintf(bf, bfsize, "%#Lx", self->ip);
+ return bf;
+}
+
+#define LEVEL_OFFSET_STEP 3
+
+static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
+ struct callchain_node *chain_node,
+ u64 total, int level,
+ unsigned short row,
+ off_t *row_offset,
+ bool *is_current_entry)
+{
+ struct rb_node *node;
+ int first_row = row, width, offset = level * LEVEL_OFFSET_STEP;
+ u64 new_total, remaining;
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = chain_node->children_hit;
+ else
+ new_total = total;
+
+ remaining = new_total;
+ node = rb_first(&chain_node->rb_root);
+ while (node) {
+ struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
+ struct rb_node *next = rb_next(node);
+ u64 cumul = cumul_hits(child);
+ struct callchain_list *chain;
+ char folded_sign = ' ';
+ int first = true;
+ int extra_offset = 0;
+
+ remaining -= cumul;
+
+ list_for_each_entry(chain, &child->val, list) {
+ char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str;
+ const char *str;
+ int color;
+ bool was_first = first;
+
+ if (first) {
+ first = false;
+ chain->ms.has_children = chain->list.next != &child->val ||
+ rb_first(&child->rb_root) != NULL;
+ } else {
+ extra_offset = LEVEL_OFFSET_STEP;
+ chain->ms.has_children = chain->list.next == &child->val &&
+ rb_first(&child->rb_root) != NULL;
+ }
+
+ folded_sign = callchain_list__folded(chain);
+ if (*row_offset != 0) {
+ --*row_offset;
+ goto do_next;
+ }
+
+ alloc_str = NULL;
+ str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ if (was_first) {
+ double percent = cumul * 100.0 / new_total;
+
+ if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
+ str = "Not enough memory!";
+ else
+ str = alloc_str;
+ }
+
+ color = HE_COLORSET_NORMAL;
+ width = self->b.width - (offset + extra_offset + 2);
+ if (ui_browser__is_current_entry(&self->b, row)) {
+ self->selection = &chain->ms;
+ color = HE_COLORSET_SELECTED;
+ *is_current_entry = true;
+ }
+
+ SLsmg_set_color(color);
+ SLsmg_gotorc(self->b.y + row, self->b.x);
+ slsmg_write_nstring(" ", offset + extra_offset);
+ slsmg_printf("%c ", folded_sign);
+ slsmg_write_nstring(str, width);
+ free(alloc_str);
+
+ if (++row == self->b.height)
+ goto out;
+do_next:
+ if (folded_sign == '+')
+ break;
+ }
+
+ if (folded_sign == '-') {
+ const int new_level = level + (extra_offset ? 2 : 1);
+ row += hist_browser__show_callchain_node_rb_tree(self, child, new_total,
+ new_level, row, row_offset,
+ is_current_entry);
+ }
+ if (row == self->b.height)
+ goto out;
+ node = next;
+ }
+out:
+ return row - first_row;
+}
+
+static int hist_browser__show_callchain_node(struct hist_browser *self,
+ struct callchain_node *node,
+ int level, unsigned short row,
+ off_t *row_offset,
+ bool *is_current_entry)
+{
+ struct callchain_list *chain;
+ int first_row = row,
+ offset = level * LEVEL_OFFSET_STEP,
+ width = self->b.width - offset;
+ char folded_sign = ' ';
+
+ list_for_each_entry(chain, &node->val, list) {
+ char ipstr[BITS_PER_LONG / 4 + 1], *s;
+ int color;
+ /*
+ * FIXME: This should be moved to somewhere else,
+ * probably when the callchain is created, so as not to
+ * traverse it all over again
+ */
+ chain->ms.has_children = rb_first(&node->rb_root) != NULL;
+ folded_sign = callchain_list__folded(chain);
+
+ if (*row_offset != 0) {
+ --*row_offset;
+ continue;
+ }
+
+ color = HE_COLORSET_NORMAL;
+ if (ui_browser__is_current_entry(&self->b, row)) {
+ self->selection = &chain->ms;
+ color = HE_COLORSET_SELECTED;
+ *is_current_entry = true;
+ }
+
+ s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ SLsmg_gotorc(self->b.y + row, self->b.x);
+ SLsmg_set_color(color);
+ slsmg_write_nstring(" ", offset);
+ slsmg_printf("%c ", folded_sign);
+ slsmg_write_nstring(s, width - 2);
+
+ if (++row == self->b.height)
+ goto out;
+ }
+
+ if (folded_sign == '-')
+ row += hist_browser__show_callchain_node_rb_tree(self, node,
+ self->hists->stats.total_period,
+ level + 1, row,
+ row_offset,
+ is_current_entry);
+out:
+ return row - first_row;
+}
+
+static int hist_browser__show_callchain(struct hist_browser *self,
+ struct rb_root *chain,
+ int level, unsigned short row,
+ off_t *row_offset,
+ bool *is_current_entry)
+{
+ struct rb_node *nd;
+ int first_row = row;
+
+ for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
+ struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+
+ row += hist_browser__show_callchain_node(self, node, level,
+ row, row_offset,
+ is_current_entry);
+ if (row == self->b.height)
+ break;
+ }
+
+ return row - first_row;
+}
+
+static int hist_browser__show_entry(struct hist_browser *self,
+ struct hist_entry *entry,
+ unsigned short row)
+{
+ char s[256];
+ double percent;
+ int printed = 0;
+ int color, width = self->b.width;
+ char folded_sign = ' ';
+ bool current_entry = ui_browser__is_current_entry(&self->b, row);
+ off_t row_offset = entry->row_offset;
+
+ if (current_entry) {
+ self->he_selection = entry;
+ self->selection = &entry->ms;
+ }
+
+ if (symbol_conf.use_callchain) {
+ entry->ms.has_children = !RB_EMPTY_ROOT(&entry->sorted_chain);
+ folded_sign = hist_entry__folded(entry);
+ }
+
+ if (row_offset == 0) {
+ hist_entry__snprintf(entry, s, sizeof(s), self->hists, NULL, false,
+ 0, false, self->hists->stats.total_period);
+ percent = (entry->period * 100.0) / self->hists->stats.total_period;
+
+ color = HE_COLORSET_SELECTED;
+ if (!current_entry) {
+ if (percent >= MIN_RED)
+ color = HE_COLORSET_TOP;
+ else if (percent >= MIN_GREEN)
+ color = HE_COLORSET_MEDIUM;
+ else
+ color = HE_COLORSET_NORMAL;
+ }
+
+ SLsmg_set_color(color);
+ SLsmg_gotorc(self->b.y + row, self->b.x);
+ if (symbol_conf.use_callchain) {
+ slsmg_printf("%c ", folded_sign);
+ width -= 2;
+ }
+ slsmg_write_nstring(s, width);
+ ++row;
+ ++printed;
+ } else
+ --row_offset;
+
+ if (folded_sign == '-' && row != self->b.height) {
+ printed += hist_browser__show_callchain(self, &entry->sorted_chain,
+ 1, row, &row_offset,
+ &current_entry);
+ if (current_entry)
+ self->he_selection = entry;
+ }
+
+ return printed;
+}
+
+static unsigned int hist_browser__refresh(struct ui_browser *self)
+{
+ unsigned row = 0;
+ struct rb_node *nd;
+ struct hist_browser *hb = container_of(self, struct hist_browser, b);
+
+ if (self->top == NULL)
+ self->top = rb_first(&hb->hists->entries);
+
+ for (nd = self->top; nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (h->filtered)
+ continue;
+
+ row += hist_browser__show_entry(hb, h, row);
+ if (row == self->height)
+ break;
+ }
+
+ return row;
+}
+
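+/*
+ * Seek helpers: starting from @nd, return the first hist_entry that is not
+ * filtered out, walking forward (hists__filter_entries) or backwards
+ * (hists__filter_prev_entries).
+ */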
+static struct rb_node *hists__filter_entries(struct rb_node *nd)
+{
+ while (nd != NULL) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+ if (!h->filtered)
+ return nd;
+
+ nd = rb_next(nd);
+ }
+
+ return NULL;
+}
+
+static struct rb_node *hists__filter_prev_entries(struct rb_node *nd)
+{
+ while (nd != NULL) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+ if (!h->filtered)
+ return nd;
+
+ nd = rb_prev(nd);
+ }
+
+ return NULL;
+}
+
+static void ui_browser__hists_seek(struct ui_browser *self,
+ off_t offset, int whence)
+{
+ struct hist_entry *h;
+ struct rb_node *nd;
+ bool first = true;
+
+ switch (whence) {
+ case SEEK_SET:
+ nd = hists__filter_entries(rb_first(self->entries));
+ break;
+ case SEEK_CUR:
+ nd = self->top;
+ goto do_offset;
+ case SEEK_END:
+ nd = hists__filter_prev_entries(rb_last(self->entries));
+ first = false;
+ break;
+ default:
+ return;
+ }
+
+ /*
+ * Moves that are not relative to the first visible entry invalidate
+ * its row_offset:
+ */
+ h = rb_entry(self->top, struct hist_entry, rb_node);
+ h->row_offset = 0;
+
+ /*
+ * Here we have to check if nd is expanded (+): if it is, we can't go
+ * to the next top level hist_entry; instead we must compute an offset
+ * of what _not_ to show and not change the first visible entry.
+ *
+ * This offset increases when we are going from top to bottom and
+ * decreases when we're going from bottom to top.
+ *
+ * As we don't have backpointers to the top level in the callchain
+ * structure, we always need to print the whole hist_entry callchain,
+ * skipping the first entries that are before the first visible one
+ * and stopping when we have printed enough lines to fill the screen.
+ */
+do_offset:
+ if (offset > 0) {
+ do {
+ h = rb_entry(nd, struct hist_entry, rb_node);
+ if (h->ms.unfolded) {
+ u16 remaining = h->nr_rows - h->row_offset;
+ if (offset > remaining) {
+ offset -= remaining;
+ h->row_offset = 0;
+ } else {
+ h->row_offset += offset;
+ offset = 0;
+ self->top = nd;
+ break;
+ }
+ }
+ nd = hists__filter_entries(rb_next(nd));
+ if (nd == NULL)
+ break;
+ --offset;
+ self->top = nd;
+ } while (offset != 0);
+ } else if (offset < 0) {
+ while (1) {
+ h = rb_entry(nd, struct hist_entry, rb_node);
+ if (h->ms.unfolded) {
+ if (first) {
+ if (-offset > h->row_offset) {
+ offset += h->row_offset;
+ h->row_offset = 0;
+ } else {
+ h->row_offset += offset;
+ offset = 0;
+ self->top = nd;
+ break;
+ }
+ } else {
+ if (-offset > h->nr_rows) {
+ offset += h->nr_rows;
+ h->row_offset = 0;
+ } else {
+ h->row_offset = h->nr_rows + offset;
+ offset = 0;
+ self->top = nd;
+ break;
+ }
+ }
+ }
+
+ nd = hists__filter_prev_entries(rb_prev(nd));
+ if (nd == NULL)
+ break;
+ ++offset;
+ self->top = nd;
+ if (offset == 0) {
+ /*
+ * Last unfiltered hist_entry, check if it is
+ * unfolded, if it is then we should have
+ * row_offset at its last entry.
+ */
+ h = rb_entry(nd, struct hist_entry, rb_node);
+ if (h->ms.unfolded)
+ h->row_offset = h->nr_rows;
+ break;
+ }
+ first = false;
+ }
+ } else {
+ self->top = nd;
+ h = rb_entry(nd, struct hist_entry, rb_node);
+ h->row_offset = 0;
+ }
+}
+
+static struct hist_browser *hist_browser__new(struct hists *hists)
+{
+ struct hist_browser *self = zalloc(sizeof(*self));
+
+ if (self) {
+ self->hists = hists;
+ self->b.refresh = hist_browser__refresh;
+ self->b.seek = ui_browser__hists_seek;
+ }
+
+ return self;
+}
+
+static void hist_browser__delete(struct hist_browser *self)
+{
+ newtFormDestroy(self->b.form);
+ newtPopWindow();
+ free(self);
+}
+
+static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self)
+{
+ return self->he_selection;
+}
+
+static struct thread *hist_browser__selected_thread(struct hist_browser *self)
+{
+ return self->he_selection->thread;
+}
+
+static int hist_browser__title(char *bf, size_t size, const char *ev_name,
+ const struct dso *dso, const struct thread *thread)
+{
+ int printed = 0;
+
+ if (thread)
+ printed += snprintf(bf + printed, size - printed,
+ "Thread: %s(%d)",
+ (thread->comm_set ? thread->comm : ""),
+ thread->pid);
+ if (dso)
+ printed += snprintf(bf + printed, size - printed,
+ "%sDSO: %s", thread ? " " : "",
+ dso->short_name);
+ return printed ?: snprintf(bf, size, "Event: %s", ev_name);
+}
+
+int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
+{
+ struct hist_browser *browser = hist_browser__new(self);
+ struct pstack *fstack;
+ const struct thread *thread_filter = NULL;
+ const struct dso *dso_filter = NULL;
+ struct newtExitStruct es;
+ char msg[160];
+ int key = -1;
+
+ if (browser == NULL)
+ return -1;
+
+ fstack = pstack__new(2);
+ if (fstack == NULL)
+ goto out;
+
+ ui_helpline__push(helpline);
+
+ hist_browser__title(msg, sizeof(msg), ev_name,
+ dso_filter, thread_filter);
+
+ while (1) {
+ const struct thread *thread;
+ const struct dso *dso;
+ char *options[16];
+ int nr_options = 0, choice = 0, i,
+ annotate = -2, zoom_dso = -2, zoom_thread = -2,
+ browse_map = -2;
+
+ if (hist_browser__run(browser, msg, &es))
+ break;
+
+ thread = hist_browser__selected_thread(browser);
+ dso = browser->selection->map ? browser->selection->map->dso : NULL;
+
+ if (es.reason == NEWT_EXIT_HOTKEY) {
+ key = es.u.key;
+
+ switch (key) {
+ case NEWT_KEY_F1:
+ goto do_help;
+ case NEWT_KEY_TAB:
+ case NEWT_KEY_UNTAB:
+ /*
+ * Exit the browser, let hists__tui_browse_tree
+ * go to the next or previous entry
+ */
+ goto out_free_stack;
+ default:;
+ }
+
+ switch (key) {
+ case 'a':
+ if (browser->selection->map == NULL ||
+ browser->selection->map->dso->annotate_warned)
+ continue;
+ goto do_annotate;
+ case 'd':
+ goto zoom_dso;
+ case 't':
+ goto zoom_thread;
+ case 'h':
+ case '?':
+do_help:
+ ui__help_window("-> Zoom into DSO/Threads & Annotate current symbol\n"
+ "<- Zoom out\n"
+ "a Annotate current symbol\n"
+ "h/?/F1 Show this window\n"
+ "d Zoom into current DSO\n"
+ "t Zoom into current Thread\n"
+ "q/CTRL+C Exit browser");
+ continue;
+ default:;
+ }
+ if (is_exit_key(key)) {
+ if (key == NEWT_KEY_ESCAPE &&
+ !ui__dialog_yesno("Do you really want to exit?"))
+ continue;
+ break;
+ }
+
+ if (es.u.key == NEWT_KEY_LEFT) {
+ const void *top;
+
+ if (pstack__empty(fstack))
+ continue;
+ top = pstack__pop(fstack);
+ if (top == &dso_filter)
+ goto zoom_out_dso;
+ if (top == &thread_filter)
+ goto zoom_out_thread;
+ continue;
+ }
+ }
+
+ if (browser->selection->sym != NULL &&
+ !browser->selection->map->dso->annotate_warned &&
+ asprintf(&options[nr_options], "Annotate %s",
+ browser->selection->sym->name) > 0)
+ annotate = nr_options++;
+
+ if (thread != NULL &&
+ asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
+ (thread_filter ? "out of" : "into"),
+ (thread->comm_set ? thread->comm : ""),
+ thread->pid) > 0)
+ zoom_thread = nr_options++;
+
+ if (dso != NULL &&
+ asprintf(&options[nr_options], "Zoom %s %s DSO",
+ (dso_filter ? "out of" : "into"),
+ (dso->kernel ? "the Kernel" : dso->short_name)) > 0)
+ zoom_dso = nr_options++;
+
+ if (browser->selection->map != NULL &&
+ asprintf(&options[nr_options], "Browse map details") > 0)
+ browse_map = nr_options++;
+
+ options[nr_options++] = (char *)"Exit";
+
+ choice = ui__popup_menu(nr_options, options);
+
+ for (i = 0; i < nr_options - 1; ++i)
+ free(options[i]);
+
+ if (choice == nr_options - 1)
+ break;
+
+ if (choice == -1)
+ continue;
+
+ if (choice == annotate) {
+ struct hist_entry *he;
+do_annotate:
+ if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) {
+ browser->selection->map->dso->annotate_warned = 1;
+ ui_helpline__puts("No vmlinux file found, can't "
+ "annotate with just a "
+ "kallsyms file");
+ continue;
+ }
+
+ he = hist_browser__selected_entry(browser);
+ if (he == NULL)
+ continue;
+
+ hist_entry__tui_annotate(he);
+ } else if (choice == browse_map)
+ map__browse(browser->selection->map);
+ else if (choice == zoom_dso) {
+zoom_dso:
+ if (dso_filter) {
+ pstack__remove(fstack, &dso_filter);
+zoom_out_dso:
+ ui_helpline__pop();
+ dso_filter = NULL;
+ } else {
+ if (dso == NULL)
+ continue;
+ ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
+ dso->kernel ? "the Kernel" : dso->short_name);
+ dso_filter = dso;
+ pstack__push(fstack, &dso_filter);
+ }
+ hists__filter_by_dso(self, dso_filter);
+ hist_browser__title(msg, sizeof(msg), ev_name,
+ dso_filter, thread_filter);
+ hist_browser__reset(browser);
+ } else if (choice == zoom_thread) {
+zoom_thread:
+ if (thread_filter) {
+ pstack__remove(fstack, &thread_filter);
+zoom_out_thread:
+ ui_helpline__pop();
+ thread_filter = NULL;
+ } else {
+ ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
+ thread->comm_set ? thread->comm : "",
+ thread->pid);
+ thread_filter = thread;
+ pstack__push(fstack, &thread_filter);
+ }
+ hists__filter_by_thread(self, thread_filter);
+ hist_browser__title(msg, sizeof(msg), ev_name,
+ dso_filter, thread_filter);
+ hist_browser__reset(browser);
+ }
+ }
+out_free_stack:
+ pstack__delete(fstack);
+out:
+ hist_browser__delete(browser);
+ return key;
+}
+
+int hists__tui_browse_tree(struct rb_root *self, const char *help)
+{
+ struct rb_node *first = rb_first(self), *nd = first, *next;
+ int key = 0;
+
+ while (nd) {
+ struct hists *hists = rb_entry(nd, struct hists, rb_node);
+ const char *ev_name = __event_name(hists->type, hists->config);
+
+ key = hists__browse(hists, help, ev_name);
+
+ if (is_exit_key(key))
+ break;
+
+ switch (key) {
+ case NEWT_KEY_TAB:
+ next = rb_next(nd);
+ if (next)
+ nd = next;
+ break;
+ case NEWT_KEY_UNTAB:
+ if (nd == first)
+ continue;
+ nd = rb_prev(nd);
+ default:
+ break;
+ }
+ }
+
+ return key;
+}
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c
new file mode 100644
index 00000000..142b825b
--- /dev/null
+++ b/tools/perf/util/ui/browsers/map.c
@@ -0,0 +1,161 @@
+#include "../libslang.h"
+#include <elf.h>
+#include <newt.h>
+#include <sys/ttydefaults.h>
+#include <ctype.h>
+#include <string.h>
+#include <linux/bitops.h>
+#include "../../debug.h"
+#include "../../symbol.h"
+#include "../browser.h"
+#include "../helpline.h"
+#include "map.h"
+
+static int ui_entry__read(const char *title, char *bf, size_t size, int width)
+{
+ struct newtExitStruct es;
+ newtComponent form, entry;
+ const char *result;
+ int err = -1;
+
+ newtCenteredWindow(width, 1, title);
+ form = newtForm(NULL, NULL, 0);
+ if (form == NULL)
+ return -1;
+
+ entry = newtEntry(0, 0, "0x", width, &result, NEWT_FLAG_SCROLL);
+ if (entry == NULL)
+ goto out_free_form;
+
+ newtFormAddComponent(form, entry);
+ newtFormAddHotKey(form, NEWT_KEY_ENTER);
+ newtFormAddHotKey(form, NEWT_KEY_ESCAPE);
+ newtFormAddHotKey(form, NEWT_KEY_LEFT);
+ newtFormAddHotKey(form, CTRL('c'));
+ newtFormRun(form, &es);
+
+ if (result != NULL) {
+ strncpy(bf, result, size);
+ err = 0;
+ }
+out_free_form:
+ newtPopWindow();
+ newtFormDestroy(form);
+ return err;
+}
+
+struct map_browser {
+ struct ui_browser b;
+ struct map *map;
+ u16 namelen;
+ u8 addrlen;
+};
+
+static void map_browser__write(struct ui_browser *self, void *nd, int row)
+{
+ struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
+ struct map_browser *mb = container_of(self, struct map_browser, b);
+ bool current_entry = ui_browser__is_current_entry(self, row);
+ int color = ui_browser__percent_color(0, current_entry);
+
+ SLsmg_set_color(color);
+ slsmg_printf("%*llx %*llx %c ",
+ mb->addrlen, sym->start, mb->addrlen, sym->end,
+ sym->binding == STB_GLOBAL ? 'g' :
+ sym->binding == STB_LOCAL ? 'l' : 'w');
+ slsmg_write_nstring(sym->name, mb->namelen);
+}
+
+/* FIXME uber-kludgy, see comment on cmd_report... */
+static u32 *symbol__browser_index(struct symbol *self)
+{
+ return ((void *)self) - sizeof(struct rb_node) - sizeof(u32);
+}
+
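+/*
+ * Ask for a target via ui_entry__read: input starting with "0x" is parsed
+ * as an address and resolved with map__find_symbol(), anything else is
+ * looked up by name; on a match the browser is repositioned using the
+ * symbol's precomputed index.
+ */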
+static int map_browser__search(struct map_browser *self)
+{
+ char target[512];
+ struct symbol *sym;
+ int err = ui_entry__read("Search by name/addr", target, sizeof(target), 40);
+
+ if (err)
+ return err;
+
+ if (target[0] == '0' && tolower(target[1]) == 'x') {
+ u64 addr = strtoull(target, NULL, 16);
+ sym = map__find_symbol(self->map, addr, NULL);
+ } else
+ sym = map__find_symbol_by_name(self->map, target, NULL);
+
+ if (sym != NULL) {
+ u32 *idx = symbol__browser_index(sym);
+
+ self->b.top = &sym->rb_node;
+ self->b.index = self->b.top_idx = *idx;
+ } else
+ ui_helpline__fpush("%s not found!", target);
+
+ return 0;
+}
+
+static int map_browser__run(struct map_browser *self, struct newtExitStruct *es)
+{
+ if (ui_browser__show(&self->b, self->map->dso->long_name,
+ "Press <- or ESC to exit, %s / to search",
+ verbose ? "" : "restart with -v to use") < 0)
+ return -1;
+
+ newtFormAddHotKey(self->b.form, NEWT_KEY_LEFT);
+ newtFormAddHotKey(self->b.form, NEWT_KEY_ENTER);
+ if (verbose)
+ newtFormAddHotKey(self->b.form, '/');
+
+ while (1) {
+ ui_browser__run(&self->b, es);
+
+ if (es->reason != NEWT_EXIT_HOTKEY)
+ break;
+ if (verbose && es->u.key == '/')
+ map_browser__search(self);
+ else
+ break;
+ }
+
+ ui_browser__hide(&self->b);
+ return 0;
+}
+
+int map__browse(struct map *self)
+{
+ struct map_browser mb = {
+ .b = {
+ .entries = &self->dso->symbols[self->type],
+ .refresh = ui_browser__rb_tree_refresh,
+ .seek = ui_browser__rb_tree_seek,
+ .write = map_browser__write,
+ },
+ .map = self,
+ };
+ struct newtExitStruct es;
+ struct rb_node *nd;
+ char tmp[BITS_PER_LONG / 4];
+ u64 maxaddr = 0;
+
+ for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+
+ if (mb.namelen < pos->namelen)
+ mb.namelen = pos->namelen;
+ if (maxaddr < pos->end)
+ maxaddr = pos->end;
+ if (verbose) {
+ u32 *idx = symbol__browser_index(pos);
+ *idx = mb.b.nr_entries;
+ }
+ ++mb.b.nr_entries;
+ }
+
+ mb.addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr);
+ mb.b.width += mb.addrlen * 2 + 4 + mb.namelen;
+ return map_browser__run(&mb, &es);
+}
diff --git a/tools/perf/util/ui/browsers/map.h b/tools/perf/util/ui/browsers/map.h
new file mode 100644
index 00000000..df8581a4
--- /dev/null
+++ b/tools/perf/util/ui/browsers/map.h
@@ -0,0 +1,6 @@
+#ifndef _PERF_UI_MAP_BROWSER_H_
+#define _PERF_UI_MAP_BROWSER_H_ 1
+struct map;
+
+int map__browse(struct map *self);
+#endif /* _PERF_UI_MAP_BROWSER_H_ */
diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/util/ui/helpline.c
new file mode 100644
index 00000000..8d79daa4
--- /dev/null
+++ b/tools/perf/util/ui/helpline.c
@@ -0,0 +1,69 @@
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <newt.h>
+
+#include "../debug.h"
+#include "helpline.h"
+
+void ui_helpline__pop(void)
+{
+ newtPopHelpLine();
+}
+
+void ui_helpline__push(const char *msg)
+{
+ newtPushHelpLine(msg);
+}
+
+void ui_helpline__vpush(const char *fmt, va_list ap)
+{
+ char *s;
+
+ if (vasprintf(&s, fmt, ap) < 0)
+ vfprintf(stderr, fmt, ap);
+ else {
+ ui_helpline__push(s);
+ free(s);
+ }
+}
+
+void ui_helpline__fpush(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ ui_helpline__vpush(fmt, ap);
+ va_end(ap);
+}
+
+void ui_helpline__puts(const char *msg)
+{
+ ui_helpline__pop();
+ ui_helpline__push(msg);
+}
+
+void ui_helpline__init(void)
+{
+ ui_helpline__puts(" ");
+}
+
+char ui_helpline__last_msg[1024];
+
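+/*
+ * Accumulate partial messages in ui_helpline__last_msg and only push the
+ * buffered text to the help line (and refresh the screen) once a message
+ * ending in a newline arrives.
+ */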
+int ui_helpline__show_help(const char *format, va_list ap)
+{
+ int ret;
+ static int backlog;
+
+ ret = vsnprintf(ui_helpline__last_msg + backlog,
+ sizeof(ui_helpline__last_msg) - backlog, format, ap);
+ backlog += ret;
+
+ if (ui_helpline__last_msg[backlog - 1] == '\n') {
+ ui_helpline__puts(ui_helpline__last_msg);
+ newtRefresh();
+ backlog = 0;
+ }
+
+ return ret;
+}
diff --git a/tools/perf/util/ui/helpline.h b/tools/perf/util/ui/helpline.h
new file mode 100644
index 00000000..ab6028d0
--- /dev/null
+++ b/tools/perf/util/ui/helpline.h
@@ -0,0 +1,11 @@
+#ifndef _PERF_UI_HELPLINE_H_
+#define _PERF_UI_HELPLINE_H_ 1
+
+void ui_helpline__init(void);
+void ui_helpline__pop(void);
+void ui_helpline__push(const char *msg);
+void ui_helpline__vpush(const char *fmt, va_list ap);
+void ui_helpline__fpush(const char *fmt, ...);
+void ui_helpline__puts(const char *msg);
+
+#endif /* _PERF_UI_HELPLINE_H_ */
diff --git a/tools/perf/util/ui/libslang.h b/tools/perf/util/ui/libslang.h
new file mode 100644
index 00000000..5623da8e
--- /dev/null
+++ b/tools/perf/util/ui/libslang.h
@@ -0,0 +1,27 @@
+#ifndef _PERF_UI_SLANG_H_
+#define _PERF_UI_SLANG_H_ 1
+/*
+ * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks
+ * the build if it isn't defined. Use the equivalent one that glibc
+ * has in features.h.
+ */
+#include <features.h>
+#ifndef HAVE_LONG_LONG
+#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
+#endif
+#include <slang.h>
+
+#if SLANG_VERSION < 20104
+#define slsmg_printf(msg, args...) \
+ SLsmg_printf((char *)msg, ##args)
+#define slsmg_write_nstring(msg, len) \
+ SLsmg_write_nstring((char *)msg, len)
+#define sltt_set_color(obj, name, fg, bg) \
+ SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg)
+#else
+#define slsmg_printf SLsmg_printf
+#define slsmg_write_nstring SLsmg_write_nstring
+#define sltt_set_color SLtt_set_color
+#endif
+
+#endif /* _PERF_UI_SLANG_H_ */
diff --git a/tools/perf/util/ui/progress.c b/tools/perf/util/ui/progress.c
new file mode 100644
index 00000000..d7fc399d
--- /dev/null
+++ b/tools/perf/util/ui/progress.c
@@ -0,0 +1,60 @@
+#include <stdlib.h>
+#include <newt.h>
+#include "../cache.h"
+#include "progress.h"
+
+struct ui_progress {
+ newtComponent form, scale;
+};
+
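+/*
+ * When the newt browser is in use this opens a centered window with a
+ * newtScale sized to @total; in stdio mode the allocation is returned
+ * untouched and ui_progress__update() is a no-op.
+ */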
+struct ui_progress *ui_progress__new(const char *title, u64 total)
+{
+ struct ui_progress *self = malloc(sizeof(*self));
+
+ if (self != NULL) {
+ int cols;
+
+ if (use_browser <= 0)
+ return self;
+ newtGetScreenSize(&cols, NULL);
+ cols -= 4;
+ newtCenteredWindow(cols, 1, title);
+ self->form = newtForm(NULL, NULL, 0);
+ if (self->form == NULL)
+ goto out_free_self;
+ self->scale = newtScale(0, 0, cols, total);
+ if (self->scale == NULL)
+ goto out_free_form;
+ newtFormAddComponent(self->form, self->scale);
+ newtRefresh();
+ }
+
+ return self;
+
+out_free_form:
+ newtFormDestroy(self->form);
+out_free_self:
+ free(self);
+ return NULL;
+}
+
+void ui_progress__update(struct ui_progress *self, u64 curr)
+{
+ /*
+ * FIXME: We should have a per UI backend way of showing progress,
+ * stdio will just show a percentage as NN%, etc.
+ */
+ if (use_browser <= 0)
+ return;
+ newtScaleSet(self->scale, curr);
+ newtRefresh();
+}
+
+void ui_progress__delete(struct ui_progress *self)
+{
+ if (use_browser > 0) {
+ newtFormDestroy(self->form);
+ newtPopWindow();
+ }
+ free(self);
+}
diff --git a/tools/perf/util/ui/progress.h b/tools/perf/util/ui/progress.h
new file mode 100644
index 00000000..a3820a0b
--- /dev/null
+++ b/tools/perf/util/ui/progress.h
@@ -0,0 +1,11 @@
+#ifndef _PERF_UI_PROGRESS_H_
+#define _PERF_UI_PROGRESS_H_ 1
+
+struct ui_progress;
+
+struct ui_progress *ui_progress__new(const char *title, u64 total);
+void ui_progress__delete(struct ui_progress *self);
+
+void ui_progress__update(struct ui_progress *self, u64 curr);
+
+#endif
diff --git a/tools/perf/util/ui/setup.c b/tools/perf/util/ui/setup.c
new file mode 100644
index 00000000..66208503
--- /dev/null
+++ b/tools/perf/util/ui/setup.c
@@ -0,0 +1,42 @@
+#include <newt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include "../cache.h"
+#include "../debug.h"
+#include "browser.h"
+#include "helpline.h"
+
+static void newt_suspend(void *d __used)
+{
+ newtSuspend();
+ raise(SIGTSTP);
+ newtResume();
+}
+
+void setup_browser(void)
+{
+ if (!isatty(1) || !use_browser || dump_trace) {
+ use_browser = 0;
+ setup_pager();
+ return;
+ }
+
+ use_browser = 1;
+ newtInit();
+ newtCls();
+ newtSetSuspendCallback(newt_suspend, NULL);
+ ui_helpline__init();
+ ui_browser__init();
+}
+
+void exit_browser(bool wait_for_ok)
+{
+ if (use_browser > 0) {
+ if (wait_for_ok) {
+ char title[] = "Fatal Error", ok[] = "Ok";
+ newtWinMessage(title, ok, ui_helpline__last_msg);
+ }
+ newtFinished();
+ }
+}
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
new file mode 100644
index 00000000..04600e26
--- /dev/null
+++ b/tools/perf/util/ui/util.c
@@ -0,0 +1,114 @@
+#include <newt.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/ttydefaults.h>
+
+#include "../cache.h"
+#include "../debug.h"
+#include "browser.h"
+#include "helpline.h"
+#include "util.h"
+
+newtComponent newt_form__new(void);
+
+static void newt_form__set_exit_keys(newtComponent self)
+{
+ newtFormAddHotKey(self, NEWT_KEY_LEFT);
+ newtFormAddHotKey(self, NEWT_KEY_ESCAPE);
+ newtFormAddHotKey(self, 'Q');
+ newtFormAddHotKey(self, 'q');
+ newtFormAddHotKey(self, CTRL('c'));
+}
+
+newtComponent newt_form__new(void)
+{
+ newtComponent self = newtForm(NULL, NULL, 0);
+ if (self)
+ newt_form__set_exit_keys(self);
+ return self;
+}
+
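+/*
+ * Show a centered listbox with one entry per argv[] string and return the
+ * index of the chosen entry, or -1 on error or when the menu is dismissed
+ * with one of the exit hotkeys.
+ */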
+int ui__popup_menu(int argc, char * const argv[])
+{
+ struct newtExitStruct es;
+ int i, rc = -1, max_len = 5;
+ newtComponent listbox, form = newt_form__new();
+
+ if (form == NULL)
+ return -1;
+
+ listbox = newtListbox(0, 0, argc, NEWT_FLAG_RETURNEXIT);
+ if (listbox == NULL)
+ goto out_destroy_form;
+
+ newtFormAddComponent(form, listbox);
+
+ for (i = 0; i < argc; ++i) {
+ int len = strlen(argv[i]);
+ if (len > max_len)
+ max_len = len;
+ if (newtListboxAddEntry(listbox, argv[i], (void *)(long)i))
+ goto out_destroy_form;
+ }
+
+ newtCenteredWindow(max_len, argc, NULL);
+ newtFormRun(form, &es);
+ rc = newtListboxGetCurrent(listbox) - NULL;
+ if (es.reason == NEWT_EXIT_HOTKEY)
+ rc = -1;
+ newtPopWindow();
+out_destroy_form:
+ newtFormDestroy(form);
+ return rc;
+}
+
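+/*
+ * Size a centered textbox to the longest line and the number of lines in
+ * @text, then run it until one of the default exit hotkeys is pressed.
+ */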
+int ui__help_window(const char *text)
+{
+ struct newtExitStruct es;
+ newtComponent tb, form = newt_form__new();
+ int rc = -1;
+ int max_len = 0, nr_lines = 0;
+ const char *t;
+
+ if (form == NULL)
+ return -1;
+
+ t = text;
+ while (1) {
+ const char *sep = strchr(t, '\n');
+ int len;
+
+ if (sep == NULL)
+ sep = strchr(t, '\0');
+ len = sep - t;
+ if (max_len < len)
+ max_len = len;
+ ++nr_lines;
+ if (*sep == '\0')
+ break;
+ t = sep + 1;
+ }
+
+ tb = newtTextbox(0, 0, max_len, nr_lines, 0);
+ if (tb == NULL)
+ goto out_destroy_form;
+
+ newtTextboxSetText(tb, text);
+ newtFormAddComponent(form, tb);
+ newtCenteredWindow(max_len, nr_lines, NULL);
+ newtFormRun(form, &es);
+ newtPopWindow();
+ rc = 0;
+out_destroy_form:
+ newtFormDestroy(form);
+ return rc;
+}
+
+bool ui__dialog_yesno(const char *msg)
+{
+ /* newtWinChoice should really be accepting const char pointers... */
+ char yes[] = "Yes", no[] = "No";
+ return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
+}
diff --git a/tools/perf/util/ui/util.h b/tools/perf/util/ui/util.h
new file mode 100644
index 00000000..afcbc1d9
--- /dev/null
+++ b/tools/perf/util/ui/util.h
@@ -0,0 +1,10 @@
+#ifndef _PERF_UI_UTIL_H_
+#define _PERF_UI_UTIL_H_ 1
+
+#include <stdbool.h>
+
+int ui__popup_menu(int argc, char * const argv[]);
+int ui__help_window(const char *text);
+bool ui__dialog_yesno(const char *msg);
+
+#endif /* _PERF_UI_UTIL_H_ */
diff --git a/tools/perf/util/usage.c b/tools/perf/util/usage.c
new file mode 100644
index 00000000..e16bf9a7
--- /dev/null
+++ b/tools/perf/util/usage.c
@@ -0,0 +1,80 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ */
+#include "util.h"
+
+static void report(const char *prefix, const char *err, va_list params)
+{
+ char msg[1024];
+ vsnprintf(msg, sizeof(msg), err, params);
+ fprintf(stderr, " %s%s\n", prefix, msg);
+}
+
+static NORETURN void usage_builtin(const char *err)
+{
+ fprintf(stderr, "\n Usage: %s\n", err);
+ exit(129);
+}
+
+static NORETURN void die_builtin(const char *err, va_list params)
+{
+ report(" Fatal: ", err, params);
+ exit(128);
+}
+
+static void error_builtin(const char *err, va_list params)
+{
+ report(" Error: ", err, params);
+}
+
+static void warn_builtin(const char *warn, va_list params)
+{
+ report(" Warning: ", warn, params);
+}
+
+/* If we are in a dlopen()ed .so, writing to a global variable would
+ * segfault (ugh), so keep things static. */
+static void (*usage_routine)(const char *err) NORETURN = usage_builtin;
+static void (*die_routine)(const char *err, va_list params) NORETURN = die_builtin;
+static void (*error_routine)(const char *err, va_list params) = error_builtin;
+static void (*warn_routine)(const char *err, va_list params) = warn_builtin;
+
+void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN)
+{
+ die_routine = routine;
+}
+
+void usage(const char *err)
+{
+ usage_routine(err);
+}
+
+void die(const char *err, ...)
+{
+ va_list params;
+
+ va_start(params, err);
+ die_routine(err, params);
+ va_end(params);
+}
+
+int error(const char *err, ...)
+{
+ va_list params;
+
+ va_start(params, err);
+ error_routine(err, params);
+ va_end(params);
+ return -1;
+}
+
+void warning(const char *warn, ...)
+{
+ va_list params;
+
+ va_start(params, warn);
+ warn_routine(warn, params);
+ va_end(params);
+}
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
new file mode 100644
index 00000000..21426567
--- /dev/null
+++ b/tools/perf/util/util.c
@@ -0,0 +1,116 @@
+#include "util.h"
+#include <sys/mman.h>
+
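+/*
+ * Create @path and any missing parent directories, like "mkdir -p"
+ * (e.g. mkdir_p("/tmp/a/b/c", 0755)). @path must be absolute; returns 0
+ * on success (or if it already exists), -1 on error.
+ */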
+int mkdir_p(char *path, mode_t mode)
+{
+ struct stat st;
+ int err;
+ char *d = path;
+
+ if (*d != '/')
+ return -1;
+
+ if (stat(path, &st) == 0)
+ return 0;
+
+ while (*++d == '/');
+
+ while ((d = strchr(d, '/'))) {
+ *d = '\0';
+ err = stat(path, &st) && mkdir(path, mode);
+ *d++ = '/';
+ if (err)
+ return -1;
+ while (*d == '/')
+ ++d;
+ }
+ return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
+}
+
+static int slow_copyfile(const char *from, const char *to)
+{
+ int err = -1;
+ char *line = NULL;
+ size_t n;
+ FILE *from_fp = fopen(from, "r"), *to_fp;
+
+ if (from_fp == NULL)
+ goto out;
+
+ to_fp = fopen(to, "w");
+ if (to_fp == NULL)
+ goto out_fclose_from;
+
+ while (getline(&line, &n, from_fp) > 0)
+ if (fputs(line, to_fp) == EOF)
+ goto out_fclose_to;
+ err = 0;
+out_fclose_to:
+ fclose(to_fp);
+ free(line);
+out_fclose_from:
+ fclose(from_fp);
+out:
+ return err;
+}
+
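+/*
+ * Copy @from to @to by mmap()ing the source and writing it out in one go.
+ * Files that report a zero size (e.g. /proc entries) are copied line by
+ * line via slow_copyfile() instead.
+ */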
+int copyfile(const char *from, const char *to)
+{
+ int fromfd, tofd;
+ struct stat st;
+ void *addr;
+ int err = -1;
+
+ if (stat(from, &st))
+ goto out;
+
+ if (st.st_size == 0) /* /proc? do it slowly... */
+ return slow_copyfile(from, to);
+
+ fromfd = open(from, O_RDONLY);
+ if (fromfd < 0)
+ goto out;
+
+ tofd = creat(to, 0755);
+ if (tofd < 0)
+ goto out_close_from;
+
+ addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0);
+ if (addr == MAP_FAILED)
+ goto out_close_to;
+
+ if (write(tofd, addr, st.st_size) == st.st_size)
+ err = 0;
+
+ munmap(addr, st.st_size);
+out_close_to:
+ close(tofd);
+ if (err)
+ unlink(to);
+out_close_from:
+ close(fromfd);
+out:
+ return err;
+}
+
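+/*
+ * Scale @value down by factors of 1000, setting *unit to ' ', 'K', 'M' or
+ * 'G' accordingly and returning the scaled value.
+ */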
+unsigned long convert_unit(unsigned long value, char *unit)
+{
+ *unit = ' ';
+
+ if (value > 1000) {
+ value /= 1000;
+ *unit = 'K';
+ }
+
+ if (value > 1000) {
+ value /= 1000;
+ *unit = 'M';
+ }
+
+ if (value > 1000) {
+ value /= 1000;
+ *unit = 'G';
+ }
+
+ return value;
+}
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
new file mode 100644
index 00000000..f380fed7
--- /dev/null
+++ b/tools/perf/util/util.h
@@ -0,0 +1,285 @@
+#ifndef GIT_COMPAT_UTIL_H
+#define GIT_COMPAT_UTIL_H
+
+#define _FILE_OFFSET_BITS 64
+
+#ifndef FLEX_ARRAY
+/*
+ * See if our compiler is known to support flexible array members.
+ */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
+# define FLEX_ARRAY /* empty */
+#elif defined(__GNUC__)
+# if (__GNUC__ >= 3)
+# define FLEX_ARRAY /* empty */
+# else
+# define FLEX_ARRAY 0 /* older GNU extension */
+# endif
+#endif
+
+/*
+ * Otherwise, default to safer but a bit wasteful traditional style
+ */
+#ifndef FLEX_ARRAY
+# define FLEX_ARRAY 1
+#endif
+#endif
+
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+
+#ifdef __GNUC__
+#define TYPEOF(x) (__typeof__(x))
+#else
+#define TYPEOF(x)
+#endif
+
+#define MSB(x, bits) ((x) & TYPEOF(x)(~0ULL << (sizeof(x) * 8 - (bits))))
+#define HAS_MULTI_BITS(i) ((i) & ((i) - 1)) /* checks if an integer has more than 1 bit set */
+
+/* Approximation of the length of the decimal representation of this type. */
+#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1)
+
+#define _ALL_SOURCE 1
+#define _GNU_SOURCE 1
+#define _BSD_SOURCE 1
+#define HAS_BOOL
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include <limits.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <sys/time.h>
+#include <time.h>
+#include <signal.h>
+#include <fnmatch.h>
+#include <assert.h>
+#include <regex.h>
+#include <utime.h>
+#include <sys/wait.h>
+#include <sys/poll.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#ifndef NO_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <pwd.h>
+#include <inttypes.h>
+#include "../../../include/linux/magic.h"
+#include "types.h"
+#include <sys/ttydefaults.h>
+
+#ifndef NO_ICONV
+#include <iconv.h>
+#endif
+
+extern const char *graph_line;
+extern const char *graph_dotted_line;
+extern char buildid_dir[];
+
+/* On most systems <limits.h> would have given us this, but
+ * not on some systems (e.g. GNU/Hurd).
+ */
+#ifndef PATH_MAX
+#define PATH_MAX 4096
+#endif
+
+#ifndef PRIuMAX
+#define PRIuMAX "llu"
+#endif
+
+#ifndef PRIu32
+#define PRIu32 "u"
+#endif
+
+#ifndef PRIx32
+#define PRIx32 "x"
+#endif
+
+#ifndef PATH_SEP
+#define PATH_SEP ':'
+#endif
+
+#ifndef STRIP_EXTENSION
+#define STRIP_EXTENSION ""
+#endif
+
+#ifndef has_dos_drive_prefix
+#define has_dos_drive_prefix(path) 0
+#endif
+
+#ifndef is_dir_sep
+#define is_dir_sep(c) ((c) == '/')
+#endif
+
+#ifdef __GNUC__
+#define NORETURN __attribute__((__noreturn__))
+#else
+#define NORETURN
+#ifndef __attribute__
+#define __attribute__(x)
+#endif
+#endif
+
+/* General helper functions */
+extern void usage(const char *err) NORETURN;
+extern void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2)));
+extern int error(const char *err, ...) __attribute__((format (printf, 1, 2)));
+extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)));
+
+#include "../../../include/linux/stringify.h"
+
+#define DIE_IF(cnd) \
+ do { if (cnd) \
+ die(" at (" __FILE__ ":" __stringify(__LINE__) "): " \
+ __stringify(cnd) "\n"); \
+ } while (0)
+
+
+extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN);
+
+extern int prefixcmp(const char *str, const char *prefix);
+extern void set_buildid_dir(void);
+extern void disable_buildid_cache(void);
+
+static inline const char *skip_prefix(const char *str, const char *prefix)
+{
+ size_t len = strlen(prefix);
+ return strncmp(str, prefix, len) ? NULL : str + len;
+}
+
+#ifdef __GLIBC_PREREQ
+#if __GLIBC_PREREQ(2, 1)
+#define HAVE_STRCHRNUL
+#endif
+#endif
+
+#ifndef HAVE_STRCHRNUL
+#define strchrnul gitstrchrnul
+static inline char *gitstrchrnul(const char *s, int c)
+{
+ while (*s && *s != c)
+ s++;
+ return (char *)s;
+}
+#endif
+
+/*
+ * Wrappers:
+ */
+extern char *xstrdup(const char *str);
+extern void *xrealloc(void *ptr, size_t size) __attribute__((weak));
+
+
+static inline void *zalloc(size_t size)
+{
+ return calloc(1, size);
+}
+
+static inline int has_extension(const char *filename, const char *ext)
+{
+ size_t len = strlen(filename);
+ size_t extlen = strlen(ext);
+
+ return len > extlen && !memcmp(filename + len - extlen, ext, extlen);
+}
+
+/* Sane ctype - no locale, and works with signed chars */
+#undef isascii
+#undef isspace
+#undef isdigit
+#undef isxdigit
+#undef isalpha
+#undef isprint
+#undef isalnum
+#undef tolower
+#undef toupper
+
+extern unsigned char sane_ctype[256];
+#define GIT_SPACE 0x01
+#define GIT_DIGIT 0x02
+#define GIT_ALPHA 0x04
+#define GIT_GLOB_SPECIAL 0x08
+#define GIT_REGEX_SPECIAL 0x10
+#define GIT_PRINT_EXTRA 0x20
+#define GIT_PRINT 0x3E
+#define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0)
+#define isascii(x) (((x) & ~0x7f) == 0)
+#define isspace(x) sane_istest(x,GIT_SPACE)
+#define isdigit(x) sane_istest(x,GIT_DIGIT)
+#define isxdigit(x) \
+ (sane_istest(toupper(x), GIT_ALPHA | GIT_DIGIT) && toupper(x) < 'G')
+#define isalpha(x) sane_istest(x,GIT_ALPHA)
+#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT)
+#define isprint(x) sane_istest(x,GIT_PRINT)
+#define tolower(x) sane_case((unsigned char)(x), 0x20)
+#define toupper(x) sane_case((unsigned char)(x), 0)
+
+static inline int sane_case(int x, int high)
+{
+ if (sane_istest(x, GIT_ALPHA))
+ x = (x & ~0x20) | high;
+ return x;
+}
+
+#ifndef DIR_HAS_BSD_GROUP_SEMANTICS
+# define FORCE_DIR_SET_GID S_ISGID
+#else
+# define FORCE_DIR_SET_GID 0
+#endif
+
+#ifdef NO_NSEC
+#undef USE_NSEC
+#define ST_CTIME_NSEC(st) 0
+#define ST_MTIME_NSEC(st) 0
+#else
+#ifdef USE_ST_TIMESPEC
+#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec))
+#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec))
+#else
+#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec))
+#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec))
+#endif
+#endif
+
+int mkdir_p(char *path, mode_t mode);
+int copyfile(const char *from, const char *to);
+
+s64 perf_atoll(const char *str);
+char **argv_split(const char *str, int *argcp);
+void argv_free(char **argv);
+bool strglobmatch(const char *str, const char *pat);
+bool strlazymatch(const char *str, const char *pat);
+unsigned long convert_unit(unsigned long value, char *unit);
+
+#ifndef ESC
+#define ESC 27
+#endif
+
+static inline bool is_exit_key(int key)
+{
+ char up;
+ if (key == CTRL('c') || key == ESC)
+ return true;
+ up = toupper(key);
+ return up == 'Q';
+}
+
+#define _STR(x) #x
+#define STR(x) _STR(x)
+
+#endif
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
new file mode 100644
index 00000000..cfa55d68
--- /dev/null
+++ b/tools/perf/util/values.c
@@ -0,0 +1,231 @@
+#include <stdlib.h>
+
+#include "util.h"
+#include "values.h"
+
+void perf_read_values_init(struct perf_read_values *values)
+{
+ values->threads_max = 16;
+ values->pid = malloc(values->threads_max * sizeof(*values->pid));
+ values->tid = malloc(values->threads_max * sizeof(*values->tid));
+ values->value = malloc(values->threads_max * sizeof(*values->value));
+ if (!values->pid || !values->tid || !values->value)
+ die("failed to allocate read_values threads arrays");
+ values->threads = 0;
+
+ values->counters_max = 16;
+ values->counterrawid = malloc(values->counters_max
+ * sizeof(*values->counterrawid));
+ values->countername = malloc(values->counters_max
+ * sizeof(*values->countername));
+ if (!values->counterrawid || !values->countername)
+ die("failed to allocate read_values counters arrays");
+ values->counters = 0;
+}
+
+void perf_read_values_destroy(struct perf_read_values *values)
+{
+ int i;
+
+ if (!values->threads_max || !values->counters_max)
+ return;
+
+ for (i = 0; i < values->threads; i++)
+ free(values->value[i]);
+ free(values->pid);
+ free(values->tid);
+ free(values->counterrawid);
+ for (i = 0; i < values->counters; i++)
+ free(values->countername[i]);
+ free(values->countername);
+}
+
+static void perf_read_values__enlarge_threads(struct perf_read_values *values)
+{
+ values->threads_max *= 2;
+ values->pid = realloc(values->pid,
+ values->threads_max * sizeof(*values->pid));
+ values->tid = realloc(values->tid,
+ values->threads_max * sizeof(*values->tid));
+ values->value = realloc(values->value,
+ values->threads_max * sizeof(*values->value));
+ if (!values->pid || !values->tid || !values->value)
+ die("failed to enlarge read_values threads arrays");
+}
+
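+/*
+ * Return the index for the (@pid, @tid) pair, growing the thread arrays
+ * and allocating a per-thread counter row the first time it is seen.
+ */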
+static int perf_read_values__findnew_thread(struct perf_read_values *values,
+ u32 pid, u32 tid)
+{
+ int i;
+
+ for (i = 0; i < values->threads; i++)
+ if (values->pid[i] == pid && values->tid[i] == tid)
+ return i;
+
+ if (values->threads == values->threads_max)
+ perf_read_values__enlarge_threads(values);
+
+ i = values->threads++;
+ values->pid[i] = pid;
+ values->tid[i] = tid;
+ values->value[i] = malloc(values->counters_max * sizeof(**values->value));
+ if (!values->value[i])
+ die("failed to allocate read_values counters array");
+
+ return i;
+}
+
+static void perf_read_values__enlarge_counters(struct perf_read_values *values)
+{
+ int i;
+
+ values->counters_max *= 2;
+ values->counterrawid = realloc(values->counterrawid,
+ values->counters_max * sizeof(*values->counterrawid));
+ values->countername = realloc(values->countername,
+ values->counters_max * sizeof(*values->countername));
+ if (!values->counterrawid || !values->countername)
+ die("failed to enlarge read_values counters arrays");
+
+ for (i = 0; i < values->threads; i++) {
+ values->value[i] = realloc(values->value[i],
+ values->counters_max * sizeof(**values->value));
+ if (!values->value[i])
+ die("failed to enlarge read_values counters arrays");
+ }
+}
+
+static int perf_read_values__findnew_counter(struct perf_read_values *values,
+ u64 rawid, const char *name)
+{
+ int i;
+
+ for (i = 0; i < values->counters; i++)
+ if (values->counterrawid[i] == rawid)
+ return i;
+
+ if (values->counters == values->counters_max)
+ perf_read_values__enlarge_counters(values);
+
+ i = values->counters++;
+ values->counterrawid[i] = rawid;
+ values->countername[i] = strdup(name);
+
+ return i;
+}
+
+void perf_read_values_add_value(struct perf_read_values *values,
+ u32 pid, u32 tid,
+ u64 rawid, const char *name, u64 value)
+{
+ int tindex, cindex;
+
+ tindex = perf_read_values__findnew_thread(values, pid, tid);
+ cindex = perf_read_values__findnew_counter(values, rawid, name);
+
+ values->value[tindex][cindex] = value;
+}
+
+static void perf_read_values__display_pretty(FILE *fp,
+ struct perf_read_values *values)
+{
+ int i, j;
+ int pidwidth, tidwidth;
+ int *counterwidth;
+
+ counterwidth = malloc(values->counters * sizeof(*counterwidth));
+ if (!counterwidth)
+ die("failed to allocate counterwidth array");
+ tidwidth = 3;
+ pidwidth = 3;
+ for (j = 0; j < values->counters; j++)
+ counterwidth[j] = strlen(values->countername[j]);
+ for (i = 0; i < values->threads; i++) {
+ int width;
+
+ width = snprintf(NULL, 0, "%d", values->pid[i]);
+ if (width > pidwidth)
+ pidwidth = width;
+ width = snprintf(NULL, 0, "%d", values->tid[i]);
+ if (width > tidwidth)
+ tidwidth = width;
+ for (j = 0; j < values->counters; j++) {
+ width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+ if (width > counterwidth[j])
+ counterwidth[j] = width;
+ }
+ }
+
+ fprintf(fp, "# %*s %*s", pidwidth, "PID", tidwidth, "TID");
+ for (j = 0; j < values->counters; j++)
+ fprintf(fp, " %*s", counterwidth[j], values->countername[j]);
+ fprintf(fp, "\n");
+
+ for (i = 0; i < values->threads; i++) {
+ fprintf(fp, " %*d %*d", pidwidth, values->pid[i],
+ tidwidth, values->tid[i]);
+ for (j = 0; j < values->counters; j++)
+ fprintf(fp, " %*Lu",
+ counterwidth[j], values->value[i][j]);
+ fprintf(fp, "\n");
+ }
+ free(counterwidth);
+}
+
+static void perf_read_values__display_raw(FILE *fp,
+ struct perf_read_values *values)
+{
+ int width, pidwidth, tidwidth, namewidth, rawwidth, countwidth;
+ int i, j;
+
+ tidwidth = 3; /* TID */
+ pidwidth = 3; /* PID */
+ namewidth = 4; /* "Name" */
+ rawwidth = 3; /* "Raw" */
+ countwidth = 5; /* "Count" */
+
+ for (i = 0; i < values->threads; i++) {
+ width = snprintf(NULL, 0, "%d", values->pid[i]);
+ if (width > pidwidth)
+ pidwidth = width;
+ width = snprintf(NULL, 0, "%d", values->tid[i]);
+ if (width > tidwidth)
+ tidwidth = width;
+ }
+ for (j = 0; j < values->counters; j++) {
+ width = strlen(values->countername[j]);
+ if (width > namewidth)
+ namewidth = width;
+ width = snprintf(NULL, 0, "%llx", values->counterrawid[j]);
+ if (width > rawwidth)
+ rawwidth = width;
+ }
+ for (i = 0; i < values->threads; i++) {
+ for (j = 0; j < values->counters; j++) {
+ width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+ if (width > countwidth)
+ countwidth = width;
+ }
+ }
+
+ fprintf(fp, "# %*s %*s %*s %*s %*s\n",
+ pidwidth, "PID", tidwidth, "TID",
+ namewidth, "Name", rawwidth, "Raw",
+ countwidth, "Count");
+ for (i = 0; i < values->threads; i++)
+ for (j = 0; j < values->counters; j++)
+ fprintf(fp, " %*d %*d %*s %*llx %*Lu\n",
+ pidwidth, values->pid[i],
+ tidwidth, values->tid[i],
+ namewidth, values->countername[j],
+ rawwidth, values->counterrawid[j],
+ countwidth, values->value[i][j]);
+}
+
+void perf_read_values_display(FILE *fp, struct perf_read_values *values, int raw)
+{
+ if (raw)
+ perf_read_values__display_raw(fp, values);
+ else
+ perf_read_values__display_pretty(fp, values);
+}
diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h
new file mode 100644
index 00000000..2fa967e1
--- /dev/null
+++ b/tools/perf/util/values.h
@@ -0,0 +1,27 @@
+#ifndef __PERF_VALUES_H
+#define __PERF_VALUES_H
+
+#include "types.h"
+
+struct perf_read_values {
+ int threads;
+ int threads_max;
+ u32 *pid, *tid;
+ int counters;
+ int counters_max;
+ u64 *counterrawid;
+ char **countername;
+ u64 **value;
+};
+
+void perf_read_values_init(struct perf_read_values *values);
+void perf_read_values_destroy(struct perf_read_values *values);
+
+void perf_read_values_add_value(struct perf_read_values *values,
+ u32 pid, u32 tid,
+ u64 rawid, const char *name, u64 value);
+
+void perf_read_values_display(FILE *fp, struct perf_read_values *values,
+ int raw);
+
+#endif /* __PERF_VALUES_H */
diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c
new file mode 100644
index 00000000..73e900ed
--- /dev/null
+++ b/tools/perf/util/wrapper.c
@@ -0,0 +1,40 @@
+/*
+ * Various trivial helper wrappers around standard functions
+ */
+#include "cache.h"
+
+/*
+ * There's no pack memory to release - but stay close to the Git
+ * version so wrap this away:
+ */
+static inline void release_pack_memory(size_t size __used, int flag __used)
+{
+}
+
+char *xstrdup(const char *str)
+{
+ char *ret = strdup(str);
+ if (!ret) {
+ release_pack_memory(strlen(str) + 1, -1);
+ ret = strdup(str);
+ if (!ret)
+ die("Out of memory, strdup failed");
+ }
+ return ret;
+}
+
+void *xrealloc(void *ptr, size_t size)
+{
+ void *ret = realloc(ptr, size);
+ if (!ret && !size)
+ ret = realloc(ptr, 1);
+ if (!ret) {
+ release_pack_memory(size, -1);
+ ret = realloc(ptr, size);
+ if (!ret && !size)
+ ret = realloc(ptr, 1);
+ if (!ret)
+ die("Out of memory, realloc failed");
+ }
+ return ret;
+}
diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
new file mode 100644
index 00000000..bbe2e3a2
--- /dev/null
+++ b/tools/usb/ffs-test.c
@@ -0,0 +1,554 @@
+/*
+ * ffs-test.c -- user mode filesystem api for usb composite function
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* $(CROSS_COMPILE)cc -Wall -Wextra -g -o ffs-test ffs-test.c -lpthread */
+
+
+#define _BSD_SOURCE /* for endian.h */
+
+#include <endian.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <linux/usb/functionfs.h>
+
+
+/******************** Little Endian Handling ********************************/
+
+#define cpu_to_le16(x) htole16(x)
+#define cpu_to_le32(x) htole32(x)
+#define le32_to_cpu(x) le32toh(x)
+#define le16_to_cpu(x) le16toh(x)
+
+static inline __u16 get_unaligned_le16(const void *_ptr)
+{
+ const __u8 *ptr = _ptr;
+ return ptr[0] | (ptr[1] << 8);
+}
+
+static inline __u32 get_unaligned_le32(const void *_ptr)
+{
+ const __u8 *ptr = _ptr;
+ return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
+}
+
+static inline void put_unaligned_le16(__u16 val, void *_ptr)
+{
+ __u8 *ptr = _ptr;
+ *ptr++ = val;
+ *ptr++ = val >> 8;
+}
+
+static inline void put_unaligned_le32(__u32 val, void *_ptr)
+{
+ __u8 *ptr = _ptr;
+ *ptr++ = val;
+ *ptr++ = val >> 8;
+ *ptr++ = val >> 16;
+ *ptr++ = val >> 24;
+}
+
+
+/******************** Messages and Errors ***********************************/
+
+static const char argv0[] = "ffs-test";
+
+static unsigned verbosity = 7;
+
+static void _msg(unsigned level, const char *fmt, ...)
+{
+ if (level < 2)
+ level = 2;
+ else if (level > 7)
+ level = 7;
+
+ if (level <= verbosity) {
+ static const char levels[8][6] = {
+ [2] = "crit:",
+ [3] = "err: ",
+ [4] = "warn:",
+ [5] = "note:",
+ [6] = "info:",
+ [7] = "dbg: "
+ };
+
+ int _errno = errno;
+ va_list ap;
+
+ fprintf(stderr, "%s: %s ", argv0, levels[level]);
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+
+ if (fmt[strlen(fmt) - 1] != '\n') {
+ char buffer[128];
+ strerror_r(_errno, buffer, sizeof buffer);
+ fprintf(stderr, ": (-%d) %s\n", _errno, buffer);
+ }
+
+ fflush(stderr);
+ }
+}
+
+#define die(...) (_msg(2, __VA_ARGS__), exit(1))
+#define err(...) _msg(3, __VA_ARGS__)
+#define warn(...) _msg(4, __VA_ARGS__)
+#define note(...) _msg(5, __VA_ARGS__)
+#define info(...) _msg(6, __VA_ARGS__)
+#define debug(...) _msg(7, __VA_ARGS__)
+
+#define die_on(cond, ...) do { \
+ if (cond) \
+ die(__VA_ARGS__); \
+ } while (0)
+
+
+/******************** Descriptors and Strings *******************************/
+
+static const struct {
+ struct usb_functionfs_descs_head header;
+ struct {
+ struct usb_interface_descriptor intf;
+ struct usb_endpoint_descriptor_no_audio sink;
+ struct usb_endpoint_descriptor_no_audio source;
+ } __attribute__((packed)) fs_descs, hs_descs;
+} __attribute__((packed)) descriptors = {
+ .header = {
+ .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC),
+ .length = cpu_to_le32(sizeof descriptors),
+ .fs_count = 3,
+ .hs_count = 3,
+ },
+ .fs_descs = {
+ .intf = {
+ .bLength = sizeof descriptors.fs_descs.intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .iInterface = 1,
+ },
+ .sink = {
+ .bLength = sizeof descriptors.fs_descs.sink,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 1 | USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ /* .wMaxPacketSize = autoconfiguration (kernel) */
+ },
+ .source = {
+ .bLength = sizeof descriptors.fs_descs.source,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 2 | USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ /* .wMaxPacketSize = autoconfiguration (kernel) */
+ },
+ },
+ .hs_descs = {
+ .intf = {
+ .bLength = sizeof descriptors.fs_descs.intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 2,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .iInterface = 1,
+ },
+ .sink = {
+ .bLength = sizeof descriptors.hs_descs.sink,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 1 | USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+ },
+ .source = {
+ .bLength = sizeof descriptors.hs_descs.source,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 2 | USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+ .bInterval = 1, /* NAK every 1 uframe */
+ },
+ },
+};
+
+
+#define STR_INTERFACE_ "Source/Sink"
+
+static const struct {
+ struct usb_functionfs_strings_head header;
+ struct {
+ __le16 code;
+ const char str1[sizeof STR_INTERFACE_];
+ } __attribute__((packed)) lang0;
+} __attribute__((packed)) strings = {
+ .header = {
+ .magic = cpu_to_le32(FUNCTIONFS_STRINGS_MAGIC),
+ .length = cpu_to_le32(sizeof strings),
+ .str_count = cpu_to_le32(1),
+ .lang_count = cpu_to_le32(1),
+ },
+ .lang0 = {
+ cpu_to_le16(0x0409), /* en-us */
+ STR_INTERFACE_,
+ },
+};
+
+#define STR_INTERFACE strings.lang0.str1
+
+
+/******************** Files and Threads Handling ****************************/
+
+struct thread;
+
+static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes);
+static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes);
+static ssize_t ep0_consume(struct thread *t, const void *buf, size_t nbytes);
+static ssize_t fill_in_buf(struct thread *t, void *buf, size_t nbytes);
+static ssize_t empty_out_buf(struct thread *t, const void *buf, size_t nbytes);
+
+
+static struct thread {
+ const char *const filename;
+ size_t buf_size;
+
+ ssize_t (*in)(struct thread *, void *, size_t);
+ const char *const in_name;
+
+ ssize_t (*out)(struct thread *, const void *, size_t);
+ const char *const out_name;
+
+ int fd;
+ pthread_t id;
+ void *buf;
+ ssize_t status;
+} threads[] = {
+ {
+ "ep0", 4 * sizeof(struct usb_functionfs_event),
+ read_wrap, NULL,
+ ep0_consume, "<consume>",
+ 0, 0, NULL, 0
+ },
+ {
+ "ep1", 8 * 1024,
+ fill_in_buf, "<in>",
+ write_wrap, NULL,
+ 0, 0, NULL, 0
+ },
+ {
+ "ep2", 8 * 1024,
+ read_wrap, NULL,
+ empty_out_buf, "<out>",
+ 0, 0, NULL, 0
+ },
+};
+
+
+static void init_thread(struct thread *t)
+{
+ t->buf = malloc(t->buf_size);
+ die_on(!t->buf, "malloc");
+
+ t->fd = open(t->filename, O_RDWR);
+ die_on(t->fd < 0, "%s", t->filename);
+}
+
+static void cleanup_thread(void *arg)
+{
+ struct thread *t = arg;
+ int ret, fd;
+
+ fd = t->fd;
+ if (t->fd < 0)
+ return;
+ t->fd = -1;
+
+ /* test the FIFO ioctls (non-ep0 code paths) */
+ if (t != threads) {
+ ret = ioctl(fd, FUNCTIONFS_FIFO_STATUS);
+ if (ret < 0) {
+ /* ENODEV reported after disconnect */
+ if (errno != ENODEV)
+ err("%s: get fifo status", t->filename);
+ } else if (ret) {
+ warn("%s: unclaimed = %d\n", t->filename, ret);
+ if (ioctl(fd, FUNCTIONFS_FIFO_FLUSH) < 0)
+ err("%s: fifo flush", t->filename);
+ }
+ }
+
+ if (close(fd) < 0)
+ err("%s: close", t->filename);
+
+ free(t->buf);
+ t->buf = NULL;
+}
+
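+/*
+ * Thread body: keep pumping data from t->in() to t->out() until EOF or an
+ * error other than EINTR/EAGAIN. cleanup_thread() is registered as a
+ * cancellation handler so the endpoint fd is closed (and its FIFO flushed
+ * for the data endpoints) and the buffer freed even on cancellation.
+ */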
+static void *start_thread_helper(void *arg)
+{
+ const char *name, *op, *in_name, *out_name;
+ struct thread *t = arg;
+ ssize_t ret;
+
+ info("%s: starts\n", t->filename);
+ in_name = t->in_name ? t->in_name : t->filename;
+ out_name = t->out_name ? t->out_name : t->filename;
+
+ pthread_cleanup_push(cleanup_thread, arg);
+
+ for (;;) {
+ pthread_testcancel();
+
+ ret = t->in(t, t->buf, t->buf_size);
+ if (ret > 0) {
+ ret = t->out(t, t->buf, t->buf_size);
+ name = out_name;
+ op = "write";
+ } else {
+ name = in_name;
+ op = "read";
+ }
+
+ if (ret > 0) {
+ /* nop */
+ } else if (!ret) {
+ debug("%s: %s: EOF", name, op);
+ break;
+ } else if (errno == EINTR || errno == EAGAIN) {
+ debug("%s: %s", name, op);
+ } else {
+ warn("%s: %s", name, op);
+ break;
+ }
+ }
+
+ pthread_cleanup_pop(1);
+
+ t->status = ret;
+ info("%s: ends\n", t->filename);
+ return NULL;
+}
+
+static void start_thread(struct thread *t)
+{
+ debug("%s: starting\n", t->filename);
+
+ die_on(pthread_create(&t->id, NULL, start_thread_helper, t) < 0,
+ "pthread_create(%s)", t->filename);
+}
+
+static void join_thread(struct thread *t)
+{
+ int ret = pthread_join(t->id, NULL);
+
+ if (ret < 0)
+ err("%s: joining thread", t->filename);
+ else
+ debug("%s: joined\n", t->filename);
+}
+
+
+static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes)
+{
+ return read(t->fd, buf, nbytes);
+}
+
+static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes)
+{
+ return write(t->fd, buf, nbytes);
+}
+
+
+/******************** Empty/Fill buffer routines ****************************/
+
+/* 0 -- stream of zeros, 1 -- i % 63, 2 -- pipe */
+enum pattern { PAT_ZERO, PAT_SEQ, PAT_PIPE };
+static enum pattern pattern;
+
+static ssize_t
+fill_in_buf(struct thread *ignore, void *buf, size_t nbytes)
+{
+ size_t i;
+ __u8 *p;
+
+ (void)ignore;
+
+ switch (pattern) {
+ case PAT_ZERO:
+ memset(buf, 0, nbytes);
+ break;
+
+ case PAT_SEQ:
+ for (p = buf, i = 0; i < nbytes; ++i, ++p)
+ *p = i % 63;
+ break;
+
+ case PAT_PIPE:
+ return fread(buf, 1, nbytes, stdin);
+ }
+
+ return nbytes;
+}
+
+static ssize_t
+empty_out_buf(struct thread *ignore, const void *buf, size_t nbytes)
+{
+ const __u8 *p;
+ __u8 expected;
+ ssize_t ret;
+ size_t len;
+
+ (void)ignore;
+
+ switch (pattern) {
+ case PAT_ZERO:
+ expected = 0;
+ for (p = buf, len = 0; len < nbytes; ++p, ++len)
+ if (*p)
+ goto invalid;
+ break;
+
+ case PAT_SEQ:
+ for (p = buf, len = 0; len < nbytes; ++p, ++len)
+ if (*p != len % 63) {
+ expected = len % 63;
+ goto invalid;
+ }
+ break;
+
+ case PAT_PIPE:
+ ret = fwrite(buf, nbytes, 1, stdout);
+ if (ret > 0)
+ fflush(stdout);
+ /* report the whole buffer as consumed on a successful write */
+ len = ret > 0 ? nbytes : 0;
+ break;
+
+invalid:
+ err("bad OUT byte %zd, expected %02x got %02x\n",
+ len, expected, *p);
+ for (p = buf, len = 0; len < nbytes; ++p, ++len) {
+ if (0 == (len % 32))
+ fprintf(stderr, "%4d:", len);
+ fprintf(stderr, " %02x", *p);
+ if (31 == (len % 32))
+ fprintf(stderr, "\n");
+ }
+ fflush(stderr);
+ errno = EILSEQ;
+ return -1;
+ }
+
+ return len;
+}
+
+
+/******************** Endpoints routines ************************************/
+
+static void handle_setup(const struct usb_ctrlrequest *setup)
+{
+ printf("bRequestType = %d\n", setup->bRequestType);
+ printf("bRequest = %d\n", setup->bRequest);
+ printf("wValue = %d\n", le16_to_cpu(setup->wValue));
+ printf("wIndex = %d\n", le16_to_cpu(setup->wIndex));
+ printf("wLength = %d\n", le16_to_cpu(setup->wLength));
+}
+
+static ssize_t
+ep0_consume(struct thread *ignore, const void *buf, size_t nbytes)
+{
+ static const char *const names[] = {
+ [FUNCTIONFS_BIND] = "BIND",
+ [FUNCTIONFS_UNBIND] = "UNBIND",
+ [FUNCTIONFS_ENABLE] = "ENABLE",
+ [FUNCTIONFS_DISABLE] = "DISABLE",
+ [FUNCTIONFS_SETUP] = "SETUP",
+ [FUNCTIONFS_SUSPEND] = "SUSPEND",
+ [FUNCTIONFS_RESUME] = "RESUME",
+ };
+
+ const struct usb_functionfs_event *event = buf;
+ size_t n;
+
+ (void)ignore;
+
+ for (n = nbytes / sizeof *event; n; --n, ++event)
+ switch (event->type) {
+ case FUNCTIONFS_BIND:
+ case FUNCTIONFS_UNBIND:
+ case FUNCTIONFS_ENABLE:
+ case FUNCTIONFS_DISABLE:
+ case FUNCTIONFS_SETUP:
+ case FUNCTIONFS_SUSPEND:
+ case FUNCTIONFS_RESUME:
+ printf("Event %s\n", names[event->type]);
+ if (event->type == FUNCTIONFS_SETUP)
+ handle_setup(&event->u.setup);
+ break;
+
+ default:
+ printf("Event %03u (unknown)\n", event->type);
+ }
+
+ return nbytes;
+}
+
+static void ep0_init(struct thread *t)
+{
+ ssize_t ret;
+
+ info("%s: writing descriptors\n", t->filename);
+ ret = write(t->fd, &descriptors, sizeof descriptors);
+ die_on(ret < 0, "%s: write: descriptors", t->filename);
+
+ info("%s: writing strings\n", t->filename);
+ ret = write(t->fd, &strings, sizeof strings);
+ die_on(ret < 0, "%s: write: strings", t->filename);
+}
+
+
+/******************** Main **************************************************/
+
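+/*
+ * ep0 is brought up first (ep0_init() writes the descriptors and then the
+ * strings); the data endpoints each get their own thread, while ep0 events
+ * are consumed from the main thread.
+ */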
+int main(void)
+{
+ unsigned i;
+
+ /* XXX TODO: Argument parsing missing */
+
+ init_thread(threads);
+ ep0_init(threads);
+
+ for (i = 1; i < sizeof threads / sizeof *threads; ++i)
+ init_thread(threads + i);
+
+ for (i = 1; i < sizeof threads / sizeof *threads; ++i)
+ start_thread(threads + i);
+
+ start_thread_helper(threads);
+
+ for (i = 1; i < sizeof threads / sizeof *threads; ++i)
+ join_thread(threads + i);
+
+ return 0;
+}
diff --git a/tools/usb/testusb.c b/tools/usb/testusb.c
new file mode 100644
index 00000000..f08e8946
--- /dev/null
+++ b/tools/usb/testusb.c
@@ -0,0 +1,547 @@
+/* $(CROSS_COMPILE)cc -Wall -Wextra -g -lpthread -o testusb testusb.c */
+
+/*
+ * Copyright (c) 2002 by David Brownell
+ * Copyright (c) 2010 by Samsung Electronics
+ * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * This program issues ioctls to perform the tests implemented by the
+ * kernel driver. It can generate a variety of transfer patterns; you
+ * should make sure to test both regular streaming and mixes of
+ * transfer sizes (including short transfers).
+ *
+ * For more information on how this can be used and on USB testing
+ * refer to <URL:http://www.linux-usb.org/usbtest/>.
+ */
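+
+/*
+ * Example invocations, with options as parsed in main() below:
+ *
+ * testusb -a (run every test on all recognized devices)
+ * testusb -D /proc/bus/usb/BBB/DDD -t 10 (run only test 10 on one device)
+ */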
+
+#include <stdio.h>
+#include <string.h>
+#include <ftw.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <errno.h>
+#include <limits.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <sys/ioctl.h>
+#include <linux/usbdevice_fs.h>
+
+/*-------------------------------------------------------------------------*/
+
+#define TEST_CASES 30
+
+// FIXME make these public somewhere; usbdevfs.h?
+
+struct usbtest_param {
+ // inputs
+ unsigned test_num; /* 0..(TEST_CASES-1) */
+ unsigned iterations;
+ unsigned length;
+ unsigned vary;
+ unsigned sglen;
+
+ // outputs
+ struct timeval duration;
+};
+#define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param)
+
+/*-------------------------------------------------------------------------*/
+
+/* #include <linux/usb_ch9.h> */
+
+#define USB_DT_DEVICE 0x01
+#define USB_DT_INTERFACE 0x04
+
+#define USB_CLASS_PER_INTERFACE 0 /* for DeviceClass */
+#define USB_CLASS_VENDOR_SPEC 0xff
+
+
+struct usb_device_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u16 bcdUSB;
+ __u8 bDeviceClass;
+ __u8 bDeviceSubClass;
+ __u8 bDeviceProtocol;
+ __u8 bMaxPacketSize0;
+ __u16 idVendor;
+ __u16 idProduct;
+ __u16 bcdDevice;
+ __u8 iManufacturer;
+ __u8 iProduct;
+ __u8 iSerialNumber;
+ __u8 bNumConfigurations;
+} __attribute__ ((packed));
+
+struct usb_interface_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+
+ __u8 bInterfaceNumber;
+ __u8 bAlternateSetting;
+ __u8 bNumEndpoints;
+ __u8 bInterfaceClass;
+ __u8 bInterfaceSubClass;
+ __u8 bInterfaceProtocol;
+ __u8 iInterface;
+} __attribute__ ((packed));
+
+enum usb_device_speed {
+ USB_SPEED_UNKNOWN = 0, /* enumerating */
+ USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
+ USB_SPEED_HIGH /* usb 2.0 */
+};
+
+/*-------------------------------------------------------------------------*/
+
+static char *speed (enum usb_device_speed s)
+{
+ switch (s) {
+ case USB_SPEED_UNKNOWN: return "unknown";
+ case USB_SPEED_LOW: return "low";
+ case USB_SPEED_FULL: return "full";
+ case USB_SPEED_HIGH: return "high";
+ default: return "??";
+ }
+}
+
+struct testdev {
+ struct testdev *next;
+ char *name;
+ pthread_t thread;
+ enum usb_device_speed speed;
+ unsigned ifnum : 8;
+ unsigned forever : 1;
+ int test;
+
+ struct usbtest_param param;
+};
+static struct testdev *testdevs;
+
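+/*
+ * Scan the descriptors following the device descriptor for a two-endpoint
+ * vendor-specific interface (the FunctionFS source/sink function) and
+ * return its bInterfaceNumber, or -1 if none is found before EOF.
+ */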
+static int testdev_ffs_ifnum(FILE *fd)
+{
+ union {
+ char buf[255];
+ struct usb_interface_descriptor intf;
+ } u;
+
+ for (;;) {
+ if (fread(u.buf, 1, 1, fd) != 1)
+ return -1;
+ if (fread(u.buf + 1, (unsigned char)u.buf[0] - 1, 1, fd) != 1)
+ return -1;
+
+ if (u.intf.bLength == sizeof u.intf
+ && u.intf.bDescriptorType == USB_DT_INTERFACE
+ && u.intf.bNumEndpoints == 2
+ && u.intf.bInterfaceClass == USB_CLASS_VENDOR_SPEC
+ && u.intf.bInterfaceSubClass == 0
+ && u.intf.bInterfaceProtocol == 0)
+ return (unsigned char)u.intf.bInterfaceNumber;
+ }
+}
+
+static int testdev_ifnum(FILE *fd)
+{
+ struct usb_device_descriptor dev;
+
+ if (fread(&dev, sizeof dev, 1, fd) != 1)
+ return -1;
+
+ if (dev.bLength != sizeof dev || dev.bDescriptorType != USB_DT_DEVICE)
+ return -1;
+
+ /* FX2 with (tweaked) bulksrc firmware */
+ if (dev.idVendor == 0x0547 && dev.idProduct == 0x1002)
+ return 0;
+
+ /*----------------------------------------------------*/
+
+ /* devices that start up using the EZ-USB default device and
+ * which we can use after loading simple firmware. hotplug
+ * can fxload it, and then run this test driver.
+ *
+ * we return false positives in two cases:
+ * - the device has a "real" driver (maybe usb-serial) that
+ * renumerates. the device should vanish quickly.
+ * - the device doesn't have the test firmware installed.
+ */
+
+ /* generic EZ-USB FX controller */
+ if (dev.idVendor == 0x0547 && dev.idProduct == 0x2235)
+ return 0;
+
+ /* generic EZ-USB FX2 controller */
+ if (dev.idVendor == 0x04b4 && dev.idProduct == 0x8613)
+ return 0;
+
+ /* CY3671 development board with EZ-USB FX */
+ if (dev.idVendor == 0x0547 && dev.idProduct == 0x0080)
+ return 0;
+
+ /* Keyspan 19Qi uses an21xx (original EZ-USB) */
+ if (dev.idVendor == 0x06cd && dev.idProduct == 0x010b)
+ return 0;
+
+ /*----------------------------------------------------*/
+
+ /* "gadget zero", Linux-USB test software */
+ if (dev.idVendor == 0x0525 && dev.idProduct == 0xa4a0)
+ return 0;
+
+ /* user mode subset of that */
+ if (dev.idVendor == 0x0525 && dev.idProduct == 0xa4a4)
+ return testdev_ffs_ifnum(fd);
+ /* return 0; */
+
+ /* iso version of usermode code */
+ if (dev.idVendor == 0x0525 && dev.idProduct == 0xa4a3)
+ return 0;
+
+ /* some GPL'd test firmware uses these IDs */
+
+ if (dev.idVendor == 0xfff0 && dev.idProduct == 0xfff0)
+ return 0;
+
+ /*----------------------------------------------------*/
+
+ /* iBOT2 high speed webcam */
+ if (dev.idVendor == 0x0b62 && dev.idProduct == 0x0059)
+ return 0;
+
+ /*----------------------------------------------------*/
+
+ /* the FunctionFS gadget can have the source/sink interface
+ * anywhere. We look for an interface descriptor that matches
+ * what we expect. We ignore configurations, though. */
+
+ if (dev.idVendor == 0x0525 && dev.idProduct == 0xa4ac
+ && (dev.bDeviceClass == USB_CLASS_PER_INTERFACE
+ || dev.bDeviceClass == USB_CLASS_VENDOR_SPEC))
+ return testdev_ffs_ifnum(fd);
+
+ return -1;
+}
+
+static int find_testdev(const char *name, const struct stat *sb, int flag)
+{
+ FILE *fd;
+ int ifnum;
+ struct testdev *entry;
+
+ (void)sb; /* unused */
+
+ if (flag != FTW_F)
+ return 0;
+ /* ignore /proc/bus/usb/{devices,drivers} */
+ if (strrchr(name, '/')[1] == 'd')
+ return 0;
+
+ fd = fopen(name, "rb");
+ if (!fd) {
+ perror(name);
+ return 0;
+ }
+
+ ifnum = testdev_ifnum(fd);
+ fclose(fd);
+ if (ifnum < 0)
+ return 0;
+
+ entry = calloc(1, sizeof *entry);
+ if (!entry)
+ goto nomem;
+
+ entry->name = strdup(name);
+ if (!entry->name) {
+ free(entry);
+nomem:
+ perror("malloc");
+ return 0;
+ }
+
+ entry->ifnum = ifnum;
+
+ /* FIXME ask usbfs what speed; update USBDEVFS_CONNECTINFO so
+ * it tells about high speed etc */
+
+ fprintf(stderr, "%s speed\t%s\t%u\n",
+ speed(entry->speed), entry->name, entry->ifnum);
+
+ entry->next = testdevs;
+ testdevs = entry;
+ return 0;
+}
+
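+/*
+ * Forward a request to the kernel driver bound to interface "ifno" of the
+ * opened usbfs device via the USBDEVFS_IOCTL pass-through; this is how
+ * USBTEST_REQUEST reaches the usbtest driver.
+ */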
+static int
+usbdev_ioctl (int fd, int ifno, unsigned request, void *param)
+{
+ struct usbdevfs_ioctl wrapper;
+
+ wrapper.ifno = ifno;
+ wrapper.ioctl_code = request;
+ wrapper.data = param;
+
+ return ioctl (fd, USBDEVFS_IOCTL, &wrapper);
+}
+
+static void *handle_testdev (void *arg)
+{
+ struct testdev *dev = arg;
+ int fd, i;
+ int status;
+
+ if ((fd = open (dev->name, O_RDWR)) < 0) {
+ perror ("can't open dev file r/w");
+ return 0;
+ }
+
+restart:
+ for (i = 0; i < TEST_CASES; i++) {
+ if (dev->test != -1 && dev->test != i)
+ continue;
+ dev->param.test_num = i;
+
+ status = usbdev_ioctl (fd, dev->ifnum,
+ USBTEST_REQUEST, &dev->param);
+ if (status < 0 && errno == EOPNOTSUPP)
+ continue;
+
+ /* FIXME need a "syslog it" option for background testing */
+
+ /* NOTE: each thread emits complete lines; no fragments! */
+ if (status < 0) {
+ char buf [80];
+ int err = errno;
+
+ if (strerror_r (errno, buf, sizeof buf)) {
+ snprintf (buf, sizeof buf, "error %d", err);
+ errno = err;
+ }
+ printf ("%s test %d --> %d (%s)\n",
+ dev->name, i, errno, buf);
+ } else
+ printf ("%s test %d, %4d.%.06d secs\n", dev->name, i,
+ (int) dev->param.duration.tv_sec,
+ (int) dev->param.duration.tv_usec);
+
+ fflush (stdout);
+ }
+ if (dev->forever)
+ goto restart;
+
+ close (fd);
+ return arg;
+}
+
+static const char *usbfs_dir_find(void)
+{
+ static char usbfs_path_0[] = "/dev/usb/devices";
+ static char usbfs_path_1[] = "/proc/bus/usb/devices";
+
+ static char *const usbfs_paths[] = {
+ usbfs_path_0, usbfs_path_1
+ };
+
+ static char *const *
+ end = usbfs_paths + sizeof usbfs_paths / sizeof *usbfs_paths;
+
+ char *const *it = usbfs_paths;
+ do {
+ int fd = open(*it, O_RDONLY);
+ if (fd >= 0) {
+ close(fd);
+ strrchr(*it, '/')[0] = '\0';
+ return *it;
+ }
+ } while (++it != end);
+
+ return NULL;
+}
+
+static int parse_num(unsigned *num, const char *str)
+{
+ unsigned long val;
+ char *end;
+
+ errno = 0;
+ val = strtoul(str, &end, 0);
+ if (errno || *end || val > UINT_MAX)
+ return -1;
+ *num = val;
+ return 0;
+}
+
+int main (int argc, char **argv)
+{
+
+ int c;
+ struct testdev *entry;
+ char *device;
+ const char *usbfs_dir = NULL;
+ int all = 0, forever = 0, not = 0;
+ int test = -1 /* all */;
+ struct usbtest_param param;
+
+ /* pick defaults that work with all speeds, without short packets.
+ *
+ * Best per-frame data rates:
+ * high speed, bulk 512 * 13 * 8 = 53248
+ * interrupt 1024 * 3 * 8 = 24576
+ * full speed, bulk/intr 64 * 19 = 1216
+ * interrupt 64 * 1 = 64
+ * low speed, interrupt 8 * 1 = 8
+ */
+ param.iterations = 1000;
+ param.length = 512;
+ param.vary = 512;
+ param.sglen = 32;
+
+ /* for easy use when hotplugging */
+ device = getenv ("DEVICE");
+
+ while ((c = getopt (argc, argv, "D:aA:c:g:hlns:t:v:")) != EOF)
+ switch (c) {
+ case 'D': /* device, if only one */
+ device = optarg;
+ continue;
+ case 'A': /* use all devices with specified usbfs dir */
+ usbfs_dir = optarg;
+ /* FALL THROUGH */
+ case 'a': /* use all devices */
+ device = NULL;
+ all = 1;
+ continue;
+ case 'c': /* count iterations */
+ if (parse_num(&param.iterations, optarg))
+ goto usage;
+ continue;
+ case 'g': /* scatter/gather entries */
+ if (parse_num(&param.sglen, optarg))
+ goto usage;
+ continue;
+ case 'l': /* loop forever */
+ forever = 1;
+ continue;
+ case 'n': /* no test running! */
+ not = 1;
+ continue;
+ case 's': /* size of packet */
+ if (parse_num(&param.length, optarg))
+ goto usage;
+ continue;
+ case 't': /* run just one test */
+ test = atoi (optarg);
+ if (test < 0)
+ goto usage;
+ continue;
+ case 'v': /* vary packet size by ... */
+ if (parse_num(&param.vary, optarg))
+ goto usage;
+ continue;
+ case '?':
+ case 'h':
+ default:
+usage:
+ fprintf (stderr, "usage: %s [-n] [-D dev | -a | -A usbfs-dir]\n"
+ "\t[-c iterations] [-t testnum]\n"
+ "\t[-s packetsize] [-g sglen] [-v vary]\n",
+ argv [0]);
+ return 1;
+ }
+ if (optind != argc)
+ goto usage;
+ if (!all && !device) {
+ fprintf (stderr, "must specify '-a' or '-D dev', "
+ "or DEVICE=/proc/bus/usb/BBB/DDD in env\n");
+ goto usage;
+ }
+
+ /* Find usbfs mount point */
+ if (!usbfs_dir) {
+ usbfs_dir = usbfs_dir_find();
+ if (!usbfs_dir) {
+ fputs ("usbfs files are missing\n", stderr);
+ return -1;
+ }
+ }
+
+ /* collect and list the test devices */
+ if (ftw (usbfs_dir, find_testdev, 3) != 0) {
+ fputs ("ftw failed; is usbfs missing?\n", stderr);
+ return -1;
+ }
+
+ /* quit, run single test, or create test threads */
+ if (!testdevs && !device) {
+ fputs ("no test devices recognized\n", stderr);
+ return -1;
+ }
+ if (not)
+ return 0;
+ if (testdevs && testdevs->next == 0 && !device)
+ device = testdevs->name;
+ for (entry = testdevs; entry; entry = entry->next) {
+ int status;
+
+ entry->param = param;
+ entry->forever = forever;
+ entry->test = test;
+
+ if (device) {
+ if (strcmp (entry->name, device))
+ continue;
+ return handle_testdev (entry) != entry;
+ }
+ status = pthread_create (&entry->thread, 0, handle_testdev, entry);
+ if (status) {
+ perror ("pthread_create");
+ continue;
+ }
+ }
+ if (device) {
+ struct testdev dev;
+
+ /* kernel can recognize test devices we don't */
+ fprintf (stderr, "%s: %s may see only control tests\n",
+ argv [0], device);
+
+ memset (&dev, 0, sizeof dev);
+ dev.name = device;
+ dev.param = param;
+ dev.forever = forever;
+ dev.test = test;
+ return handle_testdev (&dev) != &dev;
+ }
+
+ /* wait for tests to complete */
+ for (entry = testdevs; entry; entry = entry->next) {
+ void *retval;
+
+ if (pthread_join (entry->thread, &retval))
+ perror ("pthread_join");
+ /* testing errors discarded! */
+ }
+
+ return 0;
+}